id | content
---|---
8710
|
import strawberryfields as sf
from strawberryfields import ops
from strawberryfields.utils import random_interferometer
from strawberryfields.apps import data, sample, subgraph, plot
import plotly
import networkx as nx
import numpy as np
class GBS:
    def __init__(self, samples=None, min_pho=16, max_pho=30, subgraph_size=8, max_count=2000):
        self.samples = samples if samples is not None else []
self.min_pho = min_pho
self.max_pho = max_pho
self.subgraph_size = subgraph_size
self.max_count = max_count
def graphDensity(self, samples, min_pho, max_pho, subgraph_size, max_count):
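        # NOTE: ``pl_graph`` below is assumed to be a module-level networkx.Graph
        # (the graph being searched); it is not defined in this snippet.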
dense = subgraph.search(samples, pl_graph, subgraph_size, min_pho, max_count=max_count)
dense_freq = []
for k in range(subgraph_size, min_pho+1):
dense_freq.append([k,len(dense[k])])
return dense, dense_freq
def graphFreqScore(self, d_freqs, max_freq):
x,y = [], []
for i in range(len(d_freqs)):
for j in range(len(d_freqs[i])):
n,f = d_freqs[i][j][0],d_freqs[i][j][1]
x.append(n*f)
            N = len(d_freqs[i])
y.append((1/max_freq)*(np.sum(x)/N))
x = []
min_y = np.min(y)
y = [min_y/x for x in y]
return y, y.index(max(y))
def runJob(self, eng):
num_subsystem = 8
prog = sf.Program(num_subsystem, name="remote_job")
U = random_interferometer(4)
with prog.context as q:
# Initial squeezed states
# Allowed values are r=1.0 or r=0.0
ops.S2gate(1.0) | (q[0], q[4])
ops.S2gate(1.0) | (q[1], q[5])
ops.S2gate(1.0) | (q[3], q[7])
# Interferometer on the signal modes (0-3)
ops.Interferometer(U) | (q[0], q[1], q[2], q[3])
ops.BSgate(0.543, 0.123) | (q[2], q[0])
ops.Rgate(0.453) | q[1]
ops.MZgate(0.65, -0.54) | (q[2], q[3])
# *Same* interferometer on the idler modes (4-7)
ops.Interferometer(U) | (q[4], q[5], q[6], q[7])
ops.BSgate(0.543, 0.123) | (q[6], q[4])
ops.Rgate(0.453) | q[5]
ops.MZgate(0.65, -0.54) | (q[6], q[7])
ops.MeasureFock() | q
        results = eng.run(prog, shots=10)
# state = results.state
# measurements = results.samples
return results.samples
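# A minimal usage sketch, assuming access to Xanadu's remote "X8" device and a
# module-level ``pl_graph`` as noted above (engine name and parameters are
# illustrative):
#
#   eng = sf.RemoteEngine("X8")
#   gbs = GBS(min_pho=16, max_pho=30, subgraph_size=8, max_count=2000)
#   gbs.samples = gbs.runJob(eng)
#   dense, dense_freq = gbs.graphDensity(gbs.samples, gbs.min_pho, gbs.max_pho,
#                                        gbs.subgraph_size, gbs.max_count)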
|
8741
|
import boto3
from logger import logger
class States:
def __init__(self, boto3_session=None):
self.boto3_session = boto3_session or boto3.Session()
self.client = self.boto3_session.client('stepfunctions')
def fail(self, task_token, error, cause):
params = dict(taskToken=task_token, error=error, cause=cause)
logger.info('SEND TASK FAILURE %s', logger.json(params))
return self.client.send_task_failure(**params)
def heartbeat(self, task_token):
params = dict(taskToken=task_token)
logger.info('SEND TASK HEARTBEAT %s', logger.json(params))
return self.client.send_task_heartbeat(**params)
def succeed(self, task_token, output):
params = dict(taskToken=task_token, output=output)
logger.info('SEND TASK SUCCESS %s', logger.json(params))
return self.client.send_task_success(**params)
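# A minimal usage sketch (assumes AWS credentials and a task token delivered by
# a Step Functions callback; values below are illustrative):
#
#   states = States()
#   states.heartbeat(task_token)
#   states.succeed(task_token, output='{"status": "done"}')
#   # or, on failure:
#   states.fail(task_token, error='ProcessingError', cause='details of the failure')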
|
8743
|
from typing import Union
import pandas as pd
from kts.core.frame import KTSFrame
AnyFrame = Union[pd.DataFrame, KTSFrame]
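# Illustrative use of the alias in a type hint (the helper below is hypothetical,
# not part of kts):
#
#   def head(frame: AnyFrame, n: int = 5) -> AnyFrame:
#       return frame.head(n)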
|
8782
|
import pytz
from rest_auth.serializers import TokenSerializer
from rest_framework.authtoken.models import Token
from rest_framework.exceptions import ValidationError
from rest_framework.fields import (
CharField,
CurrentUserDefault,
HiddenField,
UUIDField,
ChoiceField,
)
from rest_framework.serializers import ModelSerializer, Serializer
from rest_framework.validators import UniqueValidator
from django.contrib.auth.hashers import check_password
from open.users.models import User
class SimpleUserReadSerializer(ModelSerializer):
class Meta:
model = User
fields = (
"name",
"uuid",
)
class UserReadSerializer(ModelSerializer):
class Meta:
model = User
fields = (
"name",
"uuid",
"signed_up_from",
"date_joined",
"username",
"email",
"created",
"modified",
)
class UserTokenSerializer(TokenSerializer):
user = UserReadSerializer()
class Meta:
model = Token
fields = ["key", "user"]
# TODO - this view and serializer are on hold until registration is figured out (later)
class UserCreateSerializer(ModelSerializer):
username = CharField(validators=[UniqueValidator(queryset=User.objects.all())])
# need to make email optional ... prob should think through signup form a little
email = CharField(
validators=[UniqueValidator(queryset=User.objects.all())], required=False
)
password = CharField(write_only=True, min_length=8)
signed_up_from = CharField(
write_only=True, min_length=8, required=False, default="", trim_whitespace=True
)
timezone_string = ChoiceField(
choices=pytz.all_timezones, required=False, default="US/Eastern"
)
class Meta:
model = User
fields = ["username", "email", "password", "signed_up_from", "timezone_string"]
# TODO test - does this work with just username / no email, etc.
def create(self, validated_data):
username = validated_data.pop("username")
password = validated_data.pop("password")
is_betterself_user = False
if validated_data["signed_up_from"] == "betterself":
is_betterself_user = True
validated_data["is_betterself_user"] = is_betterself_user
user = User.objects.create(username=username, **validated_data)
user.set_password(password)
user.save()
return user
class UserDeleteSerializer(Serializer):
    # Most of this is redundant; a separate validation step isn't strictly needed,
    # but it guards against someone deleting their account by mistake.
password = CharField()
user = HiddenField(default=CurrentUserDefault())
uuid = UUIDField()
def validate(self, data):
user = data["user"]
validated_password = check_password(data["password"], user.password)
if not validated_password:
raise ValidationError("Invalid Password Entered")
validated_uuid = str(user.uuid) == str(data["uuid"])
if not validated_uuid:
raise ValidationError("Invalid UUID", str(user.uuid))
validate_user = user.username != "<EMAIL>"
if not validate_user:
raise ValidationError(
f"This is a protected user and cannot be deleted. {user.username}"
)
return data
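# Usage sketch (assumes a configured Django project with this app installed;
# the field values are illustrative):
#
#   serializer = UserCreateSerializer(data={
#       "username": "alice",
#       "password": "correct-horse-battery",
#       "signed_up_from": "betterself",
#   })
#   serializer.is_valid(raise_exception=True)
#   user = serializer.save()   # runs create() above and hashes the password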
|
8784
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from .discriminator import Discriminator
from .identity import Identity
class MultiScaleDiscriminator(nn.Module):
def __init__(self):
super(MultiScaleDiscriminator, self).__init__()
self.discriminators = nn.ModuleList(
[Discriminator() for _ in range(3)]
)
self.pooling = nn.ModuleList(
[Identity()] +
[nn.AvgPool1d(kernel_size=4, stride=2, padding=2) for _ in range(1, 3)]
)
def forward(self, x):
ret = list()
for pool, disc in zip(self.pooling, self.discriminators):
x = pool(x)
ret.append(disc(x))
return ret # [(feat, score), (feat, score), (feat, score)]
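# Shape sketch, assuming Discriminator consumes a mono waveform batch of shape
# [B, 1, T] and returns a (feature_maps, score) pair (as in MelGAN-style vocoders):
#
#   msd = MultiScaleDiscriminator()
#   outputs = msd(torch.randn(4, 1, 8192))
#   # three (feat, score) pairs, taken at 1x, 2x and 4x temporal downsampling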
|
8791
|
from typing import Optional, Dict, Any, List, Union
from allennlp.common.checks import ConfigurationError
class MetricTracker:
"""
This class tracks a metric during training for the dual purposes of early stopping
and for knowing whether the current value is the best so far. It mimics the PyTorch
`state_dict` / `load_state_dict` interface, so that it can be checkpointed along with
your model and optimizer.
Some metrics improve by increasing; others by decreasing. You can provide a
`metric_name` that starts with "+" to indicate an increasing metric, or "-"
to indicate a decreasing metric.
# Parameters
metric_name : `Union[str, List[str]]`
Specifies the metric or metrics to track. Metric names have to start with
"+" for increasing metrics or "-" for decreasing ones. If you specify more
        than one, it tracks the sum of the increasing metrics minus the sum
of the decreasing metrics.
patience : `int`, optional (default = `None`)
If provided, then `should_stop_early()` returns True if we go this
many epochs without seeing a new best value.
"""
def __init__(
self,
metric_name: Union[str, List[str]],
patience: Optional[int] = None,
) -> None:
self._patience = patience
self._best_so_far: Optional[float] = None
self._epochs_with_no_improvement = 0
self._is_best_so_far = True
self._epoch_number = 0
self.best_epoch: Optional[int] = None
self.best_epoch_metrics: Dict[str, float] = {}
if isinstance(metric_name, str):
metric_name = [metric_name]
self.tracked_metrics = []
for name in metric_name:
if name.startswith("+"):
self.tracked_metrics.append((1.0, name[1:]))
elif name.startswith("-"):
self.tracked_metrics.append((-1.0, name[1:]))
else:
raise ConfigurationError("metric_name must start with + or -")
def clear(self) -> None:
"""
Clears out the tracked metrics, but keeps the patience
"""
self._best_so_far = None
self._epochs_with_no_improvement = 0
self._is_best_so_far = True
self._epoch_number = 0
self.best_epoch = None
self.best_epoch_metrics.clear()
def state_dict(self) -> Dict[str, Any]:
"""
A `Trainer` can use this to serialize the state of the metric tracker.
"""
return {
"best_so_far": self._best_so_far,
"epochs_with_no_improvement": self._epochs_with_no_improvement,
"is_best_so_far": self._is_best_so_far,
"epoch_number": self._epoch_number,
"best_epoch": self.best_epoch,
"best_epoch_metrics": self.best_epoch_metrics,
}
def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
"""
A `Trainer` can use this to hydrate a metric tracker from a serialized state.
"""
self._best_so_far = state_dict["best_so_far"]
self._epochs_with_no_improvement = state_dict["epochs_with_no_improvement"]
self._is_best_so_far = state_dict["is_best_so_far"]
self._epoch_number = state_dict["epoch_number"]
self.best_epoch = state_dict["best_epoch"]
# Even though we don't promise backwards compatibility for the --recover flag,
# it's particularly easy and harmless to provide it here, so we do it.
self.best_epoch_metrics = state_dict.get("best_epoch_metrics", {})
def add_metrics(self, metrics: Dict[str, float]) -> None:
"""
Record a new value of the metric and update the various things that depend on it.
"""
combined_score = self.combined_score(metrics)
new_best = (self._best_so_far is None) or (combined_score > self._best_so_far)
if new_best:
self._best_so_far = combined_score
self._epochs_with_no_improvement = 0
self._is_best_so_far = True
self.best_epoch = self._epoch_number
else:
self._epochs_with_no_improvement += 1
self._is_best_so_far = False
self._epoch_number += 1
def is_best_so_far(self) -> bool:
"""
Returns true if the most recent value of the metric is the best so far.
"""
return self._is_best_so_far
def should_stop_early(self) -> bool:
"""
Returns true if improvement has stopped for long enough.
"""
if self._patience is None:
return False
else:
return self._epochs_with_no_improvement >= self._patience
def combined_score(self, metrics: Dict[str, float]) -> float:
try:
return sum(
factor * metrics[metric_name] for factor, metric_name in self.tracked_metrics
)
except KeyError as e:
raise ConfigurationError(
f"You configured the trainer to use the {e.args[0]} "
"metric for early stopping, but the model did not produce that metric."
)
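# A short, self-contained usage sketch (the loss values are illustrative):
if __name__ == "__main__":
    # Track a decreasing validation loss with a patience of two epochs.
    tracker = MetricTracker(metric_name="-loss", patience=2)
    for loss in [0.9, 0.7, 0.8, 0.78, 0.77]:
        tracker.add_metrics({"loss": loss})
        if tracker.should_stop_early():
            break
    print("best epoch:", tracker.best_epoch)  # -> 1 (the epoch with loss 0.7)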
|
8798
|
import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer
l1tGct = DQMEDAnalyzer('L1TGCT',
gctCentralJetsSource = cms.InputTag("gctDigis","cenJets"),
gctForwardJetsSource = cms.InputTag("gctDigis","forJets"),
gctTauJetsSource = cms.InputTag("gctDigis","tauJets"),
gctIsoTauJetsSource = cms.InputTag("gctDigis","fake"),
gctEnergySumsSource = cms.InputTag("gctDigis"),
gctIsoEmSource = cms.InputTag("gctDigis","isoEm"),
gctNonIsoEmSource = cms.InputTag("gctDigis","nonIsoEm"),
monitorDir = cms.untracked.string("L1T/L1TGCT"),
verbose = cms.untracked.bool(False),
stage1_layer2_ = cms.bool(False),
DQMStore = cms.untracked.bool(True),
disableROOToutput = cms.untracked.bool(True),
filterTriggerType = cms.int32(1)
)
|
8804
|
from setuptools import setup
setup(
name='modestpy',
version='0.1',
description='FMI-compliant model identification package',
url='https://github.com/sdu-cfei/modest-py',
keywords='fmi fmu optimization model identification estimation',
author='<NAME>, Center for Energy Informatics SDU',
author_email='<EMAIL>, <EMAIL>',
license='BSD',
platforms=['Windows', 'Linux'],
packages=[
'modestpy',
'modestpy.estim',
'modestpy.estim.ga_parallel',
'modestpy.estim.ga',
'modestpy.estim.ps',
'modestpy.estim.scipy',
'modestpy.fmi',
'modestpy.utilities',
'modestpy.test'],
include_package_data=True,
install_requires=[
'fmpy[complete]',
'scipy',
'pandas',
'matplotlib',
'numpy',
'pyDOE',
'modestga'
],
classifiers=[
'Programming Language :: Python :: 3'
]
)
|
8810
|
import copy
import inspect
import json
import logging
import pytest
import re
import os
import shutil
import subprocess
import time
from datetime import datetime, timedelta
from configparser import ConfigParser, ExtendedInterpolation
from typing import Dict, List, Optional
from pyhttpd.certs import CertificateSpec
from .md_cert_util import MDCertUtil
from pyhttpd.env import HttpdTestSetup, HttpdTestEnv
from pyhttpd.result import ExecResult
log = logging.getLogger(__name__)
class MDTestSetup(HttpdTestSetup):
def __init__(self, env: 'HttpdTestEnv'):
super().__init__(env=env)
def make(self):
super().make(add_modules=["proxy_connect", "md"])
if "pebble" == self.env.acme_server:
self._make_pebble_conf()
def _make_pebble_conf(self):
our_dir = os.path.dirname(inspect.getfile(MDTestSetup))
conf_src_dir = os.path.join(our_dir, 'pebble')
conf_dest_dir = os.path.join(self.env.gen_dir, 'pebble')
if not os.path.exists(conf_dest_dir):
os.makedirs(conf_dest_dir)
for name in os.listdir(conf_src_dir):
src_path = os.path.join(conf_src_dir, name)
m = re.match(r'(.+).template', name)
if m:
self._make_template(src_path, os.path.join(conf_dest_dir, m.group(1)))
elif os.path.isfile(src_path):
shutil.copy(src_path, os.path.join(conf_dest_dir, name))
class MDTestEnv(HttpdTestEnv):
MD_S_UNKNOWN = 0
MD_S_INCOMPLETE = 1
MD_S_COMPLETE = 2
MD_S_EXPIRED = 3
MD_S_ERROR = 4
EMPTY_JOUT = {'status': 0, 'output': []}
DOMAIN_SUFFIX = "%d.org" % time.time()
LOG_FMT_TIGHT = '%(levelname)s: %(message)s'
@classmethod
def get_acme_server(cls):
return os.environ['ACME'] if 'ACME' in os.environ else "pebble"
@classmethod
def has_acme_server(cls):
return cls.get_acme_server() != 'none'
@classmethod
def has_acme_eab(cls):
return cls.get_acme_server() == 'pebble'
@classmethod
def is_pebble(cls) -> bool:
return cls.get_acme_server() == 'pebble'
@classmethod
def lacks_ocsp(cls):
return cls.is_pebble()
def __init__(self, pytestconfig=None, setup_dirs=True):
super().__init__(pytestconfig=pytestconfig,
local_dir=os.path.dirname(inspect.getfile(MDTestEnv)),
interesting_modules=["md"])
self._acme_server = self.get_acme_server()
self._acme_tos = "accepted"
self._acme_ca_pemfile = os.path.join(self.gen_dir, "apache/acme-ca.pem")
if "pebble" == self._acme_server:
self._acme_url = "https://localhost:14000/dir"
self._acme_eab_url = "https://localhost:14001/dir"
elif "boulder" == self._acme_server:
self._acme_url = "http://localhost:4001/directory"
self._acme_eab_url = None
else:
raise Exception(f"unknown ACME server type: {self._acme_server}")
self._acme_server_down = False
self._acme_server_ok = False
self._a2md_bin = os.path.join(self.bin_dir, 'a2md')
self._default_domain = f"test1.{self.http_tld}"
self._store_dir = "./md"
self.set_store_dir_default()
self.add_cert_specs([
CertificateSpec(domains=[f"expired.{self._http_tld}"],
valid_from=timedelta(days=-100),
valid_to=timedelta(days=-10)),
CertificateSpec(domains=["localhost"], key_type='rsa2048'),
])
self.httpd_error_log.set_ignored_lognos([
#"AH10045", # mod_md complains that there is no vhost for an MDomain
"AH10105", # mod_md does not find a vhost with SSL enabled for an MDomain
"AH10085" # mod_ssl complains about fallback certificates
])
if self.lacks_ocsp():
self.httpd_error_log.set_ignored_patterns([
re.compile(r'.*certificate with serial \S+ has no OCSP responder URL.*'),
])
if setup_dirs:
self._setup = MDTestSetup(env=self)
self._setup.make()
self.issue_certs()
self.clear_store()
def set_store_dir_default(self):
dirpath = "md"
if self.httpd_is_at_least("2.5.0"):
dirpath = os.path.join("state", dirpath)
self.set_store_dir(dirpath)
def set_store_dir(self, dirpath):
self._store_dir = os.path.join(self.server_dir, dirpath)
if self.acme_url:
self.a2md_stdargs([self.a2md_bin, "-a", self.acme_url, "-d", self._store_dir, "-C", self.acme_ca_pemfile, "-j"])
self.a2md_rawargs([self.a2md_bin, "-a", self.acme_url, "-d", self._store_dir, "-C", self.acme_ca_pemfile])
def get_apxs_var(self, name: str) -> str:
p = subprocess.run([self._apxs, "-q", name], capture_output=True, text=True)
if p.returncode != 0:
return ""
return p.stdout.strip()
@property
def acme_server(self):
return self._acme_server
@property
def acme_url(self):
return self._acme_url
@property
def acme_tos(self):
return self._acme_tos
@property
def a2md_bin(self):
return self._a2md_bin
@property
def acme_ca_pemfile(self):
return self._acme_ca_pemfile
@property
def store_dir(self):
return self._store_dir
def get_request_domain(self, request):
return "%s-%s" % (re.sub(r'[_]', '-', request.node.originalname), MDTestEnv.DOMAIN_SUFFIX)
def get_method_domain(self, method):
return "%s-%s" % (re.sub(r'[_]', '-', method.__name__.lower()), MDTestEnv.DOMAIN_SUFFIX)
def get_module_domain(self, module):
return "%s-%s" % (re.sub(r'[_]', '-', module.__name__.lower()), MDTestEnv.DOMAIN_SUFFIX)
def get_class_domain(self, c):
return "%s-%s" % (re.sub(r'[_]', '-', c.__name__.lower()), MDTestEnv.DOMAIN_SUFFIX)
# --------- cmd execution ---------
_a2md_args = []
_a2md_args_raw = []
def a2md_stdargs(self, args):
self._a2md_args = [] + args
def a2md_rawargs(self, args):
self._a2md_args_raw = [] + args
def a2md(self, args, raw=False) -> ExecResult:
preargs = self._a2md_args
if raw:
preargs = self._a2md_args_raw
log.debug("running: {0} {1}".format(preargs, args))
return self.run(preargs + args)
def check_acme(self):
if self._acme_server_ok:
return True
if self._acme_server_down:
pytest.skip(msg="ACME server not running")
return False
if self.is_live(self.acme_url, timeout=timedelta(seconds=0.5)):
self._acme_server_ok = True
return True
else:
self._acme_server_down = True
pytest.fail(msg="ACME server not running", pytrace=False)
return False
def get_ca_pem_file(self, hostname: str) -> Optional[str]:
pem_file = super().get_ca_pem_file(hostname)
if pem_file is None:
pem_file = self.acme_ca_pemfile
return pem_file
# --------- access local store ---------
def purge_store(self):
log.debug("purge store dir: %s" % self._store_dir)
assert len(self._store_dir) > 1
if os.path.exists(self._store_dir):
shutil.rmtree(self._store_dir, ignore_errors=False)
os.makedirs(self._store_dir)
def clear_store(self):
log.debug("clear store dir: %s" % self._store_dir)
assert len(self._store_dir) > 1
if not os.path.exists(self._store_dir):
os.makedirs(self._store_dir)
for dirpath in ["challenges", "tmp", "archive", "domains", "accounts", "staging", "ocsp"]:
shutil.rmtree(os.path.join(self._store_dir, dirpath), ignore_errors=True)
def clear_ocsp_store(self):
assert len(self._store_dir) > 1
dirpath = os.path.join(self._store_dir, "ocsp")
log.debug("clear ocsp store dir: %s" % dir)
if os.path.exists(dirpath):
shutil.rmtree(dirpath, ignore_errors=True)
def authz_save(self, name, content):
dirpath = os.path.join(self._store_dir, 'staging', name)
os.makedirs(dirpath)
open(os.path.join(dirpath, 'authz.json'), "w").write(content)
def path_store_json(self):
return os.path.join(self._store_dir, 'md_store.json')
def path_account(self, acct):
return os.path.join(self._store_dir, 'accounts', acct, 'account.json')
def path_account_key(self, acct):
return os.path.join(self._store_dir, 'accounts', acct, 'account.pem')
def store_domains(self):
return os.path.join(self._store_dir, 'domains')
def store_archives(self):
return os.path.join(self._store_dir, 'archive')
def store_stagings(self):
return os.path.join(self._store_dir, 'staging')
def store_challenges(self):
return os.path.join(self._store_dir, 'challenges')
def store_domain_file(self, domain, filename):
return os.path.join(self.store_domains(), domain, filename)
def store_archived_file(self, domain, version, filename):
return os.path.join(self.store_archives(), "%s.%d" % (domain, version), filename)
def store_staged_file(self, domain, filename):
return os.path.join(self.store_stagings(), domain, filename)
def path_fallback_cert(self, domain):
return os.path.join(self._store_dir, 'domains', domain, 'fallback-pubcert.pem')
def path_job(self, domain):
return os.path.join(self._store_dir, 'staging', domain, 'job.json')
def replace_store(self, src):
shutil.rmtree(self._store_dir, ignore_errors=False)
shutil.copytree(src, self._store_dir)
def list_accounts(self):
return os.listdir(os.path.join(self._store_dir, 'accounts'))
def check_md(self, domain, md=None, state=-1, ca=None, protocol=None, agreement=None, contacts=None):
domains = None
if isinstance(domain, list):
domains = domain
domain = domains[0]
if md:
domain = md
path = self.store_domain_file(domain, 'md.json')
with open(path) as f:
md = json.load(f)
assert md
if domains:
assert md['domains'] == domains
if state >= 0:
assert md['state'] == state
if ca:
assert md['ca']['url'] == ca
if protocol:
assert md['ca']['proto'] == protocol
if agreement:
assert md['ca']['agreement'] == agreement
if contacts:
assert md['contacts'] == contacts
def pkey_fname(self, pkeyspec=None):
if pkeyspec and not re.match(r'^rsa( ?\d+)?$', pkeyspec.lower()):
return "privkey.{0}.pem".format(pkeyspec)
return 'privkey.pem'
def cert_fname(self, pkeyspec=None):
if pkeyspec and not re.match(r'^rsa( ?\d+)?$', pkeyspec.lower()):
return "pubcert.{0}.pem".format(pkeyspec)
return 'pubcert.pem'
def check_md_complete(self, domain, pkey=None):
md = self.get_md_status(domain)
assert md
assert 'state' in md, "md is unexpected: {0}".format(md)
assert md['state'] is MDTestEnv.MD_S_COMPLETE, "unexpected state: {0}".format(md['state'])
assert os.path.isfile(self.store_domain_file(domain, self.pkey_fname(pkey)))
assert os.path.isfile(self.store_domain_file(domain, self.cert_fname(pkey)))
def check_md_credentials(self, domain):
if isinstance(domain, list):
domains = domain
domain = domains[0]
else:
domains = [domain]
# check private key, validate certificate, etc
MDCertUtil.validate_privkey(self.store_domain_file(domain, 'privkey.pem'))
cert = MDCertUtil(self.store_domain_file(domain, 'pubcert.pem'))
cert.validate_cert_matches_priv_key(self.store_domain_file(domain, 'privkey.pem'))
# check SANs and CN
assert cert.get_cn() == domain
# compare lists twice in opposite directions: SAN may not respect ordering
san_list = list(cert.get_san_list())
assert len(san_list) == len(domains)
assert set(san_list).issubset(domains)
assert set(domains).issubset(san_list)
# check valid dates interval
not_before = cert.get_not_before()
not_after = cert.get_not_after()
assert not_before < datetime.now(not_before.tzinfo)
assert not_after > datetime.now(not_after.tzinfo)
# --------- check utilities ---------
def check_json_contains(self, actual, expected):
# write all expected key:value bindings to a copy of the actual data ...
# ... assert it stays unchanged
test_json = copy.deepcopy(actual)
test_json.update(expected)
assert actual == test_json
def check_file_access(self, path, exp_mask):
actual_mask = os.lstat(path).st_mode & 0o777
assert oct(actual_mask) == oct(exp_mask)
def check_dir_empty(self, path):
assert os.listdir(path) == []
def get_http_status(self, domain, path, use_https=True):
r = self.get_meta(domain, path, use_https, insecure=True)
return r.response['status']
def get_cert(self, domain, tls=None, ciphers=None):
return MDCertUtil.load_server_cert(self._httpd_addr, self.https_port,
domain, tls=tls, ciphers=ciphers)
def get_server_cert(self, domain, proto=None, ciphers=None):
args = [
"openssl", "s_client", "-status",
"-connect", "%s:%s" % (self._httpd_addr, self.https_port),
"-CAfile", self.acme_ca_pemfile,
"-servername", domain,
"-showcerts"
]
if proto is not None:
args.extend(["-{0}".format(proto)])
if ciphers is not None:
args.extend(["-cipher", ciphers])
r = self.run(args)
# noinspection PyBroadException
try:
return MDCertUtil.parse_pem_cert(r.stdout)
except:
return None
def verify_cert_key_lenghts(self, domain, pkeys):
for p in pkeys:
cert = self.get_server_cert(domain, proto="tls1_2", ciphers=p['ciphers'])
if 0 == p['keylen']:
assert cert is None
else:
assert cert, "no cert returned for cipher: {0}".format(p['ciphers'])
assert cert.get_key_length() == p['keylen'], "key length, expected {0}, got {1}".format(
p['keylen'], cert.get_key_length()
)
def get_meta(self, domain, path, use_https=True, insecure=False):
schema = "https" if use_https else "http"
port = self.https_port if use_https else self.http_port
r = self.curl_get(f"{schema}://{domain}:{port}{path}", insecure=insecure)
assert r.exit_code == 0
assert r.response
assert r.response['header']
return r
def get_content(self, domain, path, use_https=True):
schema = "https" if use_https else "http"
port = self.https_port if use_https else self.http_port
r = self.curl_get(f"{schema}://{domain}:{port}{path}")
assert r.exit_code == 0
return r.stdout
def get_json_content(self, domain, path, use_https=True, insecure=False,
debug_log=True):
schema = "https" if use_https else "http"
port = self.https_port if use_https else self.http_port
url = f"{schema}://{domain}:{port}{path}"
r = self.curl_get(url, insecure=insecure, debug_log=debug_log)
if r.exit_code != 0:
log.error(f"curl get on {url} returned {r.exit_code}"
f"\nstdout: {r.stdout}"
f"\nstderr: {r.stderr}")
assert r.exit_code == 0, r.stderr
return r.json
def get_certificate_status(self, domain) -> Dict:
return self.get_json_content(domain, "/.httpd/certificate-status", insecure=True)
def get_md_status(self, domain, via_domain=None, use_https=True, debug_log=False) -> Dict:
if via_domain is None:
via_domain = self._default_domain
return self.get_json_content(via_domain, f"/md-status/{domain}",
use_https=use_https, debug_log=debug_log)
def get_server_status(self, query="/", via_domain=None, use_https=True):
if via_domain is None:
via_domain = self._default_domain
return self.get_content(via_domain, "/server-status%s" % query, use_https=use_https)
def await_completion(self, names, must_renew=False, restart=True, timeout=60,
via_domain=None, use_https=True):
try_until = time.time() + timeout
renewals = {}
names = names.copy()
while len(names) > 0:
if time.time() >= try_until:
return False
for name in names:
mds = self.get_md_status(name, via_domain=via_domain, use_https=use_https)
if mds is None:
log.debug("not managed by md: %s" % name)
return False
if 'renewal' in mds:
renewal = mds['renewal']
renewals[name] = True
if 'finished' in renewal and renewal['finished'] is True:
if (not must_renew) or (name in renewals):
log.debug(f"domain cert was renewed: {name}")
names.remove(name)
if len(names) != 0:
time.sleep(0.1)
if restart:
time.sleep(0.1)
return self.apache_restart() == 0
return True
def is_renewing(self, name):
stat = self.get_certificate_status(name)
return 'renewal' in stat
def await_renewal(self, names, timeout=60):
try_until = time.time() + timeout
while len(names) > 0:
if time.time() >= try_until:
return False
for name in names:
md = self.get_md_status(name)
if md is None:
log.debug("not managed by md: %s" % name)
return False
if 'renewal' in md:
names.remove(name)
if len(names) != 0:
time.sleep(0.1)
return True
def await_error(self, domain, timeout=60, via_domain=None, use_https=True, errors=1):
try_until = time.time() + timeout
while True:
if time.time() >= try_until:
return False
md = self.get_md_status(domain, via_domain=via_domain, use_https=use_https)
if md:
if 'state' in md and md['state'] == MDTestEnv.MD_S_ERROR:
return md
if 'renewal' in md and 'errors' in md['renewal'] \
and md['renewal']['errors'] >= errors:
return md
time.sleep(0.1)
return None
def await_file(self, fpath, timeout=60):
try_until = time.time() + timeout
while True:
if time.time() >= try_until:
return False
if os.path.isfile(fpath):
return True
time.sleep(0.1)
def check_file_permissions(self, domain):
md = self.a2md(["list", domain]).json['output'][0]
assert md
acct = md['ca']['account']
assert acct
self.check_file_access(self.path_store_json(), 0o600)
# domains
self.check_file_access(self.store_domains(), 0o700)
self.check_file_access(os.path.join(self.store_domains(), domain), 0o700)
self.check_file_access(self.store_domain_file(domain, 'privkey.pem'), 0o600)
self.check_file_access(self.store_domain_file(domain, 'pubcert.pem'), 0o600)
self.check_file_access(self.store_domain_file(domain, 'md.json'), 0o600)
# archive
self.check_file_access(self.store_archived_file(domain, 1, 'md.json'), 0o600)
# accounts
self.check_file_access(os.path.join(self._store_dir, 'accounts'), 0o755)
self.check_file_access(os.path.join(self._store_dir, 'accounts', acct), 0o755)
self.check_file_access(self.path_account(acct), 0o644)
self.check_file_access(self.path_account_key(acct), 0o644)
# staging
self.check_file_access(self.store_stagings(), 0o755)
def get_ocsp_status(self, domain, proto=None, cipher=None, ca_file=None):
stat = {}
args = [
"openssl", "s_client", "-status",
"-connect", "%s:%s" % (self._httpd_addr, self.https_port),
"-CAfile", ca_file if ca_file else self.acme_ca_pemfile,
"-servername", domain,
"-showcerts"
]
if proto is not None:
args.extend(["-{0}".format(proto)])
if cipher is not None:
args.extend(["-cipher", cipher])
r = self.run(args, debug_log=False)
ocsp_regex = re.compile(r'OCSP response: +([^=\n]+)\n')
matches = ocsp_regex.finditer(r.stdout)
for m in matches:
if m.group(1) != "":
stat['ocsp'] = m.group(1)
if 'ocsp' not in stat:
ocsp_regex = re.compile(r'OCSP Response Status:\s*(.+)')
matches = ocsp_regex.finditer(r.stdout)
for m in matches:
if m.group(1) != "":
stat['ocsp'] = m.group(1)
verify_regex = re.compile(r'Verify return code:\s*(.+)')
matches = verify_regex.finditer(r.stdout)
for m in matches:
if m.group(1) != "":
stat['verify'] = m.group(1)
return stat
def await_ocsp_status(self, domain, timeout=10, ca_file=None):
try_until = time.time() + timeout
while True:
if time.time() >= try_until:
break
stat = self.get_ocsp_status(domain, ca_file=ca_file)
if 'ocsp' in stat and stat['ocsp'] != "no response sent":
return stat
time.sleep(0.1)
raise TimeoutError(f"ocsp respopnse not available: {domain}")
def create_self_signed_cert(self, name_list, valid_days, serial=1000, path=None):
dirpath = path
if not path:
dirpath = os.path.join(self.store_domains(), name_list[0])
return MDCertUtil.create_self_signed_cert(dirpath, name_list, valid_days, serial)
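# Usage sketch (assumes a built httpd with mod_md, the pyhttpd test harness and
# a local pebble ACME server; normally this environment is driven by pytest
# fixtures rather than instantiated directly):
#
#   env = MDTestEnv(setup_dirs=True)
#   assert env.check_acme()
#   env.purge_store()
#   assert env.apache_restart() == 0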
|
8869
|
from mock import patch
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.contrib.auth import get_user_model
from django.core import exceptions
from django_dynamic_fixture import G
from django_webtest import WebTest
from icekit.models import Layout
from icekit.page_types.layout_page.models import LayoutPage
from icekit.utils import fluent_contents
from . import models
User = get_user_model()
class MapItemTestCase(WebTest):
def setUp(self):
self.embed_code = '''
<iframe
src="https://www.google.com/maps/embed?pb=!1m18!1m12!1m3!1d3312.0476344648832!2d151.19845715159963!3d-33.88842702741586!2m3!1f0!2f0!3f0!3m2!1i1024!2i768!4f13.1!3m3!1m2!1s0x6b12b1d842ee9aa9%3A0xb0a19ac433ef0be8!2sThe+Interaction+Consortium!5e0!3m2!1sen!2sau!4v1496201264670"
width="600"
height="450"
frameborder="0"
style="border:0"
allowfullscreen
></iframe>
'''
self.cleaned_embed_code = '<iframe allowfullscreen="" frameborder="0" src="https://www.google.com/maps/embed?pb=!1m18!1m12!1m3!1d3312.0476344648832!2d151.19845715159963!3d-33.88842702741586!2m3!1f0!2f0!3f0!3m2!1i1024!2i768!4f13.1!3m3!1m2!1s0x6b12b1d842ee9aa9%3A0xb0a19ac433ef0be8!2sThe+Interaction+Consortium!5e0!3m2!1sen!2sau!4v1496201264670" style="border: 0;"></iframe>'
self.layout_1 = G(
Layout,
template_name='icekit/layouts/default.html',
)
self.layout_1.content_types.add(
ContentType.objects.get_for_model(LayoutPage))
self.layout_1.save()
self.staff_1 = User.objects.create(
email='<EMAIL>',
is_staff=True,
is_active=True,
is_superuser=True,
)
self.page_1 = LayoutPage()
self.page_1.title = 'Test Page'
self.page_1.slug = 'test-page'
self.page_1.parent_site = Site.objects.first()
self.page_1.layout = self.layout_1
self.page_1.author = self.staff_1
self.page_1.status = LayoutPage.PUBLISHED
self.page_1.save()
self.map_1 = fluent_contents.create_content_instance(
models.MapItem,
self.page_1,
_embed_code=self.embed_code,
)
self.map_item = models.MapItem(
parent_type=ContentType.objects.get_for_model(type(self.page_1)),
parent_id=self.page_1.id,
placeholder=self.page_1.get_placeholder_by_slot('main')[0],
_embed_code=self.embed_code,
)
self.page_1.publish()
def test_map_renders(self):
response = self.app.get(self.page_1.get_published().get_absolute_url())
response.mustcontain(self.cleaned_embed_code)
def test_cleaned_embed_code(self):
self.assertEqual(self.map_1._cleaned_embed_code.strip(), self.cleaned_embed_code)
|
8881
|
import GeneralStats as gs
import numpy as np
from scipy.stats import skew
from scipy.stats import kurtosistest
import pandas as pd
if __name__ == "__main__":
gen=gs.GeneralStats()
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
print("data = ", data)
print("data1 = ", data1)
res=gen.average(data,rowvar=True)
res1=gen.average(data1,rowvar=True)
print("data平均值 = ",res)
print("data1平均值 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
res=gen.median(data,rowvar=True)
res1=gen.median(data1,rowvar=True)
print("data中位值 = ",res)
print("data1中位值 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
res=gen.mode(data,rowvar=True)
res1=gen.mode(data1,rowvar=True)
print("data众数值 = ",res)
print("data1众数值 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
    res=gen.quantile(data,0.5,rowvar=True,interpolation='lower') # with an even number of elements, the 0.5 quantile under 'midpoint' interpolation equals the median
    res1=gen.quantile(data1,0.5,rowvar=True,interpolation='lower') # with an odd number of elements, the 0.5 quantile under 'lower' interpolation equals the median
    print("data 0.5 quantile = ",res)
    print("data1 0.5 quantile = ",res1)
res=gen.quantile(data,0.25,rowvar=True,interpolation='lower')
res1=gen.quantile(data1,0.25,rowvar=True,interpolation='lower')
print("data 0.25分位数值s = ",res)
print("data1 0.25分位数值 = ",res1)
res=gen.quantile(data,0.75,rowvar=True,interpolation='lower')
res1=gen.quantile(data1,0.75,rowvar=True,interpolation='lower')
print("data 0.75分位数值 = ",res)
print("data1 0.75分位数值 = ",res1)
res=gen.quantile(data,1.0,rowvar=True,interpolation='lower')
res1=gen.quantile(data1,1.0,rowvar=True,interpolation='lower')
print("data 1.0分位数值 = ",res)
print("data1 1.0分位数值 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
res=gen.range(data,rowvar=True)
res1=gen.range(data1,rowvar=True)
print("data极差 = ",res)
print("data1极差 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
res=gen.variance(data,rowvar=True)
res1=gen.variance(data1,rowvar=True)
print("data方差 = ",res)
print("data1方差 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
res=gen.standard_dev(data,rowvar=True)
res1=gen.standard_dev(data1,rowvar=True)
print("data标准差 = ",res)
print("data1标准差 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
res=gen.skewness(data,rowvar=True)
res1=gen.skewness(data1,rowvar=True)
print("data偏度 = ",res)
print("data1偏度 = ",res1)
res=np.array([skew(data[0]),skew(data[1]),skew(data[2]),skew(data[3])])
print("使用scipy skew方法验证的data偏度 = ",res)
res1=np.array(skew(data1))
print("使用scipy skew方法验证的data1偏度 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([53, 61, 49, 66, 78, 47])
res=gen.kurtosis(data,rowvar=True)
res1=gen.kurtosis(data1,rowvar=True)
print("data峰度 = ",res)
print("data1峰度 = ",res1)
data_0=pd.Series(data[0])
data_1=pd.Series(data[1])
data_2=pd.Series(data[2])
data_3=pd.Series(data[3])
print("使用pandas kurt方法验证的data峰度 = ",[data_0.kurt(),data_1.kurt(),data_2.kurt(),data_3.kurt()])
data1=pd.Series(data1)
print("使用pandas kurt方法验证的data1峰度 = ",data1.kurt())
|
8884
|
import os
import re
from typing import Tuple
from pfio._typing import Union
from pfio.container import Container
from pfio.io import IO, create_fs_handler
class FileSystemDriverList(object):
def __init__(self):
# TODO(tianqi): dynamically create this list
# as well as the patterns upon loading the pfio module.
self.scheme_list = ["hdfs", "posix"]
self.posix_pattern = re.compile(r"file:\/\/(?P<path>.+)")
self.hdfs_pattern = re.compile(r"(?P<path>hdfs:\/\/.+)")
self.pattern_list = {"hdfs": self.hdfs_pattern,
"posix": self.posix_pattern, }
def _determine_fs_type(self, path: str) -> Tuple[str, str, bool]:
        if path is not None:
for fs_type, pattern in self.pattern_list.items():
ret = pattern.match(path)
if ret:
return (fs_type, ret.groupdict()["path"], True)
return ("posix", path, False)
def format_path(self, fs: IO, path: str) -> Tuple[str, bool]:
fs_type = fs.type
if fs_type in self.pattern_list.keys():
pattern = self.pattern_list[fs_type]
ret = pattern.match(path)
if ret:
return (ret.groupdict()["path"], True)
else:
return (path, False)
else:
return (path, False)
def get_handler_from_path(self, path: str) -> Tuple[IO, str, bool]:
(fs_type, actual_path, is_URI) = self._determine_fs_type(path)
handler = create_fs_handler(fs_type)
return (handler, actual_path, is_URI)
def get_handler_for_root(self,
uri_or_handler_name: str) -> Tuple[IO, str, bool]:
if uri_or_handler_name in self.pattern_list.keys():
return (create_fs_handler(uri_or_handler_name), "", False)
else:
(new_handler, actual_path, is_URI) = self.get_handler_from_path(
uri_or_handler_name)
new_handler.root = actual_path
return (new_handler, actual_path, is_URI)
def is_supported_scheme(self, scheme: str) -> bool:
return scheme in self.scheme_list
class DefaultContext(object):
def __init__(self):
self._fs_handler_list = FileSystemDriverList()
self._root = ""
self._default_context = \
self._fs_handler_list.get_handler_for_root("posix")[0]
def set_root(self, uri_or_handler: Union[str, IO]) -> None:
# TODO(check) if root is directory
if isinstance(uri_or_handler, IO):
handler = uri_or_handler
self._root = ""
else:
(handler, self._root, is_URI) = \
self.get_handler_by_name(uri_or_handler)
assert handler is not None
if self._root:
if not handler.isdir(self._root):
raise RuntimeError("the URI does not point to a directory")
self._default_context = handler
def get_handler(self, path: str = "") -> Tuple[IO, str]:
(handler, formatted_path,
is_URI) = self._fs_handler_list.get_handler_from_path(path)
if not is_URI:
actual_path = os.path.join(self._root, formatted_path)
return (self._default_context, actual_path)
else:
return (handler, formatted_path)
def open_as_container(self, path: str) -> Container:
(handler, formatted_path,
is_URI) = self._fs_handler_list.get_handler_from_path(path)
if not is_URI:
actual_path = os.path.join(self._root, formatted_path)
handler = self._default_context
else:
actual_path = formatted_path
self._root = ""
return handler.open_as_container(actual_path)
def get_handler_by_name(self, path: str) -> Tuple[IO, str, bool]:
return self._fs_handler_list.get_handler_for_root(path)
def get_root_dir(self) -> str:
return self._root
def is_supported_scheme(self, scheme: str) -> bool:
return self._fs_handler_list.is_supported_scheme(scheme)
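# Usage sketch (paths are illustrative; the HDFS branch additionally requires a
# configured HDFS environment):
#
#   context = DefaultContext()
#   handler, path = context.get_handler("data/local_file.txt")        # posix handler
#   handler, path = context.get_handler("hdfs://cluster/user/file")   # hdfs handler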
|
8890
|
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from django.test.client import Client
from model_mommy import mommy
from devices.models import Device
from users.models import Lageruser
class HistoryTests(TestCase):
def setUp(self):
self.client = Client()
self.admin = Lageruser.objects.create_superuser('test', '<EMAIL>', "test")
self.client.login(username="test", password="<PASSWORD>")
def test_global_view(self):
response = self.client.get('/history/global/')
self.assertEqual(response.status_code, 200)
def test_list_view(self):
content_type = ContentType.objects.get(model='device')
device = mommy.make(Device)
response = self.client.get('/history/%i/%i/' % (content_type.pk, device.pk))
self.assertEqual(response.status_code, 200)
def test_detail_view(self):
device = mommy.make(Device)
response = self.client.post('/devices/%i/edit/' % device.pk, data={
'name': 'test',
'creator': self.admin.pk,
})
self.assertEqual(response.status_code, 302)
response = self.client.get('/history/version/1/')
self.assertEqual(response.status_code, 200)
|
8901
|
from backend.common.models.mytba import MyTBAModel
class Favorite(MyTBAModel):
"""
In order to make strongly consistent DB requests, instances of this class
should be created with a parent that is the associated Account key.
"""
def __init__(self, *args, **kwargs):
super(Favorite, self).__init__(*args, **kwargs)
|
8935
|
from ad9833 import AD9833
# DUMMY classes for testing without board
class SBI(object):
def __init__(self):
pass
def send(self, data):
print(data)
class Pin(object):
def __init__(self):
pass
def low(self):
print(" 0")
def high(self):
print(" 1")
# Code
SBI1 = SBI()
PIN3 = Pin()
wave = AD9833(SBI1, PIN3)
wave.set_freq(14500)
wave.set_type(2)
wave.send()
print(wave.shape_type)
|
8957
|
from distdeepq import models # noqa
from distdeepq.build_graph import build_act, build_train # noqa
from distdeepq.simple import learn, load, make_session # noqa
from distdeepq.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer # noqa
from distdeepq.static import *
from distdeepq.plots import PlotMachine
|
8973
|
from argparse import ArgumentParser
import os
import numpy as np
from joblib import dump
from mldftdat.workflow_utils import SAVE_ROOT
from mldftdat.models.gp import *
from mldftdat.data import load_descriptors, filter_descriptors
import yaml
def parse_settings(args):
fname = args.datasets_list[0]
if args.suffix is not None:
fname = fname + '_' + args.suffix
fname = os.path.join(SAVE_ROOT, 'DATASETS', args.functional,
args.basis, args.version, fname)
print(fname)
with open(os.path.join(fname, 'settings.yaml'), 'r') as f:
d = yaml.load(f, Loader=yaml.Loader)
args.gg_a0 = d.get('a0')
args.gg_amin = d.get('amin')
args.gg_facmul = d.get('fac_mul')
def parse_dataset(args, i, val=False):
if val:
fname = args.validation_set[2*i]
n = int(args.validation_set[2*i+1])
else:
fname = args.datasets_list[2*i]
n = int(args.datasets_list[2*i+1])
if args.suffix is not None:
fname = fname + '_' + args.suffix
fname = os.path.join(SAVE_ROOT, 'DATASETS', args.functional,
args.basis, args.version, fname)
print(fname)
X, y, rho_data = load_descriptors(fname)
if val:
# offset in case repeat datasets are used
X, y, rho_data = X[n//2+1:,:], y[n//2+1:], rho_data[:,n//2+1:]
X, y, rho, rho_data = filter_descriptors(X, y, rho_data,
tol=args.density_cutoff)
print(X.shape, n)
if args.randomize:
inds = np.arange(X.shape[0])
np.random.shuffle(inds)
X = X[inds,:]
y = y[inds]
rho = rho[inds]
rho_data = rho_data[:,inds]
return X[::n,:], y[::n], rho[::n], rho_data[:,::n]
def parse_list(lststr, T=int):
return [T(substr) for substr in lststr.split(',')]
def main():
parser = ArgumentParser(description='Trains a GP exchange model')
parser.add_argument('save_file', type=str)
parser.add_argument('feature_file', type=str,
help='serialized FeatureList object in yaml format')
parser.add_argument('datasets_list', nargs='+',
help='pairs of dataset names and inverse sampling densities')
parser.add_argument('basis', metavar='basis', type=str,
help='basis set code')
parser.add_argument('--functional', metavar='functional', type=str, default=None,
help='exchange-correlation functional, HF for Hartree-Fock')
parser.add_argument('-r', '--randomize', action='store_true')
parser.add_argument('-c', '--density-cutoff', type=float, default=1e-4)
#parser.add_argument('-m', '--model-class', type=str, default=None)
#parser.add_argument('-k', '--kernel', help='kernel initialization strategy', type=str, default=None)
parser.add_argument('-s', '--seed', help='random seed', default=0, type=int)
parser.add_argument('-vs', '--validation-set', nargs='+')
parser.add_argument('-d', '--delete-k', action='store_true',
help='Delete L (LL^T=K the kernel matrix) to save disk space. Need to refit when reloading to calculate covariance.')
parser.add_argument('--heg', action='store_true', help='HEG exact constraint')
parser.add_argument('--tail', action='store_true', help='atomic tail exact constraint')
parser.add_argument('-o', '--desc-order', default=None,
help='comma-separated list of descriptor order with no spaces. must start with 0,1.')
parser.add_argument('-l', '--length-scale', default=None,
help='comma-separated list initial length-scale guesses')
parser.add_argument('--length-scale-mul', type=float, default=1.0,
help='Used for automatic length-scale initial guess')
parser.add_argument('-a', '--agpr', action='store_true',
help='Whether to use Additive RBF. If False, use RBF')
parser.add_argument('-as', '--agpr-scale', default=None)
parser.add_argument('-ao', '--agpr-order', default=2, type=int)
parser.add_argument('-an', '--agpr-nsingle', default=1, type=int)
parser.add_argument('-x', '--xed-y-code', default='CHACHIYO', type=str)
parser.add_argument('-on', '--optimize-noise', action='store_true',
                        help='Whether to optimize the exponent of the density noise.')
parser.add_argument('-v', '--version', default='c', type=str,
help='version of descriptor set. Default c')
parser.add_argument('--suffix', default=None, type=str,
help='customize data directories with this suffix')
args = parser.parse_args()
parse_settings(args)
np.random.seed(args.seed)
feature_list = FeatureList.load(args.feature_file)
if args.length_scale is not None:
args.length_scale = parse_list(args.length_scale, T=float)
if args.agpr_scale is not None:
args.agpr_scale = parse_list(args.agpr_scale, T=float)
if args.desc_order is not None:
args.desc_order = parse_list(args.desc_order)
assert len(args.datasets_list) % 2 == 0, 'Need pairs of entries for datasets list.'
assert len(args.datasets_list) != 0, 'Need training data'
nd = len(args.datasets_list) // 2
if args.validation_set is None:
nv = 0
else:
assert len(args.validation_set) % 2 == 0, 'Need pairs of entries for datasets list.'
nv = len(args.validation_set) // 2
X, y, rho, rho_data = parse_dataset(args, 0)
for i in range(1, nd):
Xn, yn, rhon, rho_datan, = parse_dataset(args, i)
X = np.append(X, Xn, axis=0)
y = np.append(y, yn, axis=0)
rho = np.append(rho, rhon, axis=0)
rho_data = np.append(rho_data, rho_datan, axis=1)
if nv != 0:
Xv, yv, rhov, rho_datav = parse_dataset(args, 0, val=True)
for i in range(1, nv):
Xn, yn, rhon, rho_datan, = parse_dataset(args, i, val=True)
Xv = np.append(Xv, Xn, axis=0)
yv = np.append(yv, yn, axis=0)
rhov = np.append(rhov, rhon, axis=0)
rho_datav = np.append(rho_datav, rho_datan, axis=1)
gpcls = DFTGPR
gpr = gpcls.from_settings(X, feature_list, args)
gpr.fit(X, y, add_heg=args.heg, add_tail=args.tail)
#if args.heg:
# gpr.add_heg_limit()
print('FINAL KERNEL', gpr.gp.kernel_)
if nv != 0:
pred = gpr.xed_to_y(gpr.predict(Xv), Xv)
abserr = np.abs(pred - gpr.xed_to_y(yv, Xv))
print('MAE VAL SET', np.mean(abserr))
# Always attach the arguments to the object to keep track of settings.
gpr.args = args
if args.delete_k:
gpr.L_ = None
dump(gpr, args.save_file)
if __name__ == '__main__':
main()
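# Example invocation (the script, dataset and file names below are illustrative):
#
#   python train_gp.py model.joblib features.yaml dataset_a 4 dataset_b 8 \
#       def2-qzvppd --functional PBE --version c -s 42 --heg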
|
8980
|
from __future__ import print_function # Python 2/3 compatibility
from gremlin_python import statics
from gremlin_python.structure.graph import Graph
from gremlin_python.process.graph_traversal import __
from gremlin_python.process.strategies import *
from gremlin_python.driver.driver_remote_connection import DriverRemoteConnection
#initializing the graph object
graph = Graph()
#creating connection with the remote
remoteConn = DriverRemoteConnection('wss://<endpoint>:8182/gremlin','g')
g = graph.traversal().withRemote(remoteConn)
print('Connection created.')
#clearing out all the vertices to start fresh
g.V().drop().iterate()
print('Deleting everything and starting clean.')
#Adding some vertices (nodes)
gerald = g.addV('person').property('age','81').property('first_name','Gerald').property('stays_in','Portland').next()
edith = g.addV('person').property('age','78').property('first_name','Edith').property('stays_in','Portland').next()
peter = g.addV('person').property('age','52').property('first_name','Shane').property('stays_in','Seattle').next()
mary = g.addV('person').property('age','50').property('first_name','Mary').property('stays_in','Seattle').next()
betty = g.addV('person').property('age','19').property('first_name','Betty').property('stays_in','Chicago').next()
print('Added some vertices (nodes).')
#Adding relationships (edges)
edge = g.V().has('first_name', 'Gerald').addE('husband_of').to(g.V().has('first_name', 'Edith')).property('married_since','1947').next()
edge = g.V().has('first_name', 'Edith').addE('wife_of').to(g.V().has('first_name', 'Gerald')).property('married_since','1947').next()
edge = g.V().has('first_name', 'Shane').addE('son_of').to(g.V().has('first_name', 'Gerald')).property('known_since','1964').next()
edge = g.V().has('first_name', 'Gerald').addE('father_of').to(g.V().has('first_name', 'Shane')).property('known_since','1964').next()
edge = g.V().has('first_name', 'Shane').addE('son_of').to(g.V().has('first_name', 'Edith')).property('known_since','1964').next()
edge = g.V().has('first_name', 'Edith').addE('mother_of').to(g.V().has('first_name', 'Shane')).property('known_since','1964').next()
edge = g.V().has('first_name', 'Shane').addE('husband_of').to(g.V().has('first_name', 'Mary')).property('known_since','1989').next()
edge = g.V().has('first_name', 'Mary').addE('wife_of').to(g.V().has('first_name', 'Shane')).property('known_since','1989').next()
edge = g.V().has('first_name', 'Shane').addE('father_of').to(g.V().has('first_name', 'Betty')).property('known_since','1991').next()
edge = g.V().has('first_name', 'Betty').addE('daughter_of').to(g.V().has('first_name', 'Shane')).property('known_since','1991').next()
edge = g.V().has('first_name', 'Mary').addE('mother_of').to(g.V().has('first_name', 'Betty')).property('known_since','1991').next()
edge = g.V().has('first_name', 'Betty').addE('daughter_of').to(g.V().has('first_name', 'Mary')).property('known_since','1991').next()
#print out all the node's first names
print('\n Printing first name from all nodes:')
print(g.V().first_name.toList())
#print out all the properties of person whose's first name is Shane
print('\n Printing all properties of person whose first name is Shane:')
print(g.V().has('person','first_name','Shane').valueMap().next())
#traversing the graph starting with Betty to then Shane to then Edith
print('\n Finding Betty and then looking up her parents:')
print(g.V().has('first_name', 'Betty').out('daughter_of').out('son_of').valueMap().toList())
#Print out all the nodes
print('\n Printing out all the nodes:')
people = g.V().valueMap().toList()
print(people)
#Print out all the connections (edges)
print('\n Print out all the connections (edges):')
connections = g.E().valueMap().toList()
print(connections)
#Closing the connection
remoteConn.close()
print('Connection closed!')
|
9012
|
from typing import List


class Solution:
def findDuplicate(self, nums: List[int]) -> int:
p1, p2 = nums[0], nums[nums[0]]
while nums[p1] != nums[p2]:
p1 = nums[p1]
p2 = nums[nums[p2]]
p2 = 0
while nums[p1] != nums[p2]:
p1 = nums[p1]
p2 = nums[p2]
return nums[p1]
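# The two pointers above run Floyd's cycle detection on the sequence i -> nums[i];
# the duplicate value is the entry point of the cycle.
if __name__ == "__main__":
    # Quick check on an illustrative input where 2 appears twice.
    print(Solution().findDuplicate([1, 3, 4, 2, 2]))  # -> 2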
|
9015
|
import json
from threading import Semaphore
import ee
from flask import request
from google.auth import crypt
from google.oauth2 import service_account
from google.oauth2.credentials import Credentials
service_account_credentials = None
import logging
export_semaphore = Semaphore(5)
get_info_semaphore = Semaphore(2)
def init_service_account_credentials(args):
global service_account_credentials
with open(args['gee_key_path'], 'r') as file_:
key_data = file_.read()
signer = crypt.RSASigner.from_string(key_data)
service_account_credentials = service_account.Credentials(
signer=signer,
service_account_email=args['gee_email'],
token_uri=ee.oauth.TOKEN_URI,
scopes=ee.oauth.SCOPES + ['https://www.googleapis.com/auth/drive']
)
def init_ee():
credentials = service_account_credentials
if 'sepal-user' in request.headers:
user = json.loads(request.headers['sepal-user'])
googleTokens = user.get('googleTokens', None)
if googleTokens:
credentials = Credentials(googleTokens['accessToken'])
ee.InitializeThread(credentials)
def to_asset_id(asset_path):
asset_roots = ee.data.getAssetRoots()
if not asset_roots:
raise Exception('User has no GEE asset roots')
return asset_roots[0]['id'] + '/' + asset_path
def delete_asset_collection(asset_id):
logging.info('Recursively deleting ' + asset_id)
if ee.data.getInfo(asset_id):
images = ee.data.getList({
'id': asset_id,
'fields': 'id'
})
for image in images:
ee.data.deleteAsset(image['id'])
logging.info('Deleted ' + image['id'])
ee.data.deleteAsset(asset_id)
logging.info('Deleted ' + asset_id)
def create_asset_image_collection(asset_id):
delete_asset_collection(asset_id)
ee.data.create_assets(
asset_ids=[asset_id],
asset_type=ee.data.ASSET_TYPE_IMAGE_COLL,
mk_parents=True
)
def create_asset_folder(asset_id):
ee.data.create_assets(
asset_ids=[asset_id],
asset_type=ee.data.ASSET_TYPE_FOLDER,
mk_parents=True
)
def get_info(ee_object):
try:
get_info_semaphore.acquire()
return ee_object.getInfo()
finally:
get_info_semaphore.release()
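# Usage sketch (assumes a Flask request context carrying a "sepal-user" header
# and a service-account key file; the path and e-mail are illustrative):
#
#   init_service_account_credentials({'gee_key_path': '/etc/sepal/gee-key.json',
#                                     'gee_email': 'svc@project.iam.gserviceaccount.com'})
#   init_ee()
#   create_asset_image_collection(to_asset_id('mosaics/2020'))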
|
9044
|
from __future__ import absolute_import
import torch
from torch.nn import functional
class FPN(torch.nn.Module):
def __init__(self, out_channels):
super(FPN, self).__init__()
self.out_channels = out_channels
self.P5 = torch.nn.MaxPool2d(kernel_size=1, stride=2, padding=0)
self.P4_conv1 = torch.nn.Conv2d(512, self.out_channels, kernel_size=1, stride=1, padding=0)
self.P4_conv2 = torch.nn.Conv2d(self.out_channels, self.out_channels, 3, 1, 1)
self.P3_conv1 = torch.nn.Conv2d(512, self.out_channels, kernel_size=1, stride=1, padding=0)
self.P3_conv2 = torch.nn.Conv2d(self.out_channels, self.out_channels, 3, 1, 1)
self.P2_conv1 = torch.nn.Conv2d(256, self.out_channels, kernel_size=1, stride=1, padding=0)
self.P2_conv2 = torch.nn.Conv2d(self.out_channels, self.out_channels, 3, 1, 1)
normal_init(self.P4_conv1, 0, 0.01)
normal_init(self.P4_conv2, 0, 0.01)
normal_init(self.P3_conv1, 0, 0.01)
normal_init(self.P3_conv2, 0, 0.01)
normal_init(self.P2_conv1, 0, 0.01)
normal_init(self.P2_conv2, 0, 0.01)
def forward(self, C2, C3, C4):
p4_out = self.P4_conv1(C4)
p5_out = self.P5(p4_out)
p3_out = self._upsample_add(p4_out, self.P3_conv1(C3))
p2_out = self._upsample_add(p3_out, self.P2_conv1(C2))
p4_out = self.P4_conv2(p4_out)
p3_out = self.P3_conv2(p3_out)
p2_out = self.P2_conv2(p2_out)
return p2_out, p3_out, p4_out, p5_out
def _upsample_add(self, x, y):
'''Upsample and add two feature maps.
Args:
x: (Variable) top feature map to be upsampled.
y: (Variable) lateral feature map.
Returns:
(Variable) added feature map.
Note in PyTorch, when input size is odd, the upsampled feature map
with `F.upsample(..., scale_factor=2, mode='nearest')`
maybe not equal to the lateral feature map size.
e.g.
original input size: [N,_,15,15] ->
conv2d feature map size: [N,_,8,8] ->
upsampled feature map size: [N,_,16,16]
So we choose bilinear upsample which supports arbitrary output sizes.
'''
_,_,H,W = y.size()
return functional.interpolate(x, size=(H,W), mode='bilinear') + y
def normal_init(m, mean, stddev, truncated=False):
"""
    weight initializer: truncated normal and random normal.
"""
# x is a parameter
if truncated:
m.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean) # not a perfect approximation
else:
m.weight.data.normal_(mean, stddev)
m.bias.data.zero_()
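# A short shape check, runnable on CPU (input sizes are illustrative and match
# the in-channel counts of the lateral 1x1 convolutions above):
if __name__ == "__main__":
    fpn = FPN(out_channels=256)
    C2 = torch.randn(1, 256, 200, 200)
    C3 = torch.randn(1, 512, 100, 100)
    C4 = torch.randn(1, 512, 50, 50)
    p2, p3, p4, p5 = fpn(C2, C3, C4)
    # expected spatial sizes: p2 200x200, p3 100x100, p4 50x50, p5 25x25
    print(p2.shape, p3.shape, p4.shape, p5.shape)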
|
9046
|
from distutils.core import setup
setup(
name="arweave-python-client",
packages = ['arweave'], # this must be the same as the name above
version="1.0.15.dev0",
description="Client interface for sending transactions on the Arweave permaweb",
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/MikeHibbert/arweave-python-client",
download_url="https://github.com/MikeHibbert/arweave-python-client",
keywords=['arweave', 'crypto'],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=[
'arrow',
'python-jose',
'pynacl',
'pycryptodome',
'cryptography',
'requests',
'psutil'
],
)
|
9061
|
from PHPUnitKit.tests import unittest
from PHPUnitKit.plugin import is_valid_php_version_file_version
class TestIsValidPhpVersionFileVersion(unittest.TestCase):
def test_invalid_values(self):
self.assertFalse(is_valid_php_version_file_version(''))
self.assertFalse(is_valid_php_version_file_version(' '))
self.assertFalse(is_valid_php_version_file_version('foobar'))
self.assertFalse(is_valid_php_version_file_version('masterfoo'))
self.assertFalse(is_valid_php_version_file_version('.'))
self.assertFalse(is_valid_php_version_file_version('x'))
self.assertFalse(is_valid_php_version_file_version('x.x'))
self.assertFalse(is_valid_php_version_file_version('x.x.x'))
self.assertFalse(is_valid_php_version_file_version('x'))
self.assertFalse(is_valid_php_version_file_version('snapshot'))
def test_master_branch_version(self):
self.assertTrue(is_valid_php_version_file_version('master'))
def test_specific_semver_versions(self):
self.assertTrue(is_valid_php_version_file_version('5.0.0'))
self.assertTrue(is_valid_php_version_file_version('5.0.1'))
self.assertTrue(is_valid_php_version_file_version('5.0.7'))
self.assertTrue(is_valid_php_version_file_version('5.0.30'))
self.assertTrue(is_valid_php_version_file_version('5.0.32'))
self.assertTrue(is_valid_php_version_file_version('5.1.0'))
self.assertTrue(is_valid_php_version_file_version('5.1.1'))
self.assertTrue(is_valid_php_version_file_version('5.1.3'))
self.assertTrue(is_valid_php_version_file_version('5.1.27'))
self.assertTrue(is_valid_php_version_file_version('7.0.0'))
self.assertTrue(is_valid_php_version_file_version('7.1.19'))
def test_minor_versions(self):
self.assertTrue(is_valid_php_version_file_version('5.6'))
self.assertTrue(is_valid_php_version_file_version('7.1'))
self.assertTrue(is_valid_php_version_file_version('7.2'))
def test_major_dot_x_versions(self):
self.assertTrue(is_valid_php_version_file_version('5.x'))
self.assertTrue(is_valid_php_version_file_version('6.x'))
self.assertTrue(is_valid_php_version_file_version('7.x'))
self.assertTrue(is_valid_php_version_file_version('8.x'))
def test_major_dot_minor_dot_x_versions(self):
self.assertTrue(is_valid_php_version_file_version('7.0.x'))
self.assertTrue(is_valid_php_version_file_version('7.1.x'))
self.assertTrue(is_valid_php_version_file_version('7.2.x'))
def test_snapshot_versions(self):
self.assertTrue(is_valid_php_version_file_version('5.4snapshot'))
self.assertTrue(is_valid_php_version_file_version('5.5snapshot'))
self.assertTrue(is_valid_php_version_file_version('5.6snapshot'))
self.assertTrue(is_valid_php_version_file_version('7.0snapshot'))
self.assertTrue(is_valid_php_version_file_version('7.1snapshot'))
self.assertTrue(is_valid_php_version_file_version('7.0.0snapshot'))
self.assertTrue(is_valid_php_version_file_version('7.1.0snapshot'))
self.assertTrue(is_valid_php_version_file_version('7.1.1snapshot'))
|
9063
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cmsplugin_cascade', '0006_bootstrapgallerypluginmodel'),
]
operations = [
]
|
9064
|
Credits = [
('Bootstrap', 'https://getbootstrap.com', 'The Bootstrap team', 'MIT'),
('Bottle', 'http://bottlepy.org', '<NAME>', 'MIT'),
('Cheroot', 'https://github.com/cherrypy/cheroot', 'CherryPy Team', 'BSD 3-Clause "New" or "Revised" License'),
('Click', 'https://github.com/pallets/click', 'Pallets', 'BSD 3-Clause "New" or "Revised" License'),
('ConfigUpdater', 'https://github.com/pyscaffold/configupdater', '<NAME>', 'MIT'),
('Glide', 'https://github.com/glidejs/glide', '@jedrzejchalubek', 'MIT'),
('JQuery', 'https://jquery.com', 'The jQuery Foundation', 'MIT'),
('jquery.pep.js', 'http://pep.briangonzalez.org', '@briangonzalez', 'MIT'),
('js-md5', 'https://github.com/emn178/js-md5', '@emn178', 'MIT'),
('PySocks', 'https://github.com/Anorov/PySocks', '@Anorov', 'Custom DAN HAIM'),
('RapydScript-NG', 'https://github.com/kovidgoyal/rapydscript-ng', '@kovidgoyal',
'BSD 2-Clause "Simplified" License'),
('Requests', 'https://requests.kennethreitz.org', '<NAME>', 'Apache License, Version 2.0'),
('scrollMonitor', 'https://github.com/stutrek/scrollmonitor', '@stutrek', 'MIT'),
('Smoothie Charts', 'https://github.com/joewalnes/smoothie', '@drewnoakes', 'MIT'),
('stem', 'https://stem.torproject.org', '<NAME> and The Tor Project', 'GNU LESSER GENERAL PUBLIC LICENSE')
]
|
9102
|
from core.celery.config import ERIGONES_TASK_USER
from que.tasks import execute, get_task_logger
from vms.models import SnapshotDefine, Snapshot, BackupDefine, Backup, IPAddress
logger = get_task_logger(__name__)
def is_vm_missing(vm, msg):
"""
Check failed command output and return True if VM is not on compute node.
"""
check_str = vm.hostname + ': No such zone configured'
return check_str in msg
def vm_delete_snapshots_of_removed_disks(vm):
"""
This helper function deletes snapshots for VM with changing disk IDs. Bug #chili-363
++ Bug #chili-220 - removing snapshot and backup definitions for removed disks.
"""
removed_disk_ids = [Snapshot.get_real_disk_id(i) for i in vm.create_json_update_disks().get('remove_disks', [])]
if removed_disk_ids:
Snapshot.objects.filter(vm=vm, disk_id__in=removed_disk_ids).delete()
SnapshotDefine.objects.filter(vm=vm, disk_id__in=removed_disk_ids).delete()
Backup.objects.filter(vm=vm, disk_id__in=removed_disk_ids, last=True).update(last=False)
BackupDefine.objects.filter(vm=vm, disk_id__in=removed_disk_ids).delete()
return removed_disk_ids
def _reset_allowed_ip_usage(vm, ip):
"""Helper function used below. It sets the IP usage back to VM [1] only if other VMs, which use the address in
allowed_ips are in notcreated state."""
if all(other_vm.is_notcreated() for other_vm in ip.vms.exclude(uuid=vm.uuid)):
ip.usage = IPAddress.VM
ip.save()
def _is_ip_ok(ip_queryset, vm_ip, vm_network_uuid):
"""Helper function used below. Return True if vm_ip (string) is "dhcp" or is found in the IPAddress queryset
and has the expected usage flag and subnet uuid."""
if vm_ip == 'dhcp':
return True
return any(ip.ip == vm_ip and ip.subnet.uuid == vm_network_uuid and ip.usage == IPAddress.VM_REAL
for ip in ip_queryset)
def vm_update_ipaddress_usage(vm):
"""
    This helper function is responsible for updating IPAddress.usage and IPAddress.vm of server IPs (#chili-615,1029)
    by removing the association from IPs that are not set on any NIC. In addition:
    - when a VM is deleted, all its IP usages are set to IPAddress.VM (in DB), and
    - when a VM is created or updated, all its IP usages are set to IPAddress.VM_REAL (on the hypervisor).
    Always call this function _only_ after vm.json_active is synced with vm.json!!!
    In order to properly understand this code you have to understand the association between the IPAddress and Vm models.
This function may raise a ValueError if the VM and IP address were not properly associated (e.g. via vm_define_nic).
"""
current_ips = set(vm.json_active_get_ips(primary_ips=True, allowed_ips=False))
current_ips.update(vm.json_get_ips(primary_ips=True, allowed_ips=False))
current_allowed_ips = set(vm.json_active_get_ips(primary_ips=False, allowed_ips=True))
current_allowed_ips.update(vm.json_get_ips(primary_ips=False, allowed_ips=True))
# Return old IPs back to IP pool, so they can be used again
vm.ipaddress_set.exclude(ip__in=current_ips).update(vm=None, usage=IPAddress.VM)
# Remove association of removed vm.allowed_ips
for ip in vm.allowed_ips.exclude(ip__in=current_allowed_ips):
ip.vms.remove(vm)
_reset_allowed_ip_usage(vm, ip)
if vm.is_notcreated():
# Server was deleted from hypervisor
vm.ipaddress_set.filter(usage=IPAddress.VM_REAL).update(usage=IPAddress.VM)
for ip in vm.allowed_ips.filter(usage=IPAddress.VM_REAL):
_reset_allowed_ip_usage(vm, ip)
return
# Server was updated or created
vm.ipaddress_set.filter(usage=IPAddress.VM).update(usage=IPAddress.VM_REAL)
vm.allowed_ips.filter(usage=IPAddress.VM).update(usage=IPAddress.VM_REAL)
# The VM configuration may be changed directly on the hypervisor, thus the VM could have
# new NICs and IP addresses which configuration bypassed our API - issue #168.
vm_ips = vm.ipaddress_set.select_related('subnet').filter(usage=IPAddress.VM_REAL)
vm_allowed_ips = vm.allowed_ips.select_related('subnet').filter(usage=IPAddress.VM_REAL)
# For issue #168 we have to check the VM<->IPAddress association in a loop for each NIC, because we need to
# match the NIC.network_uuid with a Subnet.
for nic_id, nic in enumerate(vm.json_active_get_nics(), 1):
network_uuid = nic.get('network_uuid', None)
if network_uuid:
ip = nic.get('ip', '')
allowed_ips = nic.get('allowed_ips', [])
if ip:
logger.debug('VM: %s | NIC ID: %s | NIC network: %s | IP address: %s', vm, nic_id, network_uuid, ip)
if not _is_ip_ok(vm_ips, ip, network_uuid):
raise ValueError('VM %s NIC ID %s IP address %s is not properly associated with VM!' %
(vm, nic_id, ip))
for ip in allowed_ips:
logger.debug('VM: %s | NIC ID: %s | NIC network: %s | IP address: %s', vm, nic_id, network_uuid, ip)
if not _is_ip_ok(vm_allowed_ips, ip, network_uuid):
raise ValueError('VM %s NIC ID %s allowed IP address %s is not properly associated with VM!' %
(vm, nic_id, ip))
else:
raise ValueError('VM %s NIC ID %s does not have a network uuid!' % (vm, nic_id))
def vm_deploy(vm, force_stop=False):
"""
Internal API call used for finishing VM deploy;
Actually cleaning the json and starting the VM.
"""
if force_stop: # VM is running without OS -> stop
cmd = 'vmadm stop %s -F >/dev/null 2>/dev/null; vmadm get %s 2>/dev/null' % (vm.uuid, vm.uuid)
else: # VM is stopped and deployed -> start
cmd = 'vmadm start %s >/dev/null 2>/dev/null; vmadm get %s 2>/dev/null' % (vm.uuid, vm.uuid)
msg = 'Deploy server'
lock = 'vmadm deploy ' + vm.uuid
meta = {
'output': {
'returncode': 'returncode',
'stderr': 'message',
'stdout': 'json'
},
'replace_stderr': ((vm.uuid, vm.hostname),),
'msg': msg, 'vm_uuid': vm.uuid
}
callback = ('api.vm.base.tasks.vm_deploy_cb', {'vm_uuid': vm.uuid})
return execute(ERIGONES_TASK_USER, None, cmd, meta=meta, lock=lock, callback=callback,
queue=vm.node.fast_queue, nolog=True, ping_worker=False, check_user_tasks=False)
def vm_reset(vm):
"""
Internal API call used for VM reboots in emergency situations.
"""
cmd = 'vmadm stop %s -F; vmadm start %s' % (vm.uuid, vm.uuid)
return execute(ERIGONES_TASK_USER, None, cmd, callback=False, queue=vm.node.fast_queue, nolog=True,
check_user_tasks=False)
def vm_update(vm):
"""
Internal API used for updating VM if there were changes in json detected.
"""
logger.info('Running PUT vm_manage(%s), because something (vnc port?) has changed', vm)
from api.vm.base.views import vm_manage
from api.utils.request import get_dummy_request
from api.utils.views import call_api_view
request = get_dummy_request(vm.dc, method='PUT', system_user=True)
res = call_api_view(request, 'PUT', vm_manage, vm.hostname)
if res.status_code == 201:
        logger.warning('PUT vm_manage(%s) was successful: %s', vm, res.data)
else:
logger.error('PUT vm_manage(%s) failed: %s (%s): %s', vm, res.status_code, res.status_text, res.data)
|
9109
|
class Solution:
# @param n, an integer
# @return an integer
    def reverseBits(self, n):
        # Minimal completion assuming the usual contract for this method:
        # reverse the bit order of a 32-bit unsigned integer.
        result = 0
        for _ in range(32):
            result = (result << 1) | (n & 1)
            n >>= 1
        return result
|
9123
|
import ipfsapi
c = ipfsapi.connect()
peer_id = c.key_list()['Keys'][1]['Id']
c.name_publish('QmYjYGKXqo36GDt6f6qvp9qKAsrc72R9y88mQSLvogu8Ub', key='another_key')
result = c.cat('/ipns/' + peer_id)
print(result)
|
9150
|
import torch
DEVICE = torch.device("cuda")
SAVED_CHECKPOINTS = [32*1000, 100*1000, 150*1000, 200*1000, 300*1000, 400*1000]
SAVED_CHECKPOINTS += [10*1000, 20*1000, 30*1000, 40*1000, 50*1000, 60*1000, 70*1000, 80*1000, 90*1000]
SAVED_CHECKPOINTS += [25*1000, 50*1000, 75*1000]
SAVED_CHECKPOINTS = set(SAVED_CHECKPOINTS)
|
9189
|
import re
# regex for a user or channel mention at the beginning of a message
# example matches: " <@UJQ07L30Q> ", "<#C010P8N1ABB|interns>"
# interactive playground: https://regex101.com/r/2Z7eun/2
MENTION_PATTERN = r"(?:^\s?<@(.*?)>\s?)|(?:^\s?<#(.*?)\|.*?>\s?)"
def get_set_element(_set):
"""get the element from the set to which the iterator points; returns an
arbitrary item
"""
for element in _set:
return element
def get_person_from_match(user_id, match):
"""given a Match, return the Person corresponding to the passed user ID
"""
if match.person_1.user_id == user_id:
return match.person_1
elif match.person_2.user_id == user_id:
return match.person_2
else:
raise Exception(f"Person with user ID \"{user_id}\" is not part of "
f"the passed match ({match}).")
def get_other_person_from_match(user_id, match):
"""given a Match, return the Person corresponding to the user who is NOT
the passed user ID (i.e. the other Person)
"""
if match.person_1.user_id == user_id:
return match.person_2
elif match.person_2.user_id == user_id:
return match.person_1
else:
raise Exception(f"Person with user ID \"{user_id}\" is not part of "
f"the passed match ({match}).")
def blockquote(message):
"""return `message` with markdown blockquote formatting (start each line
with "> ")
"""
if message:
return re.sub(r"^", "> ", message, flags=re.MULTILINE)
else:
return None
def get_mention(message):
"""get the user or channel ID mentioned at the beginning of a message, if
any
"""
match = re.search(MENTION_PATTERN, message)
if match:
# return the first not-None value in the match group tuple, be it a
# user or channel mention
# https://stackoverflow.com/a/18533669
return next(group for group in match.group(1, 2) if group is not None)
else:
return None
def remove_mention(message):
"""remove the user or channel mention from the beginning of a message, if
any
"""
return re.sub(MENTION_PATTERN, "", message, count=1)
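# A minimal usage sketch exercising the helpers above on Slack-style message
# strings; the IDs reuse the examples quoted in the MENTION_PATTERN comment.
if __name__ == "__main__":
    message = "<@UJQ07L30Q> are you free for coffee?"
    assert get_mention(message) == "UJQ07L30Q"
    assert remove_mention(message) == "are you free for coffee?"
    assert get_mention("<#C010P8N1ABB|interns> standup moved to 10am") == "C010P8N1ABB"
    assert get_mention("no mention here") is None
    print(blockquote("first line\nsecond line"))  # each line prefixed with "> "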
|
9200
|
from . import FishBase
from . import FishGlobals
class FishCollection:
def __init__(self):
self.fishList = []
def __len__(self):
return len(self.fishList)
def getFish(self):
return self.fishList
def makeFromNetLists(self, genusList, speciesList, weightList):
self.fishList = []
for genus, species, weight in zip(genusList, speciesList, weightList):
self.fishList.append(FishBase.FishBase(genus, species, weight))
def getNetLists(self):
genusList = []
speciesList = []
weightList = []
for fish in self.fishList:
genusList.append(fish.getGenus())
speciesList.append(fish.getSpecies())
weightList.append(fish.getWeight())
return [genusList, speciesList, weightList]
def hasFish(self, genus, species):
for fish in self.fishList:
if fish.getGenus() == genus and fish.getSpecies() == species:
return 1
return 0
def hasGenus(self, genus):
for fish in self.fishList:
if fish.getGenus() == genus:
return 1
return 0
def __collect(self, newFish, updateCollection):
for fish in self.fishList:
if fish.getGenus() == newFish.getGenus() and fish.getSpecies() == newFish.getSpecies():
if fish.getWeight() < newFish.getWeight():
if updateCollection:
fish.setWeight(newFish.getWeight())
return FishGlobals.COLLECT_NEW_RECORD
else:
return FishGlobals.COLLECT_NO_UPDATE
if updateCollection:
self.fishList.append(newFish)
return FishGlobals.COLLECT_NEW_ENTRY
def collectFish(self, newFish):
return self.__collect(newFish, updateCollection=1)
def getCollectResult(self, newFish):
return self.__collect(newFish, updateCollection=0)
def __str__(self):
numFish = len(self.fishList)
txt = 'Fish Collection (%s fish):' % numFish
for fish in self.fishList:
txt += '\n' + str(fish)
return txt
|
9212
|
import random
from typing import Optional, Tuple, Union
import numpy as np
import torch
from torch import Tensor
from torch_geometric.utils import coalesce, degree, remove_self_loops
from .num_nodes import maybe_num_nodes
def negative_sampling(edge_index: Tensor,
num_nodes: Optional[Union[int, Tuple[int, int]]] = None,
num_neg_samples: Optional[int] = None,
method: str = "sparse",
force_undirected: bool = False) -> Tensor:
r"""Samples random negative edges of a graph given by :attr:`edge_index`.
Args:
edge_index (LongTensor): The edge indices.
num_nodes (int or Tuple[int, int], optional): The number of nodes,
*i.e.* :obj:`max_val + 1` of :attr:`edge_index`.
If given as a tuple, then :obj:`edge_index` is interpreted as a
bipartite graph with shape :obj:`(num_src_nodes, num_dst_nodes)`.
(default: :obj:`None`)
num_neg_samples (int, optional): The (approximate) number of negative
samples to return.
If set to :obj:`None`, will try to return a negative edge for every
positive edge. (default: :obj:`None`)
method (string, optional): The method to use for negative sampling,
*i.e.*, :obj:`"sparse"` or :obj:`"dense"`.
This is a memory/runtime trade-off.
:obj:`"sparse"` will work on any graph of any size, while
:obj:`"dense"` can perform faster true-negative checks.
(default: :obj:`"sparse"`)
force_undirected (bool, optional): If set to :obj:`True`, sampled
negative edges will be undirected. (default: :obj:`False`)
:rtype: LongTensor
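    Example (with a small, arbitrary toy graph):

        >>> edge_index = torch.tensor([[0, 0, 1, 2],
        ...                            [1, 2, 3, 3]])
        >>> # Sample (up to) four edges that are absent from ``edge_index``:
        >>> neg_edge_index = negative_sampling(edge_index, num_nodes=4,
        ...                                    num_neg_samples=4)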
"""
assert method in ['sparse', 'dense']
size = num_nodes
bipartite = isinstance(size, (tuple, list))
size = maybe_num_nodes(edge_index) if size is None else size
size = (size, size) if not bipartite else size
force_undirected = False if bipartite else force_undirected
idx, population = edge_index_to_vector(edge_index, size, bipartite,
force_undirected)
if idx.numel() >= population:
return edge_index.new_empty((2, 0))
if num_neg_samples is None:
num_neg_samples = edge_index.size(1)
if force_undirected:
num_neg_samples = num_neg_samples // 2
prob = 1. - idx.numel() / population # Probability to sample a negative.
sample_size = int(1.1 * num_neg_samples / prob) # (Over)-sample size.
neg_idx = None
if method == 'dense':
# The dense version creates a mask of shape `population` to check for
# invalid samples.
mask = idx.new_ones(population, dtype=torch.bool)
mask[idx] = False
for _ in range(3): # Number of tries to sample negative indices.
rnd = sample(population, sample_size, idx.device)
rnd = rnd[mask[rnd]] # Filter true negatives.
neg_idx = rnd if neg_idx is None else torch.cat([neg_idx, rnd])
if neg_idx.numel() >= num_neg_samples:
neg_idx = neg_idx[:num_neg_samples]
break
mask[neg_idx] = False
else: # 'sparse'
# The sparse version checks for invalid samples via `np.isin`.
idx = idx.to('cpu')
for _ in range(3): # Number of tries to sample negative indices.
rnd = sample(population, sample_size, device='cpu')
mask = np.isin(rnd, idx)
if neg_idx is not None:
mask |= np.isin(rnd, neg_idx.to('cpu'))
mask = torch.from_numpy(mask).to(torch.bool)
rnd = rnd[~mask].to(edge_index.device)
neg_idx = rnd if neg_idx is None else torch.cat([neg_idx, rnd])
if neg_idx.numel() >= num_neg_samples:
neg_idx = neg_idx[:num_neg_samples]
break
return vector_to_edge_index(neg_idx, size, bipartite, force_undirected)
def batched_negative_sampling(
edge_index: Tensor,
batch: Union[Tensor, Tuple[Tensor, Tensor]],
num_neg_samples: Optional[int] = None,
method: str = "sparse",
force_undirected: bool = False,
) -> Tensor:
r"""Samples random negative edges of multiple graphs given by
:attr:`edge_index` and :attr:`batch`.
Args:
edge_index (LongTensor): The edge indices.
batch (LongTensor or Tuple[LongTensor, LongTensor]): Batch vector
:math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each
node to a specific example.
If given as a tuple, then :obj:`edge_index` is interpreted as a
bipartite graph connecting two different node types.
num_neg_samples (int, optional): The number of negative samples to
return. If set to :obj:`None`, will try to return a negative edge
for every positive edge. (default: :obj:`None`)
method (string, optional): The method to use for negative sampling,
*i.e.*, :obj:`"sparse"` or :obj:`"dense"`.
This is a memory/runtime trade-off.
:obj:`"sparse"` will work on any graph of any size, while
:obj:`"dense"` can perform faster true-negative checks.
(default: :obj:`"sparse"`)
force_undirected (bool, optional): If set to :obj:`True`, sampled
negative edges will be undirected. (default: :obj:`False`)
:rtype: LongTensor
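    Example (two small, arbitrary toy graphs batched together):

        >>> edge_index = torch.tensor([[0, 0, 1, 2, 4, 4, 5, 6],
        ...                            [1, 2, 3, 3, 5, 6, 7, 7]])
        >>> batch = torch.tensor([0, 0, 0, 0, 1, 1, 1, 1])
        >>> neg_edge_index = batched_negative_sampling(edge_index, batch)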
"""
if isinstance(batch, Tensor):
src_batch, dst_batch = batch, batch
else:
src_batch, dst_batch = batch[0], batch[1]
split = degree(src_batch[edge_index[0]], dtype=torch.long).tolist()
edge_indices = torch.split(edge_index, split, dim=1)
num_src = degree(src_batch, dtype=torch.long)
cum_src = torch.cat([src_batch.new_zeros(1), num_src.cumsum(0)[:-1]])
if isinstance(batch, Tensor):
num_nodes = num_src.tolist()
cumsum = cum_src
else:
num_dst = degree(dst_batch, dtype=torch.long)
cum_dst = torch.cat([dst_batch.new_zeros(1), num_dst.cumsum(0)[:-1]])
num_nodes = torch.stack([num_src, num_dst], dim=1).tolist()
cumsum = torch.stack([cum_src, cum_dst], dim=1).unsqueeze(-1)
neg_edge_indices = []
for i, edge_index in enumerate(edge_indices):
edge_index = edge_index - cumsum[i]
neg_edge_index = negative_sampling(edge_index, num_nodes[i],
num_neg_samples, method,
force_undirected)
neg_edge_index += cumsum[i]
neg_edge_indices.append(neg_edge_index)
return torch.cat(neg_edge_indices, dim=1)
def structured_negative_sampling(edge_index, num_nodes: Optional[int] = None,
contains_neg_self_loops: bool = True):
r"""Samples a negative edge :obj:`(i,k)` for every positive edge
:obj:`(i,j)` in the graph given by :attr:`edge_index`, and returns it as a
tuple of the form :obj:`(i,j,k)`.
Args:
edge_index (LongTensor): The edge indices.
num_nodes (int, optional): The number of nodes, *i.e.*
:obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`)
contains_neg_self_loops (bool, optional): If set to
:obj:`False`, sampled negative edges will not contain self loops.
(default: :obj:`True`)
:rtype: (LongTensor, LongTensor, LongTensor)
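    Example (with a small, arbitrary toy graph):

        >>> edge_index = torch.tensor([[0, 0, 1, 2],
        ...                            [1, 2, 3, 3]])
        >>> # (i, j) are the positive edges, (i, k) the sampled negatives:
        >>> i, j, k = structured_negative_sampling(edge_index)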
"""
num_nodes = maybe_num_nodes(edge_index, num_nodes)
row, col = edge_index.cpu()
pos_idx = row * num_nodes + col
if not contains_neg_self_loops:
loop_idx = torch.arange(num_nodes) * (num_nodes + 1)
pos_idx = torch.cat([pos_idx, loop_idx], dim=0)
rand = torch.randint(num_nodes, (row.size(0), ), dtype=torch.long)
neg_idx = row * num_nodes + rand
mask = torch.from_numpy(np.isin(neg_idx, pos_idx)).to(torch.bool)
rest = mask.nonzero(as_tuple=False).view(-1)
while rest.numel() > 0: # pragma: no cover
tmp = torch.randint(num_nodes, (rest.size(0), ), dtype=torch.long)
rand[rest] = tmp
neg_idx = row[rest] * num_nodes + tmp
mask = torch.from_numpy(np.isin(neg_idx, pos_idx)).to(torch.bool)
rest = rest[mask]
return edge_index[0], edge_index[1], rand.to(edge_index.device)
def structured_negative_sampling_feasible(
edge_index: Tensor, num_nodes: Optional[int] = None,
contains_neg_self_loops: bool = True) -> bool:
r"""Returns :obj:`True` if
:meth:`~torch_geometric.utils.structured_negative_sampling` is feasible
on the graph given by :obj:`edge_index`.
:obj:`~torch_geometric.utils.structured_negative_sampling` is infeasible
    if at least one node is connected to all other nodes.
Args:
edge_index (LongTensor): The edge indices.
num_nodes (int, optional): The number of nodes, *i.e.*
:obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`)
contains_neg_self_loops (bool, optional): If set to
:obj:`False`, sampled negative edges will not contain self loops.
(default: :obj:`True`)
:rtype: bool
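    Example (node 2 in this arbitrary toy graph is connected to every other
    node, so structured sampling without self-loops is infeasible):

        >>> edge_index = torch.tensor([[0, 0, 1, 1, 2, 2, 2],
        ...                            [1, 2, 0, 2, 0, 1, 1]])
        >>> structured_negative_sampling_feasible(edge_index, 3, False)
        False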
"""
num_nodes = maybe_num_nodes(edge_index, num_nodes)
max_num_neighbors = num_nodes
edge_index = coalesce(edge_index, num_nodes=num_nodes)
if not contains_neg_self_loops:
edge_index, _ = remove_self_loops(edge_index)
max_num_neighbors -= 1 # Reduce number of valid neighbors
deg = degree(edge_index[0], num_nodes)
# True if there exists no node that is connected to all other nodes.
return bool(torch.all(deg < max_num_neighbors))
###############################################################################
def sample(population: int, k: int, device=None) -> Tensor:
if population <= k:
return torch.arange(population, device=device)
else:
return torch.tensor(random.sample(range(population), k), device=device)
def edge_index_to_vector(
edge_index: Tensor,
size: Tuple[int, int],
bipartite: bool,
force_undirected: bool = False,
) -> Tuple[Tensor, int]:
row, col = edge_index
if bipartite: # No need to account for self-loops.
idx = (row * size[1]).add_(col)
population = size[0] * size[1]
return idx, population
elif force_undirected:
assert size[0] == size[1]
num_nodes = size[0]
# We only operate on the upper triangular matrix:
mask = row < col
row, col = row[mask], col[mask]
offset = torch.arange(1, num_nodes, device=row.device).cumsum(0)[row]
idx = row.mul_(num_nodes).add_(col).sub_(offset)
population = (num_nodes * (num_nodes + 1)) // 2 - num_nodes
return idx, population
else:
assert size[0] == size[1]
num_nodes = size[0]
# We remove self-loops as we do not want to take them into account
# when sampling negative values.
mask = row != col
row, col = row[mask], col[mask]
col[row < col] -= 1
idx = row.mul_(num_nodes - 1).add_(col)
population = num_nodes * num_nodes - num_nodes
return idx, population
def vector_to_edge_index(idx: Tensor, size: Tuple[int, int], bipartite: bool,
force_undirected: bool = False) -> Tensor:
if bipartite: # No need to account for self-loops.
row = idx.div(size[1], rounding_mode='floor')
col = idx % size[1]
return torch.stack([row, col], dim=0)
elif force_undirected:
assert size[0] == size[1]
num_nodes = size[0]
offset = torch.arange(1, num_nodes, device=idx.device).cumsum(0)
end = torch.arange(num_nodes, num_nodes * num_nodes, num_nodes,
device=idx.device)
row = torch.bucketize(idx, end.sub_(offset), right=True)
col = offset[row].add_(idx) % num_nodes
return torch.stack([torch.cat([row, col]), torch.cat([col, row])], 0)
else:
assert size[0] == size[1]
num_nodes = size[0]
row = idx.div(num_nodes - 1, rounding_mode='floor')
col = idx % (num_nodes - 1)
col[row <= col] += 1
return torch.stack([row, col], dim=0)
|
9215
|
class WebException(Exception):
pass
class ParserException(Exception):
"""
    Parsing exception.
"""
pass
class ApiException(Exception):
"""
    API exception.
"""
pass
class WsException(Exception):
"""
    Polling exception.
"""
pass
class SsoException(Exception):
"""
    SSO exception.
"""
pass
class LibException(Exception):
"""
    Library exception.
"""
pass
class AccountException(Exception):
"""
    Account exception (account invalidated).
"""
pass
class FlowException(Exception):
"""
    Authentication traffic exception.
"""
pass
|
9223
|
import pytest
from onnx import TensorProto
from onnx import helper as oh
import finn.core.onnx_exec as oxe
from finn.core.modelwrapper import ModelWrapper
from finn.transformation.streamline.reorder import MoveTransposePastJoinAdd
from finn.util.basic import gen_finn_dt_tensor
def create_model(perm):
if perm == [0, 3, 1, 2]:
in_shape = [1, 128, 1, 256]
out_shape = [1, 256, 128, 1]
if perm == [0, 2, 3, 1]:
in_shape = [1, 256, 128, 1]
out_shape = [1, 128, 1, 256]
Transpose1_node = oh.make_node(
"Transpose", inputs=["in_transpose1"], outputs=["out_transpose1"], perm=perm
)
Transpose2_node = oh.make_node(
"Transpose", inputs=["in_transpose2"], outputs=["out_transpose2"], perm=perm
)
Join1_node = oh.make_node(
"Add", inputs=["out_transpose1", "out_transpose2"], outputs=["out_join1"]
)
in_transpose1 = oh.make_tensor_value_info(
"in_transpose1", TensorProto.FLOAT, in_shape
)
in_transpose2 = oh.make_tensor_value_info(
"in_transpose2", TensorProto.FLOAT, in_shape
)
out_transpose1 = oh.make_tensor_value_info(
"out_transpose1", TensorProto.FLOAT, out_shape
)
out_transpose2 = oh.make_tensor_value_info(
"out_transpose2", TensorProto.FLOAT, out_shape
)
out_join1 = oh.make_tensor_value_info("out_join1", TensorProto.FLOAT, out_shape)
graph = oh.make_graph(
nodes=[Transpose1_node, Transpose2_node, Join1_node],
name="test_graph",
inputs=[in_transpose1, in_transpose2],
outputs=[out_join1],
value_info=[
out_transpose1,
out_transpose2,
],
)
onnx_model = oh.make_model(graph, producer_name="test_model")
model = ModelWrapper(onnx_model)
return model
# Permutation of transpose node
@pytest.mark.parametrize("perm", [[0, 3, 1, 2], [0, 2, 3, 1]])
def test_move_identical_op_past_join_op(perm):
model = create_model(perm)
# Create input data
input0_tensor_name = model.graph.input[0].name
input1_tensor_name = model.graph.input[1].name
# Note: it is assumed that both tensors have the same shape and data type
input_shape = model.get_tensor_shape(input0_tensor_name)
input_dtype = model.get_tensor_datatype(input0_tensor_name)
input_val = gen_finn_dt_tensor(input_dtype, input_shape)
input_dict = {}
input_dict[input0_tensor_name] = input_val
input_dict[input1_tensor_name] = input_val
model_transformed = model.transform(MoveTransposePastJoinAdd())
assert oxe.compare_execution(model, model_transformed, input_dict)
# Check if order changed
node0_input0_model = model.find_consumers(model.graph.input[0].name)[0].op_type
node1_input1_model = model.find_consumers(model.graph.input[1].name)[0].op_type
node0_input0_model_transformed = model_transformed.find_consumers(
model_transformed.graph.input[0].name
)[0].op_type
node1_input1_model_transformed = model_transformed.find_consumers(
model_transformed.graph.input[1].name
)[0].op_type
assert node0_input0_model != node0_input0_model_transformed
assert node1_input1_model != node1_input1_model_transformed
|
9238
|
import unittest
from nanoservice import Responder
from nanoservice import Requester
class BaseTestCase(unittest.TestCase):
def setUp(self):
addr = 'inproc://test'
self.client = Requester(addr)
self.service = Responder(addr)
self.service.register('divide', lambda x, y: x / y)
self.service.register('echo', lambda x: x)
def tearDown(self):
self.client.socket.close()
self.service.socket.close()
class TestClient(BaseTestCase):
def test_build_payload(self):
payload = self.client.build_payload('echo', 'My Name')
method, args, ref = payload
self.assertTrue(method == 'echo')
self.assertTrue(len(payload) == 3)
def test_encoder(self):
data = {'name': '<NAME>'}
encoded = self.client.encode(data)
decoded = self.client.decode(encoded)
self.assertEqual(data, decoded)
def test_call_wo_receive(self):
# Requester side ops
method, args = 'echo', 'hello world'
payload = self.client.build_payload(method, args)
self.client.socket.send(self.client.encode(payload))
# Responder side ops
method, args, ref = self.service.receive()
self.assertEqual(method, 'echo')
self.assertEqual(args, 'hello world')
self.assertEqual(ref, payload[2])
def test_basic_socket_operation(self):
msg = 'abc'
self.client.socket.send(msg)
res = self.service.socket.recv().decode('utf-8')
self.assertEqual(msg, res)
def test_timeout(self):
c = Requester('inproc://timeout', timeouts=(1, 1))
c.socket.send('hello')
self.assertRaises(Exception, c.socket.recv)
if __name__ == '__main__':
unittest.main()
|
9241
|
import unittest
class LexerTestCase(unittest.TestCase):
def makeLexer(self, text):
from spi import Lexer
lexer = Lexer(text)
return lexer
def test_tokens(self):
from spi import TokenType
records = (
('234', TokenType.INTEGER_CONST, 234),
('3.14', TokenType.REAL_CONST, 3.14),
('*', TokenType.MUL, '*'),
('DIV', TokenType.INTEGER_DIV, 'DIV'),
('/', TokenType.FLOAT_DIV, '/'),
('+', TokenType.PLUS, '+'),
('-', TokenType.MINUS, '-'),
('(', TokenType.LPAREN, '('),
(')', TokenType.RPAREN, ')'),
(':=', TokenType.ASSIGN, ':='),
('.', TokenType.DOT, '.'),
('number', TokenType.ID, 'number'),
(';', TokenType.SEMI, ';'),
('BEGIN', TokenType.BEGIN, 'BEGIN'),
('END', TokenType.END, 'END'),
('PROCEDURE', TokenType.PROCEDURE, 'PROCEDURE'),
)
for text, tok_type, tok_val in records:
lexer = self.makeLexer(text)
token = lexer.get_next_token()
self.assertEqual(token.type, tok_type)
self.assertEqual(token.value, tok_val)
def test_lexer_exception(self):
from spi import LexerError
lexer = self.makeLexer('<')
with self.assertRaises(LexerError):
lexer.get_next_token()
class ParserTestCase(unittest.TestCase):
def makeParser(self, text):
from spi import Lexer, Parser
lexer = Lexer(text)
parser = Parser(lexer)
return parser
def test_expression_invalid_syntax_01(self):
from spi import ParserError, ErrorCode
parser = self.makeParser(
"""
PROGRAM Test;
VAR
a : INTEGER;
BEGIN
a := 10 * ; {Invalid syntax}
END.
"""
)
with self.assertRaises(ParserError) as cm:
parser.parse()
the_exception = cm.exception
self.assertEqual(the_exception.error_code, ErrorCode.UNEXPECTED_TOKEN)
self.assertEqual(the_exception.token.value, ';')
self.assertEqual(the_exception.token.lineno, 6)
def test_expression_invalid_syntax_02(self):
from spi import ParserError, ErrorCode
parser = self.makeParser(
"""
PROGRAM Test;
VAR
a : INTEGER;
BEGIN
a := 1 (1 + 2); {Invalid syntax}
END.
"""
)
with self.assertRaises(ParserError) as cm:
parser.parse()
the_exception = cm.exception
self.assertEqual(the_exception.error_code, ErrorCode.UNEXPECTED_TOKEN)
self.assertEqual(the_exception.token.value, '(')
self.assertEqual(the_exception.token.lineno, 6)
def test_maximum_one_VAR_block_is_allowed(self):
from spi import ParserError, ErrorCode
# zero VARs
parser = self.makeParser(
"""
PROGRAM Test;
BEGIN
END.
"""
)
parser.parse()
# one VAR
parser = self.makeParser(
"""
PROGRAM Test;
VAR
a : INTEGER;
BEGIN
END.
"""
)
parser.parse()
parser = self.makeParser(
"""
PROGRAM Test;
VAR
a : INTEGER;
VAR
b : INTEGER;
BEGIN
a := 5;
b := a + 10;
END.
"""
)
with self.assertRaises(ParserError) as cm:
parser.parse()
the_exception = cm.exception
self.assertEqual(the_exception.error_code, ErrorCode.UNEXPECTED_TOKEN)
self.assertEqual(the_exception.token.value, 'VAR')
self.assertEqual(the_exception.token.lineno, 5) # second VAR
class SemanticAnalyzerTestCase(unittest.TestCase):
def runSemanticAnalyzer(self, text):
from spi import Lexer, Parser, SemanticAnalyzer
lexer = Lexer(text)
parser = Parser(lexer)
tree = parser.parse()
semantic_analyzer = SemanticAnalyzer()
semantic_analyzer.visit(tree)
return semantic_analyzer
def test_semantic_duplicate_id_error(self):
from spi import SemanticError, ErrorCode
with self.assertRaises(SemanticError) as cm:
self.runSemanticAnalyzer(
"""
PROGRAM Test;
VAR
a : INTEGER;
a : REAL; {Duplicate identifier}
BEGIN
a := 5;
END.
"""
)
the_exception = cm.exception
self.assertEqual(the_exception.error_code, ErrorCode.DUPLICATE_ID)
self.assertEqual(the_exception.token.value, 'a')
self.assertEqual(the_exception.token.lineno, 5)
def test_semantic_id_not_found_error(self):
from spi import SemanticError, ErrorCode
with self.assertRaises(SemanticError) as cm:
self.runSemanticAnalyzer(
"""
PROGRAM Test;
VAR
a : INTEGER;
BEGIN
a := 5 + b;
END.
"""
)
the_exception = cm.exception
self.assertEqual(the_exception.error_code, ErrorCode.ID_NOT_FOUND)
self.assertEqual(the_exception.token.value, 'b')
class TestCallStack:
def __init__(self):
self._records = []
def push(self, ar):
self._records.append(ar)
def pop(self):
# do nothing
pass
def peek(self):
return self._records[-1]
class InterpreterTestCase(unittest.TestCase):
def makeInterpreter(self, text):
from spi import Lexer, Parser, SemanticAnalyzer, Interpreter
lexer = Lexer(text)
parser = Parser(lexer)
tree = parser.parse()
semantic_analyzer = SemanticAnalyzer()
semantic_analyzer.visit(tree)
interpreter = Interpreter(tree)
interpreter.call_stack = TestCallStack()
return interpreter
def test_integer_arithmetic_expressions(self):
for expr, result in (
('3', 3),
('2 + 7 * 4', 30),
('7 - 8 DIV 4', 5),
('14 + 2 * 3 - 6 DIV 2', 17),
('7 + 3 * (10 DIV (12 DIV (3 + 1) - 1))', 22),
('7 + 3 * (10 DIV (12 DIV (3 + 1) - 1)) DIV (2 + 3) - 5 - 3 + (8)', 10),
('7 + (((3 + 2)))', 12),
('- 3', -3),
('+ 3', 3),
('5 - - - + - 3', 8),
('5 - - - + - (3 + 4) - +2', 10),
):
interpreter = self.makeInterpreter(
"""PROGRAM Test;
VAR
a : INTEGER;
BEGIN
a := %s
END.
""" % expr
)
interpreter.interpret()
ar = interpreter.call_stack.peek()
self.assertEqual(ar['a'], result)
def test_float_arithmetic_expressions(self):
for expr, result in (
('3.14', 3.14),
('2.14 + 7 * 4', 30.14),
('7.14 - 8 / 4', 5.14),
):
interpreter = self.makeInterpreter(
"""PROGRAM Test;
VAR
a : REAL;
BEGIN
a := %s
END.
""" % expr
)
interpreter.interpret()
ar = interpreter.call_stack.peek()
self.assertEqual(ar['a'], result)
def test_procedure_call(self):
text = """\
program Main;
procedure Alpha(a : integer; b : integer);
var x : integer;
begin
x := (a + b ) * 2;
end;
begin { Main }
Alpha(3 + 5, 7);
end. { Main }
"""
interpreter = self.makeInterpreter(text)
interpreter.interpret()
ar = interpreter.call_stack.peek()
self.assertEqual(ar['a'], 8)
self.assertEqual(ar['b'], 7)
self.assertEqual(ar['x'], 30)
self.assertEqual(ar.nesting_level, 2)
def test_program(self):
text = """\
PROGRAM Part12;
VAR
number : INTEGER;
a, b : INTEGER;
y : REAL;
PROCEDURE P1;
VAR
a : REAL;
k : INTEGER;
PROCEDURE P2;
VAR
a, z : INTEGER;
BEGIN {P2}
z := 777;
END; {P2}
BEGIN {P1}
END; {P1}
BEGIN {Part12}
number := 2;
a := number ;
b := 10 * a + 10 * number DIV 4;
y := 20 / 7 + 3.14
END. {Part12}
"""
interpreter = self.makeInterpreter(text)
interpreter.interpret()
ar = interpreter.call_stack.peek()
self.assertEqual(len(ar.members.keys()), 4)
self.assertEqual(ar['number'], 2)
self.assertEqual(ar['a'], 2)
self.assertEqual(ar['b'], 25)
self.assertAlmostEqual(ar['y'], float(20) / 7 + 3.14) # 5.9971...
if __name__ == '__main__':
unittest.main()
|
9245
|
def contains_word(first_word, second_word, bibliographic_entry):
contains_first_word = first_word in bibliographic_entry
contains_second_word = second_word in bibliographic_entry
if contains_first_word and contains_second_word:
return 2
elif contains_first_word or contains_second_word:
return 1
else:
return 0
if __name__ == "__main__":
bibliographic_entry = "<NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., " \
"<NAME>. (2017). Research Articles in Simplified HTML: a Web-first format for " \
"HTML-based scholarly articles. PeerJ Computer Science 3: e132. e2513. " \
"DOI: https://doi.org/10.7717/peerj-cs.132"
print(contains_word("Peroni", "Osborne", bibliographic_entry))
print(contains_word("Peroni", "Asprino", bibliographic_entry))
print(contains_word("Reforgiato", "Osborne", bibliographic_entry))
print(contains_word("Reforgiato", "Asprino", bibliographic_entry))
|
9256
|
from YouTubeFacesDB import generate_ytf_database
###############################################################################
# Create the dataset
###############################################################################
generate_ytf_database(
directory= '../data',#'/scratch/vitay/Datasets/YouTubeFaces', # Location of the YTF dataset
filename='ytfdb.h5', # Name of the HDF5 file to write to
labels=10, # Number of labels to randomly select
max_number=-1, # Maximum number of images to use
size=(100, 100), # Size of the images
color=False, # Black and white
bw_first=True, # Final shape is (1, w, h)
cropped=True # The original images are cropped to the faces
)
|
9262
|
from rest_framework import serializers
from punkweb_boards.conf.settings import SHOUTBOX_DISABLED_TAGS
from punkweb_boards.models import (
BoardProfile,
Category,
Subcategory,
Thread,
Post,
Conversation,
Message,
Report,
Shout,
)
class BoardProfileSerializer(serializers.ModelSerializer):
post_count = serializers.ReadOnlyField()
can_shout = serializers.ReadOnlyField()
rendered_username = serializers.ReadOnlyField()
rendered_rank = serializers.ReadOnlyField()
class Meta:
model = BoardProfile
fields = "__all__"
class CategorySerializer(serializers.ModelSerializer):
class Meta:
model = Category
exclude = ("auth_req",)
class SubcategorySerializer(serializers.ModelSerializer):
last_thread = serializers.ReadOnlyField(source="last_thread.id")
last_thread_title = serializers.ReadOnlyField(source="last_thread.title")
last_thread_created = serializers.ReadOnlyField(
source="last_thread.created"
)
last_thread_user = serializers.ReadOnlyField(
source="last_thread.user.profile.rendered_username"
)
parent_name = serializers.ReadOnlyField(source="parent.name")
thread_count = serializers.ReadOnlyField()
post_count = serializers.ReadOnlyField()
can_post = serializers.SerializerMethodField()
def get_can_post(self, obj):
return obj.can_post(self.context.get("request").user)
class Meta:
model = Subcategory
exclude = ("auth_req",)
class ThreadSerializer(serializers.ModelSerializer):
last_post = serializers.ReadOnlyField(source="last_post.id")
last_post_created = serializers.ReadOnlyField(source="last_post.created")
last_post_username = serializers.ReadOnlyField(
source="last_post.user.username"
)
last_post_rendered_username = serializers.ReadOnlyField(
source="last_post.user.profile.rendered_username"
)
user_username = serializers.ReadOnlyField(source="user.username")
user_rendered_username = serializers.ReadOnlyField(
source="user.profile.rendered_username"
)
user_image = serializers.ReadOnlyField(source="user.profile.avatar")
user_post_count = serializers.ReadOnlyField(
source="user.profile.post_count"
)
user_join_date = serializers.ReadOnlyField(source="user.created")
flagged = serializers.ReadOnlyField(source="reported")
posts_count = serializers.ReadOnlyField()
can_edit = serializers.SerializerMethodField()
def get_can_edit(self, obj):
return obj.can_edit(self.context.get("request").user)
class Meta:
model = Thread
fields = "__all__"
read_only_fields = (
"pinned",
"closed",
"user",
"upvoted_by",
"downvoted_by",
)
class PostSerializer(serializers.ModelSerializer):
flagged = serializers.ReadOnlyField(source="reported")
can_edit = serializers.SerializerMethodField()
def get_can_edit(self, obj):
return obj.can_edit(self.context.get("request").user)
class Meta:
model = Post
fields = "__all__"
read_only_fields = ("user", "upvoted_by", "downvoted_by")
class ConversationSerializer(serializers.ModelSerializer):
last_message = serializers.ReadOnlyField(source="last_message.id")
last_message_title = serializers.ReadOnlyField(source="last_message.title")
last_message_created = serializers.ReadOnlyField(
source="last_message.created"
)
last_message_user = serializers.ReadOnlyField(
source="last_message.user.profile.rendered_username"
)
message_count = serializers.ReadOnlyField()
class Meta:
model = Conversation
fields = "__all__"
read_only_fields = ("unread_by",)
class MessageSerializer(serializers.ModelSerializer):
class Meta:
model = Message
fields = "__all__"
read_only_fields = ("user",)
class ShoutSerializer(serializers.ModelSerializer):
username = serializers.ReadOnlyField(source="user.username")
rendered_username = serializers.ReadOnlyField(
source="user.profile.rendered_username"
)
class Meta:
model = Shout
fields = (
"id",
"user",
"username",
"rendered_username",
"content",
"_content_rendered",
"created",
"modified",
)
read_only_fields = ("user",)
def create(self, validated_data):
for key in SHOUTBOX_DISABLED_TAGS:
key_tag = "[{}]".format(key).lower()
if (
key_tag[: len(key_tag) - 1]
in validated_data.get("content").lower()
):
raise serializers.ValidationError(
{
"notAllowed": "{} is not allowed in the shoutbox".format(
key_tag
)
}
)
return Shout.objects.create(**validated_data)
|
9278
|
try:  # Python 3.3+ moved the container ABCs into collections.abc
    from collections.abc import MutableMapping, Container
except ImportError:  # Python 2 fallback
    from collections import MutableMapping, Container
from datetime import datetime, timedelta
from pyvalid import accepts
class LimitedTimeTable(MutableMapping, Container):
def __init__(self, time_span):
self.__storage = dict()
self.__time_span = None
self.time_span = time_span
@property
def time_span(self):
return self.__time_span
@time_span.setter
@accepts(object, timedelta)
def time_span(self, value):
self.__time_span = value
@property
def oldest(self):
value = None
if self.__len__() > 0:
value = min(self.__storage.keys())
return value
@property
def newest(self):
value = None
if self.__len__() > 0:
value = max(self.__storage.keys())
return value
def oldest_keys(self, size):
for key in self.__get_slice(0, size):
yield key
def oldest_values(self, size):
for key in self.oldest_keys(size):
yield self.__storage.get(key)
def oldest_items(self, size):
for key in self.oldest_keys(size):
yield (key, self.__storage.get(key))
def newest_keys(self, size):
for key in self.__get_slice(-size, None):
yield key
def newest_values(self, size):
for key in self.newest_keys(size):
yield self.__storage.get(key)
def newest_items(self, size):
for key in self.newest_keys(size):
yield (key, self.__storage.get(key))
def __get_slice(self, start, end):
keys = sorted(self.keys())
return keys[start:end]
def __getitem__(self, item):
return self.__storage.__getitem__(item)
@accepts(object, datetime, object)
def __setitem__(self, key, value):
now = datetime.now()
if key > now:
raise ValueError('Can\'t set item from future!')
oldest = self.oldest
if (oldest is not None) and (oldest != key):
longest_time_span = now - oldest
# Item is too old for current timetable
if longest_time_span >= self.time_span:
self.__delitem__(oldest)
return self.__storage.__setitem__(key, value)
def __delitem__(self, key):
return self.__storage.__delitem__(key)
def __len__(self):
return self.__storage.__len__()
def __iter__(self):
return self.__storage.__iter__()
def __contains__(self, item):
return self.__storage.__contains__(item)
__all__ = ['LimitedTimeTable']
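# A minimal usage sketch (assumes the pyvalid package imported above is
# installed): entries older than the configured time span are evicted whenever
# a newer key is inserted.
if __name__ == '__main__':
    table = LimitedTimeTable(timedelta(minutes=10))
    moment = datetime.now()
    table[moment - timedelta(minutes=30)] = 'stale reading'
    table[moment] = 'fresh reading'  # inserting this drops the 30-minute-old entry
    print(len(table), list(table.values()))  # -> 1 ['fresh reading']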
|
9327
|
import numpy as np
import scipy.interpolate
import scipy.ndimage
from sklearn.feature_extraction.image import extract_patches_2d, reconstruct_from_patches_2d
def _calc_patch_grid_dims(shape, patch_size, patch_stride):
x_w, x_h, x_c = shape
num_rows = 1 + (x_h - patch_size) // patch_stride
num_cols = 1 + (x_w - patch_size) // patch_stride
return num_rows, num_cols
def make_patch_grid(x, patch_size, patch_stride=1):
'''x shape: (num_channels, rows, cols)'''
x = x.transpose(2, 1, 0)
patches = extract_patches_2d(x, (patch_size, patch_size))
x_w, x_h, x_c = x.shape
num_rows, num_cols = _calc_patch_grid_dims(x.shape, patch_size, patch_stride)
patches = patches.reshape((num_rows, num_cols, patch_size, patch_size, x_c))
patches = patches.transpose((0, 1, 4, 2, 3))
#patches = np.rollaxis(patches, -1, 2)
return patches
def combine_patches_grid(in_patches, out_shape):
'''Reconstruct an image from these `patches`
input shape: (rows, cols, channels, patch_row, patch_col)
'''
num_rows, num_cols = in_patches.shape[:2]
num_channels = in_patches.shape[-3]
patch_size = in_patches.shape[-1]
num_patches = num_rows * num_cols
in_patches = np.reshape(in_patches, (num_patches, num_channels, patch_size, patch_size)) # (patches, channels, pr, pc)
in_patches = np.transpose(in_patches, (0, 2, 3, 1)) # (patches, p, p, channels)
recon = reconstruct_from_patches_2d(in_patches, out_shape)
return recon.transpose(2, 1, 0).astype(np.float32)
class PatchMatcher(object):
'''A matcher of image patches inspired by the PatchMatch algorithm.
image shape: (width, height, channels)
'''
def __init__(self, input_shape, target_img, patch_size=1, patch_stride=1, jump_size=0.5,
num_propagation_steps=5, num_random_steps=5, random_max_radius=1.0, random_scale=0.5):
self.input_shape = input_shape
self.patch_size = patch_size
self.patch_stride = patch_stride
self.jump_size = jump_size
self.num_propagation_steps = num_propagation_steps
self.num_random_steps = num_random_steps
self.random_max_radius = random_max_radius
self.random_scale = random_scale
self.num_input_rows, self.num_input_cols = _calc_patch_grid_dims(input_shape, patch_size, patch_stride)
self.target_patches = make_patch_grid(target_img, patch_size)
self.target_patches_normed = self.normalize_patches(self.target_patches)
self.coords = np.random.uniform(0.0, 1.0, # TODO: switch to pixels
(2, self.num_input_rows, self.num_input_cols))# * [[[self.num_input_rows]],[[self.num_input_cols]]]
self.similarity = np.zeros(input_shape[:2:-1], dtype=np.float32)
self.min_propagration_row = 1.0 / self.num_input_rows
self.min_propagration_col = 1.0 / self.num_input_cols
self.delta_row = np.array([[[self.min_propagration_row]], [[0.0]]])
self.delta_col = np.array([[[0.0]], [[self.min_propagration_col]]])
def update(self, input_img, reverse_propagation=False):
input_patches = self.get_patches_for(input_img)
self.update_with_patches(self.normalize_patches(input_patches), reverse_propagation=reverse_propagation)
def update_with_patches(self, input_patches, reverse_propagation=False):
self._propagate(input_patches, reverse_propagation=reverse_propagation)
self._random_update(input_patches)
def get_patches_for(self, img):
        return make_patch_grid(img, self.patch_size)
def normalize_patches(self, patches):
norm = np.sqrt(np.sum(np.square(patches), axis=(2, 3, 4), keepdims=True))
return patches / norm
def _propagate(self, input_patches, reverse_propagation=False):
if reverse_propagation:
roll_direction = 1
else:
roll_direction = -1
sign = float(roll_direction)
for step_i in range(self.num_propagation_steps):
new_coords = self.clip_coords(np.roll(self.coords, roll_direction, 1) + self.delta_row * sign)
coords_row, similarity_row = self.eval_state(new_coords, input_patches)
new_coords = self.clip_coords(np.roll(self.coords, roll_direction, 2) + self.delta_col * sign)
coords_col, similarity_col = self.eval_state(new_coords, input_patches)
self.coords, self.similarity = self.take_best(coords_row, similarity_row, coords_col, similarity_col)
def _random_update(self, input_patches):
for alpha in range(1, self.num_random_steps + 1): # NOTE this should actually stop when the move is < 1
new_coords = self.clip_coords(self.coords + np.random.uniform(-self.random_max_radius, self.random_max_radius, self.coords.shape) * self.random_scale ** alpha)
self.coords, self.similarity = self.eval_state(new_coords, input_patches)
def eval_state(self, new_coords, input_patches):
new_similarity = self.patch_similarity(input_patches, new_coords)
delta_similarity = new_similarity - self.similarity
coords = np.where(delta_similarity > 0, new_coords, self.coords)
best_similarity = np.where(delta_similarity > 0, new_similarity, self.similarity)
return coords, best_similarity
def take_best(self, coords_a, similarity_a, coords_b, similarity_b):
delta_similarity = similarity_a - similarity_b
best_coords = np.where(delta_similarity > 0, coords_a, coords_b)
best_similarity = np.where(delta_similarity > 0, similarity_a, similarity_b)
return best_coords, best_similarity
def patch_similarity(self, source, coords):
'''Check the similarity of the patches specified in coords.'''
target_vals = self.lookup_coords(self.target_patches_normed, coords)
err = source * target_vals
return np.sum(err, axis=(2, 3, 4))
def clip_coords(self, coords):
# TODO: should this all be in pixel space?
coords = np.clip(coords, 0.0, 1.0)
return coords
def lookup_coords(self, x, coords):
x_shape = np.expand_dims(np.expand_dims(x.shape, -1), -1)
i_coords = np.round(coords * (x_shape[:2] - 1)).astype('int32')
return x[i_coords[0], i_coords[1]]
def get_reconstruction(self, patches=None, combined=None):
if combined is not None:
patches = make_patch_grid(combined, self.patch_size)
if patches is None:
patches = self.target_patches
patches = self.lookup_coords(patches, self.coords)
recon = combine_patches_grid(patches, self.input_shape)
return recon
def scale(self, new_shape, new_target_img):
'''Create a new matcher of the given shape and replace its
state with a scaled up version of the current matcher's state.
'''
new_matcher = PatchMatcher(new_shape, new_target_img, patch_size=self.patch_size,
patch_stride=self.patch_stride, jump_size=self.jump_size,
num_propagation_steps=self.num_propagation_steps,
num_random_steps=self.num_random_steps,
random_max_radius=self.random_max_radius,
random_scale=self.random_scale)
new_matcher.coords = congrid(self.coords, new_matcher.coords.shape, method='neighbour')
new_matcher.similarity = congrid(self.similarity, new_matcher.coords.shape, method='neighbour')
return new_matcher
def congrid(a, newdims, method='linear', centre=False, minusone=False):
'''Arbitrary resampling of source array to new dimension sizes.
Currently only supports maintaining the same number of dimensions.
To use 1-D arrays, first promote them to shape (x,1).
Uses the same parameters and creates the same co-ordinate lookup points
as IDL''s congrid routine, which apparently originally came from a VAX/VMS
routine of the same name.
method:
neighbour - closest value from original data
nearest and linear - uses n x 1-D interpolations using
scipy.interpolate.interp1d
(see Numerical Recipes for validity of use of n 1-D interpolations)
spline - uses ndimage.map_coordinates
centre:
True - interpolation points are at the centres of the bins
False - points are at the front edge of the bin
minusone:
For example- inarray.shape = (i,j) & new dimensions = (x,y)
False - inarray is resampled by factors of (i/x) * (j/y)
True - inarray is resampled by(i-1)/(x-1) * (j-1)/(y-1)
This prevents extrapolation one element beyond bounds of input array.
'''
    if a.dtype not in [np.float64, np.float32]:
        a = a.astype(float)
    m1 = int(minusone)
    ofs = int(centre) * 0.5
old = np.array( a.shape )
ndims = len( a.shape )
if len( newdims ) != ndims:
print("[congrid] dimensions error. " \
"This routine currently only support " \
"rebinning to the same number of dimensions.")
return None
newdims = np.asarray( newdims, dtype=float )
dimlist = []
if method == 'neighbour':
for i in range( ndims ):
            base = np.indices(newdims.astype(int))[i]
dimlist.append( (old[i] - m1) / (newdims[i] - m1) \
* (base + ofs) - ofs )
cd = np.array( dimlist ).round().astype(int)
        newa = a[tuple(cd)]
return newa
elif method in ['nearest','linear']:
# calculate new dims
for i in range( ndims ):
base = np.arange( newdims[i] )
dimlist.append( (old[i] - m1) / (newdims[i] - m1) \
* (base + ofs) - ofs )
# specify old dims
        olddims = [np.arange(i, dtype=float) for i in list(a.shape)]
# first interpolation - for ndims = any
mint = scipy.interpolate.interp1d( olddims[-1], a, kind=method )
newa = mint( dimlist[-1] )
        trorder = [ndims - 1] + list(range(ndims - 1))
for i in range( ndims - 2, -1, -1 ):
newa = newa.transpose( trorder )
mint = scipy.interpolate.interp1d( olddims[i], newa, kind=method )
newa = mint( dimlist[i] )
if ndims > 1:
# need one more transpose to return to original dimensions
newa = newa.transpose( trorder )
return newa
elif method in ['spline']:
oslices = [ slice(0,j) for j in old ]
oldcoords = np.ogrid[oslices]
nslices = [ slice(0,j) for j in list(newdims) ]
newcoords = np.mgrid[nslices]
        newcoords_dims = list(range(newcoords.ndim))
#make first index last
newcoords_dims.append(newcoords_dims.pop(0))
newcoords_tr = newcoords.transpose(newcoords_dims)
# makes a view that affects newcoords
newcoords_tr += ofs
deltas = (np.asarray(old) - m1) / (newdims - m1)
newcoords_tr *= deltas
newcoords_tr -= ofs
newa = scipy.ndimage.map_coordinates(a, newcoords)
return newa
else:
print("Congrid error: Unrecognized interpolation type.\n", \
"Currently only \'neighbour\', \'nearest\',\'linear\',", \
"and \'spline\' are supported.")
return None
if __name__ == '__main__':
import sys
import time
from scipy.misc import imsave
from image_analogy.img_utils import load_image, preprocess_image, deprocess_image
content_image_path, style_image_path, output_prefix = sys.argv[1:]
jump_size = 1.0
num_steps = 7
patch_size = 1
patch_stride = 1
feat_chans = 512
feat_style_shape = (feat_chans, 12, 18)
feat_style = np.random.uniform(0.0, 1.0, feat_style_shape)
feat_in_shape = (feat_chans, 17, 10)
feat_in = np.random.uniform(0.0, 1.0, feat_in_shape)
matcher = PatchMatcher(feat_in_shape[::-1], feat_style, patch_size=patch_size)
feat_in_normed = matcher.normalize_patches(matcher.get_patches_for(feat_in))
for i in range(num_steps):
matcher.update_with_patches(feat_in_normed)
r = matcher.get_reconstruction()
content_img_img = load_image(content_image_path)
content_n_channels, content_n_rows, content_n_cols = content_img_img.shape[::-1]
content_img = preprocess_image(content_img_img, content_n_cols, content_n_rows)[0]#.transpose((2,1,0))
style_img = load_image(style_image_path)
style_n_channels, style_n_rows, style_n_cols = content_img_img.shape[::-1]
style_img = preprocess_image(
load_image(style_image_path), style_n_cols, style_n_rows)[0]#.transpose((2,1,0))
pg = make_patch_grid(content_img, patch_size)
result = combine_patches_grid(pg, content_img.shape[::-1])
outimg = deprocess_image(result, contrast_percent=0)
imsave(output_prefix + '_bestre.png', outimg)
# # #
matcher = PatchMatcher((content_n_cols, content_n_rows, content_n_channels), style_img, patch_size=patch_size)
for i in range(num_steps):
start = time.time()
matcher.update(content_img, reverse_propagation=bool(i % 2))
print(matcher.similarity.min(), matcher.similarity.max(), matcher.similarity.mean())
end = time.time()
#print end-start
start = time.time()
result = matcher.get_reconstruction(patches=matcher.target_patches)
print(result.shape)
end = time.time()
print(end-start)
outimg = deprocess_image(result, contrast_percent=0)
# # imsave takes (rows, cols, channels)
imsave(output_prefix + '_best.png', outimg)
|
9339
|
from typing import List


class Solution:
    def maximalSquare(self, matrix: List[List[str]]) -> int:
        if not matrix: return 0
        m, n = len(matrix), len(matrix[0])
        dp = [[0]*n for _ in range(m)]
        res = 0
        # Seed the first column and first row; a lone '1' there is already a
        # 1x1 square, so the running maximum has to account for it too.
        for i in range(m):
            dp[i][0] = int(matrix[i][0])
            res = max(res, dp[i][0])
        for j in range(n):
            dp[0][j] = int(matrix[0][j])
            res = max(res, dp[0][j])
for i in range(1, m):
for j in range(1, n):
if matrix[i][j] == '1':
dp[i][j] = min(dp[i-1][j],dp[i-1][j-1],dp[i][j-1])+1
res = max(res, dp[i][j])
return res**2
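# A quick sanity check with a hypothetical input (not from the original file):
# the largest all-'1' square below is 2x2, so the expected area is 4.
if __name__ == '__main__':
    grid = [["1", "0", "1", "0", "0"],
            ["1", "0", "1", "1", "1"],
            ["1", "1", "1", "1", "1"],
            ["1", "0", "0", "1", "0"]]
    print(Solution().maximalSquare(grid))  # -> 4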
|
9375
|
import gym
from gym import spaces, error, utils
from gym.utils import seeding
import numpy as np
from scipy.spatial.distance import pdist, squareform
import configparser
from os import path
import matplotlib.pyplot as plt
from matplotlib.pyplot import gca
font = {'family' : 'sans-serif',
'weight' : 'bold',
'size' : 14}
class FlockingEnv(gym.Env):
def __init__(self):
config_file = path.join(path.dirname(__file__), "params_flock.cfg")
config = configparser.ConfigParser()
config.read(config_file)
config = config['flock']
self.fig = None
self.line1 = None
self.filter_len = int(config['filter_length'])
self.nx_system = 4
self.n_nodes = int(config['network_size'])
self.comm_radius = float(config['comm_radius'])
self.dt = float(config['system_dt'])
self.v_max = float(config['max_vel_init'])
self.v_bias = self.v_max # 0.5 * self.v_max
self.r_max = float(config['max_rad_init'])
self.std_dev = float(config['std_dev']) * self.dt
self.pooling = []
if config.getboolean('sum_pooling'):
self.pooling.append(np.nansum)
if config.getboolean('min_pooling'):
self.pooling.append(np.nanmin)
if config.getboolean('max_pooling'):
self.pooling.append(np.nanmax)
self.n_pools = len(self.pooling)
# number of features and outputs
self.n_features = int(config['N_features'])
self.nx = int(self.n_features / self.n_pools / self.filter_len)
self.nu = int(config['N_outputs']) # outputs
self.x_agg = np.zeros((self.n_nodes, self.nx * self.filter_len, self.n_pools))
self.x = np.zeros((self.n_nodes, self.nx_system))
self.u = np.zeros((self.n_nodes, self.nu))
self.mean_vel = np.zeros((self.n_nodes, self.nu))
# TODO
self.max_accel = 40
self.max_z = 200
# self.b = np.ones((self.n_nodes,1))
# self.action_space = spaces.Box(low=-self.max_accel, high=self.max_accel, shape=(self.n_nodes, 2), dtype=np.float32 )
# self.observation_space = spaces.Box(low=-self.max_z, high=self.max_z, shape=(
# self.n_nodes, self.nx * self.filter_len * self.n_pools) , dtype=np.float32)
self.action_space = spaces.Box(low=-self.max_accel, high=self.max_accel, shape=(2,) , dtype=np.float32 )
self.observation_space = spaces.Box(low=-self.max_z, high=self.max_z, shape=(self.n_features, ), dtype=np.float32)
self.seed()
def render(self, mode='human'):
if self.fig is None:
plt.ion()
fig = plt.figure()
ax = fig.add_subplot(111)
line1, = ax.plot(self.x[:, 0], self.x[:, 1], 'bo') # Returns a tuple of line objects, thus the comma
ax.plot([0], [0], 'kx')
plt.ylim(-1.0 * self.r_max, 1.0 * self.r_max)
plt.xlim(-1.0 * self.r_max, 1.0 * self.r_max)
a = gca()
a.set_xticklabels(a.get_xticks(), font)
a.set_yticklabels(a.get_yticks(), font)
plt.title('GNN Controller')
self.fig = fig
self.line1 = line1
self.line1.set_xdata(self.x[:, 0])
self.line1.set_ydata(self.x[:, 1])
self.fig.canvas.draw()
self.fig.canvas.flush_events()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, u):
x = self.x
x_ = np.zeros((self.n_nodes, self.nx_system))
#u = np.vstack((np.zeros((self.n_leaders, 2)), u))
# x position
x_[:, 0] = x[:, 0] + x[:, 2] * self.dt
# y position
x_[:, 1] = x[:, 1] + x[:, 3] * self.dt
# x velocity
x_[:, 2] = x[:, 2] + 0.1 * u[:, 0] * self.dt + np.random.normal(0, self.std_dev,(self.n_nodes,))
# y velocity
x_[:, 3] = x[:, 3] + 0.1 * u[:, 1] * self.dt + np.random.normal(0, self.std_dev,(self.n_nodes,))
# TODO - check the 0.1
self.x = x_
self.x_agg = self.aggregate(self.x, self.x_agg)
self.u = u
return self._get_obs(), -self.instant_cost(), False, {}
def instant_cost(self): # sum of differences in velocities
return np.sum(np.var(self.x[:, 2:4], axis=0)) #+ np.sum(np.square(self.u)) * 0.00001
#return np.sum(np.square(self.x[:,2:4] - self.mean_vel))
def _get_obs(self):
reshaped = self.x_agg.reshape((self.n_nodes, self.n_features))
clipped = np.clip(reshaped, a_min=-self.max_z, a_max=self.max_z)
return clipped #[self.n_leaders:, :]
def reset(self):
x = np.zeros((self.n_nodes, self.nx_system))
degree = 0
min_dist = 0
while degree < 2 or min_dist < 0.1: # < 0.25: # 0.25: #0.5: #min_dist < 0.25:
# randomly initialize the state of all agents
length = np.sqrt(np.random.uniform(0, self.r_max, size=(self.n_nodes,)))
angle = np.pi * np.random.uniform(0, 2, size=(self.n_nodes,))
x[:, 0] = length * np.cos(angle)
x[:, 1] = length * np.sin(angle)
bias = np.random.uniform(low=-self.v_bias, high=self.v_bias, size=(2,))
x[:, 2] = np.random.uniform(low=-self.v_max, high=self.v_max, size=(self.n_nodes,)) + bias[0]
x[:, 3] = np.random.uniform(low=-self.v_max, high=self.v_max, size=(self.n_nodes,)) + bias[1]
# compute distances between agents
x_t_loc = x[:, 0:2] # x,y location determines connectivity
a_net = squareform(pdist(x_t_loc.reshape((self.n_nodes, 2)), 'euclidean'))
# no self loops
a_net = a_net + 2 * self.comm_radius * np.eye(self.n_nodes)
# compute minimum distance between agents and degree of network
min_dist = np.min(np.min(a_net))
a_net = a_net < self.comm_radius
degree = np.min(np.sum(a_net.astype(int), axis=1))
self.mean_vel = np.mean(x[:,2:4],axis=0)
self.x = x
self.x_agg = np.zeros((self.n_nodes, self.nx * self.filter_len, self.n_pools))
self.x_agg = self.aggregate(self.x, self.x_agg)
return self._get_obs()
# def render(self, mode='human'):
# pass
def close(self):
pass
def aggregate(self, xt, x_agg):
"""
        Perform the aggregation operation for all possible pooling operations using the helper functions get_pool and get_comms.
Args:
x_agg (): Last time step's aggregated info
xt (): Current state of all agents
Returns:
Aggregated state values
"""
x_features = self.get_x_features(xt)
a_net = self.get_connectivity(xt)
for k in range(0, self.n_pools):
comm_data = self.get_comms(np.dstack((x_features, self.get_features(x_agg[:, :, k]))), a_net)
x_agg[:, :, k] = self.get_pool(comm_data, self.pooling[k])
return x_agg
def get_connectivity(self, x):
"""
Get the adjacency matrix of the network based on agent locations by computing pairwise distances using pdist
Args:
x (): current states of all agents
Returns: adjacency matrix of network
"""
x_t_loc = x[:, 0:2] # x,y location determines connectivity
a_net = squareform(pdist(x_t_loc.reshape((self.n_nodes, 2)), 'euclidean'))
a_net = (a_net < self.comm_radius).astype(float)
np.fill_diagonal(a_net, 0)
return a_net
def get_x_features(self, xt): # TODO
"""
Compute the non-linear features necessary for implementing Turner 2003
Args:
xt (): current state of all agents
Returns: matrix of features for each agent
"""
diff = xt.reshape((self.n_nodes, 1, self.nx_system)) - xt.reshape((1, self.n_nodes, self.nx_system))
r2 = np.multiply(diff[:, :, 0], diff[:, :, 0]) + np.multiply(diff[:, :, 1], diff[:, :, 1]) + np.eye(
self.n_nodes)
return np.dstack((diff[:, :, 2], np.divide(diff[:, :, 0], np.multiply(r2, r2)), np.divide(diff[:, :, 0], r2),
diff[:, :, 3], np.divide(diff[:, :, 1], np.multiply(r2, r2)), np.divide(diff[:, :, 1], r2)))
def get_features(self, agg):
"""
        Tile the previous time step's aggregated features so they can be combined with the current features.
Args:
agg (): the aggregated matrix from the last time step
Returns: matrix of aggregated features from all nodes at current time
"""
return np.tile(agg[:, :-self.nx].reshape((self.n_nodes, 1, -1)), (1, self.n_nodes, 1)) # TODO check indexing
def get_comms(self, mat, a_net):
"""
Enforces that agents who are not connected in the network cannot observe each others' states
Args:
mat (): matrix of state information for the whole graph
a_net (): adjacency matrix for flock network (weighted networks unsupported for now)
Returns:
mat (): sparse matrix with NaN values where agents can't communicate
"""
a_net[a_net == 0] = np.nan
return mat * a_net.reshape(self.n_nodes, self.n_nodes, 1)
def get_pool(self, mat, func):
"""
Perform pooling operations on the matrix of state information. The replacement of values with NaNs for agents who
can't communicate must already be enforced.
Args:
mat (): matrix of state information
func (): pooling function (np.nansum(), np.nanmin() or np.nanmax()). Must ignore NaNs.
Returns:
information pooled from neighbors for each agent
"""
return func(mat, axis=1).reshape((self.n_nodes, self.n_features)) # TODO check this axis = 1
def controller(self):
"""
The controller for flocking from Turner 2003.
Args:
x (): the current state
Returns: the optimal action
"""
x = self.x
s_diff = x.reshape((self.n_nodes, 1, self.nx_system)) - x.reshape((1, self.n_nodes, self.nx_system))
r2 = np.multiply(s_diff[:, :, 0], s_diff[:, :, 0]) + np.multiply(s_diff[:, :, 1], s_diff[:, :, 1]) + np.eye(
self.n_nodes)
p = np.dstack((s_diff, self.potential_grad(s_diff[:, :, 0], r2), self.potential_grad(s_diff[:, :, 1], r2)))
p_sum = np.nansum(p, axis=1).reshape((self.n_nodes, self.nx_system + 2))
return np.hstack(((- p_sum[:, 4] - p_sum[:, 2]).reshape((-1, 1)), (- p_sum[:, 3] - p_sum[:, 5]).reshape(-1, 1)))
def potential_grad(self, pos_diff, r2):
"""
Computes the gradient of the potential function for flocking proposed in Turner 2003.
Args:
pos_diff (): difference in a component of position among all agents
r2 (): distance squared between agents
Returns: corresponding component of the gradient of the potential
"""
grad = -2.0 * np.divide(pos_diff, np.multiply(r2, r2)) + 2 * np.divide(pos_diff, r2)
grad[r2 > self.comm_radius] = 0
return grad
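# Hypothetical usage sketch (not part of the original module): roll the environment out for a
# few steps with its built-in flocking controller. This assumes params_flock.cfg sits next to
# this file and that gym/matplotlib are installed.
if __name__ == '__main__':
    env = FlockingEnv()
    obs = env.reset()
    for _ in range(10):
        action = env.controller()                 # (n_nodes, 2) accelerations from the Turner 2003 potential
        obs, reward, done, info = env.step(action)
        print('reward:', reward)
    env.close()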
|
9395
|
from datetime import datetime
# ensure an rpc peer is added
def addpeer(p, rpcpeer):
pid = rpcpeer['id']
if pid not in p.persist['peerstate']:
p.persist['peerstate'][pid] = {
'connected': rpcpeer['connected'],
'last_seen': datetime.now() if rpcpeer['connected'] else None,
'avail': 1.0 if rpcpeer['connected'] else 0.0
}
# exponentially smooth online/offline states of peers
def trace_availability(p, rpcpeers):
p.persist['availcount'] += 1
leadwin = max(min(p.avail_window, p.persist['availcount'] * p.avail_interval), p.avail_interval)
samples = leadwin / p.avail_interval
alpha = 1.0 / samples
beta = 1.0 - alpha
for rpcpeer in rpcpeers['peers']:
pid = rpcpeer['id']
addpeer(p, rpcpeer)
if rpcpeer['connected']:
p.persist['peerstate'][pid]['last_seen'] = datetime.now()
p.persist['peerstate'][pid]['connected'] = True
p.persist['peerstate'][pid]['avail'] = 1.0 * alpha + p.persist['peerstate'][pid]['avail'] * beta
else:
p.persist['peerstate'][pid]['connected'] = False
p.persist['peerstate'][pid]['avail'] = 0.0 * alpha + p.persist['peerstate'][pid]['avail'] * beta
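# Worked example of the smoothing above (illustrative, not part of the original code):
# with avail_window=300 and avail_interval=60, once availcount*avail_interval reaches the
# window we get samples=5, so alpha=0.2 and beta=0.8. A peer at avail=1.0 that goes offline
# is updated to 0.0*0.2 + 1.0*0.8 = 0.8 after one poll, then 0.64, and so on toward 0.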
|
9402
|
import paddle.fluid as fluid
from paddle.fluid.initializer import MSRA
from paddle.fluid.param_attr import ParamAttr
class MobileNetV2SSD:
def __init__(self, img, num_classes, img_shape):
self.img = img
self.num_classes = num_classes
self.img_shape = img_shape
def ssd_net(self, scale=1.0):
# 300x300
bottleneck_params_list = [(1, 16, 1, 1),
(6, 24, 2, 2),
(6, 32, 3, 2),
(6, 64, 4, 2),
(6, 96, 3, 1)]
# conv1
input = self.conv_bn_layer(input=self.img,
num_filters=int(32 * scale),
filter_size=3,
stride=2,
padding=1,
if_act=True)
# bottleneck sequences
in_c = int(32 * scale)
for layer_setting in bottleneck_params_list:
t, c, n, s = layer_setting
input = self.invresi_blocks(input=input, in_c=in_c, t=t, c=int(c * scale), n=n, s=s)
in_c = int(c * scale)
# 19x19
module11 = input
tmp = self.invresi_blocks(input=input, in_c=in_c, t=6, c=int(160 * scale), n=3, s=2)
# 10x10
module13 = self.invresi_blocks(input=tmp, in_c=int(160 * scale), t=6, c=int(320 * scale), n=1, s=1)
module14 = self.extra_block(module13, 256, 512, 1)
# 5x5
module15 = self.extra_block(module14, 128, 256, 1)
# 3x3
module16 = self.extra_block(module15, 128, 256, 1)
# 2x2
module17 = self.extra_block(module16, 64, 128, 1)
mbox_locs, mbox_confs, box, box_var = fluid.layers.multi_box_head(
inputs=[module11, module13, module14, module15, module16, module17],
image=self.img,
num_classes=self.num_classes,
min_ratio=20,
max_ratio=90,
min_sizes=[60.0, 105.0, 150.0, 195.0, 240.0, 285.0],
max_sizes=[[], 150.0, 195.0, 240.0, 285.0, 300.0],
aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2., 3.], [2., 3.]],
base_size=self.img_shape[2],
offset=0.5,
flip=True)
return mbox_locs, mbox_confs, box, box_var
def conv_bn_layer(self, input, filter_size, num_filters, stride, padding, num_groups=1, if_act=True,
use_cudnn=True):
parameter_attr = ParamAttr(learning_rate=0.1, initializer=MSRA())
conv = fluid.layers.conv2d(input=input,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=padding,
groups=num_groups,
use_cudnn=use_cudnn,
param_attr=parameter_attr,
bias_attr=False)
bn = fluid.layers.batch_norm(input=conv)
if if_act:
return fluid.layers.relu6(bn)
else:
return bn
def shortcut(self, input, data_residual):
return fluid.layers.elementwise_add(input, data_residual)
def inverted_residual_unit(self,
input,
num_in_filter,
num_filters,
ifshortcut,
stride,
filter_size,
padding,
expansion_factor):
num_expfilter = int(round(num_in_filter * expansion_factor))
channel_expand = self.conv_bn_layer(input=input,
num_filters=num_expfilter,
filter_size=1,
stride=1,
padding=0,
num_groups=1,
if_act=True)
bottleneck_conv = self.conv_bn_layer(input=channel_expand,
num_filters=num_expfilter,
filter_size=filter_size,
stride=stride,
padding=padding,
num_groups=num_expfilter,
if_act=True,
use_cudnn=False)
linear_out = self.conv_bn_layer(input=bottleneck_conv,
num_filters=num_filters,
filter_size=1,
stride=1,
padding=0,
num_groups=1,
if_act=False)
if ifshortcut:
out = self.shortcut(input=input, data_residual=linear_out)
return out
else:
return linear_out
def invresi_blocks(self, input, in_c, t, c, n, s):
first_block = self.inverted_residual_unit(input=input,
num_in_filter=in_c,
num_filters=c,
ifshortcut=False,
stride=s,
filter_size=3,
padding=1,
expansion_factor=t)
last_residual_block = first_block
last_c = c
for i in range(1, n):
last_residual_block = self.inverted_residual_unit(input=last_residual_block,
num_in_filter=last_c,
num_filters=c,
ifshortcut=True,
stride=1,
filter_size=3,
padding=1,
expansion_factor=t)
return last_residual_block
def conv_bn(self, input, filter_size, num_filters, stride, padding, num_groups=1, act='relu', use_cudnn=True):
parameter_attr = ParamAttr(learning_rate=0.1, initializer=MSRA())
conv = fluid.layers.conv2d(input=input,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=padding,
groups=num_groups,
use_cudnn=use_cudnn,
param_attr=parameter_attr,
bias_attr=False)
return fluid.layers.batch_norm(input=conv, act=act)
def extra_block(self, input, num_filters1, num_filters2, num_groups):
# 1x1 conv
pointwise_conv = self.conv_bn(input=input,
filter_size=1,
num_filters=int(num_filters1),
stride=1,
num_groups=int(num_groups),
padding=0)
# 3x3 conv
normal_conv = self.conv_bn(input=pointwise_conv,
filter_size=3,
num_filters=int(num_filters2),
stride=2,
num_groups=int(num_groups),
padding=1)
return normal_conv
def build_ssd(img, num_classes, img_shape):
ssd_model = MobileNetV2SSD(img, num_classes, img_shape)
return ssd_model.ssd_net()
if __name__ == '__main__':
data = fluid.data(name='data', shape=[None, 3, 300, 300])
build_ssd(data, 21, img_shape=[3, 300, 300])
|
9513
|
import sys
from os.path import dirname, abspath, join
cur_folder = dirname(abspath(__file__))
sys.path.insert(0, join(dirname(cur_folder), 'src'))
sys.path.insert(0, dirname(cur_folder))
print(cur_folder)
|
9523
|
import os
import logging
import dateutil
import pickle
from six.moves.urllib.parse import urlparse
from libtaxii import get_message_from_http_response, VID_TAXII_XML_11
from libtaxii.messages_11 import PollRequest, PollFulfillmentRequest
from libtaxii.messages_11 import PollResponse, generate_message_id
from libtaxii.clients import HttpClient
from certau import version_string
class SimpleTaxiiClient(HttpClient):
"""A simple interface to libtaxii for sending TAXII client messages.
Args:
username: a username for HTTP basic authentication
password: a password for HTTP basic authentication
key_file: a file containing a private key
(for SSL certificate-based authentication)
cert_file: a file containing a certificate
(for SSL certificate-based authentication)
ca_file: a file containing the CA's certificate
(for verifying the server's certificate)
"""
    def __init__(self, username=None, password=None,
key_file=None, cert_file=None, ca_file=None):
super(SimpleTaxiiClient, self).__init__()
self._logger = logging.getLogger()
self.username = username
self.password = password
self.key_file = key_file
self.cert_file = cert_file
self.ca_file = ca_file
def setup_authentication(self, use_ssl):
"""Setup the appropriate credentials and authentication type.
Initialises the authentication settings for the connection.
Args:
use_ssl: should this connection use SSL
"""
self.set_use_https(use_ssl)
credentials = dict()
if self.username and self.password:
credentials['username'] = self.username
credentials['password'] = self.password
if use_ssl and self.key_file and self.cert_file:
credentials['key_file'] = self.key_file
credentials['cert_file'] = self.cert_file
if credentials:
self.set_auth_credentials(credentials)
if self.username and self.password:
if use_ssl and self.key_file and self.cert_file:
self.set_auth_type(HttpClient.AUTH_CERT_BASIC)
self._logger.debug("TAXII authentication using private key "
"(%s), certificate (%s), and credentials "
"for user '%s'", self.key_file,
self.cert_file, self.username)
else:
self.set_auth_type(HttpClient.AUTH_BASIC)
self._logger.debug("TAXII authentication using credentials "
"for user '%s'", self.username)
elif use_ssl and self.key_file and self.cert_file:
self.set_auth_type(HttpClient.AUTH_CERT)
self._logger.debug("TAXII authentication using private key (%s) "
"and certificate (%s) only", self.key_file,
self.cert_file)
else:
self.set_auth_type(HttpClient.AUTH_NONE)
self._logger.debug("no TAXII authentication")
# CA certificate verification
if use_ssl and self.ca_file:
self.set_verify_server(verify_server=True, ca_file=self.ca_file)
self._logger.debug("SSL - verification using CA file (%s)",
self.ca_file)
@staticmethod
def create_poll_request(collection, subscription_id=None,
begin_timestamp=None, end_timestamp=None):
"""Create a poll request message using supplied parameters."""
request_kwargs = dict(
message_id=generate_message_id(),
collection_name=collection,
exclusive_begin_timestamp_label=begin_timestamp,
inclusive_end_timestamp_label=end_timestamp,
)
if subscription_id:
request_kwargs['subscription_id'] = subscription_id
else:
request_kwargs['poll_parameters'] = PollRequest.PollParameters()
return PollRequest(**request_kwargs)
@staticmethod
def create_fulfillment_request(collection, result_id, part_number):
return PollFulfillmentRequest(
message_id=generate_message_id(),
collection_name=collection,
result_id=result_id,
result_part_number=part_number,
)
def send_taxii_message(self, request, host, path, port):
# Send the request message and return the response
http_response = self.call_taxii_service2(
host=host,
path=path,
message_binding=VID_TAXII_XML_11,
post_data=request.to_xml(),
port=port,
user_agent='{} (libtaxii)'.format(version_string)
)
response = get_message_from_http_response(
http_response=http_response,
in_response_to=request.message_id,
)
return response
@staticmethod
def get_poll_time(filename, poll_url, collection):
if os.path.isfile(filename):
with open(filename, 'rb') as state_file:
poll_state = pickle.load(state_file)
if isinstance(poll_state, dict) and poll_url in poll_state:
if collection in poll_state[poll_url]:
time_string = poll_state[poll_url][collection]
return dateutil.parser.parse(time_string)
return None
@staticmethod
def save_poll_time(filename, poll_url, collection, timestamp):
if timestamp is not None:
poll_state = dict()
if os.path.isfile(filename):
with open(filename, 'rb') as state_file:
poll_state = pickle.load(state_file)
if not isinstance(poll_state, dict):
raise Exception('unexpected content encountered when '
'reading TAXII poll state file')
if poll_url not in poll_state:
poll_state[poll_url] = dict()
poll_state[poll_url][collection] = str(timestamp)
with open(filename, 'wb') as state_file:
pickle.dump(poll_state, state_file, protocol=2)
def poll(self, poll_url, collection, subscription_id=None,
begin_timestamp=None, end_timestamp=None, state_file=None):
"""Send the TAXII poll request to the server using the given URL."""
# Parse the poll_url to get the parts required by libtaxii
url_parts = urlparse(poll_url)
# Allow credentials to be provided in poll_url
if url_parts.username and url_parts.password:
self.username = url_parts.username
self.password = url_parts.password
self._logger.debug('updating username and password from poll_url')
if url_parts.scheme not in ['http', 'https']:
            raise Exception('invalid scheme in poll_url (%s); expected '
                            '"http" or "https"' % poll_url)
use_ssl = True if url_parts.scheme == 'https' else False
# Initialise the authentication settings
self.setup_authentication(use_ssl)
if state_file and not begin_timestamp:
begin_timestamp = self.get_poll_time(
filename=state_file,
poll_url=poll_url,
collection=collection,
)
request = self.create_poll_request(
collection=collection,
subscription_id=subscription_id,
begin_timestamp=begin_timestamp,
end_timestamp=end_timestamp,
)
self._logger.debug('sending poll request (url=%s, collection=%s)',
poll_url, collection)
response = self.send_taxii_message(
request=request,
host=url_parts.hostname,
path=url_parts.path,
port=url_parts.port,
)
first = True
poll_end_time = None
while True:
if not isinstance(response, PollResponse):
raise Exception('didn\'t get a poll response')
self._logger.debug('received poll response '
'(content_blocks=%d, result_id=%s, more=%s)',
len(response.content_blocks),
response.result_id,
'True' if response.more else 'False')
# Save end timestamp from first PollResponse
if first:
poll_end_time = response.inclusive_end_timestamp_label
if len(response.content_blocks) == 0:
if first:
self._logger.info('poll response contained '
'no content blocks')
break
for content_block in response.content_blocks:
yield content_block
if not response.more:
break
# Send a fulfilment request
if first:
# Initialise fulfilment request values
part_number = response.result_part_number
result_id = response.result_id
first = False
part_number += 1
request = self.create_fulfillment_request(
collection=collection,
result_id=result_id,
part_number=part_number,
)
self._logger.debug('sending fulfilment request '
'(result_id=%s, part_number=%d)',
result_id, part_number)
response = self.send_taxii_message(
request=request,
host=url_parts.hostname,
path=url_parts.path,
port=url_parts.port,
)
# Update the timestamp for the latest poll
if state_file and poll_end_time:
self.save_poll_time(
filename=state_file,
poll_url=poll_url,
collection=collection,
timestamp=poll_end_time,
)
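# Hypothetical usage sketch (not part of the original module): poll a collection and print each
# content block. The URL, credentials and collection name below are placeholders only.
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    client = SimpleTaxiiClient(username='user', password='secret')
    blocks = client.poll(
        poll_url='https://taxii.example.com/taxii-poll/',
        collection='example-collection',
        state_file='/tmp/taxii-poll-state.pickle',
    )
    for block in blocks:
        print(block.content)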
|
9548
|
description = 'Mezei spin flipper using TTI power supply'
group = 'optional'
tango_base = 'tango://miractrl.mira.frm2:10000/mira/'
devices = dict(
dct1 = device('nicos.devices.entangle.PowerSupply',
description = 'current in first channel of supply (flipper current)',
tangodevice = tango_base + 'tti1/out1',
timeout = 1,
precision = 0.01,
),
dct2 = device('nicos.devices.entangle.PowerSupply',
description = 'current in second channel of supply (compensation current)',
tangodevice = tango_base + 'tti1/out2',
timeout = 1,
precision = 0.01,
),
flip = device('nicos.devices.polarized.MezeiFlipper',
description = 'Mezei flipper before sample (in shielding table)',
flip = 'dct1',
corr = 'dct2',
),
)
|
9563
|
import abc
import logging
from typing import Any, Dict, List, Optional
from homeassistant.components.water_heater import WaterHeaterEntity
from homeassistant.const import (
TEMP_FAHRENHEIT,
TEMP_CELSIUS
)
from gehomesdk import ErdCode, ErdMeasurementUnits
from ...const import DOMAIN
from .ge_erd_entity import GeEntity
_LOGGER = logging.getLogger(__name__)
class GeWaterHeater(GeEntity, WaterHeaterEntity, metaclass=abc.ABCMeta):
"""Mock temperature/operation mode supporting device as a water heater"""
@property
def heater_type(self) -> str:
raise NotImplementedError
@property
def operation_list(self) -> List[str]:
raise NotImplementedError
@property
def unique_id(self) -> str:
return f"{DOMAIN}_{self.serial_or_mac}_{self.heater_type}"
@property
def name(self) -> Optional[str]:
return f"{self.serial_or_mac} {self.heater_type.title()}"
@property
def temperature_unit(self):
measurement_system = self.appliance.get_erd_value(ErdCode.TEMPERATURE_UNIT)
if measurement_system == ErdMeasurementUnits.METRIC:
return TEMP_CELSIUS
return TEMP_FAHRENHEIT
@property
def supported_features(self):
raise NotImplementedError
|
9577
|
import sys
import socket
conn = socket.create_connection(('0.0.0.0', 8080))
msgs = [
# 0 Keep-Alive, Transfer-Encoding chunked
'GET / HTTP/1.1\r\nConnection: Keep-Alive\r\n\r\n',
# 1,2,3 Close, EOF "encoding"
'GET / HTTP/1.1\r\n\r\n',
'GET / HTTP/1.1\r\nConnection: close\r\n\r\n',
'GET / HTTP/1.0\r\nConnection: Keep-Alive\r\n\r\n',
# 4 Bad Request
'GET /%20%20% HTTP/1.1\r\n\r\n',
# 5 Bug #14
'GET /%20abc HTTP/1.0\r\n\r\n',
# 6 Content-{Length, Type}
'GET / HTTP/1.0\r\nContent-Length: 11\r\n'
'Content-Type: text/blah\r\nContent-Fype: bla\r\n'
'Content-Tength: bla\r\n\r\nhello world',
# 7 POST memory leak
'POST / HTTP/1.0\r\nContent-Length: 1000\r\n\r\n%s' % ('a'*1000),
# 8,9 CVE-2015-0219
'GET / HTTP/1.1\r\nFoo_Bar: bad\r\n\r\n',
'GET / HTTP/1.1\r\nFoo-Bar: good\r\nFoo_Bar: bad\r\n\r\n'
]
conn.send(msgs[int(sys.argv[1])].encode())
while 1:
data = conn.recv(100)
if not data: break
print(repr(data))
if data.endswith(b'0\r\n\r\n'):
        if input('new request? Y/n') == 'n':  # input() for Python 3 (raw_input is Python 2 only)
exit()
conn.send(b'GET / HTTP/1.1\r\nConnection: Keep-Alive\r\n\r\n')
|
9586
|
BIG_CONSTANT = "YES"
def group_by(xs, grouper):
groups = {}
for x in xs:
group = grouper(x)
if group not in groups:
groups[group] = []
groups[group].append(x)
return groups
print(group_by([1, 2, 3, 4, 5, 6], lambda x: "even" if x % 2 == 0 else "odd"))
|
9591
|
from typing import Optional
import pandas as pd
from ruptures import Binseg
from ruptures.base import BaseCost
from sklearn.linear_model import LinearRegression
from etna.transforms.base import PerSegmentWrapper
from etna.transforms.decomposition.change_points_trend import BaseEstimator
from etna.transforms.decomposition.change_points_trend import TDetrendModel
from etna.transforms.decomposition.change_points_trend import _OneSegmentChangePointsTrendTransform
class _OneSegmentTrendTransform(_OneSegmentChangePointsTrendTransform):
"""_OneSegmentTrendTransform adds trend as a feature."""
def __init__(
self,
in_column: str,
out_column: str,
change_point_model: BaseEstimator,
detrend_model: TDetrendModel,
**change_point_model_predict_params,
):
"""Init _OneSegmentTrendTransform.
Parameters
----------
in_column:
name of column to apply transform to
out_column:
name of added column
change_point_model:
model to get trend change points
detrend_model:
model to get trend from data
change_point_model_predict_params:
params for change_point_model predict method
"""
self.out_column = out_column
super().__init__(
in_column=in_column,
change_point_model=change_point_model,
detrend_model=detrend_model,
**change_point_model_predict_params,
)
def transform(self, df: pd.DataFrame) -> pd.DataFrame:
"""Add column with trend, got from the detrend_model.
Parameters
----------
df:
data to get trend from
Returns
-------
pd.DataFrame:
df with trend column
"""
df._is_copy = False
series = df[self.in_column]
trend_series = self._predict_per_interval_model(series=series)
df[self.out_column] = trend_series
return df
def inverse_transform(self, df: pd.DataFrame) -> pd.DataFrame:
"""Inverse transform dataframe.
Parameters
----------
df:
one segment dataframe
Returns
-------
pd.DataFrame:
given dataframe
"""
return df
class _TrendTransform(PerSegmentWrapper):
"""_TrendTransform adds trend as a feature. Creates column 'regressor_<in_column>_trend'."""
def __init__(
self,
in_column: str,
out_column: str,
change_point_model: BaseEstimator,
detrend_model: TDetrendModel,
**change_point_model_predict_params,
):
"""Init _TrendTransform.
Parameters
----------
in_column:
name of column to apply transform to
out_column:
name of added column
change_point_model:
model to get trend change points
detrend_model:
model to get trend in data
change_point_model_predict_params:
params for change_point_model predict method
"""
super().__init__(
transform=_OneSegmentTrendTransform(
in_column=in_column,
out_column=out_column,
change_point_model=change_point_model,
detrend_model=detrend_model,
**change_point_model_predict_params,
)
)
class TrendTransform(_TrendTransform):
"""TrendTransform adds trend as a feature.
TrendTransform uses Binseg model as a change point detection model in _TrendTransform.
"""
def __init__(
self,
in_column: str,
out_column: Optional[str] = None,
detrend_model: TDetrendModel = LinearRegression(),
model: str = "ar",
custom_cost: Optional[BaseCost] = None,
min_size: int = 2,
jump: int = 1,
n_bkps: int = 5,
pen: Optional[float] = None,
epsilon: Optional[float] = None,
):
"""Init TrendTransform.
Parameters
----------
in_column:
name of column to apply transform to
out_column:
name of added column. Don't forget to add regressor prefix if necessary.
If not given, use 'regressor_{self.__repr__()}'
detrend_model:
model to get trend in data
model:
binseg segment model, ["l1", "l2", "rbf",...]. Not used if 'custom_cost' is not None.
custom_cost:
binseg custom cost function
min_size:
minimum segment length necessary to decide it is a stable trend segment
jump:
jump value can speed up computations: if jump==k, the algo will use every k-th value for change points search.
n_bkps:
number of change points to find
pen:
penalty value (>0)
epsilon:
reconstruction budget (>0)
"""
self.in_column = in_column
self.out_column = out_column
self.detrend_model = detrend_model
self.model = model
self.custom_cost = custom_cost
self.min_size = min_size
self.jump = jump
self.n_bkps = n_bkps
self.pen = pen
self.epsilon = epsilon
super().__init__(
in_column=self.in_column,
out_column=self.out_column if self.out_column is not None else f"regressor_{self.__repr__()}",
change_point_model=Binseg(
model=self.model, custom_cost=self.custom_cost, min_size=self.min_size, jump=self.jump
),
detrend_model=self.detrend_model,
n_bkps=self.n_bkps,
pen=self.pen,
epsilon=self.epsilon,
)
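# Hypothetical usage sketch (not part of the original module): construct the transform for a
# "target" column with a ruptures Binseg change point model and a linear detrend model.
# Fitting it requires a dataframe in ETNA's wide (per-segment) format, which is out of scope here.
if __name__ == "__main__":
    trend_transform = TrendTransform(
        in_column="target",
        out_column="regressor_target_trend",
        detrend_model=LinearRegression(),
        model="rbf",
        n_bkps=5,
    )
    print(trend_transform)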
|
9616
|
from __future__ import division
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import sys
import os
import time
#
# TORCH INSTALLATION: refer to https://pytorch.org/get-started/locally/
#
def update_progress(job_title, progress):
length = 20 # modify this to change the length
block = int(round(length*progress))
msg = "\r{0}: [{1}] {2}%".format(job_title, "#"*block + "-"*(length-block), round(progress*100, 2))
if progress >= 1: msg += " DONE\r\n"
sys.stdout.write(msg)
sys.stdout.flush()
def cls():
os.system('cls' if os.name=='nt' else 'clear')
cls()
################################################################################################################
# Initialize torch tensor for coordinates
coords_data = [[ 0.0 , 0.0 , 0.0 ],
[ 1.0/(2.0**0.5), 0.0 , 1.0/(2.0**0.5)],
[ 1.0/(2.0**0.5), 0.0 ,-1.0/(2.0**0.5)],
[ 2.0**0.5 , 0.0 , 0.0 ],
[ 0.0 , 1.0 , 0.0 ],
[ 1.0/(2.0**0.5), 1.0 , 1.0/(2.0**0.5)],
[ 1.0/(2.0**0.5), 1.0 ,-1.0/(2.0**0.5)],
[ 2.0**0.5 , 1.0 , 0.0 ],
]
coords = torch.tensor(coords_data,requires_grad=True,dtype=torch.float64)
nnodes_r = coords.size(0)
nnodes_ie = 8
nnodes_if = 4
nterms_s = 8
ndirs = 3
coord_sys = 'CARTESIAN'
# Define matrix of polynomial basis terms at support nodes
val_r_data = [[ 1.0,-1.0,-1.0,-1.0, 1.0, 1.0, 1.0,-1.0],
[ 1.0,-1.0,-1.0, 1.0,-1.0,-1.0, 1.0, 1.0],
[ 1.0, 1.0,-1.0,-1.0,-1.0, 1.0,-1.0, 1.0],
[ 1.0, 1.0,-1.0, 1.0, 1.0,-1.0,-1.0,-1.0],
[ 1.0,-1.0, 1.0,-1.0, 1.0,-1.0,-1.0, 1.0],
[ 1.0,-1.0, 1.0, 1.0,-1.0, 1.0,-1.0,-1.0],
[ 1.0, 1.0, 1.0,-1.0,-1.0,-1.0, 1.0,-1.0],
[ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
]
val_r = torch.tensor(val_r_data,requires_grad=False,dtype=torch.float64)
# Define matrices at interpolation nodes (quadrature, level = 1)
val_i_data = [[ 1.0,-np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0), 1.0/3.0, 1.0/3.0, 1.0/3.0,-1.0/3.0*np.sqrt(1.0/3.0)],
[ 1.0,-np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0), np.sqrt(1.0/3.0),-1.0/3.0,-1.0/3.0, 1.0/3.0, 1.0/3.0*np.sqrt(1.0/3.0)],
[ 1.0, np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0),-1.0/3.0, 1.0/3.0,-1.0/3.0, 1.0/3.0*np.sqrt(1.0/3.0)],
[ 1.0, np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0), np.sqrt(1.0/3.0), 1.0/3.0,-1.0/3.0,-1.0/3.0,-1.0/3.0*np.sqrt(1.0/3.0)],
[ 1.0,-np.sqrt(1.0/3.0), np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0), 1.0/3.0,-1.0/3.0,-1.0/3.0, 1.0/3.0*np.sqrt(1.0/3.0)],
[ 1.0,-np.sqrt(1.0/3.0), np.sqrt(1.0/3.0), np.sqrt(1.0/3.0),-1.0/3.0, 1.0/3.0,-1.0/3.0,-1.0/3.0*np.sqrt(1.0/3.0)],
[ 1.0, np.sqrt(1.0/3.0), np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0),-1.0/3.0,-1.0/3.0, 1.0/3.0,-1.0/3.0*np.sqrt(1.0/3.0)],
[ 1.0, np.sqrt(1.0/3.0), np.sqrt(1.0/3.0), np.sqrt(1.0/3.0), 1.0/3.0, 1.0/3.0, 1.0/3.0, 1.0/3.0*np.sqrt(1.0/3.0)],
]
val_i = torch.tensor(val_i_data,requires_grad=False,dtype=torch.float64)
ddxi_i_data = [[ 0.0,0.0,0.0,1.0,-np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0),0.0, 1.0/3.0],
[ 0.0,0.0,0.0,1.0,-np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0),0.0, 1.0/3.0],
[ 0.0,0.0,0.0,1.0, np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0),0.0,-1.0/3.0],
[ 0.0,0.0,0.0,1.0, np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0),0.0,-1.0/3.0],
[ 0.0,0.0,0.0,1.0,-np.sqrt(1.0/3.0), np.sqrt(1.0/3.0),0.0,-1.0/3.0],
[ 0.0,0.0,0.0,1.0,-np.sqrt(1.0/3.0), np.sqrt(1.0/3.0),0.0,-1.0/3.0],
[ 0.0,0.0,0.0,1.0, np.sqrt(1.0/3.0), np.sqrt(1.0/3.0),0.0, 1.0/3.0],
[ 0.0,0.0,0.0,1.0, np.sqrt(1.0/3.0), np.sqrt(1.0/3.0),0.0, 1.0/3.0],
]
ddxi_i = torch.tensor(ddxi_i_data,requires_grad=False,dtype=torch.float64)
ddeta_i_data = [[ 0.0,1.0,0.0,0.0,-np.sqrt(1.0/3.0),0.0,-np.sqrt(1.0/3.0), 1.0/3.0],
[ 0.0,1.0,0.0,0.0, np.sqrt(1.0/3.0),0.0,-np.sqrt(1.0/3.0),-1.0/3.0],
[ 0.0,1.0,0.0,0.0,-np.sqrt(1.0/3.0),0.0,-np.sqrt(1.0/3.0), 1.0/3.0],
[ 0.0,1.0,0.0,0.0, np.sqrt(1.0/3.0),0.0,-np.sqrt(1.0/3.0),-1.0/3.0],
[ 0.0,1.0,0.0,0.0,-np.sqrt(1.0/3.0),0.0, np.sqrt(1.0/3.0),-1.0/3.0],
[ 0.0,1.0,0.0,0.0, np.sqrt(1.0/3.0),0.0, np.sqrt(1.0/3.0), 1.0/3.0],
[ 0.0,1.0,0.0,0.0,-np.sqrt(1.0/3.0),0.0, np.sqrt(1.0/3.0),-1.0/3.0],
[ 0.0,1.0,0.0,0.0, np.sqrt(1.0/3.0),0.0, np.sqrt(1.0/3.0), 1.0/3.0],
]
ddeta_i = torch.tensor(ddeta_i_data,requires_grad=False,dtype=torch.float64)
ddzeta_i_data= [[ 0.0,0.0,1.0,0.0,0.0,-np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0), 1.0/3.0],
[ 0.0,0.0,1.0,0.0,0.0, np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0),-1.0/3.0],
[ 0.0,0.0,1.0,0.0,0.0,-np.sqrt(1.0/3.0), np.sqrt(1.0/3.0),-1.0/3.0],
[ 0.0,0.0,1.0,0.0,0.0, np.sqrt(1.0/3.0), np.sqrt(1.0/3.0), 1.0/3.0],
[ 0.0,0.0,1.0,0.0,0.0,-np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0), 1.0/3.0],
[ 0.0,0.0,1.0,0.0,0.0, np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0),-1.0/3.0],
[ 0.0,0.0,1.0,0.0,0.0,-np.sqrt(1.0/3.0), np.sqrt(1.0/3.0),-1.0/3.0],
[ 0.0,0.0,1.0,0.0,0.0, np.sqrt(1.0/3.0), np.sqrt(1.0/3.0), 1.0/3.0],
]
ddzeta_i = torch.tensor(ddzeta_i_data,requires_grad=False,dtype=torch.float64)
# Define element interpolation node weights for linear element
weights_e_data = [1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0]
weights_e = torch.tensor(weights_e_data,requires_grad=False,dtype=torch.float64)
# Define val_f for each face
# Face 1, XI_MIN
val_1_data = [[ 1.0,-np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0),-1.0, np.sqrt(1.0/3.0), np.sqrt(1.0/3.0), 1.0/3.0,-1.0/3.0],
[ 1.0, np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0),-1.0,-np.sqrt(1.0/3.0), np.sqrt(1.0/3.0),-1.0/3.0, 1.0/3.0],
[ 1.0,-np.sqrt(1.0/3.0), np.sqrt(1.0/3.0),-1.0, np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0),-1.0/3.0, 1.0/3.0],
[ 1.0, np.sqrt(1.0/3.0), np.sqrt(1.0/3.0),-1.0,-np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0), 1.0/3.0,-1.0/3.0],
]
val_1 = torch.tensor(val_1_data,requires_grad=False,dtype=torch.float64)
# Face 2, XI_MAX
val_2_data = [[ 1.0,-np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0),1.0,-np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0), 1.0/3.0, 1.0/3.0],
[ 1.0, np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0),1.0, np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0),-1.0/3.0,-1.0/3.0],
[ 1.0,-np.sqrt(1.0/3.0), np.sqrt(1.0/3.0),1.0,-np.sqrt(1.0/3.0), np.sqrt(1.0/3.0),-1.0/3.0,-1.0/3.0],
[ 1.0, np.sqrt(1.0/3.0), np.sqrt(1.0/3.0),1.0, np.sqrt(1.0/3.0), np.sqrt(1.0/3.0), 1.0/3.0, 1.0/3.0],
]
val_2 = torch.tensor(val_2_data,requires_grad=False,dtype=torch.float64)
# Face 3, ETA_MIN
val_3_data = [[ 1.0,-1.0,-np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0), np.sqrt(1.0/3.0), 1.0/3.0, np.sqrt(1.0/3.0),-1.0/3.0],
[ 1.0,-1.0,-np.sqrt(1.0/3.0), np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0),-1.0/3.0, np.sqrt(1.0/3.0), 1.0/3.0],
[ 1.0,-1.0, np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0), np.sqrt(1.0/3.0),-1.0/3.0,-np.sqrt(1.0/3.0), 1.0/3.0],
[ 1.0,-1.0, np.sqrt(1.0/3.0), np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0), 1.0/3.0,-np.sqrt(1.0/3.0),-1.0/3.0],
]
val_3 = torch.tensor(val_3_data,requires_grad=False,dtype=torch.float64)
# Face 4, ETA_MAX
val_4_data = [[ 1.0,1.0,-np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0), 1.0/3.0,-np.sqrt(1.0/3.0), 1.0/3.0],
[ 1.0,1.0,-np.sqrt(1.0/3.0), np.sqrt(1.0/3.0), np.sqrt(1.0/3.0),-1.0/3.0,-np.sqrt(1.0/3.0),-1.0/3.0],
[ 1.0,1.0, np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0),-1.0/3.0, np.sqrt(1.0/3.0),-1.0/3.0],
[ 1.0,1.0, np.sqrt(1.0/3.0), np.sqrt(1.0/3.0), np.sqrt(1.0/3.0), 1.0/3.0, np.sqrt(1.0/3.0), 1.0/3.0],
]
val_4 = torch.tensor(val_4_data,requires_grad=False,dtype=torch.float64)
# Face 5, ZETA_MIN
val_5_data = [[ 1.0,-np.sqrt(1.0/3.0),-1.0,-np.sqrt(1.0/3.0), 1.0/3.0, np.sqrt(1.0/3.0), np.sqrt(1.0/3.0),-1.0/3.0],
[ 1.0,-np.sqrt(1.0/3.0),-1.0, np.sqrt(1.0/3.0),-1.0/3.0,-np.sqrt(1.0/3.0), np.sqrt(1.0/3.0), 1.0/3.0],
[ 1.0, np.sqrt(1.0/3.0),-1.0,-np.sqrt(1.0/3.0),-1.0/3.0, np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0), 1.0/3.0],
[ 1.0, np.sqrt(1.0/3.0),-1.0, np.sqrt(1.0/3.0), 1.0/3.0,-np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0),-1.0/3.0],
]
val_5 = torch.tensor(val_5_data,requires_grad=False,dtype=torch.float64)
# Face 6, ZETA_MAX
val_6_data = [[ 1.0,-np.sqrt(1.0/3.0),1.0,-np.sqrt(1.0/3.0), 1.0/3.0,-np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0), 1.0/3.0],
[ 1.0,-np.sqrt(1.0/3.0),1.0, np.sqrt(1.0/3.0),-1.0/3.0, np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0),-1.0/3.0],
[ 1.0, np.sqrt(1.0/3.0),1.0,-np.sqrt(1.0/3.0),-1.0/3.0,-np.sqrt(1.0/3.0), np.sqrt(1.0/3.0),-1.0/3.0],
[ 1.0, np.sqrt(1.0/3.0),1.0, np.sqrt(1.0/3.0), 1.0/3.0, np.sqrt(1.0/3.0), np.sqrt(1.0/3.0), 1.0/3.0],
]
val_6 = torch.tensor(val_6_data,requires_grad=False,dtype=torch.float64)
#--------------------------------------------------------------------
# Matrix nodes_to_modes (inverse of the modal-to-nodal basis matrix val_r)
val_r_inv = torch.inverse(val_r)
# Compute coordinate modes
coords_modes = torch.mm(val_r_inv,coords)
# Interpolate coordinates at the interpolation nodes
interp_coords = torch.mm(val_i,coords_modes)
# Initialize jacobian
jacobian = torch.empty(3,3,nnodes_ie, dtype=torch.float64)
for inode in range(0,nnodes_ie):
jacobian[0,0,inode] = torch.dot(ddxi_i[inode,:] , coords_modes[:,0])
jacobian[0,1,inode] = torch.dot(ddeta_i[inode,:] , coords_modes[:,0])
jacobian[0,2,inode] = torch.dot(ddzeta_i[inode,:] , coords_modes[:,0])
jacobian[1,0,inode] = torch.dot(ddxi_i[inode,:] , coords_modes[:,1])
jacobian[1,1,inode] = torch.dot(ddeta_i[inode,:] , coords_modes[:,1])
jacobian[1,2,inode] = torch.dot(ddzeta_i[inode,:] , coords_modes[:,1])
jacobian[2,0,inode] = torch.dot(ddxi_i[inode,:] , coords_modes[:,2])
jacobian[2,1,inode] = torch.dot(ddeta_i[inode,:] , coords_modes[:,2])
jacobian[2,2,inode] = torch.dot(ddzeta_i[inode,:] , coords_modes[:,2])
update_progress("Computing Jacobian ", inode/(nnodes_ie-1))
if coord_sys == 'CYLINDRICAL':
scaling_factor = torch.mm(val_i,coords_modes[:,0])
for inode in range(0,nnodes_ie):
jacobian[1,0,inode] = jacobian[1,0,inode] * scaling_factor[inode]
jacobian[1,1,inode] = jacobian[1,1,inode] * scaling_factor[inode]
jacobian[1,2,inode] = jacobian[1,2,inode] * scaling_factor[inode]
# Metrics and determinant
metrics = torch.empty(3,3,nnodes_ie, dtype=torch.float64)
jinv = torch.empty(nnodes_ie, dtype=torch.float64)
for inode in range(0,nnodes_ie):
ijacobian = torch.empty(3,3, dtype=torch.float64)
imetric = torch.empty(3,3, dtype=torch.float64)
for irow in range(0,3):
for icol in range(0,3):
ijacobian[irow,icol] = jacobian[irow,icol,inode]
    # Compute the determinant and metric (inverse jacobian) for the ith node
update_progress("Computing Jinv and Metric ", inode/(nnodes_ie-1))
jinv[inode] = torch.det(ijacobian)
imetric = torch.inverse(ijacobian)
for irow in range(0,3):
for icol in range(0,3):
metrics[irow,icol,inode] = imetric[irow,icol]
# Compute inverse Mass matrix
invmass = torch.empty(nterms_s,nterms_s,nnodes_ie, dtype=torch.float64)
mass = torch.empty(nterms_s,nterms_s,nnodes_ie, dtype=torch.float64)
val_tmp = torch.empty(nnodes_ie,nterms_s, dtype=torch.float64)  # indexed as [inode,iterm] below, so allocate (nnodes_ie, nterms_s)
i = 1
for iterm in range(0,nterms_s):
for inode in range(0,nnodes_ie):
val_tmp[inode,iterm] = val_i[inode,iterm] * weights_e[inode] * jinv[inode]
update_progress("Computing invmass ", i/(nterms_s*nnodes_ie))
i += 1
mass = torch.mm(torch.t(val_tmp),val_i)
invmass = torch.inverse(mass)
# Compute BR2_VOL for each face
br2_vol_face1 = torch.mm(val_i,torch.mm(invmass,torch.t(val_1)))
br2_vol_face2 = torch.mm(val_i,torch.mm(invmass,torch.t(val_2)))
br2_vol_face3 = torch.mm(val_i,torch.mm(invmass,torch.t(val_3)))
br2_vol_face4 = torch.mm(val_i,torch.mm(invmass,torch.t(val_4)))
br2_vol_face5 = torch.mm(val_i,torch.mm(invmass,torch.t(val_5)))
br2_vol_face6 = torch.mm(val_i,torch.mm(invmass,torch.t(val_6)))
update_progress("Computing br2_vol ", 1)
# Compute BR2_FACE for each face
br2_face_face1 = torch.mm(val_1,torch.mm(invmass,torch.t(val_1)))
br2_face_face2 = torch.mm(val_2,torch.mm(invmass,torch.t(val_2)))
br2_face_face3 = torch.mm(val_3,torch.mm(invmass,torch.t(val_3)))
br2_face_face4 = torch.mm(val_4,torch.mm(invmass,torch.t(val_4)))
br2_face_face5 = torch.mm(val_5,torch.mm(invmass,torch.t(val_5)))
br2_face_face6 = torch.mm(val_6,torch.mm(invmass,torch.t(val_6)))
update_progress("Computing br2_face ", 1)
# Grad1, Grad2, and Grad3
grad1 = torch.empty(nnodes_ie,nterms_s, dtype=torch.float64)
grad2 = torch.empty(nnodes_ie,nterms_s, dtype=torch.float64)
grad3 = torch.empty(nnodes_ie,nterms_s, dtype=torch.float64)
i = 1
for iterm in range(0,nterms_s):
for inode in range(0,nnodes_ie):
grad1[inode,iterm] = metrics[0,0,inode] * ddxi_i[inode,iterm] + metrics[1,0,inode] * ddeta_i[inode,iterm] + metrics[2,0,inode] * ddzeta_i[inode,iterm]
grad2[inode,iterm] = metrics[0,1,inode] * ddxi_i[inode,iterm] + metrics[1,1,inode] * ddeta_i[inode,iterm] + metrics[2,1,inode] * ddzeta_i[inode,iterm]
grad3[inode,iterm] = metrics[0,2,inode] * ddxi_i[inode,iterm] + metrics[1,2,inode] * ddeta_i[inode,iterm] + metrics[2,2,inode] * ddzeta_i[inode,iterm]
update_progress("Computing grad1, grad2, grad3 ", i/(nnodes_ie*nterms_s))
i += 1
#WRITE_____________________
#
# Metrics
#
f = open("metrics.txt","w")
i = 1
for inode in range (0,nnodes_ie):
f.write("Metric interpolation node %d \n" % (inode+1))
array = np.zeros([3, 3])
for irow in range(0,3):
for icol in range(0,3):
array[irow,icol] = metrics[irow,icol,inode].item()
update_progress("Writing metrics to file ", i/(nnodes_ie*9))
i += 1
np.savetxt(f,array)
f.close()
#
# jinv
#
f = open("jinv.txt","w")
array = np.zeros([1])
i = 1
for inode in range (0,nnodes_ie):
f.write("Jinv interpolation node %d \n" % (inode+1))
array[0] = jinv[inode].item()
np.savetxt(f,array)
update_progress("Writing jinv to file ", i/(nnodes_ie))
i += 1
f.close()
#
# Grad1
#
f = open("grad1.txt","w")
f.write("Grad1 \n")
array = np.zeros([nnodes_ie,nterms_s])
i = 1
for inode in range (0,nnodes_ie):
for iterm in range(0,nterms_s):
array[inode,iterm] = grad1[inode,iterm].item()
update_progress("Writing grad1 to file ", i/(nnodes_ie*nterms_s))
i += 1
np.savetxt(f,array)
f.close()
#
# Grad2
#
f = open("grad2.txt","w")
f.write("Grad2 \n")
array = np.zeros([nnodes_ie,nterms_s])
i = 1
for inode in range (0,nnodes_ie):
for iterm in range(0,nterms_s):
array[inode,iterm] = grad2[inode,iterm].item()
update_progress("Writing grad2 to file ", i/(nnodes_ie*nterms_s))
i += 1
np.savetxt(f,array)
f.close()
#
# Grad3
#
f = open("grad3.txt","w")
f.write("Grad3 \n")
array = np.zeros([nnodes_ie,nterms_s])
i = 1
for inode in range (0,nnodes_ie):
for iterm in range(0,nterms_s):
array[inode,iterm] = grad3[inode,iterm].item()
update_progress("Writing grad3 to file ", i/(nnodes_ie*nterms_s))
i += 1
np.savetxt(f,array)
f.close()
#
# dmetric_dx
#
f = open("dmetric_dx.txt","w")
i = 1
for inode in range (0,nnodes_ie):
for inode_diff in range(0,nnodes_r):
for idir in range(0,ndirs):
array = np.zeros([3,3])
f.write("dmetric_dx interpolation node %s, diff_node %s, diff_dir %s \n" % (inode+1,inode_diff+1,idir+1))
for irow in range(0,3):
for icol in range(0,3):
data = metrics[irow,icol,inode]
data.backward(retain_graph=True)
ddata = coords.grad
ddata_np = ddata.numpy()
array[irow,icol] = ddata_np[inode_diff,idir]
update_progress("Writing dmetric_dx to file ", i/(nnodes_ie*nnodes_r*ndirs*3*3))
                    # This avoids accumulating derivatives across backward() calls
dummy = coords.grad.data.zero_()
i += 1
np.savetxt(f,array)
f.close()
#
# interp_coords_dx
#
f = open("dinterp_xcoords_dx.txt","w")
i = 1
f.write("xcoord interpolation, coord 1, row=node, col=nnodes_r*dir \n")
array = np.zeros([nnodes_ie,nnodes_r*ndirs])
for inode in range (0,nnodes_ie):
data = interp_coords[inode,0]
data.backward(retain_graph=True)
ddata = coords.grad
ddata_np = ddata.numpy()
for inode_diff in range(0,nnodes_r):
for idir in range(0,ndirs):
if idir == 0:
index = inode_diff
elif idir == 1:
index = nnodes_r + inode_diff
elif idir == 2:
index = 2*nnodes_r + inode_diff
array[inode,index] = ddata_np[inode_diff,idir]
update_progress("Writing interp_xcoords_dx to file ", i/(nnodes_ie*nnodes_r*3))
i += 1
    # This avoids accumulating derivatives across backward() calls
dummy = coords.grad.data.zero_()
np.savetxt(f,array)
f.close()
f = open("dinterp_ycoords_dx.txt","w")
i = 1
f.write("ycoord interpolation, coord 2, row=node, col=nnodes_r*dir \n")
array = np.zeros([nnodes_ie,nnodes_r*ndirs])
for inode in range (0,nnodes_ie):
data = interp_coords[inode,1]
data.backward(retain_graph=True)
ddata = coords.grad
ddata_np = ddata.numpy()
for inode_diff in range(0,nnodes_r):
for idir in range(0,ndirs):
if idir == 0:
index = inode_diff
elif idir == 1:
index = nnodes_r + inode_diff
elif idir == 2:
index = 2*nnodes_r + inode_diff
array[inode,index] = ddata_np[inode_diff,idir]
update_progress("Writing interp_ycoords_dx to file ", i/(nnodes_ie*nnodes_r*3))
i += 1
    # This avoids accumulating derivatives across backward() calls
dummy = coords.grad.data.zero_()
np.savetxt(f,array)
f.close()
f = open("dinterp_zcoords_dx.txt","w")
i = 1
f.write("zcoord interpolation, coord 3, row=node, col=nnodes_r*dir \n")
array = np.zeros([nnodes_ie,nnodes_r*ndirs])
for inode in range (0,nnodes_ie):
data = interp_coords[inode,2]
data.backward(retain_graph=True)
ddata = coords.grad
ddata_np = ddata.numpy()
for inode_diff in range(0,nnodes_r):
for idir in range(0,ndirs):
if idir == 0:
index = inode_diff
elif idir == 1:
index = nnodes_r + inode_diff
elif idir == 2:
index = 2*nnodes_r + inode_diff
array[inode,index] = ddata_np[inode_diff,idir]
update_progress("Writing interp_zcoords_dx to file ", i/(nnodes_ie*nnodes_r*3))
i += 1
    # This avoids accumulating derivatives across backward() calls
dummy = coords.grad.data.zero_()
np.savetxt(f,array)
f.close()
#
# djinv_dx
#
f = open("djinv_dx.txt","w")
i = 1
for inode in range (0,nnodes_ie):
array = np.zeros([nnodes_r,ndirs])
f.write("djinv_dx interpolation node %s, row=inode_diff, col=dir \n" % (inode+1))
for inode_diff in range(0,nnodes_r):
for idir in range(0,ndirs):
data = jinv[inode]
data.backward(retain_graph=True)
ddata = coords.grad
ddata_np = ddata.numpy()
array[inode_diff,idir] = ddata_np[inode_diff,idir]
update_progress("Writing djinv_dx to file ", i/(nnodes_ie*nnodes_r*ndirs))
dummy = coords.grad.data.zero_()
i += 1
np.savetxt(f,array)
f.close()
#
# dmass_dx
#
f = open("dmass_dx.txt","w")
i = 1
for inode_diff in range(0,nnodes_r):
for idir in range(0,ndirs):
f.write("dmass_dx => diff_node %s, diff_dir %s \n" % (inode_diff+1,idir+1))
array = np.zeros([nterms_s,nterms_s])
for irow in range(0,nterms_s):
for icol in range(0,nterms_s):
data = mass[irow,icol]
data.backward(retain_graph=True)
ddata = coords.grad
ddata_np = ddata.numpy()
array[irow,icol] = ddata_np[inode_diff,idir]
update_progress("Writing dmass_dx to file ", i/(nterms_s*nnodes_r*ndirs*nterms_s))
dummy = coords.grad.data.zero_()
i += 1
np.savetxt(f,array)
f.close()
#
# dinvmass_dx
#
f = open("dinvmass_dx.txt","w")
i = 1
for inode_diff in range(0,nnodes_r):
for idir in range(0,ndirs):
f.write("dinvmass_dx => diff_node %s, diff_dir %s \n" % (inode_diff+1,idir+1))
array = np.zeros([nterms_s,nterms_s])
for irow in range(0,nterms_s):
for icol in range(0,nterms_s):
data = invmass[irow,icol]
data.backward(retain_graph=True)
ddata = coords.grad
ddata_np = ddata.numpy()
array[irow,icol] = ddata_np[inode_diff,idir]
update_progress("Writing dinvmass_dx to file ", i/(nterms_s*nnodes_r*ndirs*nterms_s))
dummy = coords.grad.data.zero_()
i += 1
np.savetxt(f,array)
f.close()
#
# dbr2_vol_dx
#
#
f = open("dbr2_vol_face1_dx.txt","w")
i = 1
for inode_diff in range(0,nnodes_r):
for idir in range(0,ndirs):
f.write("dbr2_vol_face1_dx => diff_node %s, diff_dir %s \n" % (inode_diff+1,idir+1))
array = np.zeros([nnodes_ie,nnodes_if])
for irow in range(0,nnodes_ie):
for icol in range(0,nnodes_if):
data = br2_vol_face1[irow,icol]
data.backward(retain_graph=True)
ddata = coords.grad
ddata_np = ddata.numpy()
array[irow,icol] = ddata_np[inode_diff,idir]
update_progress("Writing dbr2_vol_face1_dx to file ", i/(nnodes_ie*nnodes_r*ndirs*nnodes_if))
dummy = coords.grad.data.zero_()
i += 1
np.savetxt(f,array)
f.close()
f = open("dbr2_vol_face2_dx.txt","w")
i = 1
for inode_diff in range(0,nnodes_r):
for idir in range(0,ndirs):
f.write("dbr2_vol_face2_dx => diff_node %s, diff_dir %s \n" % (inode_diff+1,idir+1))
array = np.zeros([nnodes_ie,nnodes_if])
for irow in range(0,nnodes_ie):
for icol in range(0,nnodes_if):
data = br2_vol_face2[irow,icol]
data.backward(retain_graph=True)
ddata = coords.grad
ddata_np = ddata.numpy()
array[irow,icol] = ddata_np[inode_diff,idir]
update_progress("Writing dbr2_vol_face2_dx to file ", i/(nnodes_ie*nnodes_r*ndirs*nnodes_if))
dummy = coords.grad.data.zero_()
i += 1
np.savetxt(f,array)
f.close()
f = open("dbr2_vol_face3_dx.txt","w")
i = 1
for inode_diff in range(0,nnodes_r):
for idir in range(0,ndirs):
f.write("dbr2_vol_face3_dx => diff_node %s, diff_dir %s \n" % (inode_diff+1,idir+1))
array = np.zeros([nnodes_ie,nnodes_if])
for irow in range(0,nnodes_ie):
for icol in range(0,nnodes_if):
data = br2_vol_face3[irow,icol]
data.backward(retain_graph=True)
ddata = coords.grad
ddata_np = ddata.numpy()
array[irow,icol] = ddata_np[inode_diff,idir]
update_progress("Writing dbr2_vol_face3_dx to file ", i/(nnodes_ie*nnodes_r*ndirs*nnodes_if))
dummy = coords.grad.data.zero_()
i += 1
np.savetxt(f,array)
f.close()
f = open("dbr2_vol_face4_dx.txt","w")
i = 1
for inode_diff in range(0,nnodes_r):
for idir in range(0,ndirs):
f.write("dbr2_vol_face4_dx => diff_node %s, diff_dir %s \n" % (inode_diff+1,idir+1))
array = np.zeros([nnodes_ie,nnodes_if])
for irow in range(0,nnodes_ie):
for icol in range(0,nnodes_if):
data = br2_vol_face4[irow,icol]
data.backward(retain_graph=True)
ddata = coords.grad
ddata_np = ddata.numpy()
array[irow,icol] = ddata_np[inode_diff,idir]
update_progress("Writing dbr2_vol_face4_dx to file ", i/(nnodes_ie*nnodes_r*ndirs*nnodes_if))
dummy = coords.grad.data.zero_()
i += 1
np.savetxt(f,array)
f.close()
f = open("dbr2_vol_face5_dx.txt","w")
i = 1
for inode_diff in range(0,nnodes_r):
for idir in range(0,ndirs):
f.write("dbr2_vol_face5_dx => diff_node %s, diff_dir %s \n" % (inode_diff+1,idir+1))
array = np.zeros([nnodes_ie,nnodes_if])
for irow in range(0,nnodes_ie):
for icol in range(0,nnodes_if):
data = br2_vol_face5[irow,icol]
data.backward(retain_graph=True)
ddata = coords.grad
ddata_np = ddata.numpy()
array[irow,icol] = ddata_np[inode_diff,idir]
update_progress("Writing dbr2_vol_face5_dx to file ", i/(nnodes_ie*nnodes_r*ndirs*nnodes_if))
dummy = coords.grad.data.zero_()
i += 1
np.savetxt(f,array)
f.close()
f = open("dbr2_vol_face6_dx.txt","w")
i = 1
for inode_diff in range(0,nnodes_r):
for idir in range(0,ndirs):
f.write("dbr2_vol_face6_dx => diff_node %s, diff_dir %s \n" % (inode_diff+1,idir+1))
array = np.zeros([nnodes_ie,nnodes_if])
for irow in range(0,nnodes_ie):
for icol in range(0,nnodes_if):
data = br2_vol_face6[irow,icol]
data.backward(retain_graph=True)
ddata = coords.grad
ddata_np = ddata.numpy()
array[irow,icol] = ddata_np[inode_diff,idir]
update_progress("Writing dbr2_vol_face6_dx to file ", i/(nnodes_ie*nnodes_r*ndirs*nnodes_if))
dummy = coords.grad.data.zero_()
i += 1
np.savetxt(f,array)
f.close()
#
# dbr2_face_dx
#
#
f = open("dbr2_face_face1_dx.txt","w")
i = 1
for inode_diff in range(0,nnodes_r):
for idir in range(0,ndirs):
f.write("dbr2_face_face1_dx => diff_node %s, diff_dir %s \n" % (inode_diff+1,idir+1))
array = np.zeros([nnodes_if,nnodes_if])
for irow in range(0,nnodes_if):
for icol in range(0,nnodes_if):
data = br2_face_face1[irow,icol]
data.backward(retain_graph=True)
ddata = coords.grad
ddata_np = ddata.numpy()
array[irow,icol] = ddata_np[inode_diff,idir]
update_progress("Writing dbr2_face_face1_dx to file ", i/(nnodes_if*nnodes_r*ndirs*nnodes_if))
dummy = coords.grad.data.zero_()
i += 1
np.savetxt(f,array)
f.close()
f = open("dbr2_face_face2_dx.txt","w")
i = 1
for inode_diff in range(0,nnodes_r):
for idir in range(0,ndirs):
f.write("dbr2_face_face2_dx => diff_node %s, diff_dir %s \n" % (inode_diff+1,idir+1))
array = np.zeros([nnodes_if,nnodes_if])
for irow in range(0,nnodes_if):
for icol in range(0,nnodes_if):
data = br2_face_face2[irow,icol]
data.backward(retain_graph=True)
ddata = coords.grad
ddata_np = ddata.numpy()
array[irow,icol] = ddata_np[inode_diff,idir]
update_progress("Writing dbr2_face_face2_dx to file ", i/(nnodes_if*nnodes_r*ndirs*nnodes_if))
dummy = coords.grad.data.zero_()
i += 1
np.savetxt(f,array)
f.close()
f = open("dbr2_face_face3_dx.txt","w")
i = 1
for inode_diff in range(0,nnodes_r):
for idir in range(0,ndirs):
f.write("dbr2_face_face3_dx => diff_node %s, diff_dir %s \n" % (inode_diff+1,idir+1))
array = np.zeros([nnodes_if,nnodes_if])
for irow in range(0,nnodes_if):
for icol in range(0,nnodes_if):
data = br2_face_face3[irow,icol]
data.backward(retain_graph=True)
ddata = coords.grad
ddata_np = ddata.numpy()
array[irow,icol] = ddata_np[inode_diff,idir]
update_progress("Writing dbr2_face_face3_dx to file ", i/(nnodes_if*nnodes_r*ndirs*nnodes_if))
dummy = coords.grad.data.zero_()
i += 1
np.savetxt(f,array)
f.close()
f = open("dbr2_face_face4_dx.txt","w")
i = 1
for inode_diff in range(0,nnodes_r):
for idir in range(0,ndirs):
f.write("dbr2_face_face4_dx => diff_node %s, diff_dir %s \n" % (inode_diff+1,idir+1))
array = np.zeros([nnodes_if,nnodes_if])
for irow in range(0,nnodes_if):
for icol in range(0,nnodes_if):
data = br2_face_face4[irow,icol]
data.backward(retain_graph=True)
ddata = coords.grad
ddata_np = ddata.numpy()
array[irow,icol] = ddata_np[inode_diff,idir]
update_progress("Writing dbr2_face_face4_dx to file ", i/(nnodes_if*nnodes_r*ndirs*nnodes_if))
dummy = coords.grad.data.zero_()
i += 1
np.savetxt(f,array)
f.close()
f = open("dbr2_face_face5_dx.txt","w")
i = 1
for inode_diff in range(0,nnodes_r):
for idir in range(0,ndirs):
f.write("dbr2_face_face5_dx => diff_node %s, diff_dir %s \n" % (inode_diff+1,idir+1))
array = np.zeros([nnodes_if,nnodes_if])
for irow in range(0,nnodes_if):
for icol in range(0,nnodes_if):
data = br2_face_face5[irow,icol]
data.backward(retain_graph=True)
ddata = coords.grad
ddata_np = ddata.numpy()
array[irow,icol] = ddata_np[inode_diff,idir]
update_progress("Writing dbr2_face_face5_dx to file ", i/(nnodes_if*nnodes_r*ndirs*nnodes_if))
dummy = coords.grad.data.zero_()
i += 1
np.savetxt(f,array)
f.close()
f = open("dbr2_face_face6_dx.txt","w")
i = 1
for inode_diff in range(0,nnodes_r):
for idir in range(0,ndirs):
f.write("dbr2_face_face6_dx => diff_node %s, diff_dir %s \n" % (inode_diff+1,idir+1))
array = np.zeros([nnodes_if,nnodes_if])
for irow in range(0,nnodes_if):
for icol in range(0,nnodes_if):
data = br2_face_face6[irow,icol]
data.backward(retain_graph=True)
ddata = coords.grad
ddata_np = ddata.numpy()
array[irow,icol] = ddata_np[inode_diff,idir]
update_progress("Writing dbr2_face_face6_dx to file ", i/(nnodes_if*nnodes_r*ndirs*nnodes_if))
dummy = coords.grad.data.zero_()
i += 1
np.savetxt(f,array)
f.close()
#
# dgrad1_dx
#
f = open("dgrad1_dx.txt","w")
i = 1
for inode_diff in range(0,nnodes_r):
for idir in range(0,ndirs):
f.write("dgrad1_dx => diff_node %s, diff_dir %s \n" % (inode_diff+1,idir+1))
array = np.zeros([nnodes_ie,nterms_s])
for irow in range(0,nnodes_ie):
for icol in range(0,nterms_s):
data = grad1[irow,icol]
data.backward(retain_graph=True)
ddata = coords.grad
ddata_np = ddata.numpy()
array[irow,icol] = ddata_np[inode_diff,idir]
update_progress("Writing dgrad1_dx to file ", i/(nnodes_ie*nnodes_r*ndirs*nterms_s))
dummy = coords.grad.data.zero_()
i += 1
np.savetxt(f,array)
f.close()
#
# dgrad2_dx
#
f = open("dgrad2_dx.txt","w")
i = 1
for inode_diff in range(0,nnodes_r):
for idir in range(0,ndirs):
f.write("dgrad2_dx => diff_node %s, diff_dir %s \n" % (inode_diff+1,idir+1))
array = np.zeros([nnodes_ie,nterms_s])
for irow in range(0,nnodes_ie):
for icol in range(0,nterms_s):
data = grad2[irow,icol]
data.backward(retain_graph=True)
ddata = coords.grad
ddata_np = ddata.numpy()
array[irow,icol] = ddata_np[inode_diff,idir]
update_progress("Writing dgrad2_dx to file ", i/(nnodes_ie*nnodes_r*ndirs*nterms_s))
dummy = coords.grad.data.zero_()
i += 1
np.savetxt(f,array)
f.close()
#
# dgrad3_dx
#
f = open("dgrad3_dx.txt","w")
i = 1
for inode_diff in range(0,nnodes_r):
for idir in range(0,ndirs):
f.write("dgrad3_dx => diff_node %s, diff_dir %s \n" % (inode_diff+1,idir+1))
array = np.zeros([nnodes_ie,nterms_s])
for irow in range(0,nnodes_ie):
for icol in range(0,nterms_s):
data = grad3[irow,icol]
data.backward(retain_graph=True)
ddata = coords.grad
ddata_np = ddata.numpy()
array[irow,icol] = ddata_np[inode_diff,idir]
update_progress("Writing dgrad3_dx to file ", i/(nnodes_ie*nnodes_r*ndirs*nterms_s))
dummy = coords.grad.data.zero_()
i += 1
np.savetxt(f,array)
f.close()
|
9637
|
from importlib import _bootstrap
from . import util
import collections
import imp
import sys
import unittest
class PathHookTests(unittest.TestCase):
"""Test the path hook for extension modules."""
# XXX Should it only succeed for pre-existing directories?
# XXX Should it only work for directories containing an extension module?
def hook(self, entry):
return _bootstrap._file_path_hook(entry)
def test_success(self):
# Path hook should handle a directory where a known extension module
# exists.
self.assertTrue(hasattr(self.hook(util.PATH), 'find_module'))
def test_main():
from test.support import run_unittest
run_unittest(PathHookTests)
if __name__ == '__main__':
test_main()
|
9687
|
from typing import Dict, Union
from graphql import (
GraphQLBoolean,
GraphQLFloat,
GraphQLInputField,
GraphQLInt,
GraphQLList,
GraphQLNonNull,
GraphQLScalarType,
GraphQLString,
)
from sqlalchemy import ARRAY, Boolean, Float, Integer
from sqlalchemy.dialects.postgresql import ARRAY as PGARRAY
from sqlalchemy.types import TypeEngine
def get_graphql_type_from_column(column_type: TypeEngine) -> Union[GraphQLScalarType, GraphQLList]:
if isinstance(column_type, Integer):
return GraphQLInt
if isinstance(column_type, Float):
return GraphQLFloat
if isinstance(column_type, Boolean):
return GraphQLBoolean
if isinstance(column_type, (ARRAY, PGARRAY)):
return GraphQLList(get_graphql_type_from_column(column_type.item_type))
return GraphQLString
def get_base_comparison_fields(graphql_type: Union[GraphQLScalarType, GraphQLList]) -> Dict[str, GraphQLInputField]:
return {
"_eq": GraphQLInputField(graphql_type),
"_neq": GraphQLInputField(graphql_type),
"_in": GraphQLInputField(GraphQLList(GraphQLNonNull(graphql_type))),
"_nin": GraphQLInputField(GraphQLList(GraphQLNonNull(graphql_type))),
"_lt": GraphQLInputField(graphql_type),
"_gt": GraphQLInputField(graphql_type),
"_gte": GraphQLInputField(graphql_type),
"_lte": GraphQLInputField(graphql_type),
"_is_null": GraphQLInputField(GraphQLBoolean),
}
def get_string_comparison_fields() -> Dict[str, GraphQLInputField]:
return {"_like": GraphQLInputField(GraphQLString), "_nlike": GraphQLInputField(GraphQLString)}
|
9734
|
from django.contrib.auth import update_session_auth_hash
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.models import User
from django.contrib.auth.views import (LoginView, PasswordResetConfirmView,
PasswordResetView)
from django.http import HttpResponse, HttpResponseNotAllowed
from django.shortcuts import render
from django.urls import reverse_lazy
from django.views.generic import CreateView, DeleteView, UpdateView
from users.forms import (SignInForm, SignUpForm, UserPasswordResetForm,
UserProfileForm, UserSetPasswordForm)
from users.mixins import LockDuringEditMixin
from users.models import Lock, UserSession
class SignUp(CreateView):
model = User
form_class = SignUpForm
template_name = "registration/signup.html"
success_url = reverse_lazy("dashboard:dashboard")
class SignIn(LoginView):
form_class = SignInForm
class Profile(LoginRequiredMixin, LockDuringEditMixin, UpdateView):
model = User
form_class = UserProfileForm
template_name = "registration/profile.html"
success_url = reverse_lazy("users:profile")
def get_object(self):
return self.request.user
def form_valid(self, form):
response = super().form_valid(form)
        update_session_auth_hash(self.request, self.object)  # this will delete the current user session
        # and create a new one
UserSession.objects.create(user=self.object, session_id=self.request.session.session_key)
return response
class UserPasswordResetView(PasswordResetView):
form_class = UserPasswordResetForm
class UserPasswordResetConfirmView(PasswordResetConfirmView):
form_class = UserSetPasswordForm
def unlock(request, pk):
if request.method == "POST":
        Lock.objects.filter(pk=pk).delete()
return HttpResponse('')
return HttpResponseNotAllowed(["POST"])
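# Hypothetical URL wiring for the views above (paths and names are assumptions,
# not taken from the project), sketched here for orientation only:
#
#   # users/urls.py
#   from django.urls import path
#   from users import views
#
#   app_name = "users"
#   urlpatterns = [
#       path("signup/", views.SignUp.as_view(), name="signup"),
#       path("signin/", views.SignIn.as_view(), name="signin"),
#       path("profile/", views.Profile.as_view(), name="profile"),
#       path("locks/<int:pk>/unlock/", views.unlock, name="unlock"),
#   ]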
|
9771
|
import io
import hashlib
import logging
import os
import struct
import random
from HintList import getHint, getHintGroup, Hint
from Utils import local_path
#builds out general hints based on location and whether an item is required or not
def buildGossipHints(world, rom):
stoneAddresses = [0x938e4c, 0x938EA8, 0x938F04, 0x938F60, 0x938FBC, 0x939018, 0x939074, 0x9390D0, 0x93912C, 0x939188,
0x9391E4, 0x939240, 0x93929C, 0x9392F8, 0x939354, 0x9393B0, 0x93940C, 0x939468, 0x9394C4, 0x939520,
0x93957C, 0x9395D8, 0x939634, 0x939690, 0x9396EC, 0x939748, 0x9397A4, 0x939800, 0x93985C, 0x9398B8,
0x939914, 0x939970] #address for gossip stone text boxes, byte limit is 92
    alwaysLocations = getHintGroup('alwaysLocation')  # These locations will always have a hint somewhere in the world.
sometimesSpace = (int((len(stoneAddresses) - len(alwaysLocations)*2)/2))
sometimesLocations = getHintGroup('location')#A random selection of these locations will be in the hint pool.
random.shuffle(sometimesLocations)
sometimesLocations = sometimesLocations[0:sometimesSpace]
    hintList = alwaysLocations  # note: aliases alwaysLocations, so the extend below duplicates it in place
    hintList.extend(alwaysLocations)  # always locations appear twice so they are placed on two stones
hintList.extend(sometimesLocations)
locationData = []
for hint in hintList:
for locationWorld in world.get_locations():
if hint.name == locationWorld.name:
locationData.extend([locationWorld])
#hopefully fixes weird VC error where the last character from a previous text box would sometimes spill over into the next box.
for address in range(stoneAddresses[0], 0x9399D8):
rom.write_byte(address, 0x08)
#shuffles the stone addresses for randomization, always locations will be placed first and twice
random.shuffle(stoneAddresses)
#loops through shuffled locations and addresses and builds hint.
while locationData:
currentLoc = locationData.pop(0)
Block_code = getBytes((getHint(currentLoc.name).text))
if currentLoc.item.type == 'Map' or currentLoc.item.type == 'Compass' or currentLoc.item.type == 'BossKey' or currentLoc.item.type == 'SmallKey':
Block_code.extend(getBytes((getHint(currentLoc.item.type).text)))
else:
Block_code.extend(getBytes((getHint(currentLoc.item.name).text)))
endText(Block_code)
if len(Block_code) > 92:
print('Too many characters in hint')
Block_code = getBytes("I am Error.")
Block_code.extend(getBytes(currentLoc.name))
Block_code.extend(getBytes('&'))
Block_code.extend(getBytes(currentLoc.item.name))
rom.write_bytes(stoneAddresses.pop(0), Block_code)
junkHints = getHintGroup('junkHint')
random.shuffle(junkHints)
while stoneAddresses:
junkHint = junkHints.pop()
Block_code = getBytes(junkHint.text)
endText(Block_code)
rom.write_bytes(stoneAddresses.pop(0), Block_code)
return rom
# builds boss reward text that is displayed at the Temple of Time altar for child and adult, pulled based on the item in a fixed order.
def buildBossRewardHints(world, rom):
bossRewardsSpiritualStones = ['Kokiri Emerald', 'Goron Ruby', 'Zora Sapphire']
bossRewardsMedallions = ['Forest Medallion', 'Fire Medallion', 'Water Medallion', 'Shadow Medallion', 'Spirit Medallion', 'Light Medallion']
# text that appears at altar as a child.
Block_code = []
Block_code = getBytes(getHint('Spiritual Stone Text Start').text)
for reward in bossRewardsSpiritualStones:
buildBossString(Block_code, reward, world)
Block_code = setRewardColor(Block_code)
Block_code.extend(getBytes(getHint('Spiritual Stone Text End').text))
Block_code.extend([0x0B])
endText(Block_code)
rom.write_bytes(0x95ED95, Block_code)
# text that appears at altar as an adult.
Block_code = []
for reward in bossRewardsMedallions:
buildBossString(Block_code, reward, world)
Block_code = setRewardColor(Block_code)
Block_code.extend(getBytes(getHint('Medallion Text End').text))
Block_code.extend([0x0B])
endText(Block_code)
rom.write_bytes(0x95DB94, Block_code)
return rom
# pulls text string from hintlist for reward after sending the location to hintlist.
def buildBossString(Block_code, reward, world):
for location in world.get_locations():
if location.item.name == reward:
Block_code.extend([0x08])
Block_code.extend(getBytes(getHint(location.name).text))
return Block_code
# alternates through color set commands in child and adult boss reward hint strings setting the colors at the start of the string to correspond with the reward found at the location.
# skips over color commands at the end of strings to set color back to white.
def setRewardColor(Block_code):
rewardColors = [0x42, 0x41, 0x43, 0x45, 0x46, 0x44]
colorWhite = True
for i, byte in enumerate(Block_code):
if byte == 0x05 and colorWhite:
Block_code[i + 1] = rewardColors.pop(0)
colorWhite = False
elif byte == 0x05 and not colorWhite:
colorWhite = True
return Block_code
#sets the end-of-text byte in the text box; mutates byteArray in place.
def endText(byteArray):
    byteArray.extend([0x02])
# reads array of characters and converts them to an array of bytes.
def getBytes(string):
byteCode = []
for char in string:
if char == '^':
byteCode.extend([0x04])#box break
elif char == '&':
byteCode.extend([0x01])#new line
elif char == '@':
byteCode.extend([0x0F])#print player name
elif char == '#':
byteCode.extend([0x05, 0x40]) #sets color to white
else:
char = char.encode('utf-8')
char = char.hex()
byte = int('0x' + char, 16)
byteCode.extend([byte])
return byteCode
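# --- Illustrative sketch (not in the original file): shows how the helpers above
# encode the control characters. Guarded so it never runs when the module is imported.
if __name__ == '__main__':
    demo = getBytes('Hello @^Box two&line two#')
    endText(demo)
    # 0x0F = player name, 0x04 = box break, 0x01 = new line,
    # 0x05 0x40 = switch text color to white, 0x02 = end of text box
    print([hex(b) for b in demo])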
|
9790
|
import os
import pickle
import time
import timeit
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import torch
import tempfile
import horovod.torch as hvd
from horovod.ray import RayExecutor
from ray_shuffling_data_loader.torch_dataset import (TorchShufflingDataset)
from ray_shuffling_data_loader.data_generation import (generate_data,
DATA_SPEC)
import argparse
DEFAULT_DATA_DIR = "s3://shuffling-data-loader-benchmarks/data/"
numpy_to_torch_dtype = {
np.bool: torch.bool,
np.uint8: torch.uint8,
np.int8: torch.int8,
np.int16: torch.int16,
np.int32: torch.int32,
np.int64: torch.int64,
np.float16: torch.float16,
np.float32: torch.float32,
np.float64: torch.float64,
np.complex64: torch.complex64,
np.complex128: torch.complex128
}
# Training settings
parser = argparse.ArgumentParser(description="PyTorch MNIST Example")
parser.add_argument(
"--batch-size",
type=int,
default=250000,
metavar="N",
help="input batch size for training (default: 64)")
parser.add_argument(
"--test-batch-size",
type=int,
default=250000,
metavar="N",
help="input batch size for testing (default: 1000)")
parser.add_argument(
"--epochs",
type=int,
default=10,
metavar="N",
help="number of epochs to train (default: 10)")
parser.add_argument(
"--lr",
type=float,
default=0.01,
metavar="LR",
help="learning rate (default: 0.01)")
parser.add_argument(
"--momentum",
type=float,
default=0.5,
metavar="M",
help="SGD momentum (default: 0.5)")
parser.add_argument(
"--no-cuda",
action="store_true",
default=False,
help="disables CUDA training")
parser.add_argument(
"--seed",
type=int,
default=42,
metavar="S",
help="random seed (default: 42)")
parser.add_argument(
"--log-interval",
type=int,
default=10,
metavar="N",
help=("how many batches to wait before logging training "
"status"))
parser.add_argument(
"--fp16-allreduce",
action="store_true",
default=False,
help="use fp16 compression during allreduce")
parser.add_argument(
"--use-adasum",
action="store_true",
default=False,
help="use adasum algorithm to do reduction")
parser.add_argument(
"--gradient-predivide-factor",
type=float,
default=1.0,
help=("apply gradient predivide factor in optimizer "
"(default: 1.0)"))
parser.add_argument("--num-workers", type=int, default=None)
parser.add_argument("--num-hosts", type=int, default=None)
parser.add_argument("--num-workers-per-host", type=int, default=None)
parser.add_argument("--cpus-per-worker", type=int, default=1)
parser.add_argument("--mock-train-step-time", type=float, default=1.0)
# Synthetic training data generation settings.
parser.add_argument("--cache-files", action="store_true", default=False)
parser.add_argument("--num-rows", type=int, default=2 * (10**7))
parser.add_argument("--num-files", type=int, default=25)
parser.add_argument("--max-row-group-skew", type=float, default=0.0)
parser.add_argument("--num-row-groups-per-file", type=int, default=5)
parser.add_argument("--data-dir", type=str, default=DEFAULT_DATA_DIR)
# Shuffling data loader settings.
parser.add_argument("--num-reducers", type=int, default=32)
parser.add_argument("--max-concurrent-epochs", type=int, default=2)
parser.add_argument("--address", default="auto")
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
        return F.log_softmax(x, dim=1)
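# Shape walkthrough for Net.forward (assuming MNIST-style 1x28x28 inputs, which the
# 320-unit fc1 implies): (N,1,28,28) -> conv1 k5 -> (N,10,24,24) -> max_pool 2 ->
# (N,10,12,12) -> conv2 k5 -> (N,20,8,8) -> max_pool 2 -> (N,20,4,4) -> flatten to
# 20*4*4 = 320 -> fc1 -> 50 -> fc2 -> 10 -> log_softmax over the 10 classes.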
def train_main(args, filenames):
# Horovod: initialize library.
hvd.init()
torch.manual_seed(args.seed)
if torch.cuda.is_available() and not args.no_cuda:
# Horovod: pin GPU to local rank.
torch.cuda.set_device(hvd.local_rank())
torch.cuda.manual_seed(args.seed)
# Horovod: limit # of CPU threads to be used per worker.
torch.set_num_threads(1)
rank = hvd.rank()
train_dataset = create_dataset(
filenames,
batch_size=args.batch_size,
rank=rank,
num_epochs=args.epochs,
world_size=hvd.size(),
num_reducers=args.num_reducers,
max_concurrent_epochs=args.max_concurrent_epochs)
model = Net()
    # By default, Adasum doesn't need scaling up learning rate.
lr_scaler = hvd.size() if not args.use_adasum else 1
if torch.cuda.is_available() and not args.no_cuda:
# Move model to GPU.
model.cuda()
# If using GPU Adasum allreduce, scale learning rate by local_size.
if args.use_adasum and hvd.nccl_built():
lr_scaler = hvd.local_size()
# Horovod: scale learning rate by lr_scaler.
optimizer = optim.SGD(
model.parameters(), lr=args.lr * lr_scaler, momentum=args.momentum)
# Horovod: broadcast parameters & optimizer state.
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
hvd.broadcast_optimizer_state(optimizer, root_rank=0)
# Horovod: (optional) compression algorithm.
compression = (hvd.Compression.fp16
if args.fp16_allreduce else hvd.Compression.none)
# Horovod: wrap optimizer with DistributedOptimizer.
optimizer = hvd.DistributedOptimizer(
optimizer,
named_parameters=model.named_parameters(),
compression=compression,
op=hvd.Adasum if args.use_adasum else hvd.Average,
gradient_predivide_factor=args.gradient_predivide_factor)
def _train(epoch):
model.train()
# Horovod: set epoch to sampler for shuffling.
train_dataset.set_epoch(epoch)
start_epoch = timeit.default_timer()
last_batch_time = start_epoch
batch_wait_times = []
for batch_idx, (data, target) in enumerate(train_dataset):
batch_wait_times.append(timeit.default_timer() - last_batch_time)
if torch.cuda.is_available() and not args.no_cuda:
if isinstance(data, list):
data = [t.cuda() for t in data]
target = target.cuda()
optimizer.zero_grad()
# output = model(data)
if batch_idx % args.log_interval == 0:
print(
f"Processing batch {batch_idx} in epoch {epoch} on worker "
f"{rank}.")
time.sleep(args.mock_train_step_time)
# TODO(Clark): Add worker synchronization barrier here.
# loss = F.nll_loss(output, target)
# loss.backward()
# optimizer.step()
last_batch_time = timeit.default_timer()
epoch_duration = timeit.default_timer() - start_epoch
avg_batch_wait_time = np.mean(batch_wait_times)
std_batch_wait_time = np.std(batch_wait_times)
max_batch_wait_time = np.max(batch_wait_times)
min_batch_wait_time = np.min(batch_wait_times)
print(f"\nEpoch {epoch}, worker {rank} stats over "
f"{len(batch_wait_times)} steps: {epoch_duration:.3f}")
print(f"Mean batch wait time: {avg_batch_wait_time:.3f}s +- "
f"{std_batch_wait_time}")
print(f"Max batch wait time: {max_batch_wait_time:.3f}s")
print(f"Min batch wait time: {min_batch_wait_time:.3f}s")
return batch_wait_times
print(f"Starting training on worker {rank}.")
batch_wait_times = []
for epoch in range(args.epochs):
batch_wait_times.extend(_train(epoch))
batch_wait_times.pop(0)
print(f"Done training on worker {rank}.")
avg_batch_wait_time = np.mean(batch_wait_times)
std_batch_wait_time = np.std(batch_wait_times)
max_batch_wait_time = np.max(batch_wait_times)
min_batch_wait_time = np.min(batch_wait_times)
print(f"\nWorker {rank} training stats over {args.epochs} epochs:")
print(f"Mean batch wait time: {avg_batch_wait_time:.3f}s +- "
f"{std_batch_wait_time}")
print(f"Max batch wait time: {max_batch_wait_time:.3f}s")
print(f"Min batch wait time: {min_batch_wait_time:.3f}s")
# TODO(Clark): Add logic to the dataset abstraction so we don't have to do
# this.
if rank == 0:
print("Waiting in rank 0 worker to let other workers consume queue...")
time.sleep(10)
print("Done waiting in rank 0 worker.")
def create_dataset(filenames, *, batch_size, rank, num_epochs, world_size,
num_reducers, max_concurrent_epochs):
print(f"Creating Torch shuffling dataset for worker {rank} with "
f"{batch_size} batch size, {num_epochs} epochs, {num_reducers} "
f"reducers, and {world_size} trainers.")
feature_columns = list(DATA_SPEC.keys())
feature_types = [
numpy_to_torch_dtype[dtype] for _, _, dtype in DATA_SPEC.values()
]
label_column = feature_columns.pop()
label_type = feature_types.pop()
return TorchShufflingDataset(
filenames,
num_epochs,
world_size,
batch_size,
rank,
num_reducers=num_reducers,
max_concurrent_epochs=max_concurrent_epochs,
feature_columns=feature_columns,
feature_types=feature_types,
label_column=label_column,
label_type=label_type)
if __name__ == "__main__":
args = parser.parse_args()
from ray_shuffling_data_loader.stats import human_readable_size
import ray
print("Connecting to Ray cluster...")
ray.init(address=args.address)
num_rows = args.num_rows
num_files = args.num_files
num_row_groups_per_file = args.num_row_groups_per_file
max_row_group_skew = args.max_row_group_skew
data_dir = args.data_dir
cache_path = os.path.join(tempfile.gettempdir(), "data_cache")
filenames = None
if args.cache_files and os.path.exists(cache_path):
try:
with open(cache_path, "rb") as f:
filenames, num_bytes = pickle.load(f)
except Exception as exc:
print(f"Cache load failed - {exc}")
if not filenames:
print(f"Generating {num_rows} rows over {num_files} files, with "
f"{num_row_groups_per_file} row groups per file and at most "
f"{100 * max_row_group_skew:.1f}% row group skew.")
filenames, num_bytes = generate_data(num_rows, num_files,
num_row_groups_per_file,
max_row_group_skew, data_dir)
if args.cache_files:
with open(os.path.join(tempfile.gettempdir(), "data_cache"),
"wb") as f:
pickle.dump((filenames, num_bytes), f)
print(f"Generated {len(filenames)} files containing {num_rows} rows "
f"with {num_row_groups_per_file} row groups per file, totalling "
f"{human_readable_size(num_bytes)}.")
print("Create Ray executor")
worker_kwargs = {}
num_workers = args.num_workers
num_hosts = args.num_hosts
num_workers_per_host = args.num_workers_per_host
if num_workers is not None:
if num_hosts is not None:
raise ValueError(
"Only one of --num-workers and --num-hosts should be used.")
worker_kwargs["num_workers"] = num_workers
elif num_hosts is not None:
worker_kwargs["num_hosts"] = num_hosts
if num_workers_per_host is None:
raise ValueError("When giving --num-hosts, --num-workers-per-host "
"must also be given.")
worker_kwargs["num_workers_per_host"] = num_workers_per_host
cpus_per_worker = args.cpus_per_worker
settings = RayExecutor.create_settings(timeout_s=30)
executor = RayExecutor(
settings,
use_gpu=True,
gpus_per_worker=1,
cpus_per_worker=cpus_per_worker,
**worker_kwargs)
executor.start()
executor.run(train_main, args=[args, filenames])
executor.shutdown()
print("Done consuming batches.")
|
9839
|
import uos as os
import time
def countdown():
for i in range(5, 0, -1):
print("start stubbing in {}...".format(i))
time.sleep(1)
import createstubs
# import stub_lvgl
try:
# only run import if no stubs yet
os.listdir("stubs")
print("stub folder was found, stubbing is not automatically started")
except OSError:
countdown()
|
9871
|
import gym
from gym import spaces, error, utils
from gym.utils import seeding
import numpy as np
import configparser
from os import path
import matplotlib.pyplot as plt
from matplotlib.pyplot import gca
font = {'family': 'sans-serif',
'weight': 'bold',
'size': 14}
class MappingEnv(gym.Env):
def __init__(self):
# config_file = path.join(path.dirname(__file__), "params_flock.cfg")
# config = configparser.ConfigParser()
# config.read(config_file)
# config = config['flock']
self.nearest_agents = 7
self.nearest_targets = 7
self.mean_pooling = True # normalize the adjacency matrix by the number of neighbors or not
self.centralized = True
# number states per agent
self.nx_system = 4
# number of actions per agent
self.nu = 2
# default problem parameters
self.n_agents = 100 # int(config['network_size'])
# self.comm_radius = 0.9 # float(config['comm_radius'])
        self.dt = 0.1  # float(config['system_dt'])
self.v_max = 5.0 # float(config['max_vel_init'])
self.v_bias = self.v_max
        # initialize state matrices
self.x = None
self.u = None
self.mean_vel = None
self.init_vel = None
self.greedy_action = None
self.diff = None
self.r2 = None
self.adj_mat = None
self.adj_mat_mean = None
self.diff_targets = None
self.r2_targets = None
self.target_observed = None
self.state_network = None
self.state_values = None
self.reward = None
self.max_accel = 1
# self.action_space = spaces.Box(low=-self.max_accel, high=self.max_accel, shape=(2 * self.n_agents,),
# dtype=np.float32)
#
# self.observation_space = spaces.Box(low=-np.Inf, high=np.Inf, shape=(self.n_agents, ),
# dtype=np.float32)
# target initialization
self.px_max = 100
self.py_max = 100
x = np.linspace(-1.0 * self.px_max, self.px_max, self.n_agents)
y = np.linspace(-1.0 * self.py_max, self.py_max, self.n_agents)
tx, ty = np.meshgrid(x, y)
tx = tx.reshape((-1, 1))
ty = ty.reshape((-1, 1))
self.obs_rad = 2.0
self.obs_rad2 = self.obs_rad * self.obs_rad
self.target_x = np.stack((tx, ty), axis=1).reshape((-1, 2))
self.target_unobserved = np.ones((self.n_agents * self.n_agents, 2), dtype=np.bool)
# rendering initialization
self.fig = None
self.ax = None
self.line1 = None
self.line2 = None
self.action_scalar = 10.0
self.seed()
def reset(self):
x = np.zeros((self.n_agents, self.nx_system))
self.target_unobserved = np.ones((self.n_agents * self.n_agents, 2), dtype=np.bool)
x[:, 0] = np.random.uniform(low=-self.px_max, high=self.px_max, size=(self.n_agents,))
x[:, 1] = np.random.uniform(low=-self.py_max, high=self.py_max, size=(self.n_agents,))
#bias = np.random.uniform(low=-self.v_bias, high=self.v_bias, size=(2,))
x[:, 2] = np.random.uniform(low=-self.v_max, high=self.v_max, size=(self.n_agents,)) #+ bias[0]
x[:, 3] = np.random.uniform(low=-self.v_max, high=self.v_max, size=(self.n_agents,)) #+ bias[1]
# keep good initialization
self.mean_vel = np.mean(x[:, 2:4], axis=0)
self.init_vel = x[:, 2:4]
self.x = x
# self.a_net = self.get_connectivity(self.x)
self.compute_helpers()
return self.state_values, self.state_network
def params_from_cfg(self, args):
# TODO
pass
# # self.comm_radius = args.getfloat('comm_radius')
# # self.comm_radius2 = self.comm_radius * self.comm_radius
# # self.vr = 1 / self.comm_radius2 + np.log(self.comm_radius2)
# #
# # self.n_agents = args.getint('n_agents')
# # self.r_max = self.r_max * np.sqrt(self.n_agents)
#
# # self.action_space = spaces.Box(low=-self.max_accel, high=self.max_accel, shape=(2 * self.n_agents,),
# # dtype=np.float32)
# #
# # self.observation_space = spaces.Box(low=-np.Inf, high=np.Inf, shape=(self.n_agents, self.n_features),
# # dtype=np.float32)
#
# self.v_max = args.getfloat('v_max')
# self.v_bias = self.v_max
# self.dt = args.getfloat('dt')
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, u):
# u = np.reshape(u, (-1, 2))
assert u.shape == (self.n_agents, self.nu)
u = np.clip(u, a_min=-self.max_accel, a_max=self.max_accel)
self.u = u * self.action_scalar
old_x = np.copy(self.x)
# x position
self.x[:, 0] = self.x[:, 0] + self.x[:, 2] * self.dt + self.u[:, 0] * self.dt * self.dt * 0.5
# y position
self.x[:, 1] = self.x[:, 1] + self.x[:, 3] * self.dt + self.u[:, 1] * self.dt * self.dt * 0.5
# x velocity
self.x[:, 2] = self.x[:, 2] + self.u[:, 0] * self.dt
# y velocity
self.x[:, 3] = self.x[:, 3] + self.u[:, 1] * self.dt
# clip velocities
self.x[:, 2:4] = np.clip(self.x[:, 2:4], -1.0*self.v_max, self.v_max)
dist_traveled = np.sum(np.linalg.norm(self.x[:, 0:2] - old_x[:, 0:2], axis=1))
self.compute_helpers()
done = (0 == np.sum(self.target_unobserved))
return (self.state_values, self.state_network), 10.0 * self.reward - dist_traveled, done, {}
def compute_helpers(self):
# TODO - check this, and initialize stuff in the init(), and try to make more efficient
# Neighbors computations
self.diff = self.x.reshape((self.n_agents, 1, self.nx_system)) - self.x.reshape(
(1, self.n_agents, self.nx_system))
self.r2 = np.multiply(self.diff[:, :, 0], self.diff[:, :, 0]) + np.multiply(self.diff[:, :, 1],
self.diff[:, :, 1])
np.fill_diagonal(self.r2, np.Inf)
nearest = np.argsort(self.r2, axis=1)
obs_neigh = np.zeros((self.n_agents, self.nearest_agents * 4))
self.adj_mat = np.zeros((self.n_agents, self.n_agents))
for i in range(self.nearest_agents):
ind2, ind3 = np.meshgrid(nearest[:, i], range(4), indexing='ij')
ind1, _ = np.meshgrid(range(self.n_agents), range(4), indexing='ij')
obs_neigh[:, i * self.nx_system:(i + 1) * self.nx_system] = np.reshape(
self.diff[ind1.flatten(), ind2.flatten(), ind3.flatten()], (-1, 4))
self.adj_mat[:, nearest[:, i]] = 1.0
# Normalize the adjacency matrix by the number of neighbors - results in mean pooling, instead of sum pooling
n_neighbors = np.reshape(np.sum(self.adj_mat, axis=1), (self.n_agents, 1)) # correct - checked this
n_neighbors[n_neighbors == 0] = 1
self.adj_mat_mean = self.adj_mat / n_neighbors
# Targets computations
self.diff_targets = self.x[:, 0:2].reshape((self.n_agents, 1, 2)) - self.target_x[
self.target_unobserved].reshape(
(1, -1, 2))
self.r2_targets = np.multiply(self.diff_targets[:, :, 0], self.diff_targets[:, :, 0]) + np.multiply(
self.diff_targets[:, :, 1],
self.diff_targets[:, :, 1])
nearest_targets = np.argsort(self.r2_targets, axis=1)
obs_target = np.zeros((self.n_agents, self.nearest_targets * 2))
for i in range(min(self.nearest_targets, np.shape(nearest_targets)[1])):
ind2, ind3 = np.meshgrid(nearest_targets[:, i], range(2), indexing='ij')
ind1, _ = np.meshgrid(range(self.n_agents), range(2), indexing='ij')
obs_target[:, i * 2:(i + 1) * 2] = np.reshape(
self.diff_targets[ind1.flatten(), ind2.flatten(), ind3.flatten()], (-1, 2))
self.target_observed = np.any(self.r2_targets < self.obs_rad2, axis=0).reshape((-1, 1))
self.target_unobserved[self.target_unobserved] = np.tile(np.logical_not(self.target_observed), (1, 2)).flatten()
self.reward = np.sum(self.target_observed.astype(np.int))
self.state_values = np.hstack((obs_neigh, obs_target))
self.greedy_action = -1.0 * obs_target[:, 0:2]
if self.mean_pooling:
self.state_network = self.adj_mat_mean
else:
self.state_network = self.adj_mat
def controller(self):
"""
The controller for flocking from Turner 2003.
Returns: the optimal action
"""
# TODO
# return np.zeros((self.n_agents, 2))
return self.greedy_action / 10.0
def render(self, mode='human'):
"""
Render the environment with agents as points in 2D space
"""
if self.fig is None:
plt.ion()
fig = plt.figure()
self.ax = fig.add_subplot(111)
line1, = self.ax.plot(self.x[:, 0], self.x[:, 1], 'bo')
locs = self.target_x[self.target_unobserved].reshape((-1, 2))
line2, = self.ax.plot(locs[:, 0], locs[:, 1], 'rx')
plt.ylim(-1.0 * self.py_max, 1.0 * self.py_max)
plt.xlim(-1.0 * self.px_max, 1.0 * self.px_max)
a = gca()
a.set_xticklabels(a.get_xticks(), font)
a.set_yticklabels(a.get_yticks(), font)
plt.title('GNN Controller')
self.fig = fig
self.line1 = line1
self.line2 = line2
# TODO render unobserved targets
else:
self.line1.set_xdata(self.x[:, 0])
self.line1.set_ydata(self.x[:, 1])
locs = self.target_x[self.target_unobserved].reshape((-1,2))
self.line2.set_xdata(locs[:, 0])
self.line2.set_ydata(locs[:, 1])
self.fig.canvas.draw()
self.fig.canvas.flush_events()
def close(self):
pass
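# --- Minimal smoke-test sketch (not part of the original module): drives the env
# with its greedy controller for a few steps. Guarded to keep imports side-effect free.
if __name__ == '__main__':
    env = MappingEnv()
    obs, adj = env.reset()
    for _ in range(5):
        action = env.controller()                      # (n_agents, 2) greedy accelerations
        (obs, adj), reward, done, _ = env.step(action)
        print('reward: %.2f, done: %s' % (reward, done))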
|
9908
|
import io
import os
import numpy as np
import pandas
import json
import logging #<== Optional. Log to console, file, kafka
from pipeline_monitor import prometheus_monitor as monitor #<== Optional. Monitor runtime metrics
from pipeline_logger import log
import tensorflow as tf
from tensorflow.contrib import predictor
from keras.models import Sequential, load_model
from keras.preprocessing import sequence
from keras.preprocessing.text import Tokenizer
from collections import OrderedDict
_logger = logging.getLogger('pipeline-logger')
_logger.setLevel(logging.INFO)
_logger_stream_handler = logging.StreamHandler()
_logger_stream_handler.setLevel(logging.INFO)
_logger.addHandler(_logger_stream_handler)
__all__ = ['invoke'] #<== Optional. Being a good Python citizen.
_labels = { #<== Optional. Used for metrics/labels
'name': 'injection',
'tag': 'v1',
'type': 'tensorflow',
'runtime': 'python',
'chip': 'cpu',
}
def _initialize_upon_import(): #<== Optional. Called once upon server startup
''' Initialize / Restore Model Object.
'''
model = load_model('securitai-lstm-model.h5')
model.load_weights('securitai-lstm-weights.h5')
model.compile(loss = 'binary_crossentropy', optimizer = 'adam', metrics = ['accuracy'])
return model
# This is called unconditionally at *module import time*...
_model = _initialize_upon_import()
#@log(labels=_labels, logger=_logger) #<== Optional. Sample and compare predictions
def invoke(request): #<== Required. Called on every prediction
'''Where the magic happens...'''
with monitor(labels=_labels, name="transform_request"): #<== Optional. Expose fine-grained metrics
transformed_request = _transform_request(request) #<== Optional. Transform input (json) into TensorFlow (tensor)
with monitor(labels=_labels, name="invoke"): #<== Optional. Calls _model.predict()
response = _model.predict(transformed_request)
with monitor(labels=_labels, name="transform_response"): #<== Optional. Transform TensorFlow (tensor) into output (json)
transformed_response = _transform_response(response)
return transformed_response #<== Required. Returns the predicted value(s)
def _transform_request(request):
request_str = request.decode('utf-8')
# tokenize the csv request and create json
X = pandas.read_csv(io.StringIO(request_str), engine='python', quotechar='|', header=None).values[:,0]
for index, item in enumerate(X):
reqJson = json.loads(item, object_pairs_hook=OrderedDict)
del reqJson['http']['timestamp']
del reqJson['http']['headers']
del reqJson['http']['source']
del reqJson['http']['route']
del reqJson['http']['responsePayload']
X[index] = json.dumps(reqJson, separators=(',', ':'))
tokenizer = Tokenizer(filters='\t\n', char_level=True)
tokenizer.fit_on_texts(X)
# this used to be [log_entry]
seq = tokenizer.texts_to_sequences([request_str])
max_log_length = 1024
log_entry_processed = sequence.pad_sequences(seq, maxlen=max_log_length)
return log_entry_processed
def _transform_response(response):
return response[0]
if __name__ == '__main__':
with open('./pipeline_test_request.csv', 'rb') as fb:
request_bytes = fb.read()
response_bytes = invoke(request_bytes)
print(response_bytes)
|
9915
|
from datetime import timedelta
from typing import Union, List, Optional
import click
import pandas as pd
from flask import current_app as app
from flask.cli import with_appcontext
from flexmeasures import Sensor
from flexmeasures.data import db
from flexmeasures.data.schemas.generic_assets import GenericAssetIdField
from flexmeasures.data.schemas.sensors import SensorIdField
from flexmeasures.data.models.generic_assets import GenericAsset
from flexmeasures.data.models.time_series import TimedBelief
from flexmeasures.data.utils import save_to_db
@click.group("edit")
def fm_edit_data():
"""FlexMeasures: Edit data."""
@fm_edit_data.command("attribute")
@with_appcontext
@click.option(
"--asset-id",
"assets",
required=False,
multiple=True,
type=GenericAssetIdField(),
help="Add/edit attribute to this asset. Follow up with the asset's ID.",
)
@click.option(
"--sensor-id",
"sensors",
required=False,
multiple=True,
type=SensorIdField(),
help="Add/edit attribute to this sensor. Follow up with the sensor's ID.",
)
@click.option(
"--attribute",
"attribute_key",
required=True,
help="Add/edit this attribute. Follow up with the name of the attribute.",
)
@click.option(
"--float",
"attribute_float_value",
required=False,
type=float,
help="Set the attribute to this float value.",
)
@click.option(
"--bool",
"attribute_bool_value",
required=False,
type=bool,
help="Set the attribute to this bool value.",
)
@click.option(
"--str",
"attribute_str_value",
required=False,
type=str,
help="Set the attribute to this string value.",
)
@click.option(
"--int",
"attribute_int_value",
required=False,
type=int,
help="Set the attribute to this integer value.",
)
@click.option(
"--null",
"attribute_null_value",
required=False,
is_flag=True,
default=False,
help="Set the attribute to a null value.",
)
def edit_attribute(
attribute_key: str,
assets: List[GenericAsset],
sensors: List[Sensor],
attribute_null_value: bool,
attribute_float_value: Optional[float] = None,
attribute_bool_value: Optional[bool] = None,
attribute_str_value: Optional[str] = None,
attribute_int_value: Optional[int] = None,
):
"""Edit (or add) an asset attribute or sensor attribute."""
if not assets and not sensors:
raise ValueError("Missing flag: pass at least one --asset-id or --sensor-id.")
# Parse attribute value
attribute_value = parse_attribute_value(
attribute_float_value=attribute_float_value,
attribute_bool_value=attribute_bool_value,
attribute_str_value=attribute_str_value,
attribute_int_value=attribute_int_value,
attribute_null_value=attribute_null_value,
)
# Set attribute
for asset in assets:
asset.attributes[attribute_key] = attribute_value
db.session.add(asset)
for sensor in sensors:
sensor.attributes[attribute_key] = attribute_value
db.session.add(sensor)
db.session.commit()
print("Successfully edited/added attribute.")
@fm_edit_data.command("resample-data")
@with_appcontext
@click.option(
"--sensor-id",
"sensor_ids",
multiple=True,
required=True,
help="Resample data for this sensor. Follow up with the sensor's ID. This argument can be given multiple times.",
)
@click.option(
"--event-resolution",
"event_resolution_in_minutes",
type=int,
required=True,
help="New event resolution as an integer number of minutes.",
)
@click.option(
"--from",
"start_str",
required=False,
help="Resample only data from this datetime onwards. Follow up with a timezone-aware datetime in ISO 6801 format.",
)
@click.option(
"--until",
"end_str",
required=False,
help="Resample only data until this datetime. Follow up with a timezone-aware datetime in ISO 6801 format.",
)
@click.option(
"--skip-integrity-check",
is_flag=True,
help="Whether to skip checking the resampled time series data for each sensor."
" By default, an excerpt and the mean value of the original"
" and resampled data will be shown for manual approval.",
)
def resample_sensor_data(
sensor_ids: List[int],
event_resolution_in_minutes: int,
start_str: Optional[str] = None,
end_str: Optional[str] = None,
skip_integrity_check: bool = False,
):
"""Assign a new event resolution to an existing sensor and resample its data accordingly."""
event_resolution = timedelta(minutes=event_resolution_in_minutes)
event_starts_after = pd.Timestamp(start_str) # note that "" or None becomes NaT
event_ends_before = pd.Timestamp(end_str)
for sensor_id in sensor_ids:
sensor = Sensor.query.get(sensor_id)
if sensor.event_resolution == event_resolution:
print(f"{sensor} already has the desired event resolution.")
continue
df_original = sensor.search_beliefs(
most_recent_beliefs_only=False,
event_starts_after=event_starts_after,
event_ends_before=event_ends_before,
).sort_values("event_start")
df_resampled = df_original.resample_events(event_resolution).sort_values(
"event_start"
)
if not skip_integrity_check:
message = ""
if sensor.event_resolution < event_resolution:
message += f"Downsampling {sensor} to {event_resolution} will result in a loss of data. "
click.confirm(
message
+ f"Data before:\n{df_original}\nData after:\n{df_resampled}\nMean before: {df_original['event_value'].mean()}\nMean after: {df_resampled['event_value'].mean()}\nContinue?",
abort=True,
)
# Update sensor
sensor.event_resolution = event_resolution
db.session.add(sensor)
# Update sensor data
query = TimedBelief.query.filter(TimedBelief.sensor == sensor)
if not pd.isnull(event_starts_after):
query = query.filter(TimedBelief.event_start >= event_starts_after)
if not pd.isnull(event_ends_before):
query = query.filter(
TimedBelief.event_start + sensor.event_resolution <= event_ends_before
)
query.delete()
save_to_db(df_resampled, bulk_save_objects=True)
db.session.commit()
print("Successfully resampled sensor data.")
app.cli.add_command(fm_edit_data)
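# Example invocations (assuming the standard ``flexmeasures`` CLI entry point; the
# IDs and attribute name below are made up for illustration):
#
#   flexmeasures edit attribute --sensor-id 2 --attribute capacity_in_mw --float 0.5
#   flexmeasures edit resample-data --sensor-id 2 --event-resolution 15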
def parse_attribute_value(
attribute_null_value: bool,
attribute_float_value: Optional[float] = None,
attribute_bool_value: Optional[bool] = None,
attribute_str_value: Optional[str] = None,
attribute_int_value: Optional[int] = None,
) -> Union[float, int, bool, str, None]:
"""Parse attribute value."""
if not single_true(
[attribute_null_value]
+ [
v is not None
for v in [
attribute_float_value,
attribute_bool_value,
attribute_str_value,
attribute_int_value,
]
]
):
raise ValueError("Cannot set multiple values simultaneously.")
if attribute_null_value:
return None
elif attribute_float_value is not None:
return float(attribute_float_value)
elif attribute_bool_value is not None:
return bool(attribute_bool_value)
elif attribute_int_value is not None:
return int(attribute_int_value)
return attribute_str_value
def single_true(iterable) -> bool:
    # True iff exactly one element is truthy: the first any() consumes the iterator
    # up to and including the first truthy item; the second any() checks none remain.
    i = iter(iterable)
    return any(i) and not any(i)
|
9946
|
import bpy
from bpy.props import *
from ...nodes.BASE.node_base import RenderNodeBase
class RenderNodeGetListIndex(RenderNodeBase):
"""A simple input node"""
bl_idname = 'RenderNodeGetListIndex'
bl_label = 'Get List Index'
def init(self, context):
self.create_output('RenderNodeSocketInt', "index", 'Index')
def process(self,context,id,path):
node = self.id_data.nodes.get(bpy.context.window_manager.rsn_active_list)
if not node or node.bl_idname != 'RenderNodeTaskRenderListNode': return
self.outputs[0].set_value(node.active_index)
def register():
bpy.utils.register_class(RenderNodeGetListIndex)
def unregister():
bpy.utils.unregister_class(RenderNodeGetListIndex)
|
9989
|
import pytest
from stable_baselines import A2C, ACER, ACKTR, DeepQ, DDPG, PPO1, PPO2, TRPO
from stable_baselines.ddpg import AdaptiveParamNoiseSpec
from stable_baselines.common.identity_env import IdentityEnv, IdentityEnvBox
from stable_baselines.common.vec_env import DummyVecEnv
PARAM_NOISE_DDPG = AdaptiveParamNoiseSpec(initial_stddev=float(0.2), desired_action_stddev=float(0.2))
# Hyperparameters for learning identity for each RL model
LEARN_FUNC_DICT = {
'a2c': lambda e: A2C(policy="MlpPolicy", env=e).learn(total_timesteps=1000),
'acer': lambda e: ACER(policy="MlpPolicy", env=e).learn(total_timesteps=1000),
'acktr': lambda e: ACKTR(policy="MlpPolicy", env=e).learn(total_timesteps=1000),
'deepq': lambda e: DeepQ(policy="MlpPolicy", env=e).learn(total_timesteps=1000),
'ddpg': lambda e: DDPG(policy="MlpPolicy", env=e, param_noise=PARAM_NOISE_DDPG).learn(total_timesteps=1000),
'ppo1': lambda e: PPO1(policy="MlpPolicy", env=e).learn(total_timesteps=1000),
'ppo2': lambda e: PPO2(policy="MlpPolicy", env=e).learn(total_timesteps=1000),
'trpo': lambda e: TRPO(policy="MlpPolicy", env=e).learn(total_timesteps=1000),
}
@pytest.mark.slow
@pytest.mark.parametrize("model_name", ['a2c', 'acer', 'acktr', 'deepq', 'ppo1', 'ppo2', 'trpo'])
def test_identity(model_name):
"""
Test if the algorithm (with a given policy)
can learn an identity transformation (i.e. return observation as an action)
:param model_name: (str) Name of the RL model
"""
env = DummyVecEnv([lambda: IdentityEnv(10)])
model = LEARN_FUNC_DICT[model_name](env)
n_trials = 1000
obs = env.reset()
action_shape = model.predict(obs, deterministic=False)[0].shape
action, _ = model.predict(obs, deterministic=True)
assert action.shape == action_shape
for _ in range(n_trials):
new_action = model.predict(obs, deterministic=True)[0]
assert action == model.predict(obs, deterministic=True)[0]
assert new_action.shape == action_shape
# Free memory
del model, env
@pytest.mark.slow
@pytest.mark.parametrize("model_name", ['a2c', 'ddpg', 'ppo1', 'ppo2', 'trpo'])
def test_identity_continuous(model_name):
"""
Test if the algorithm (with a given policy)
can learn an identity transformation (i.e. return observation as an action)
:param model_name: (str) Name of the RL model
"""
env = DummyVecEnv([lambda: IdentityEnvBox(eps=0.5)])
model = LEARN_FUNC_DICT[model_name](env)
n_trials = 1000
obs = env.reset()
action_shape = model.predict(obs, deterministic=False)[0].shape
action, _ = model.predict(obs, deterministic=True)
assert action.shape == action_shape
for _ in range(n_trials):
new_action = model.predict(obs, deterministic=True)[0]
assert action == model.predict(obs, deterministic=True)[0]
assert new_action.shape == action_shape
|
9990
|
import tensorflow as tf
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants
export_dir = './reference/00000002'
graph_pb = './creditcardfraud.pb'
builder = tf.saved_model.builder.SavedModelBuilder(export_dir)
with tf.gfile.GFile(graph_pb, "rb") as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
sigs = {}
with tf.Session(graph=tf.Graph()) as sess:
# name="" is important to ensure we don't get spurious prefixing
tf.import_graph_def(graph_def, name="")
g = tf.get_default_graph()
inp1 = g.get_tensor_by_name("transaction:0")
inp2 = g.get_tensor_by_name("reference:0")
out = g.get_tensor_by_name("output:0")
sigs[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] = \
tf.saved_model.signature_def_utils.predict_signature_def(
{"transaction": inp1, "reference": inp2}, {"output": out})
builder.add_meta_graph_and_variables(sess,
[tag_constants.SERVING],
signature_def_map=sigs)
builder.save()
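# Follow-up sketch (assumption, not part of the original script): the exported
# SavedModel can be loaded back for inference with the TF 1.x predictor API, e.g.
#
#   from tensorflow.contrib import predictor
#   predict_fn = predictor.from_saved_model(export_dir)
#   result = predict_fn({"transaction": transaction_batch, "reference": reference_batch})
#   print(result["output"])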
|
10008
|
import pytest
from starsessions import SessionBackend
@pytest.mark.asyncio
async def test_cookie_read_write(cookie: SessionBackend, session_payload: dict) -> None:
new_id = await cookie.write(session_payload, "session_id")
assert await cookie.read(new_id) == session_payload
@pytest.mark.asyncio
async def test_cookie_remove(cookie: SessionBackend) -> None:
await cookie.remove("session_id")
@pytest.mark.asyncio
async def test_cookie_exists(cookie: SessionBackend) -> None:
assert await cookie.exists("session_id") is False
@pytest.mark.asyncio
async def test_cookie_generate_id(cookie: SessionBackend) -> None:
new_id = await cookie.generate_id()
assert isinstance(new_id, str)
|
10015
|
import math
import numpy as np
import torch
import torch.nn as nn
from ....ops.pointnet2.pointnet2_stack import pointnet2_modules as pointnet2_stack_modules
from ....ops.pointnet2.pointnet2_stack import pointnet2_utils as pointnet2_stack_utils
from ....utils import common_utils
from ...backbones_2d.transformer import TransformerEncoderLayer3D, TransformerEncoder
from ...roi_heads.target_assigner.proposal_target_layer import ProposalTargetLayer
from ...model_utils.model_nms_utils import class_agnostic_nms
def bilinear_interpolate_torch(im, x, y):
"""
Args:
im: (H, W, C) [y, x]
x: (N)
y: (N)
    Returns:
        ans: (N, C) values of im bilinearly interpolated at each (x, y) location
    """
x0 = torch.floor(x).long()
x1 = x0 + 1
y0 = torch.floor(y).long()
y1 = y0 + 1
x0 = torch.clamp(x0, 0, im.shape[1] - 1)
x1 = torch.clamp(x1, 0, im.shape[1] - 1)
y0 = torch.clamp(y0, 0, im.shape[0] - 1)
y1 = torch.clamp(y1, 0, im.shape[0] - 1)
Ia = im[y0, x0]
Ib = im[y1, x0]
Ic = im[y0, x1]
Id = im[y1, x1]
wa = (x1.type_as(x) - x) * (y1.type_as(y) - y)
wb = (x1.type_as(x) - x) * (y - y0.type_as(y))
wc = (x - x0.type_as(x)) * (y1.type_as(y) - y)
wd = (x - x0.type_as(x)) * (y - y0.type_as(y))
ans = torch.t((torch.t(Ia) * wa)) + torch.t(torch.t(Ib) * wb) + torch.t(torch.t(Ic) * wc) + torch.t(torch.t(Id) * wd)
return ans
def sample_points_with_roi(rois, points, sample_radius_with_roi, num_max_points_of_part=200000):
"""
Args:
rois: (M, 7 + C)
points: (N, 3)
sample_radius_with_roi:
num_max_points_of_part:
Returns:
sampled_points: (N_out, 3)
"""
if points.shape[0] < num_max_points_of_part:
distance = (points[:, None, :] - rois[None, :, 0:3]).norm(dim=-1)
min_dis, min_dis_roi_idx = distance.min(dim=-1)
roi_max_dim = (rois[min_dis_roi_idx, 3:6] / 2).norm(dim=-1)
point_mask = min_dis < roi_max_dim + sample_radius_with_roi
else:
start_idx = 0
point_mask_list = []
while start_idx < points.shape[0]:
distance = (points[start_idx:start_idx + num_max_points_of_part, None, :] - rois[None, :, 0:3]).norm(dim=-1)
min_dis, min_dis_roi_idx = distance.min(dim=-1)
roi_max_dim = (rois[min_dis_roi_idx, 3:6] / 2).norm(dim=-1)
cur_point_mask = min_dis < roi_max_dim + sample_radius_with_roi
point_mask_list.append(cur_point_mask)
start_idx += num_max_points_of_part
point_mask = torch.cat(point_mask_list, dim=0)
sampled_points = points[:1] if point_mask.sum() == 0 else points[point_mask, :]
return sampled_points, point_mask
def sector_fps(points, num_sampled_points, num_sectors):
"""
Args:
points: (N, 3)
num_sampled_points: int
num_sectors: int
Returns:
sampled_points: (N_out, 3)
"""
sector_size = np.pi * 2 / num_sectors
point_angles = torch.atan2(points[:, 1], points[:, 0]) + np.pi
sector_idx = (point_angles / sector_size).floor().clamp(min=0, max=num_sectors)
xyz_points_list = []
xyz_batch_cnt = []
num_sampled_points_list = []
for k in range(num_sectors):
mask = (sector_idx == k)
cur_num_points = mask.sum().item()
if cur_num_points > 0:
xyz_points_list.append(points[mask])
xyz_batch_cnt.append(cur_num_points)
ratio = cur_num_points / points.shape[0]
num_sampled_points_list.append(
min(cur_num_points, math.ceil(ratio * num_sampled_points))
)
if len(xyz_batch_cnt) == 0:
xyz_points_list.append(points)
xyz_batch_cnt.append(len(points))
num_sampled_points_list.append(num_sampled_points)
print(f'Warning: empty sector points detected in SectorFPS: points.shape={points.shape}')
xyz = torch.cat(xyz_points_list, dim=0)
xyz_batch_cnt = torch.tensor(xyz_batch_cnt, device=points.device).int()
sampled_points_batch_cnt = torch.tensor(num_sampled_points_list, device=points.device).int()
sampled_pt_idxs = pointnet2_stack_utils.stack_farthest_point_sample(
xyz.contiguous(), xyz_batch_cnt, sampled_points_batch_cnt
).long()
sampled_points = xyz[sampled_pt_idxs]
return sampled_points
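# Note (added for orientation): sector_fps runs farthest-point sampling independently
# inside angular sectors around the origin, which approximates a global FPS at a
# fraction of the cost; each sector's sampling budget is proportional to its share of points.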
class VoxelSetAbstractionTransFusionv5(nn.Module):
def __init__(self, model_cfg, voxel_size, point_cloud_range, num_bev_features=None,
num_rawpoint_features=None, **kwargs):
super().__init__()
self.model_cfg = model_cfg
self.voxel_size = voxel_size
self.point_cloud_range = point_cloud_range
SA_cfg = self.model_cfg.SA_LAYER
self.SA_layers = nn.ModuleList()
self.linears_in = nn.ModuleList()
self.linears_out = nn.ModuleList()
self.fusion_channel = sum([x[-1] for x in SA_cfg[self.model_cfg.FEATURES_SOURCE[-2]].MLPS])
# self.fusion_channel = 16
self.SA_layer_names = []
self.downsample_times_map = {}
c_in = 0
if 'bev' in self.model_cfg.FEATURES_SOURCE:
c_bev = num_bev_features
c_in += c_bev
if c_bev == self.fusion_channel:
self.linears_in.append(nn.Identity())
self.linears_out.append(nn.Identity())
else:
self.linears_in.append(nn.Sequential(
nn.Linear(c_bev, self.fusion_channel, bias=False),
nn.BatchNorm1d(self.fusion_channel)))
self.linears_out.append(nn.Sequential(
nn.Linear(self.fusion_channel, c_bev, bias=False),
nn.BatchNorm1d(c_bev)))
if 'raw_points' in self.model_cfg.FEATURES_SOURCE:
mlps = SA_cfg['raw_points'].MLPS
for k in range(len(mlps)):
mlps[k] = [num_rawpoint_features - 3] + mlps[k]
self.SA_rawpoints = pointnet2_stack_modules.StackSAModuleMSG(
radii=SA_cfg['raw_points'].POOL_RADIUS,
nsamples=SA_cfg['raw_points'].NSAMPLE,
mlps=mlps,
use_xyz=True,
pool_method='max_pool'
)
cur = sum([x[-1] for x in mlps])
if cur == self.fusion_channel:
self.linears_in.append(nn.Identity())
self.linears_out.append(nn.Identity())
else:
self.linears_in.append(nn.Sequential(
nn.Linear(cur, self.fusion_channel, bias=False),
nn.BatchNorm1d(self.fusion_channel)))
self.linears_out.append(nn.Sequential(
nn.Linear(self.fusion_channel, cur, bias=False),
nn.BatchNorm1d(cur)))
c_in += cur
for src_name in self.model_cfg.FEATURES_SOURCE:
if src_name in ['bev', 'raw_points']:
continue
self.downsample_times_map[src_name] = SA_cfg[src_name].DOWNSAMPLE_FACTOR
mlps = SA_cfg[src_name].MLPS
for k in range(len(mlps)):
mlps[k] = [mlps[k][0]] + mlps[k]
cur_layer = pointnet2_stack_modules.StackSAModuleMSG(
radii=SA_cfg[src_name].POOL_RADIUS,
nsamples=SA_cfg[src_name].NSAMPLE,
mlps=mlps,
use_xyz=True,
pool_method='max_pool',
)
self.SA_layers.append(cur_layer)
cur = sum([x[-1] for x in mlps])
if cur == self.fusion_channel:
self.linears_in.append(nn.Identity())
self.linears_out.append(nn.Identity())
else:
self.linears_in.append(nn.Sequential(
nn.Linear(cur, self.fusion_channel, bias=False),
nn.BatchNorm1d(self.fusion_channel)))
self.linears_out.append(nn.Sequential(
nn.Linear(self.fusion_channel, cur, bias=False),
nn.BatchNorm1d(cur)))
self.SA_layer_names.append(src_name)
c_in += cur
self.vsa_point_feature_fusion = nn.Sequential(
nn.Linear(c_in, self.model_cfg.NUM_OUTPUT_FEATURES, bias=False),
nn.BatchNorm1d(self.model_cfg.NUM_OUTPUT_FEATURES),
nn.ReLU(),
)
self.num_point_features = self.model_cfg.NUM_OUTPUT_FEATURES
self.num_point_features_before_fusion = c_in
if self.model_cfg.NORM:
self.transnorm = nn.LayerNorm(c_in)
else:
self.transnorm = None
if self.model_cfg.NORM2:
self.transnorm2 = nn.LayerNorm(self.fusion_channel)
else:
self.transnorm2 = None
# multi_location
self.trans_layer = TransformerEncoder(TransformerEncoderLayer3D(c_in, self.model_cfg.FUSION_HEAD), self.model_cfg.NUM_LAYERS, self.transnorm)
# have multi-modality + multi-scale
self.trans_fusion_layer = TransformerEncoder(TransformerEncoderLayer3D(self.fusion_channel, self.model_cfg.FUSION2_HEAD), self.model_cfg.NUM_LAYERS2, self.transnorm2)
self.reduce_radius = self.model_cfg.REDUCE_RADIUS**2
self.topks = self.model_cfg.NMS_CONFIG.TOPK
self.max_keypoints = self.model_cfg.NMS_CONFIG.MAX_POINTS
self.res1_actn_1 = nn.Sequential(
nn.LayerNorm(c_in),
nn.ReLU())
self.res1_actn_2 = nn.Sequential(
nn.LayerNorm(c_in),
nn.ReLU())
def interpolate_from_bev_features(self, keypoints, bev_features, batch_size, bev_stride):
x_idxs = (keypoints[:, :, 0] - self.point_cloud_range[0]) / self.voxel_size[0]
y_idxs = (keypoints[:, :, 1] - self.point_cloud_range[1]) / self.voxel_size[1]
x_idxs = x_idxs / bev_stride
y_idxs = y_idxs / bev_stride
point_bev_features_list = []
for k in range(batch_size):
cur_x_idxs = x_idxs[k]
cur_y_idxs = y_idxs[k]
cur_bev_features = bev_features[k].permute(1, 2, 0) # (H, W, C)
point_bev_features = bilinear_interpolate_torch(cur_bev_features, cur_x_idxs, cur_y_idxs)
point_bev_features_list.append(point_bev_features.unsqueeze(dim=0))
point_bev_features = torch.cat(point_bev_features_list, dim=0) # (B, N, C0)
return point_bev_features
def get_sampled_points(self, batch_dict):
batch_size = batch_dict['batch_size']
if self.model_cfg.POINT_SOURCE == 'raw_points':
src_points = batch_dict['points'][:, 1:4]
batch_indices = batch_dict['points'][:, 0].long()
elif self.model_cfg.POINT_SOURCE == 'voxel_centers':
src_points = common_utils.get_voxel_centers(
batch_dict['voxel_coords'][:, 1:4],
downsample_times=1,
voxel_size=self.voxel_size,
point_cloud_range=self.point_cloud_range
)
batch_indices = batch_dict['voxel_coords'][:, 0].long()
else:
raise NotImplementedError
keypoints_list = []
for bs_idx in range(batch_size):
bs_mask = (batch_indices == bs_idx)
sampled_points = src_points[bs_mask].unsqueeze(dim=0) # (1, N, 3)
if self.model_cfg.SAMPLE_METHOD == 'FPS':
cur_pt_idxs = pointnet2_stack_utils.furthest_point_sample(
sampled_points[:, :, 0:3].contiguous(), self.model_cfg.NUM_KEYPOINTS
).long()
if sampled_points.shape[1] < self.model_cfg.NUM_KEYPOINTS:
empty_num = self.model_cfg.NUM_KEYPOINTS - sampled_points.shape[1]
cur_pt_idxs[0, -empty_num:] = cur_pt_idxs[0, :empty_num]
keypoints = sampled_points[0][cur_pt_idxs[0]].unsqueeze(dim=0)
elif self.model_cfg.SAMPLE_METHOD == 'FastFPS':
raise NotImplementedError
else:
raise NotImplementedError
keypoints_list.append(keypoints)
keypoints = torch.cat(keypoints_list, dim=0) # (B, M, 3)
return keypoints
def get_sampled_points_post(self, batch_dict, keypoints):
batch_size = batch_dict['batch_size']
src_points = keypoints
keypoints_list = []
for bs_idx in range(batch_size):
sampled_points = src_points[bs_idx].unsqueeze(dim=0) # (1, N, 3)
if sampled_points.shape[1] < self.max_keypoints:
cur_count = sampled_points.shape[1]
cur_pt_idxs = torch.arange(0, self.max_keypoints)
empty_num = self.max_keypoints - cur_count
while empty_num >= cur_count:
cur_pt_idxs[cur_count:cur_count * 2] = cur_pt_idxs[:cur_count]
empty_num -= cur_count
cur_count *= 2
if cur_count < self.max_keypoints:
assert empty_num == self.max_keypoints - cur_count
cur_pt_idxs[-empty_num:] = cur_pt_idxs[:empty_num]
keypoint = sampled_points[0][cur_pt_idxs].unsqueeze(dim=0)
else:
cur_pt_idxs = pointnet2_stack_utils.furthest_point_sample(
sampled_points[:, :, 0:3].contiguous(), self.max_keypoints
).long()
if sampled_points.shape[1] < self.max_keypoints:
empty_num = self.max_keypoints - sampled_points.shape[1]
cur_pt_idxs[0, -empty_num:] = cur_pt_idxs[0, :empty_num]
keypoint = sampled_points[0][cur_pt_idxs[0]].unsqueeze(dim=0)
keypoints_list.append(keypoint)
keypoint = torch.cat(keypoints_list, dim=0) # (B, M, 3)
return keypoint
def reduce_points(self, batch_dict):
batch_indices = batch_dict['points'][:, 0].long()
masks = []
for bs_idx, roi in enumerate(batch_dict['batch_cls_preds']):
bs_mask = (batch_indices == bs_idx)
pts = batch_dict['points'][bs_mask].unsqueeze(dim=1)[:, :, 1: 4] # (N, 1, 3)
s, _ = torch.max(batch_dict['batch_cls_preds'][bs_idx], dim=1)
top, idx = torch.topk(s, self.topks)
c = batch_dict['batch_box_preds'][bs_idx][idx][:, :3].unsqueeze(dim=0)
dist = (pts - c)**2
dist, _ = dist.sum(dim=-1).min(dim=1)
mask = (dist <= self.reduce_radius)
masks.extend(mask)
batch_dict['points'] = batch_dict['points'][masks]
return batch_dict
def reduce_points_post(self, keypoints, batch_dict):
keypoints_list = []
for bs_idx, roi in enumerate(batch_dict['batch_cls_preds']):
pts = keypoints[bs_idx].unsqueeze(dim=1)
s, _ = torch.max(batch_dict['batch_cls_preds'][bs_idx], dim=1)
top, idx = torch.topk(s, self.topks)
c = batch_dict['batch_box_preds'][bs_idx][idx][:, :3].unsqueeze(dim=0)
dist = (pts - c)**2
dist, _ = dist.sum(dim=-1).min(dim=1)
mask = (dist <= self.reduce_radius)
keypoints_list.append(keypoints[bs_idx][mask])
return keypoints_list
def forward(self, batch_dict):
"""
Args:
batch_dict:
batch_size:
keypoints: (B, num_keypoints, 3)
multi_scale_3d_features: {
'x_conv4': ...
}
points: optional (N, 1 + 3 + C) [bs_idx, x, y, z, ...]
spatial_features: optional
spatial_features_stride: optional
Returns:
point_features: (N, C)
point_coords: (N, 4)
"""
if self.model_cfg.POINT_SOURCE == 'raw_points' and self.reduce_radius > 0:
# batch_dict = self.reduce_points(batch_dict)
keypoints = self.get_sampled_points(batch_dict)
keypoint_lst = self.reduce_points_post(keypoints, batch_dict)
keypoints = self.get_sampled_points_post(batch_dict, keypoint_lst)
else:
keypoints = self.get_sampled_points(batch_dict)
point_features_list = []
if 'bev' in self.model_cfg.FEATURES_SOURCE:
point_bev_features = self.interpolate_from_bev_features(
keypoints, batch_dict['spatial_features'], batch_dict['batch_size'],
bev_stride=batch_dict['spatial_features_stride']
)
point_features_list.append(point_bev_features)
batch_size, num_keypoints, _ = keypoints.shape
new_xyz = keypoints.view(-1, 3)
new_xyz_batch_cnt = new_xyz.new_zeros(batch_size).int().fill_(num_keypoints)
if 'raw_points' in self.model_cfg.FEATURES_SOURCE:
raw_points = batch_dict['points']
xyz = raw_points[:, 1:4]
xyz_batch_cnt = xyz.new_zeros(batch_size).int()
for bs_idx in range(batch_size):
xyz_batch_cnt[bs_idx] = (raw_points[:, 0] == bs_idx).sum()
point_features = raw_points[:, 4:].contiguous() if raw_points.shape[1] > 4 else None
pooled_points, pooled_features = self.SA_rawpoints(
xyz=xyz.contiguous(),
xyz_batch_cnt=xyz_batch_cnt,
new_xyz=new_xyz,
new_xyz_batch_cnt=new_xyz_batch_cnt,
features=point_features,
)
point_features_list.append(pooled_features.view(batch_size, num_keypoints, -1))
for k, src_name in enumerate(self.SA_layer_names):
cur_coords = batch_dict['multi_scale_3d_features'][src_name].indices
xyz = common_utils.get_voxel_centers(
cur_coords[:, 1:4],
downsample_times=self.downsample_times_map[src_name],
voxel_size=self.voxel_size,
point_cloud_range=self.point_cloud_range
)
xyz_batch_cnt = xyz.new_zeros(batch_size).int()
for bs_idx in range(batch_size):
xyz_batch_cnt[bs_idx] = (cur_coords[:, 0] == bs_idx).sum()
pooled_points, pooled_features = self.SA_layers[k](
xyz=xyz.contiguous(),
xyz_batch_cnt=xyz_batch_cnt,
new_xyz=new_xyz,
new_xyz_batch_cnt=new_xyz_batch_cnt,
features=batch_dict['multi_scale_3d_features'][src_name].features.contiguous(),
)
point_features_list.append(pooled_features.view(batch_size, num_keypoints, -1))
point_features_list_new = []
for i, x in enumerate(point_features_list):
feat = self.linears_in[i](x.view(batch_size * num_keypoints, -1))
point_features_list_new.append(feat.view(1, batch_size * num_keypoints, -1))
fusion_feat = torch.cat(point_features_list_new, dim=0)
# have multi-modality + multi-scale
trans1_feat_list = self.trans_fusion_layer(fusion_feat).view(len(fusion_feat), batch_size, num_keypoints, -1)
trans1_feat_projected_list = []
for i, x in enumerate(trans1_feat_list):
feat = self.linears_out[i](x.view(batch_size * num_keypoints, -1))
trans1_feat_projected_list.append(feat.view(batch_size, num_keypoints, -1))
# multi_location
point_features_main1 = torch.cat(point_features_list, dim=2)
point_features_res1 = self.res1_actn_1(torch.cat(trans1_feat_projected_list, dim=2))
point_features_main2 = point_features_res1 + point_features_main1
point_features_res2 = self.res1_actn_2(self.trans_layer(point_features_main2.permute(1, 0, 2)).permute(1, 0, 2))
point_features = point_features_main2 + point_features_res2
batch_idx = torch.arange(batch_size, device=keypoints.device).view(-1, 1).repeat(1, keypoints.shape[1]).view(-1)
point_coords = torch.cat((batch_idx.view(-1, 1).float(), keypoints.view(-1, 3)), dim=1)
batch_dict['point_features_before_fusion'] = point_features.reshape(-1, point_features.shape[-1])
point_features = self.vsa_point_feature_fusion(point_features.reshape(-1, point_features.shape[-1]))
batch_dict['point_features'] = point_features # (BxN, C)
batch_dict['point_coords'] = point_coords # (BxN, 4)
return batch_dict
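# Note: the forward pass above gathers per-keypoint features from the BEV map, the raw points
# and each multi-scale sparse-conv layer, projects every source with linears_in, fuses the
# stacked sources with trans_fusion_layer (projected back via linears_out), adds that residual
# to the concatenated per-source features, applies a second residual through trans_layer, and
# finally feeds the result to vsa_point_feature_fusion.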
class VoxelSetAbstraction(nn.Module):
def __init__(self, model_cfg, voxel_size, point_cloud_range, num_bev_features=None,
num_rawpoint_features=None, **kwargs):
super().__init__()
self.model_cfg = model_cfg
self.voxel_size = voxel_size
self.point_cloud_range = point_cloud_range
SA_cfg = self.model_cfg.SA_LAYER
self.SA_layers = nn.ModuleList()
self.SA_layer_names = []
self.downsample_times_map = {}
c_in = 0
for src_name in self.model_cfg.FEATURES_SOURCE:
if src_name in ['bev', 'raw_points']:
continue
self.downsample_times_map[src_name] = SA_cfg[src_name].DOWNSAMPLE_FACTOR
if SA_cfg[src_name].get('INPUT_CHANNELS', None) is None:
input_channels = SA_cfg[src_name].MLPS[0][0] \
if isinstance(SA_cfg[src_name].MLPS[0], list) else SA_cfg[src_name].MLPS[0]
else:
input_channels = SA_cfg[src_name]['INPUT_CHANNELS']
cur_layer, cur_num_c_out = pointnet2_stack_modules.build_local_aggregation_module(
input_channels=input_channels, config=SA_cfg[src_name]
)
self.SA_layers.append(cur_layer)
self.SA_layer_names.append(src_name)
c_in += cur_num_c_out
if 'bev' in self.model_cfg.FEATURES_SOURCE:
c_bev = num_bev_features
c_in += c_bev
if 'raw_points' in self.model_cfg.FEATURES_SOURCE:
self.SA_rawpoints, cur_num_c_out = pointnet2_stack_modules.build_local_aggregation_module(
input_channels=num_rawpoint_features - 3, config=SA_cfg['raw_points']
)
c_in += cur_num_c_out
self.vsa_point_feature_fusion = nn.Sequential(
nn.Linear(c_in, self.model_cfg.NUM_OUTPUT_FEATURES, bias=False),
nn.BatchNorm1d(self.model_cfg.NUM_OUTPUT_FEATURES),
nn.ReLU(),
)
self.num_point_features = self.model_cfg.NUM_OUTPUT_FEATURES
self.num_point_features_before_fusion = c_in
def interpolate_from_bev_features(self, keypoints, bev_features, batch_size, bev_stride):
"""
Args:
keypoints: (N1 + N2 + ..., 4)
bev_features: (B, C, H, W)
batch_size:
bev_stride:
Returns:
point_bev_features: (N1 + N2 + ..., C)
"""
x_idxs = (keypoints[:, 1] - self.point_cloud_range[0]) / self.voxel_size[0]
y_idxs = (keypoints[:, 2] - self.point_cloud_range[1]) / self.voxel_size[1]
x_idxs = x_idxs / bev_stride
y_idxs = y_idxs / bev_stride
point_bev_features_list = []
for k in range(batch_size):
bs_mask = (keypoints[:, 0] == k)
cur_x_idxs = x_idxs[bs_mask]
cur_y_idxs = y_idxs[bs_mask]
cur_bev_features = bev_features[k].permute(1, 2, 0) # (H, W, C)
point_bev_features = bilinear_interpolate_torch(cur_bev_features, cur_x_idxs, cur_y_idxs)
point_bev_features_list.append(point_bev_features)
point_bev_features = torch.cat(point_bev_features_list, dim=0) # (N1 + N2 + ..., C)
return point_bev_features
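    # Note: keypoint x/y coordinates are converted to fractional BEV grid indices using the
    # voxel size and the BEV stride, and bilinear_interpolate_torch then samples a per-keypoint
    # feature vector out of each sample's (H, W, C) feature map.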
def sectorized_proposal_centric_sampling(self, roi_boxes, points):
"""
Args:
roi_boxes: (M, 7 + C)
points: (N, 3)
Returns:
sampled_points: (N_out, 3)
"""
sampled_points, _ = sample_points_with_roi(
rois=roi_boxes, points=points,
sample_radius_with_roi=self.model_cfg.SPC_SAMPLING.SAMPLE_RADIUS_WITH_ROI,
num_max_points_of_part=self.model_cfg.SPC_SAMPLING.get('NUM_POINTS_OF_EACH_SAMPLE_PART', 200000)
)
sampled_points = sector_fps(
points=sampled_points, num_sampled_points=self.model_cfg.NUM_KEYPOINTS,
num_sectors=self.model_cfg.SPC_SAMPLING.NUM_SECTORS
)
return sampled_points
def get_sampled_points(self, batch_dict):
"""
Args:
batch_dict:
Returns:
keypoints: (N1 + N2 + ..., 4), where 4 indicates [bs_idx, x, y, z]
"""
batch_size = batch_dict['batch_size']
if self.model_cfg.POINT_SOURCE == 'raw_points':
src_points = batch_dict['points'][:, 1:4]
batch_indices = batch_dict['points'][:, 0].long()
elif self.model_cfg.POINT_SOURCE == 'voxel_centers':
src_points = common_utils.get_voxel_centers(
batch_dict['voxel_coords'][:, 1:4],
downsample_times=1,
voxel_size=self.voxel_size,
point_cloud_range=self.point_cloud_range
)
batch_indices = batch_dict['voxel_coords'][:, 0].long()
else:
raise NotImplementedError
keypoints_list = []
for bs_idx in range(batch_size):
bs_mask = (batch_indices == bs_idx)
sampled_points = src_points[bs_mask].unsqueeze(dim=0) # (1, N, 3)
if self.model_cfg.SAMPLE_METHOD == 'FPS':
cur_pt_idxs = pointnet2_stack_utils.farthest_point_sample(
sampled_points[:, :, 0:3].contiguous(), self.model_cfg.NUM_KEYPOINTS
).long()
if sampled_points.shape[1] < self.model_cfg.NUM_KEYPOINTS:
times = int(self.model_cfg.NUM_KEYPOINTS / sampled_points.shape[1]) + 1
non_empty = cur_pt_idxs[0, :sampled_points.shape[1]]
cur_pt_idxs[0] = non_empty.repeat(times)[:self.model_cfg.NUM_KEYPOINTS]
keypoints = sampled_points[0][cur_pt_idxs[0]].unsqueeze(dim=0)
elif self.model_cfg.SAMPLE_METHOD == 'SPC':
cur_keypoints = self.sectorized_proposal_centric_sampling(
roi_boxes=batch_dict['rois'][bs_idx], points=sampled_points[0]
)
bs_idxs = cur_keypoints.new_ones(cur_keypoints.shape[0]) * bs_idx
keypoints = torch.cat((bs_idxs[:, None], cur_keypoints), dim=1)
else:
raise NotImplementedError
keypoints_list.append(keypoints)
keypoints = torch.cat(keypoints_list, dim=0) # (B, M, 3) or (N1 + N2 + ..., 4)
if len(keypoints.shape) == 3:
batch_idx = torch.arange(batch_size, device=keypoints.device).view(-1, 1).repeat(1, keypoints.shape[1]).view(-1, 1)
keypoints = torch.cat((batch_idx.float(), keypoints.view(-1, 3)), dim=1)
return keypoints
@staticmethod
def aggregate_keypoint_features_from_one_source(
batch_size, aggregate_func, xyz, xyz_features, xyz_bs_idxs, new_xyz, new_xyz_batch_cnt,
filter_neighbors_with_roi=False, radius_of_neighbor=None, num_max_points_of_part=200000, rois=None
):
"""
Args:
aggregate_func:
xyz: (N, 3)
xyz_features: (N, C)
xyz_bs_idxs: (N)
new_xyz: (M, 3)
new_xyz_batch_cnt: (batch_size), [N1, N2, ...]
filter_neighbors_with_roi: True/False
radius_of_neighbor: float
num_max_points_of_part: int
rois: (batch_size, num_rois, 7 + C)
        Returns:
            pooled_features: aggregated features for the sampled keypoints
        """
xyz_batch_cnt = xyz.new_zeros(batch_size).int()
if filter_neighbors_with_roi:
point_features = torch.cat((xyz, xyz_features), dim=-1) if xyz_features is not None else xyz
point_features_list = []
for bs_idx in range(batch_size):
bs_mask = (xyz_bs_idxs == bs_idx)
_, valid_mask = sample_points_with_roi(
rois=rois[bs_idx], points=xyz[bs_mask],
sample_radius_with_roi=radius_of_neighbor, num_max_points_of_part=num_max_points_of_part,
)
point_features_list.append(point_features[bs_mask][valid_mask])
xyz_batch_cnt[bs_idx] = valid_mask.sum()
valid_point_features = torch.cat(point_features_list, dim=0)
xyz = valid_point_features[:, 0:3]
xyz_features = valid_point_features[:, 3:] if xyz_features is not None else None
else:
for bs_idx in range(batch_size):
xyz_batch_cnt[bs_idx] = (xyz_bs_idxs == bs_idx).sum()
pooled_points, pooled_features = aggregate_func(
xyz=xyz.contiguous(),
xyz_batch_cnt=xyz_batch_cnt,
new_xyz=new_xyz,
new_xyz_batch_cnt=new_xyz_batch_cnt,
features=xyz_features.contiguous(),
)
return pooled_features
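        # Note: when filter_neighbors_with_roi is set, source points (and their features) outside
        # the per-batch ROI neighborhoods are discarded and xyz_batch_cnt is rebuilt from the
        # survivors; otherwise xyz_batch_cnt simply counts the points per batch index before the
        # stacked set-abstraction aggregation runs.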
def forward(self, batch_dict):
"""
Args:
batch_dict:
batch_size:
keypoints: (B, num_keypoints, 3)
multi_scale_3d_features: {
'x_conv4': ...
}
points: optional (N, 1 + 3 + C) [bs_idx, x, y, z, ...]
spatial_features: optional
spatial_features_stride: optional
Returns:
point_features: (N, C)
point_coords: (N, 4)
"""
keypoints = self.get_sampled_points(batch_dict)
point_features_list = []
if 'bev' in self.model_cfg.FEATURES_SOURCE:
point_bev_features = self.interpolate_from_bev_features(
keypoints, batch_dict['spatial_features'], batch_dict['batch_size'],
bev_stride=batch_dict['spatial_features_stride']
)
point_features_list.append(point_bev_features)
batch_size = batch_dict['batch_size']
new_xyz = keypoints[:, 1:4].contiguous()
new_xyz_batch_cnt = new_xyz.new_zeros(batch_size).int()
for k in range(batch_size):
new_xyz_batch_cnt[k] = (keypoints[:, 0] == k).sum()
if 'raw_points' in self.model_cfg.FEATURES_SOURCE:
raw_points = batch_dict['points']
pooled_features = self.aggregate_keypoint_features_from_one_source(
batch_size=batch_size, aggregate_func=self.SA_rawpoints,
xyz=raw_points[:, 1:4],
xyz_features=raw_points[:, 4:].contiguous() if raw_points.shape[1] > 4 else None,
xyz_bs_idxs=raw_points[:, 0],
new_xyz=new_xyz, new_xyz_batch_cnt=new_xyz_batch_cnt,
filter_neighbors_with_roi=self.model_cfg.SA_LAYER['raw_points'].get('FILTER_NEIGHBOR_WITH_ROI', False),
radius_of_neighbor=self.model_cfg.SA_LAYER['raw_points'].get('RADIUS_OF_NEIGHBOR_WITH_ROI', None),
rois=batch_dict.get('rois', None)
)
point_features_list.append(pooled_features)
for k, src_name in enumerate(self.SA_layer_names):
cur_coords = batch_dict['multi_scale_3d_features'][src_name].indices
cur_features = batch_dict['multi_scale_3d_features'][src_name].features.contiguous()
xyz = common_utils.get_voxel_centers(
cur_coords[:, 1:4], downsample_times=self.downsample_times_map[src_name],
voxel_size=self.voxel_size, point_cloud_range=self.point_cloud_range
)
pooled_features = self.aggregate_keypoint_features_from_one_source(
batch_size=batch_size, aggregate_func=self.SA_layers[k],
xyz=xyz.contiguous(), xyz_features=cur_features, xyz_bs_idxs=cur_coords[:, 0],
new_xyz=new_xyz, new_xyz_batch_cnt=new_xyz_batch_cnt,
filter_neighbors_with_roi=self.model_cfg.SA_LAYER[src_name].get('FILTER_NEIGHBOR_WITH_ROI', False),
radius_of_neighbor=self.model_cfg.SA_LAYER[src_name].get('RADIUS_OF_NEIGHBOR_WITH_ROI', None),
rois=batch_dict.get('rois', None)
)
point_features_list.append(pooled_features)
point_features = torch.cat(point_features_list, dim=-1)
batch_dict['point_features_before_fusion'] = point_features.view(-1, point_features.shape[-1])
point_features = self.vsa_point_feature_fusion(point_features.view(-1, point_features.shape[-1]))
batch_dict['point_features'] = point_features # (BxN, C)
batch_dict['point_coords'] = keypoints # (BxN, 4)
return batch_dict
|
10019
|
from ariadne import make_executable_schema, load_schema_from_path
from ariadne.asgi import GraphQL
from resolvers import query, skill, person, eye_color, mutation
# import schema from GraphQL file
type_defs = load_schema_from_path("./schema.gql")
schema = make_executable_schema(
type_defs, query, skill, person, eye_color, mutation
)
app = GraphQL(schema, debug=True)
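# Usage note (illustrative; module and file names are assumptions): the ASGI app above can be
# served with any ASGI server, e.g. `uvicorn main:app`, provided this module is importable as
# `main` and schema.gql plus the resolvers module sit alongside it.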
|
10042
|
from collections import defaultdict
import graphene
import pytest
from django.core.exceptions import ValidationError
from ....shipping.error_codes import ShippingErrorCode
from ..mutations import BaseChannelListingMutation
def test_validate_duplicated_channel_ids(channel_PLN, channel_USD):
# given
channel_id = graphene.Node.to_global_id("Channel", channel_USD.id)
second_channel_id = graphene.Node.to_global_id("Channel", channel_PLN.id)
errors = defaultdict(list)
# when
result = BaseChannelListingMutation.validate_duplicated_channel_ids(
[channel_id],
[second_channel_id],
errors,
ShippingErrorCode.DUPLICATED_INPUT_ITEM.value,
)
# then
assert result is None
assert errors["input"] == []
def test_validate_duplicated_channel_ids_with_duplicates(channel_PLN):
# given
channel_id = graphene.Node.to_global_id("Channel", channel_PLN.id)
second_channel_id = graphene.Node.to_global_id("Channel", channel_PLN.id)
error_code = ShippingErrorCode.DUPLICATED_INPUT_ITEM.value
errors = defaultdict(list)
# when
result = BaseChannelListingMutation.validate_duplicated_channel_ids(
[channel_id], [second_channel_id], errors, error_code
)
# then
assert result is None
assert errors["input"][0].code == error_code
def test_validate_duplicated_channel_values(channel_PLN, channel_USD):
# given
channel_id = graphene.Node.to_global_id("Channel", channel_PLN.id)
second_channel_id = graphene.Node.to_global_id("Channel", channel_USD.id)
error_code = ShippingErrorCode.DUPLICATED_INPUT_ITEM.value
errors = defaultdict(list)
field = "add_channels"
# when
result = BaseChannelListingMutation.validate_duplicated_channel_values(
[channel_id, second_channel_id], field, errors, error_code
)
# then
assert result is None
assert errors[field] == []
def test_validate_duplicated_channel_values_with_duplicates(channel_PLN):
# given
channel_id = graphene.Node.to_global_id("Channel", channel_PLN.id)
second_channel_id = graphene.Node.to_global_id("Channel", channel_PLN.id)
error_code = ShippingErrorCode.DUPLICATED_INPUT_ITEM.value
errors = defaultdict(list)
field = "add_channels"
# when
result = BaseChannelListingMutation.validate_duplicated_channel_values(
[channel_id, second_channel_id], field, errors, error_code
)
# then
assert result is None
assert errors[field][0].code == error_code
def test_clean_channels_add_channels(channel_PLN):
# given
channel_id = graphene.Node.to_global_id("Channel", channel_PLN.id)
error_code = ShippingErrorCode.DUPLICATED_INPUT_ITEM.value
errors = defaultdict(list)
# when
result = BaseChannelListingMutation.clean_channels(
None, {"add_channels": [{"channel_id": channel_id}]}, errors, error_code
)
# then
assert result == {
"add_channels": [{"channel_id": channel_id, "channel": channel_PLN}],
"remove_channels": [],
}
assert errors["input"] == []
def test_clean_channels_remove_channels(channel_PLN):
# given
channel_id = graphene.Node.to_global_id("Channel", channel_PLN.id)
error_code = ShippingErrorCode.DUPLICATED_INPUT_ITEM.value
errors = defaultdict(list)
# when
result = BaseChannelListingMutation.clean_channels(
None, {"remove_channels": [channel_id]}, errors, error_code
)
# then
assert result == {"add_channels": [], "remove_channels": [str(channel_PLN.id)]}
assert errors["input"] == []
def test_clean_channels_with_errors(channel_PLN):
# given
channel_id = graphene.Node.to_global_id("Channel", channel_PLN.id)
error_code = ShippingErrorCode.DUPLICATED_INPUT_ITEM.value
errors = defaultdict(list)
# when
result = BaseChannelListingMutation.clean_channels(
None, {"remove_channels": [channel_id, channel_id]}, errors, error_code
)
# then
assert result == {}
assert errors["remove_channels"][0].code == error_code
def test_clean_channels_invalid_object_type(channel_PLN):
# given
channel_id = graphene.Node.to_global_id("Product", channel_PLN.id)
error_code = ShippingErrorCode.GRAPHQL_ERROR.value
errors = defaultdict(list)
# when
with pytest.raises(ValidationError) as error:
BaseChannelListingMutation.clean_channels(
None, {"remove_channels": [channel_id]}, errors, error_code
)
# then
assert (
error.value.error_dict["remove_channels"][0].message
== f"Must receive Channel id: {channel_id}."
)
|
10065
|
import unittest
from shapy.framework.tcelements import *
from shapy.framework.executor import run
from tests import TCTestCase
class TestIngress(TCTestCase):
def setUp(self):
self.interface = Interface('lo')
def test_ingress_filter(self):
q = IngressQdisc()
q.add(RedirectFilter('dst 127.0.0.3', 'eth0'))
self.interface.add_ingress(q)
self.interface.set_shaping()
|
10093
|
import argparse
from time import sleep, time
from collections import defaultdict
from sqlalchemy import orm, text, insert, delete
from sqlalchemy.orm import selectinload
import models
from app import db
from app import logger
from scripts.queue import JsonWorks, JsonAuthors, JsonConcepts, JsonInstitutions, JsonVenues
from util import elapsed
def run(**kwargs):
entity_type = kwargs.get("entity")
method_name = kwargs.get("method")
if entity_type == "work" and method_name == "add_everything":
queue_table = "queue.work_add_everything"
elif method_name == "store":
queue_table = f"queue.{entity_type.lower()}_store"
else:
queue_table = f"queue.{method_name.lower()}"
if single_id := kwargs.get('id'):
if objects := get_objects(entity_type, [single_id]):
logger.info(f'found object {objects[0]}')
store_objects(objects)
db.session.commit()
else:
            logger.warning(f'found no object with id {single_id}')
else:
objects_updated = 0
limit = kwargs.get('limit')
chunk = kwargs.get('chunk')
total_count = 0
while limit is None or objects_updated < limit:
loop_start = time()
if object_ids := fetch_queue_chunk_ids(queue_table, chunk):
objects = get_objects(entity_type, object_ids)
for obj in objects:
method_start_time = time()
total_count += 1
print(f"*** #{total_count} starting {obj}.{method_name}() method")
method_to_run = getattr(obj, method_name)
method_to_run()
print(f">>> finished {obj}.{method_name}(). took {elapsed(method_start_time, 4)} seconds")
# print(1/0)
logger.info('committing')
start_time = time()
if method_name == "store":
store_json_objects(objects)
else:
db.session.commit() # fail loudly for now
logger.info(f'commit took {elapsed(start_time, 4)}s')
finish_object_ids(queue_table, object_ids)
objects_updated += len(objects)
logger.info(f'processed chunk of {chunk} objects in {elapsed(loop_start, 2)} seconds')
else:
logger.info('nothing ready in the queue, waiting 5 seconds...')
sleep(5)
def store_json_objects(objects):
delete_dict_all_objects = defaultdict(list)
insert_dict_all_objects = defaultdict(list)
for count, obj in enumerate(objects):
obj.delete_dict = defaultdict(list)
for row in obj.insert_dicts:
for table_name, insert_dict in row.items():
insert_dict_all_objects[table_name] += [insert_dict]
obj.delete_dict[table_name] += [insert_dict["id"]]
for table_name, ids in obj.delete_dict.items():
delete_dict_all_objects[table_name] += ids
start_time = time()
for table_name, delete_ids in delete_dict_all_objects.items():
my_table = globals()[table_name]
db.session.remove()
db.session.execute(delete(my_table).where(my_table.id.in_(delete_ids)))
db.session.commit()
print("delete done")
for table_name, all_insert_strings in insert_dict_all_objects.items():
my_table = globals()[table_name]
db.session.remove()
db.session.execute(insert(my_table).values(all_insert_strings))
db.session.commit()
print("insert and commit took {} seconds".format(elapsed(start_time, 2)))
def fetch_queue_chunk_ids(queue_table, chunk_size):
text_query = f"""
with chunk as (
select id
from {queue_table}
where started is null
order by
finished asc nulls first,
rand
limit :chunk
for update skip locked
)
update {queue_table}
set started = now()
from chunk
where {queue_table}.id = chunk.id
returning chunk.id;
"""
logger.info(f'getting {chunk_size} ids from the queue')
start_time = time()
ids = [
row[0] for row in
db.engine.execute(text(text_query).bindparams(chunk=chunk_size).execution_options(autocommit=True)).all()
]
logger.info(f'got {len(ids)} ids from the queue in {elapsed(start_time, 4)}s')
logger.info(f'got these ids: {ids}')
return ids
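# Note: the CTE above implements a common Postgres work-queue pattern -- `for update skip locked`
# lets concurrent workers claim disjoint chunks without blocking each other, and stamping
# `started = now()` keeps claimed rows from being handed out again until finish_object_ids()
# resets them.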
def finish_object_ids(queue_table, object_ids):
# logger.info(f'finishing queue chunk')
start_time = time()
query_text = f'''
update {queue_table}
set finished = now(), started=null
where id = any(:ids)
'''
db.session.execute(text(query_text).bindparams(ids=object_ids))
db.session.commit()
# logger.info(f'finished saving finish_objects in {elapsed(start_time, 4)}s')
def get_objects(entity_type, object_ids):
logger.info(f'getting {len(object_ids)} objects')
start_time = time()
if entity_type == "work":
objects = db.session.query(models.Work).options(
selectinload(models.Work.records).selectinload(models.Record.journals).raiseload('*'),
selectinload(models.Work.records).raiseload('*'),
selectinload(models.Work.locations),
selectinload(models.Work.journal).raiseload('*'),
selectinload(models.Work.references).raiseload('*'),
selectinload(models.Work.references_unmatched).raiseload('*'),
selectinload(models.Work.mesh),
selectinload(models.Work.counts_by_year).raiseload('*'),
selectinload(models.Work.abstract),
selectinload(models.Work.extra_ids).raiseload('*'),
selectinload(models.Work.related_works).raiseload('*'),
selectinload(models.Work.affiliations).selectinload(models.Affiliation.author).selectinload(models.Author.orcids).raiseload('*'),
selectinload(models.Work.affiliations).selectinload(models.Affiliation.author).raiseload('*'),
selectinload(models.Work.affiliations).selectinload(models.Affiliation.institution).selectinload(models.Institution.ror).raiseload('*'),
selectinload(models.Work.affiliations).selectinload(models.Affiliation.institution).raiseload('*'),
selectinload(models.Work.concepts).selectinload(models.WorkConcept.concept).raiseload('*'),
selectinload(models.Work.concepts_full).raiseload('*'),
orm.Load(models.Work).raiseload('*')
).filter(models.Work.paper_id.in_(object_ids)).all()
elif entity_type == "author":
objects = db.session.query(models.Author).options(
selectinload(models.Author.counts_by_year_papers),
selectinload(models.Author.counts_by_year_citations),
selectinload(models.Author.alternative_names),
selectinload(models.Author.author_concepts),
selectinload(models.Author.orcids).selectinload(models.AuthorOrcid.orcid_data),
selectinload(models.Author.last_known_institution).selectinload(models.Institution.ror).raiseload('*'),
selectinload(models.Author.last_known_institution).raiseload('*'),
orm.Load(models.Author).raiseload('*')
).filter(models.Author.author_id.in_(object_ids)).all()
elif entity_type == "venue":
objects = db.session.query(models.Venue).options(
selectinload(models.Venue.counts_by_year_papers),
selectinload(models.Venue.counts_by_year_citations),
orm.Load(models.Venue).raiseload('*')
).filter(models.Venue.journal_id.in_(object_ids)).all()
elif entity_type == "institution":
objects = db.session.query(models.Institution).filter(models.Institution.affiliation_id.in_(object_ids)).all()
elif entity_type == "concept":
objects = db.session.query(models.Concept).filter(models.Concept.field_of_study_id.in_(object_ids)).all()
logger.info(f'got {len(objects)} objects in {elapsed(start_time, 4)}s')
return objects
# python -m scripts.fast_queue --entity=work --method=add_everything --limit=3
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run fast queue.")
parser.add_argument('--entity', type=str, help="the entity type to run")
parser.add_argument('--method', type=str, help="the method to run")
parser.add_argument('--id', nargs="?", type=str, help="id of the one thing you want to update (case sensitive)")
parser.add_argument('--limit', "-l", nargs="?", type=int, help="how many objects to work on")
parser.add_argument(
'--chunk', "-ch", nargs="?", default=100, type=int, help="how many objects to take off the queue at once"
)
parsed_args = parser.parse_args()
run(**vars(parsed_args))
|
10113
|
import re
from collections import Counter
def is_isogram(word):
    # A string qualifies only if it is non-empty and every letter occurs the same number of times.
    if not isinstance(word, str) or word == '':
        return False
    letter_counts = {count for _, count in Counter(
        re.sub('[^a-z]', '', word.lower())
    ).most_common()}
    return len(letter_counts) == 1
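# Illustrative usage sketch (not part of the original snippet; the example inputs are assumptions):
if __name__ == "__main__":
    assert is_isogram("Dermatoglyphics")   # every letter appears exactly once
    assert is_isogram("aabb")              # both letters appear twice, so the counts are uniform
    assert not is_isogram("moOse")         # 'o' appears twice while the other letters appear once
    assert not is_isogram("")              # empty strings are rejected outright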
|
10136
|
import krpc
import time
import math
from simple_pid import PID
conn = krpc.connect(name="UI Test")
vessel = conn.space_center.active_vessel
kerbin_frame = vessel.orbit.body.reference_frame
orb_frame = vessel.orbital_reference_frame
srf_frame = vessel.surface_reference_frame
surface_gravity = vessel.orbit.body.surface_gravity
current_met = conn.add_stream(getattr, vessel, 'met')
current_roll = conn.add_stream(getattr, vessel.flight(), 'roll')
current_pitch = conn.add_stream(getattr, vessel.flight(), 'pitch')
current_heading = conn.add_stream(getattr, vessel.flight(), 'heading')
current_alt = conn.add_stream(getattr, vessel.flight(), 'surface_altitude')
lowest = conn.add_stream(vessel.bounding_box, srf_frame)
current_drag = conn.add_stream(getattr, vessel.flight(), 'drag')
current_aero = conn.add_stream(getattr, vessel.flight(), 'aerodynamic_force')
current_speed = conn.add_stream(getattr, vessel.flight(kerbin_frame), 'speed')
vessel.control.activate_next_stage()
vessel.control.sas = True
time.sleep(.2)
vessel.control.sas_mode = conn.space_center.SASMode.retrograde
def bottom_altitude():
return max(0, current_alt() - abs(lowest()[0][0]))
for engine in vessel.parts.engines:
engine.gimbal_locked = True
while True:
aero_amp = math.sqrt(current_aero()[0] ** 2
+ current_aero()[1] ** 2
+ current_aero()[2] ** 2)
time_to_zero = current_speed() / ((((vessel.max_thrust * .9) + aero_amp) / vessel.mass)
+ vessel.orbit.body.surface_gravity)
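    # Note: rough "suicide burn" estimate -- seconds needed to cancel the current surface speed,
    # using ~90% of max thrust plus the magnitude of the aerodynamic force, per unit vessel mass,
    # with surface gravity folded into the same divisor.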
if (time_to_zero * current_speed()) >= bottom_altitude() - current_speed():
print(current_speed())
print(f"Start Hover Slam Burn")
vessel.control.throttle = .9
break
while current_speed() > 50:
print(current_speed())
time.sleep(.01)
print(f"Switch to Stab")
for leg in vessel.parts.legs:
leg.deployed = True
pid1 = PID(.15, 0, .5, setpoint=0)
pid1.output_limits = (0, 1)
pid1.sample_time = 0.01
while bottom_altitude() > 1:
vessel.control.throttle = pid1(bottom_altitude())
# pid1.setpoint *= .98
time.sleep(.01)
vessel.control.sas_mode = conn.space_center.SASMode.radial
vessel.control.throttle = 0
|
10139
|
import datetime
import os
from io import BytesIO
import logging
from functools import wraps
from copy import deepcopy
from collections import Counter
import slugify
import yaml
import mistune
import requests
from flask import \
Blueprint, Flask, render_template, abort, send_file, make_response
from flask_cors import CORS
from flask_jsonpify import jsonify
from flask_basicauth import BasicAuth
from datapackage_pipelines.status import status_mgr
from datapackage_pipelines.utilities.stat_utils import user_facing_stats
YAML_DUMPER = yaml.CDumper if 'CDumper' in yaml.__dict__ else yaml.Dumper
def datestr(x):
if x is None:
return ''
return str(datetime.datetime.fromtimestamp(x))
def yamlize(x):
ret = yaml.dump(x, default_flow_style=False, Dumper=YAML_DUMPER)
return ret
markdown = mistune.Markdown(hard_wrap=True)
status = status_mgr()
def make_hierarchies(statuses):
def group(lvl):
pipelines = list(filter(lambda x: len(x['id']) == 1, lvl))
children_ = list(filter(lambda x: len(x['id']) > 1, lvl))
groups_ = {}
for child in children_:
child_key = child['id'].pop(0)
groups_.setdefault(child_key, []).append(child)
children_ = dict(
(k, group(v))
for k, v in groups_.items()
)
for p in pipelines:
p['id'] = p['id'][0]
return {
'pipelines': pipelines,
'children': children_
}
def flatten(children_):
for k, v in children_.items():
v['children'] = flatten(v['children'])
child_keys = list(v['children'].keys())
if len(child_keys) == 1 and len(v['pipelines']) == 0:
child_key = child_keys[0]
children_['/'.join([k, child_key])] = v['children'][child_key]
del children_[k]
return children_
statuses = [
{
'id': st['id'].split('/'),
'title': st.get('title'),
'stats': st.get('stats'),
'slug': st.get('slug')
}
for st in statuses
]
groups = group(statuses)
children = groups.get('children', {})
groups['children'] = flatten(children)
return groups
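# Note: make_hierarchies() splits each pipeline id on '/' and nests the statuses into a
# {'pipelines': [...], 'children': {...}} tree; flatten() then collapses chains of groups that
# hold a single child and no pipelines of their own into combined 'parent/child' keys.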
def basic_auth_required(view_func):
"""
A decorator that can be used to protect specific views with HTTP basic
access authentication. Conditional on having BASIC_AUTH_USERNAME and
BASIC_AUTH_PASSWORD set as env vars.
"""
@wraps(view_func)
def wrapper(*args, **kwargs):
if app.config.get('BASIC_AUTH_ACTIVE', False):
if basic_auth.authenticate():
return view_func(*args, **kwargs)
else:
return basic_auth.challenge()
else:
return view_func(*args, **kwargs)
return wrapper
blueprint = Blueprint('dpp', 'dpp')
@blueprint.route("")
@blueprint.route("<path:pipeline_path>")
@basic_auth_required
def main(pipeline_path=None):
pipeline_ids = sorted(status.all_pipeline_ids())
# If we have a pipeline_path, filter the pipeline ids.
if pipeline_path is not None:
if not pipeline_path.startswith('./'):
pipeline_path = './' + pipeline_path
pipeline_ids = [p for p in pipeline_ids if p.startswith(pipeline_path)]
statuses = []
for pipeline_id in pipeline_ids:
pipeline_status = status.get(pipeline_id)
ex = pipeline_status.get_last_execution()
success_ex = pipeline_status.get_last_successful_execution()
pipeline_obj = {
'id': pipeline_id.lstrip('./'),
'title': pipeline_status.pipeline_details.get('title'),
'stats': user_facing_stats(ex.stats) if ex else None,
'slug': slugify.slugify(pipeline_id),
'trigger': ex.trigger if ex else None,
'error_log': pipeline_status.errors(),
'state': pipeline_status.state(),
'pipeline': pipeline_status.pipeline_details,
'message': pipeline_status.state().capitalize(),
'dirty': pipeline_status.dirty(),
'runnable': pipeline_status.runnable(),
'class': {'INIT': 'primary',
'QUEUED': 'primary',
'INVALID': 'danger',
'RUNNING': 'warning',
'SUCCEEDED': 'success',
'FAILED': 'danger'
}[pipeline_status.state()],
'ended': datestr(ex.finish_time) if ex else None,
'started': datestr(ex.start_time) if ex else None,
'last_success':
datestr(success_ex.finish_time) if success_ex else None,
}
statuses.append(pipeline_obj)
def state_and_not_dirty(state, p):
return p.get('state') == state and not p.get('dirty')
def state_or_dirty(state, p):
return p.get('state') == state or p.get('dirty')
categories = [
['ALL', 'All Pipelines', lambda _, __: True],
['INVALID', "Can't start", lambda _, p: not p['runnable']],
['QUEUED', 'Waiting to run', lambda state, p: p['state'] == state],
['RUNNING', 'Running', state_and_not_dirty],
['FAILED', 'Failed Execution', state_and_not_dirty],
['SUCCEEDED', 'Successful Execution', state_and_not_dirty],
]
for item in categories:
item.append([p for p in deepcopy(statuses)
if item[2](item[0], p)])
item.append(len(item[-1]))
item.append(make_hierarchies(item[-2]))
return render_template('dashboard.html',
categories=categories,
yamlize=yamlize,
markdown=markdown)
@blueprint.route("api/raw/status")
@basic_auth_required
def pipeline_raw_api_status():
pipelines = sorted(status.all_statuses(), key=lambda x: x.get('id'))
for pipeline in pipelines:
# can get the full details from api/raw/<path:pipeline_id>
for attr in ["pipeline", "reason", "error_log"]:
if attr in pipeline:
del pipeline[attr]
return jsonify(pipelines)
@blueprint.route("api/raw/<path:pipeline_id>")
@basic_auth_required
def pipeline_raw_api(pipeline_id):
if not pipeline_id.startswith('./'):
pipeline_id = './' + pipeline_id
pipeline_status = status.get(pipeline_id)
if not pipeline_status.pipeline_details:
abort(404)
last_execution = pipeline_status.get_last_execution()
last_successful_execution = pipeline_status.get_last_successful_execution()
ret = {
"id": pipeline_id,
"cache_hash": pipeline_status.cache_hash,
"dirty": pipeline_status.dirty(),
"queued": last_execution.queue_time if last_execution else None,
"started": last_execution.start_time if last_execution else None,
"ended": last_execution.finish_time if last_execution else None,
"reason": last_execution.log if last_execution else None,
"error_log": pipeline_status.errors(),
"stats": last_execution.stats if last_execution else None,
"success": last_execution.success if last_execution else None,
"last_success":
last_successful_execution.finish_time
if last_successful_execution else None,
"trigger": last_execution.trigger if last_execution else None,
"pipeline": pipeline_status.pipeline_details,
"source": pipeline_status.source_spec,
"message": pipeline_status.state().capitalize(),
"state": pipeline_status.state(),
}
return jsonify(ret)
@blueprint.route("api/<field>/<path:pipeline_id>")
@basic_auth_required
def pipeline_api(field, pipeline_id):
if not pipeline_id.startswith('./'):
pipeline_id = './' + pipeline_id
pipeline_status = status.get(pipeline_id)
if not pipeline_status.pipeline_details:
abort(404)
ret = None
if field == 'pipeline':
ret = pipeline_status.pipeline_details
ret = yamlize(ret)
elif field == 'source':
ret = pipeline_status.source_spec
ret = yamlize(ret)
elif field == 'log':
ex = pipeline_status.get_last_execution()
ret = ex.log if ex else ''
else:
abort(400)
ret = ret.split('\n')
ret = {'text': ret}
return jsonify(ret)
def _make_badge_response(subject, text, colour):
image_url = 'https://img.shields.io/badge/{}-{}-{}.svg'.format(
subject, text, colour)
r = requests.get(image_url)
buffer_image = BytesIO(r.content)
buffer_image.seek(0)
res = make_response(send_file(buffer_image, mimetype='image/svg+xml'))
res.headers['Cache-Control'] = \
'max-age=0, no-cache, no-store, must-revalidate'
res.headers['Expires'] = '0'
return res
@blueprint.route("badge/<path:pipeline_id>")
def badge(pipeline_id):
'''An individual pipeline status'''
if not pipeline_id.startswith('./'):
pipeline_id = './' + pipeline_id
pipeline_status = status.get(pipeline_id)
status_color = 'lightgray'
if pipeline_status.pipeline_details:
status_text = pipeline_status.state().lower()
last_execution = pipeline_status.get_last_execution()
success = last_execution.success if last_execution else None
if success is True:
stats = last_execution.stats if last_execution else None
record_count = stats.get('count_of_rows')
if record_count is not None:
status_text += ' (%d records)' % record_count
status_color = 'brightgreen'
elif success is False:
status_color = 'red'
else:
status_text = "not found"
return _make_badge_response('pipeline', status_text, status_color)
@blueprint.route("badge/collection/<path:pipeline_path>")
def badge_collection(pipeline_path):
'''Status badge for a collection of pipelines.'''
all_pipeline_ids = sorted(status.all_pipeline_ids())
if not pipeline_path.startswith('./'):
pipeline_path = './' + pipeline_path
# Filter pipeline ids to only include those that start with pipeline_path.
path_pipeline_ids = \
[p for p in all_pipeline_ids if p.startswith(pipeline_path)]
statuses = []
for pipeline_id in path_pipeline_ids:
pipeline_status = status.get(pipeline_id)
if pipeline_status is None:
abort(404)
status_text = pipeline_status.state().lower()
statuses.append(status_text)
status_color = 'lightgray'
status_counter = Counter(statuses)
if status_counter:
if len(status_counter) == 1 and status_counter['succeeded'] > 0:
status_color = 'brightgreen'
elif status_counter['failed'] > 0:
status_color = 'red'
elif status_counter['failed'] == 0:
status_color = 'yellow'
status_text = \
', '.join(['{} {}'.format(v, k)
for k, v in status_counter.items()])
else:
status_text = "not found"
return _make_badge_response('pipelines', status_text, status_color)
app = Flask(__name__)
app.config['JSONIFY_PRETTYPRINT_REGULAR'] = True
if os.environ.get('DPP_BASIC_AUTH_USERNAME', False) \
and os.environ.get('DPP_BASIC_AUTH_PASSWORD', False):
app.config['BASIC_AUTH_USERNAME'] = os.environ['DPP_BASIC_AUTH_USERNAME']
app.config['BASIC_AUTH_PASSWORD'] = os.environ['DPP_BASIC_AUTH_PASSWORD']
app.config['BASIC_AUTH_ACTIVE'] = True
basic_auth = BasicAuth(app)
CORS(app)
url_prefix = os.environ.get('DPP_BASE_PATH', '/')
if not url_prefix.endswith('/'):
url_prefix += '/'
logging.info('Serving on path %s', url_prefix)
app.register_blueprint(blueprint, url_prefix=url_prefix)
|
10158
|
from asyncio import Future
from greenlet import getcurrent
import psycopg2
from psycopg2 import * # noqa
from psycopg2 import extensions, OperationalError
__version__ = psycopg2.__version__
def psycopg2_wait_callback(conn):
"""A wait callback to allow greenlet to work with Psycopg.
The caller must be from a greenlet other than the main one.
:param conn: psycopg2 connection or file number
    This function must be invoked from a coroutine with a parent greenlet;
    invoking it from the main greenlet will raise an exception.
"""
while True:
state = conn.poll()
if state == extensions.POLL_OK:
# Done with waiting
break
elif state == extensions.POLL_READ:
_wait_fd(conn)
elif state == extensions.POLL_WRITE:
_wait_fd(conn, read=False)
else: # pragma nocover
raise OperationalError("Bad result from poll: %r" % state)
# INTERNALS
def _wait_fd(conn, read=True):
    '''Wait for an event on the file descriptor of ``conn``.
    :param conn: psycopg2 connection or file descriptor
    :param read: wait for a read event if ``True``, otherwise wait for a
        write event.
    This function must be invoked from a coroutine with a parent greenlet;
    invoking it from the main greenlet will raise an exception.
    '''
current = getcurrent()
parent = current.parent
assert parent, '"_wait_fd" must be called by greenlet with a parent'
try:
fileno = conn.fileno()
except AttributeError:
fileno = conn
future = Future()
# When the event on fd occurs switch back to the current greenlet
if read:
future._loop.add_reader(fileno, _done_wait_fd, fileno, future, read)
else:
future._loop.add_writer(fileno, _done_wait_fd, fileno, future, read)
# switch back to parent greenlet
parent.switch(future)
# Back on the child greenlet. Raise error if there is one
future.result()
def _done_wait_fd(fd, future, read):
try:
if read:
future._loop.remove_reader(fd)
else:
future._loop.remove_writer(fd)
except Exception as exc:
future.set_exception(exc)
else:
future.set_result(None)
try:
extensions.POLL_OK
except AttributeError: # pragma nocover
from pulsar import ImproperlyConfigured
raise ImproperlyConfigured(
'Psycopg2 does not have support for asynchronous connections. '
'You need at least version 2.2.0 of Psycopg2.')
extensions.set_wait_callback(psycopg2_wait_callback)
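# Usage sketch (comment only, illustrative): with the wait callback installed above, psycopg2
# calls made from a child greenlet suspend by switching to the parent greenlet whenever the
# socket would block; the parent is expected to resolve the asyncio Future handed to it (via the
# add_reader/add_writer hookup in _wait_fd) and switch back once the descriptor is ready.
# Calling into psycopg2 from the main greenlet therefore raises, as the docstrings note.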
|
10162
|
import numpy as np
from visual_dynamics.policies import CameraTargetPolicy
class RandomOffsetCameraTargetPolicy(CameraTargetPolicy):
def __init__(self, env, target_env, camera_node_name, agent_node_name, target_node_name,
height=12.0, radius=16.0, angle=(-np.pi/4, np.pi/4), tightness=0.1, hra_interpolation=True):
self.height = height
self.radius = radius
self.angle = angle
offset = self.sample_offset()
super(RandomOffsetCameraTargetPolicy, self).__init__(env, target_env, camera_node_name, agent_node_name,
target_node_name, offset, tightness=tightness,
hra_interpolation=hra_interpolation)
def reset(self):
self.offset = self.sample_offset()
state = super(RandomOffsetCameraTargetPolicy, self).reset()
# self.offset = self.sample_offset()
return state
def sample_offset(self):
height = np.random.uniform(*self.height) if isinstance(self.height, (list, tuple)) else self.height
radius = np.random.uniform(*self.radius) if isinstance(self.radius, (list, tuple)) else self.radius
angle = np.random.uniform(*self.angle) if isinstance(self.angle, (list, tuple)) else self.angle
return np.array([radius * np.sin(angle), -radius * np.cos(angle), height])
def _get_config(self):
config = super(RandomOffsetCameraTargetPolicy, self)._get_config()
config.pop('offset')
config.update({'height': self.height,
'radius': self.radius,
'angle': self.angle})
return config
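# Note: the offset is re-sampled on every reset(); sample_offset() returns
# [radius * sin(angle), -radius * cos(angle), height], i.e. a point at the configured height on a
# circle of the given radius, where each of height/radius/angle may be a fixed value or a
# (low, high) range sampled uniformly.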
|
10194
|
import typer
def name_callback(value: str):
if value != "Camila":
raise typer.BadParameter("Only Camila is allowed")
return value
def main(name: str = typer.Option(..., callback=name_callback)):
typer.echo(f"Hello {name}")
if __name__ == "__main__":
typer.run(main)
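# Usage note (illustrative, assuming the file is saved as main.py):
#   $ python main.py --name Camila   -> prints "Hello Camila"
#   $ python main.py --name Rick     -> exits with an error; the typer.BadParameter raised in the
#     callback is rendered as a usage error for the --name option.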
|
10201
|
from typing import List, Dict
import json
from gtmcore.http import ConcurrentRequestManager, ConcurrentRequest
from gtmcore.environment.packagemanager import PackageManager, PackageResult, PackageMetadata
from gtmcore.container import container_for_context
from gtmcore.labbook import LabBook
from gtmcore.logging import LMLogger
logger = LMLogger.get_logger()
class CondaPackageManagerBase(PackageManager):
"""Class to implement the conda package manager
"""
def __init__(self):
# String to be set in child classes indicating which python version you are checking. Typically should be either
# python 3.6* or python 2.7*
self.python_depends_str = None
# String of the name of the conda environment (e.g. py36 or py27, as created via container build)
self.python_env = None
# Note, currently we hard code channel config. Future changes to support the user specifying channels
# will modify this behavior
self.channel_priority = ['conda-forge', 'anaconda']
self.request_mgr = ConcurrentRequestManager()
def list_versions(self, package_name: str, labbook: LabBook, username: str) -> List[str]:
"""Method to list all available versions of a package based on the package name
Args:
package_name: Name of the package to query
labbook: Subject LabBook
username: username of current user
Returns:
list(str): Version strings
"""
# Check for package in channels, picking out version by priority
request_list = list()
for channel in self.channel_priority:
request_list.append(ConcurrentRequest(f"https://api.anaconda.org/package/{channel}/{package_name}",
headers={'Accept': 'application/json'}))
responses = self.request_mgr.resolve_many(request_list)
versions = None
for response in responses:
if response.status_code != 200:
continue
versions = response.json.get('versions')
break
if not versions:
raise ValueError(f"Package {package_name} not found in channels {' ,'.join(self.channel_priority)}.")
versions.reverse()
return versions
def list_installed_packages(self, labbook: LabBook, username: str) -> List[Dict[str, str]]:
"""Method to get a list of all packages that are currently installed
Note, this will return results for the computer/container in which it is executed. To get the properties of
a LabBook container, a docker exec command would be needed from the Gigantum application container.
return format is a list of dicts with the format (name: <package name>, version: <version string>)
Returns:
list
"""
project_container = container_for_context(username, labbook=labbook)
result = project_container.run_container("conda list --no-pip --json", wait_for_output=True)
if result:
data = json.loads(result)
if data:
return [{"name": x['name'], 'version': x['version']} for x in data]
else:
return []
def validate_packages(self, package_list: List[Dict[str, str]], labbook: LabBook, username: str) \
-> List[PackageResult]:
"""Method to validate a list of packages, and if needed fill in any missing versions
Should check both the provided package name and version. If the version is omitted, it should be generated
from the latest version.
Args:
package_list(list): A list of dictionaries of packages to validate
labbook(str): The labbook instance
username(str): The username for the logged in user
Returns:
            list(PackageResult): one namedtuple per package indicating whether the package name and version are valid
"""
result = list()
# Check for package in channels, picking out version by priority
request_list = list()
for pkg in package_list:
for channel in self.channel_priority:
request_list.append(ConcurrentRequest(f"https://api.anaconda.org/package/{channel}/{pkg['package']}",
headers={'Accept': 'application/json'}))
responses = self.request_mgr.resolve_many(request_list)
# Repack into groups by package
responses_per_package = list(zip(*(iter(responses),) * len(self.channel_priority)))
for package, responses in zip(package_list, responses_per_package):
versions = None
latest_version = None
for response in responses:
if response.status_code != 200:
continue
versions = response.json.get('versions')
latest_version = response.json.get('latest_version')
break
if not versions:
# Package is not found
result.append(PackageResult(package=package['package'], version=package.get('version'), error=True))
continue
if package.get('version'):
# Package has been set, so validate it
if package.get('version') in versions:
# Both package name and version are valid
result.append(PackageResult(package=package['package'], version=package.get('version'),
error=False))
else:
# The package version is not in the list, so invalid
result.append(PackageResult(package=package['package'], version=package.get('version'), error=True))
else:
# You need to look up the latest version since not included
result.append(PackageResult(package=package['package'], version=str(latest_version),
error=False))
return result
def get_packages_metadata(self, package_list: List[str], labbook: LabBook, username: str) -> List[PackageMetadata]:
"""Method to get package metadata
Args:
package_list: List of package names
labbook(str): The labbook instance
username(str): The username for the logged in user
Returns:
list
"""
def _extract_metadata(data):
"""Extraction method to pull out the docs URL and description"""
latest_val = data.get('latest_version')
description_val = data.get('summary').strip()
docs_val = data.get('doc_url')
if not docs_val:
docs_val = data.get('html_url')
return latest_val, description_val, docs_val
# Check for package in channels, picking out version by priority
request_list = list()
for pkg in package_list:
for channel in self.channel_priority:
request_list.append(ConcurrentRequest(f"https://api.anaconda.org/package/{channel}/{pkg}",
headers={'Accept': 'application/json'},
extraction_function=_extract_metadata))
responses = self.request_mgr.resolve_many(request_list)
# Repack into groups by package
responses_per_package = list(zip(*(iter(responses),) * len(self.channel_priority)))
result = list()
for package, responses in zip(package_list, responses_per_package):
data = None
for response in responses:
if response.status_code == 200:
data = response.extracted_json
break
if data:
latest_version, description, docs_url = data
result.append(PackageMetadata(package_manager="conda", package=package, latest_version=latest_version,
description=description, docs_url=docs_url))
else:
result.append(PackageMetadata(package_manager="conda", package=package, latest_version=None,
description=None, docs_url=None))
return result
def generate_docker_install_snippet(self, packages: List[Dict[str, str]], single_line: bool = False) -> List[str]:
"""Method to generate a docker snippet to install 1 or more packages
        Note: Because conda can be so slow solving environments when conda-forge is included, the install command is always collapsed onto a single line.
Args:
packages(list(dict)): A list of package names and versions to install
            single_line(bool): If true, collapse the install commands into a single line
Returns:
list
"""
package_strings = [f"{x['name']}={x['version']}" for x in packages]
if single_line:
return [f"RUN conda install -yq {' '.join(package_strings)}"]
else:
return [f"RUN conda install -yq {' '.join(package_strings)}"]
class Conda3PackageManager(CondaPackageManagerBase):
"""Class to implement the conda3 package manager
"""
def __init__(self):
super().__init__()
self.python_depends_str = 'python 3.6*'
self.python_env = 'py36'
class Conda2PackageManager(CondaPackageManagerBase):
"""Class to implement the conda2 package manager
"""
def __init__(self):
super().__init__()
self.python_depends_str = 'python 2.7*'
self.python_env = 'py27'
|
10213
|
import os
from shutil import rmtree
from tempfile import mkdtemp
from unittest import TestCase
from enjoliver import generator
class GenerateGroupTestCase(TestCase):
api_uri = None
test_matchbox_path = None
test_resources_path = None
tests_path = None
@classmethod
def setUpClass(cls):
cls.tests_path = mkdtemp(dir='/tmp')
cls.test_matchbox_path = os.path.join(cls.tests_path, 'test_matchbox')
cls.test_resources_path = os.path.join(cls.tests_path, 'test_resources')
os.mkdir(cls.test_matchbox_path)
os.mkdir(cls.test_resources_path)
os.mkdir(os.path.join(cls.test_matchbox_path, 'groups'))
cls.api_uri = "http://127.0.0.1:5000"
@classmethod
def tearDownClass(cls):
rmtree(cls.tests_path)
class TestGenerateGroups(GenerateGroupTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.gen = generator.GenerateGroup(
api_uri=cls.api_uri,
_id="etcd-proxy",
name="etcd-proxy",
profile="TestGenerateProfiles",
matchbox_path=cls.test_matchbox_path
)
cls.gen.profiles_path = cls.test_resources_path
def test_instantiate_generate_group_with_incorrect_parameters(self):
with self.assertRaises(TypeError):
generator.GenerateGroup()
def test_instantiate_generate_group_with_non_existing_matchbox_path(self):
with self.assertRaises(OSError):
generator.GenerateGroup(
api_uri='foobar',
_id='foo',
name='foo-bar',
profile='foo-bar-baz',
matchbox_path='/foo/bar'
)
def test_instantiate_generate_group(self):
sandbox = mkdtemp(dir='/tmp')
os.mkdir(os.path.join(sandbox, 'groups'))
generator.GenerateGroup(
api_uri='foobar',
_id='foo',
name='foo-bar',
profile='foo-bar-baz',
matchbox_path=sandbox
)
rmtree(sandbox)
def test_00_uri(self):
ip = self.gen.api_uri
self.assertIsNotNone(ip)
def test_01_metadata(self):
expect = {'etcd_initial_cluster': '',
'api_uri': '%s' % self.gen.api_uri,
'ssh_authorized_keys': []}
self.gen._metadata()
self.assertEqual(expect['api_uri'], self.gen._target_data["metadata"]["api_uri"])
def test_990_generate(self):
expect = {
'profile': 'etcd-proxy.yaml',
'metadata': {
'api_uri': '%s' % self.gen.api_uri,
'ssh_authorized_keys': []
},
'id': 'etcd-proxy',
'name': 'etcd-proxy'
}
new = generator.GenerateGroup(
api_uri=self.api_uri,
_id="etcd-proxy",
name="etcd-proxy",
profile="etcd-proxy.yaml",
matchbox_path=self.test_matchbox_path
)
result = new.generate()
self.assertEqual(expect["profile"], result["profile"])
self.assertEqual(expect["id"], result["id"])
self.assertEqual(expect["name"], result["name"])
self.assertEqual(expect["metadata"]["api_uri"], result["metadata"]["api_uri"])
def test_991_dump(self):
_id = "etcd-test-%s" % self.test_991_dump.__name__
new = generator.GenerateGroup(
api_uri=self.api_uri,
_id=_id,
name="etcd-test",
profile="etcd-test.yaml",
matchbox_path=self.test_matchbox_path
)
self.assertTrue(new.dump())
self.assertTrue(os.path.isfile("%s/groups/%s.json" % (self.test_matchbox_path, _id)))
self.assertFalse(new.dump())
self.assertTrue(os.path.isfile("%s/groups/%s.json" % (self.test_matchbox_path, _id)))
new = generator.GenerateGroup(
api_uri=self.api_uri,
_id=_id,
name="etcd-test",
profile="etcd-test.yaml",
matchbox_path=self.test_matchbox_path,
selector={"one": "selector"}
)
self.assertTrue(new.dump())
self.assertTrue(os.path.isfile("%s/groups/%s.json" % (self.test_matchbox_path, _id)))
os.remove("%s/groups/%s.json" % (self.test_matchbox_path, _id))
class TestGenerateGroupsSelectorLower(GenerateGroupTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
os.environ["MATCHBOX_URI"] = "http://127.0.0.1:8080"
os.environ["API_URI"] = "http://127.0.0.1:5000"
cls.gen = generator.GenerateGroup(
api_uri=cls.api_uri,
_id="etcd-proxy",
name="etcd-proxy",
profile="TestGenerateProfiles",
selector={"mac": "08:00:27:37:28:2e"},
matchbox_path=cls.test_matchbox_path
)
def test_00_api_uri(self):
ip = self.gen.api_uri
self.assertIsNotNone(ip)
def test_01_metadata(self):
expect = {
'api_uri': "%s" % self.gen.api_uri,
'ssh_authorized_keys': []
}
self.gen._metadata()
self.gen._target_data["metadata"]['ssh_authorized_keys'] = []
self.assertEqual(expect, self.gen._target_data["metadata"])
def test_02_selector(self):
expect = {'mac': '08:00:27:37:28:2e'}
self.gen._selector()
self.assertEqual(expect, self.gen._target_data["selector"])
def test_990_generate(self):
expect = {
'profile': 'etcd-proxy.yaml',
'metadata': {
'api_uri': self.gen.api_uri,
'selector': {'mac': '08:00:27:37:28:2e'},
'ssh_authorized_keys': []
},
'id': 'etcd-proxy',
'name': 'etcd-proxy',
'selector': {'mac': '08:00:27:37:28:2e'}
}
new = generator.GenerateGroup(
api_uri=self.api_uri,
_id="etcd-proxy", name="etcd-proxy", profile="etcd-proxy.yaml",
selector={"mac": "08:00:27:37:28:2e"},
matchbox_path=self.test_matchbox_path)
result = new.generate()
result["metadata"]['ssh_authorized_keys'] = []
self.assertEqual(expect, result)
def test_991_dump(self):
_id = "etcd-test-%s" % self.test_991_dump.__name__
new = generator.GenerateGroup(
api_uri=self.api_uri,
_id="%s" % _id, name="etcd-test", profile="etcd-test.yaml",
matchbox_path=self.test_matchbox_path,
selector={"mac": "08:00:27:37:28:2e"}
)
self.assertTrue(new.dump())
self.assertTrue(os.path.isfile("%s/groups/%s.json" % (self.test_matchbox_path, _id)))
os.remove("%s/groups/%s.json" % (self.test_matchbox_path, _id))
class TestGenerateGroupsSelectorUpper(GenerateGroupTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
os.environ["MATCHBOX_URI"] = "http://127.0.0.1:8080"
os.environ["API_URI"] = "http://127.0.0.1:5000"
cls.gen = generator.GenerateGroup(
api_uri=cls.api_uri,
_id="etcd-proxy",
name="etcd-proxy",
profile="TestGenerateProfiles",
selector={"mac": "08:00:27:37:28:2E"},
matchbox_path=cls.test_matchbox_path
)
def test_00_ip_address(self):
ip = self.gen.api_uri
self.assertIsNotNone(ip)
def test_01_metadata(self):
expect = {
'api_uri': "%s" % self.gen.api_uri,
'ssh_authorized_keys': []
}
self.gen._metadata()
self.gen._target_data["metadata"]['ssh_authorized_keys'] = []
self.assertEqual(expect, self.gen._target_data["metadata"])
def test_02_selector(self):
expect = {'mac': '08:00:27:37:28:2e'}
self.gen._selector()
self.assertEqual(expect, self.gen._target_data["selector"])
def test_990_generate(self):
expect = {
'profile': 'etcd-proxy.yaml',
'metadata': {
'api_uri': "%s" % self.gen.api_uri,
'selector': {'mac': '08:00:27:37:28:2e'},
'ssh_authorized_keys': []
},
'id': 'etcd-proxy',
'name': 'etcd-proxy',
'selector': {'mac': '08:00:27:37:28:2e'}
}
new = generator.GenerateGroup(
api_uri=self.api_uri, _id="etcd-proxy",
name="etcd-proxy",
profile="etcd-proxy.yaml",
selector={"mac": "08:00:27:37:28:2e"},
matchbox_path=self.test_matchbox_path
)
result = new.generate()
result["metadata"]['ssh_authorized_keys'] = []
self.assertEqual(expect, result)
def test_991_dump(self):
_id = "etcd-test-%s" % self.test_991_dump.__name__
new = generator.GenerateGroup(
api_uri=self.api_uri,
_id="%s" % _id, name="etcd-test", profile="etcd-test.yaml",
matchbox_path=self.test_matchbox_path,
selector={"mac": "08:00:27:37:28:2e"}
)
new.dump()
self.assertTrue(os.path.isfile("%s/groups/%s.json" % (self.test_matchbox_path, _id)))
os.remove("%s/groups/%s.json" % (self.test_matchbox_path, _id))
class TestGenerateGroupsExtraMetadata(GenerateGroupTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
os.environ["MATCHBOX_URI"] = "http://127.0.0.1:8080"
os.environ["API_URI"] = "http://127.0.0.1:5000"
cls.gen = generator.GenerateGroup(
api_uri=cls.api_uri,
_id="etcd-proxy",
name="etcd-proxy",
profile="TestGenerateProfiles",
selector={"mac": "08:00:27:37:28:2E"},
metadata={"etcd_initial_cluster": "static0=http://192.168.1.1:2379",
"api_seed": "http://192.168.1.2:5000"},
matchbox_path=cls.test_matchbox_path
)
def test_00_api_uri(self):
ip = self.gen.api_uri
self.assertIsNotNone(ip)
def test_01_metadata(self):
expect = {'etcd_initial_cluster': 'static0=http://192.168.1.1:2379',
'api_uri': "%s" % self.gen.api_uri,
'api_seed': 'http://192.168.1.2:5000',
'ssh_authorized_keys': []}
self.gen._metadata()
self.gen._target_data["metadata"]['ssh_authorized_keys'] = []
self.assertEqual(expect, self.gen._target_data["metadata"])
def test_02_selector(self):
expect = {'mac': '08:00:27:37:28:2e'}
self.gen._selector()
self.assertEqual(expect, self.gen._target_data["selector"])
def test_990_generate(self):
expect = {
'profile': 'etcd-proxy.yaml',
'metadata': {
'api_uri': "%s" % self.gen.api_uri,
'selector': {'mac': '08:00:27:37:28:2e'},
'ssh_authorized_keys': []
},
'id': 'etcd-proxy',
'name': 'etcd-proxy',
'selector': {'mac': '08:00:27:37:28:2e'}
}
new = generator.GenerateGroup(
api_uri=self.api_uri,
_id="etcd-proxy", name="etcd-proxy", profile="etcd-proxy.yaml",
selector={"mac": "08:00:27:37:28:2e"},
matchbox_path=self.test_matchbox_path
)
result = new.generate()
result["metadata"]["ssh_authorized_keys"] = []
self.assertEqual(expect, result)
def test_991_dump(self):
_id = "etcd-test-%s" % self.test_991_dump.__name__
new = generator.GenerateGroup(
api_uri=self.api_uri,
_id="%s" % _id, name="etcd-test", profile="etcd-test.yaml",
matchbox_path=self.test_matchbox_path,
selector={"mac": "08:00:27:37:28:2e"}
)
self.assertTrue(new.dump())
self.assertTrue(os.path.isfile("%s/groups/%s.json" % (self.test_matchbox_path, _id)))
os.remove("%s/groups/%s.json" % (self.test_matchbox_path, _id))
self.assertTrue(new.dump())
for i in range(10):
self.assertFalse(new.dump())
new.api_uri = "http://google.com"
self.assertTrue(new.dump())
self.assertFalse(new.dump())
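        # Note: these assertions rely on GenerateGroup.dump() returning True only when the group
        # file is missing or its generated content differs from what is already on disk, and
        # False when an identical file is present.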
|
10243
|
from itertools import chain
from django.conf import settings
from django.contrib.gis.db import models as gis_models
from django.db import models, router, transaction
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from ..fields import CleaningJsonField
from ..validators import DictListValidator, TextField, TimestampField
from .constants import GK25FIN_SRID
from .enforcement_domain import EnforcementDomain
from .mixins import TimestampedModelMixin
from .parking import Parking
class PermitArea(TimestampedModelMixin):
name = models.CharField(max_length=40, verbose_name=_('name'))
domain = models.ForeignKey(
EnforcementDomain, on_delete=models.PROTECT,
related_name='permit_areas')
identifier = models.CharField(max_length=10, verbose_name=_('identifier'))
geom = gis_models.MultiPolygonField(
srid=GK25FIN_SRID, verbose_name=_('geometry'))
permitted_user = models.ForeignKey(
settings.AUTH_USER_MODEL, on_delete=models.PROTECT, verbose_name=_("permitted_user"))
class Meta:
unique_together = [('domain', 'identifier')]
ordering = ('identifier',)
def __str__(self):
return '{}/{}: {}'.format(self.domain.code, self.identifier, self.name)
class PermitSeriesQuerySet(models.QuerySet):
def active(self):
return self.filter(active=True)
def latest_active(self):
return self.active().order_by('-modified_at').first()
def prunable(self, time_limit=None):
limit = time_limit or (
timezone.now() - settings.PARKKIHUBI_PERMITS_PRUNABLE_AFTER)
return self.filter(created_at__lt=limit, active=False)
class PermitSeries(TimestampedModelMixin, models.Model):
active = models.BooleanField(default=False)
owner = models.ForeignKey(
settings.AUTH_USER_MODEL, on_delete=models.PROTECT, verbose_name=_("owner"))
objects = PermitSeriesQuerySet.as_manager()
class Meta:
ordering = ('created_at', 'id')
verbose_name = _("permit series")
verbose_name_plural = _("permit series")
@classmethod
def delete_prunable_series(cls, time_limit=None):
prunable = cls.objects.prunable(time_limit)
Permit.objects.filter(series__in=prunable).delete()
prunable.delete()
def __str__(self):
return str(self.id)
class PermitQuerySet(models.QuerySet):
def active(self):
return self.filter(series__active=True)
def by_time(self, timestamp):
lookup_items = PermitLookupItem.objects.by_time(timestamp)
return self.filter(lookup_items__in=lookup_items).distinct()
def by_subject(self, registration_number):
lookup_items = PermitLookupItem.objects.by_subject(registration_number)
return self.filter(lookup_items__in=lookup_items).distinct()
def by_area(self, area):
lookup_items = PermitLookupItem.objects.by_area(area)
return self.filter(lookup_items__in=lookup_items).distinct()
def bulk_create(self, permits, *args, **kwargs):
for permit in permits:
assert isinstance(permit, Permit)
permit.full_clean()
with transaction.atomic(using=self.db, savepoint=False):
created_permits = super().bulk_create(permits, *args, **kwargs)
PermitLookupItem.objects.using(self.db).bulk_create(
chain(*(x._make_lookup_items() for x in created_permits)))
return created_permits
class Permit(TimestampedModelMixin, models.Model):
domain = models.ForeignKey(
EnforcementDomain, on_delete=models.PROTECT,
related_name='permits')
series = models.ForeignKey(PermitSeries, on_delete=models.PROTECT)
external_id = models.CharField(max_length=50, null=True, blank=True)
subjects = CleaningJsonField(blank=True, validators=[DictListValidator({
'start_time': TimestampField(),
'end_time': TimestampField(),
'registration_number': TextField(max_length=20),
})])
areas = CleaningJsonField(blank=True, validators=[DictListValidator({
'start_time': TimestampField(),
'end_time': TimestampField(),
'area': TextField(max_length=10),
})])
objects = PermitQuerySet.as_manager()
class Meta:
unique_together = [('series', 'external_id')]
indexes = [
models.Index(fields=['series', 'id']),
]
ordering = ('series', 'id')
def __str__(self):
return 'Permit {id} ({series}{active}/{external_id} {dom})'.format(
id=self.id,
dom=self.domain.code,
series=self.series,
active='*' if self.series.active else '',
external_id=self.external_id)
def save(self, using=None, *args, **kwargs):
self.full_clean()
using = using or router.db_for_write(type(self), instance=self)
with transaction.atomic(using=using, savepoint=False):
super(Permit, self).save(using=using, *args, **kwargs)
self.lookup_items.all().using(using).delete()
new_lookup_items = self._make_lookup_items()
PermitLookupItem.objects.using(using).bulk_create(new_lookup_items)
def _make_lookup_items(self):
for area in self.areas:
for subject in self.subjects:
max_start_time = max(subject['start_time'], area['start_time'])
min_end_time = min(subject['end_time'], area['end_time'])
if max_start_time >= min_end_time:
continue
yield PermitLookupItem(
permit=self,
registration_number=Parking.normalize_reg_num(
subject['registration_number']),
area=PermitArea.objects.get(identifier=area['area'], domain=self.domain),
start_time=max_start_time,
end_time=min_end_time
)
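# Each PermitLookupItem (defined below) denormalizes one (subject, area) pair of
# a Permit into a single row whose validity window is the intersection of the two
# time ranges, i.e. [max(start_times), min(end_times)); pairs with no overlap are
# skipped by _make_lookup_items above.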
class PermitLookupItemQuerySet(models.QuerySet):
def active(self):
return self.filter(permit__series__active=True)
def by_time(self, timestamp):
return self.filter(start_time__lte=timestamp, end_time__gte=timestamp)
def by_subject(self, registration_number):
normalized_reg_num = Parking.normalize_reg_num(registration_number)
return self.filter(registration_number=normalized_reg_num)
def by_area(self, area):
return self.filter(area=area)
class PermitLookupItem(models.Model):
permit = models.ForeignKey(
Permit, related_name="lookup_items", on_delete=models.CASCADE)
registration_number = models.CharField(max_length=20)
area = models.ForeignKey(PermitArea, on_delete=models.PROTECT, default=None, null=True, blank=True)
start_time = models.DateTimeField()
end_time = models.DateTimeField()
objects = PermitLookupItemQuerySet.as_manager()
class Meta:
indexes = [
models.Index(fields=[
'registration_number', 'start_time', 'end_time',
'area', 'permit']),
]
ordering = ('registration_number', 'start_time', 'end_time')
def __str__(self):
return (
'{start_time:%Y-%m-%d %H:%M} -- {end_time:%Y-%m-%d %H:%M} / '
'{registration_number} / {area}'
).format(
start_time=self.start_time, end_time=self.end_time,
registration_number=self.registration_number,
area=self.area.identifier)
|
10251
|
from builtins import str
from builtins import range
from builtins import object
import logging
import inspect
import os
class CustomAttr(object):
"""This type handles non-flat data-types like
int, str, bool.
"""
def __init__(self, key, value):
self._value = value
self._key = key
def validate(self):
pass
def post_validation(self):
pass
class CustomAttrTlsContainer(CustomAttr):
def __init__(self, key, value):
super(CustomAttrTlsContainer, self).__init__(key, value)
def validate(self):
return True
def post_validation(self):
return self._value
def validate_custom_attributes(custom_attributes_dict, section,
custom_attributes):
section_dict = {}
if custom_attributes and section in custom_attributes_dict:
for key, value in list(custom_attributes.items()):
if key in custom_attributes_dict[section]:
                # Sanitize the value
try:
type_attr = custom_attributes_dict[section][key]['type']
limits = custom_attributes_dict[section][key]['limits']
if type_attr == 'int':
value = int(value)
if value in range(limits[0], limits[1]):
section_dict.update({key:value})
else:
logging.info("Skipping key: %s, value: %s due to" \
"validation failure" % (key, value))
elif type_attr == 'str':
if len(value) in range(limits[0], limits[1]):
section_dict.update({key:value})
else:
logging.info("Skipping key: %s, value: %s due to" \
"validation failure" % (key, value))
elif type_attr == 'bool':
if value in limits:
if value == 'True':
value = ''
elif value == 'False':
value = 'no '
section_dict.update({key:value})
else:
logging.info("Skipping key: %s, value: %s due to" \
"validation failure" % (key, value))
elif inspect.isclass(eval(type_attr)):
new_custom_attr = eval(type_attr)(key, value)
if new_custom_attr.validate():
value = new_custom_attr.post_validation()
section_dict.update({key:value})
else:
logging.info("Skipping key: %s, value: %s due to" \
"validation failure" % (key, value))
except Exception as e:
logging.error(str(e))
continue
return section_dict
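# ---------------------------------------------------------------------------
# Minimal usage sketch (the schema and values below are hypothetical and not
# part of the original module): the per-key 'type'/'limits' metadata drives
# how user-supplied attributes are sanitized.
if __name__ == '__main__':
    schema = {
        'global': {
            'maxconn': {'type': 'int', 'limits': [1, 65536]},
            'ssl_ciphers': {'type': 'str', 'limits': [1, 256]},
        }
    }
    supplied = {'maxconn': '1024', 'ssl_ciphers': 'ECDHE+AESGCM', 'unknown': 'x'}
    # 'unknown' is dropped because it is not declared in the schema
    print(validate_custom_attributes(schema, 'global', supplied))
    # expected: {'maxconn': 1024, 'ssl_ciphers': 'ECDHE+AESGCM'}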
|
10287
|
import pytest
from flask_resty import Api
from flask_resty.testing import assert_response
# -----------------------------------------------------------------------------
@pytest.fixture(autouse=True)
def routes(app):
api = Api(app, "/api")
api.add_ping("/ping")
# -----------------------------------------------------------------------------
def test_ping(base_client):
response = base_client.get("/ping")
assert_response(response, 200)
assert response.get_data(as_text=True) == ""
|
10345
|
import numpy as np
import pytest
from pandas import (
DataFrame,
Series,
concat,
)
import pandas._testing as tm
@pytest.mark.parametrize("func", ["cov", "corr"])
def test_ewm_pairwise_cov_corr(func, frame):
result = getattr(frame.ewm(span=10, min_periods=5), func)()
result = result.loc[(slice(None), 1), 5]
result.index = result.index.droplevel(1)
expected = getattr(frame[1].ewm(span=10, min_periods=5), func)(frame[5])
tm.assert_series_equal(result, expected, check_names=False)
@pytest.mark.parametrize("name", ["cov", "corr"])
def test_ewm_corr_cov(name):
A = Series(np.random.randn(50), index=np.arange(50))
B = A[2:] + np.random.randn(48)
A[:10] = np.NaN
B[-10:] = np.NaN
result = getattr(A.ewm(com=20, min_periods=5), name)(B)
assert np.isnan(result.values[:14]).all()
assert not np.isnan(result.values[14:]).any()
@pytest.mark.parametrize("min_periods", [0, 1, 2])
@pytest.mark.parametrize("name", ["cov", "corr"])
def test_ewm_corr_cov_min_periods(name, min_periods):
# GH 7898
A = Series(np.random.randn(50), index=np.arange(50))
B = A[2:] + np.random.randn(48)
A[:10] = np.NaN
B[-10:] = np.NaN
result = getattr(A.ewm(com=20, min_periods=min_periods), name)(B)
# binary functions (ewmcov, ewmcorr) with bias=False require at
# least two values
assert np.isnan(result.values[:11]).all()
assert not np.isnan(result.values[11:]).any()
# check series of length 0
empty = Series([], dtype=np.float64)
result = getattr(empty.ewm(com=50, min_periods=min_periods), name)(empty)
tm.assert_series_equal(result, empty)
# check series of length 1
result = getattr(Series([1.0]).ewm(com=50, min_periods=min_periods), name)(
Series([1.0])
)
tm.assert_series_equal(result, Series([np.NaN]))
@pytest.mark.parametrize("name", ["cov", "corr"])
def test_different_input_array_raise_exception(name):
A = Series(np.random.randn(50), index=np.arange(50))
A[:10] = np.NaN
msg = "other must be a DataFrame or Series"
    # passing a raw ndarray instead of a DataFrame/Series raises ValueError
with pytest.raises(ValueError, match=msg):
getattr(A.ewm(com=20, min_periods=5), name)(np.random.randn(50))
def create_mock_weights(obj, com, adjust, ignore_na):
if isinstance(obj, DataFrame):
if not len(obj.columns):
return DataFrame(index=obj.index, columns=obj.columns)
w = concat(
[
create_mock_series_weights(
obj.iloc[:, i], com=com, adjust=adjust, ignore_na=ignore_na
)
for i, _ in enumerate(obj.columns)
],
axis=1,
)
w.index = obj.index
w.columns = obj.columns
return w
else:
return create_mock_series_weights(obj, com, adjust, ignore_na)
def create_mock_series_weights(s, com, adjust, ignore_na):
w = Series(np.nan, index=s.index)
alpha = 1.0 / (1.0 + com)
if adjust:
count = 0
for i in range(len(s)):
if s.iat[i] == s.iat[i]:
w.iat[i] = pow(1.0 / (1.0 - alpha), count)
count += 1
elif not ignore_na:
count += 1
else:
sum_wts = 0.0
prev_i = -1
count = 0
for i in range(len(s)):
if s.iat[i] == s.iat[i]:
if prev_i == -1:
w.iat[i] = 1.0
else:
w.iat[i] = alpha * sum_wts / pow(1.0 - alpha, count - prev_i)
sum_wts += w.iat[i]
prev_i = count
count += 1
elif not ignore_na:
count += 1
return w
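# Note on the helper above: with adjust=True the i-th valid observation gets a
# weight of (1 - alpha) ** (-count), where alpha = 1 / (1 + com), matching the
# adjusted EWM definition; with adjust=False the recursive branch reproduces the
# unadjusted weights, and ignore_na controls whether NaNs advance the counter.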
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
def test_ewm_consistency_mean(consistency_data, adjust, ignore_na, min_periods):
x, is_constant, no_nans = consistency_data
com = 3.0
result = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).mean()
weights = create_mock_weights(x, com=com, adjust=adjust, ignore_na=ignore_na)
expected = (
x.multiply(weights).cumsum().divide(weights.cumsum()).fillna(method="ffill")
)
expected[
x.expanding().count() < (max(min_periods, 1) if min_periods else 1)
] = np.nan
tm.assert_equal(result, expected.astype("float64"))
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
def test_ewm_consistency_consistent(consistency_data, adjust, ignore_na, min_periods):
x, is_constant, no_nans = consistency_data
com = 3.0
if is_constant:
count_x = x.expanding().count()
mean_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).mean()
# check that correlation of a series with itself is either 1 or NaN
corr_x_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).corr(x)
exp = x.max() if isinstance(x, Series) else x.max().max()
# check mean of constant series
expected = x * np.nan
expected[count_x >= max(min_periods, 1)] = exp
tm.assert_equal(mean_x, expected)
# check correlation of constant series with itself is NaN
expected[:] = np.nan
tm.assert_equal(corr_x_x, expected)
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
def test_ewm_consistency_var_debiasing_factors(
consistency_data, adjust, ignore_na, min_periods
):
x, is_constant, no_nans = consistency_data
com = 3.0
# check variance debiasing factors
var_unbiased_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).var(bias=False)
var_biased_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).var(bias=True)
weights = create_mock_weights(x, com=com, adjust=adjust, ignore_na=ignore_na)
cum_sum = weights.cumsum().fillna(method="ffill")
cum_sum_sq = (weights * weights).cumsum().fillna(method="ffill")
numerator = cum_sum * cum_sum
denominator = numerator - cum_sum_sq
denominator[denominator <= 0.0] = np.nan
var_debiasing_factors_x = numerator / denominator
tm.assert_equal(var_unbiased_x, var_biased_x * var_debiasing_factors_x)
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("bias", [True, False])
def test_moments_consistency_var(
consistency_data, adjust, ignore_na, min_periods, bias
):
x, is_constant, no_nans = consistency_data
com = 3.0
mean_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).mean()
var_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).var(bias=bias)
assert not (var_x < 0).any().any()
if bias:
# check that biased var(x) == mean(x^2) - mean(x)^2
mean_x2 = (
(x * x)
.ewm(com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na)
.mean()
)
tm.assert_equal(var_x, mean_x2 - (mean_x * mean_x))
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("bias", [True, False])
def test_moments_consistency_var_constant(
consistency_data, adjust, ignore_na, min_periods, bias
):
x, is_constant, no_nans = consistency_data
com = 3.0
if is_constant:
count_x = x.expanding(min_periods=min_periods).count()
var_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).var(bias=bias)
# check that variance of constant series is identically 0
assert not (var_x > 0).any().any()
expected = x * np.nan
expected[count_x >= max(min_periods, 1)] = 0.0
if not bias:
expected[count_x < 2] = np.nan
tm.assert_equal(var_x, expected)
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("bias", [True, False])
def test_ewm_consistency_std(consistency_data, adjust, ignore_na, min_periods, bias):
x, is_constant, no_nans = consistency_data
com = 3.0
var_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).var(bias=bias)
std_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).std(bias=bias)
assert not (var_x < 0).any().any()
assert not (std_x < 0).any().any()
# check that var(x) == std(x)^2
tm.assert_equal(var_x, std_x * std_x)
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("bias", [True, False])
def test_ewm_consistency_cov(consistency_data, adjust, ignore_na, min_periods, bias):
x, is_constant, no_nans = consistency_data
com = 3.0
var_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).var(bias=bias)
assert not (var_x < 0).any().any()
cov_x_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).cov(x, bias=bias)
assert not (cov_x_x < 0).any().any()
# check that var(x) == cov(x, x)
tm.assert_equal(var_x, cov_x_x)
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("bias", [True, False])
def test_ewm_consistency_series_cov_corr(
consistency_data, adjust, ignore_na, min_periods, bias
):
x, is_constant, no_nans = consistency_data
com = 3.0
if isinstance(x, Series):
var_x_plus_y = (
(x + x)
.ewm(com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na)
.var(bias=bias)
)
var_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).var(bias=bias)
var_y = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).var(bias=bias)
cov_x_y = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).cov(x, bias=bias)
# check that cov(x, y) == (var(x+y) - var(x) -
# var(y)) / 2
tm.assert_equal(cov_x_y, 0.5 * (var_x_plus_y - var_x - var_y))
# check that corr(x, y) == cov(x, y) / (std(x) *
# std(y))
corr_x_y = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).corr(x, bias=bias)
std_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).std(bias=bias)
std_y = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).std(bias=bias)
tm.assert_equal(corr_x_y, cov_x_y / (std_x * std_y))
if bias:
# check that biased cov(x, y) == mean(x*y) -
# mean(x)*mean(y)
mean_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).mean()
mean_y = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).mean()
mean_x_times_y = (
(x * x)
.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
)
.mean()
)
tm.assert_equal(cov_x_y, mean_x_times_y - (mean_x * mean_y))
|
10391
|
import unittest
from freeplane_importer.importer import Importer
from mock import Mock
from mock import MagicMock
from mock import call
from freeplane_importer.model_not_found_exception import ModelNotFoundException
class TestImporter(unittest.TestCase):
def setUp(self):
self.mock_collection = Mock()
self.mock_model = MagicMock()
self.mock_collection.models.byName.return_value = self.mock_model
self.mock_note = MagicMock()
self.mock_note.model.return_value = self.mock_model
self.mock_collection.newNote.return_value = self.mock_note
self.mock_collection.models.fieldNames.return_value = []
self.importer = Importer(self.mock_collection)
self.mock_collection.db.scalar.return_value = None
self.note = {
'id': 100,
'deck': 'History',
'model': 'Basic',
'fields': {}
}
def test_it_should_initialise_the_correct_model(self):
self.importer.import_note(self.note)
self.mock_collection.models.setCurrent.assert_called_with(
self.mock_model)
def test_it_should_select_the_correct_deck(self):
self.mock_collection.decks.id.return_value = 100
self.importer = Importer(self.mock_collection)
self.importer.import_note(self.note)
self.mock_model.__setitem__.assert_called_with('did', 100)
self.mock_collection.decks.id.assert_called_with('History')
def test_it_should_find_the_correct_model(self):
self.importer.import_note(self.note)
self.mock_collection.models.byName.assert_called_with('Basic')
def test_it_should_return_true_if_note_was_added_successfully(self):
self.assertTrue(self.importer.import_note(self.note))
def test_it_should_raise_a_no_model_exception_if_the_model_does_not_exist(self):
self.mock_collection.models.byName.return_value = None
self.assertRaises(ModelNotFoundException,
self.importer.import_note, self.note)
def test_it_should_create_a_new_note(self):
self.importer.import_note(self.note)
self.mock_collection.newNote.assert_called_with()
def test_it_should_get_the_field_names_from_the_model(self):
self.importer.import_note(self.note)
self.mock_collection.models.fieldNames.assert_called_with(
self.mock_model)
def test_it_should_save_the_node_id_if_the_first_field_is_named_id_in_lowercase(self):
self.mock_collection.models.fieldNames.return_value = ['id']
self.importer.import_note(self.note)
self.mock_note.__setitem__.assert_called_with('id', 100)
def test_it_should_save_the_node_id_if_the_first_field_is_named_id_in_uppercase(self):
self.mock_collection.models.fieldNames.return_value = ['ID']
self.importer.import_note(self.note)
self.mock_note.__setitem__.assert_called_with('ID', 100)
def test_it_should_populate_the_note_with_the_field_values(self):
self.note['fields'] = {
'Front': 'Front value',
'Back': 'Back value'
}
self.mock_collection.models.fieldNames.return_value = ['Front', 'Back']
self.importer.import_note(self.note)
self.mock_note.__setitem__.assert_has_calls(
[call('Front', 'Front value'), call('Back', 'Back value')])
def test_it_should_ignore_fields_that_do_not_exist_in_the_model(self):
self.note['fields'] = {
'Front': 'Front value',
'Back': 'Back value'
}
self.mock_collection.models.fieldNames.return_value = ['Front']
self.importer.import_note(self.note)
self.assertFalse('Back' in self.mock_note)
def test_it_should_save_the_note_changes(self):
self.importer.import_note(self.note)
self.mock_note.flush.assert_called_with()
def test_it_should_attempt_to_find_an_existing_note_with_the_given_node_id(self):
self.mock_collection.getNote.return_value = self.mock_note
self.mock_collection.db.scalar.return_value = 123
self.importer.import_note(self.note)
self.mock_collection.getNote.assert_called_with(123)
def test_it_should_add_the_note_to_the_collection_if_it_is_new(self):
del self.mock_note.mod
self.importer.import_note(self.note)
self.mock_collection.addNote.assert_called_with(self.mock_note)
def test_it_should_not_add_the_note_to_the_collection_if_it_is_not_new(self):
self.importer.import_note(self.note)
self.assertEqual(0, self.mock_collection.addNote.call_count)
|
10400
|
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
import flopy
def run():
workspace = os.path.join("lake")
# make sure workspace directory exists
if not os.path.exists(workspace):
os.makedirs(workspace)
fext = "png"
narg = len(sys.argv)
iarg = 0
if narg > 1:
while iarg < narg - 1:
iarg += 1
basearg = sys.argv[iarg].lower()
if basearg == "--pdf":
fext = "pdf"
# save the starting path
cwdpth = os.getcwd()
# change to the working directory
os.chdir(workspace)
# We are creating a square model with a specified head equal to `h1` along all boundaries.
# The head at the cell in the center in the top layer is fixed to `h2`. First, set the name
# of the model and the parameters of the model: the number of layers `Nlay`, the number of rows
# and columns `N`, lengths of the sides of the model `L`, aquifer thickness `H`, hydraulic
# conductivity `Kh`
name = "lake_example"
h1 = 100
h2 = 90
Nlay = 10
N = 101
L = 400.0
H = 50.0
Kh = 1.0
# Create a MODFLOW model and store it (in this case in the variable `ml`, but you can call it
# whatever you want). The modelname will be the name given to all MODFLOW files (input and output).
# The exe_name should be the full path to your MODFLOW executable. The version is either 'mf2k'
    # for MODFLOW2000 or 'mf2005' for MODFLOW2005.
ml = flopy.modflow.Modflow(
modelname=name, exe_name="mf2005", version="mf2005"
)
# Define the discretization of the model. All layers are given equal thickness. The `bot` array
    # is built from `H` and `Nlay` to indicate the top and bottom of each layer, and `delrow` and
# `delcol` are computed from model size `L` and number of cells `N`. Once these are all computed,
# the Discretization file is built.
bot = np.linspace(-H / Nlay, -H, Nlay)
delrow = delcol = L / (N - 1)
dis = flopy.modflow.ModflowDis(
ml,
nlay=Nlay,
nrow=N,
ncol=N,
delr=delrow,
delc=delcol,
top=0.0,
botm=bot,
laycbd=0,
)
# Next we specify the boundary conditions and starting heads with the Basic package. The `ibound`
# array will be `1` in all cells in all layers, except for along the boundary and in the cell at
# the center in the top layer where it is set to `-1` to indicate fixed heads. The starting heads
# are used to define the heads in the fixed head cells (this is a steady simulation, so none of
# the other starting values matter). So we set the starting heads to `h1` everywhere, except for
# the head at the center of the model in the top layer.
Nhalf = int((N - 1) / 2)
ibound = np.ones((Nlay, N, N), dtype=int)
ibound[:, 0, :] = -1
ibound[:, -1, :] = -1
ibound[:, :, 0] = -1
ibound[:, :, -1] = -1
ibound[0, Nhalf, Nhalf] = -1
start = h1 * np.ones((N, N))
start[Nhalf, Nhalf] = h2
# create external ibound array and starting head files
files = []
hfile = f"{name}_strt.ref"
np.savetxt(hfile, start)
hfiles = []
for kdx in range(Nlay):
file = f"{name}_ib{kdx + 1:02d}.ref"
files.append(file)
hfiles.append(hfile)
np.savetxt(file, ibound[kdx, :, :], fmt="%5d")
bas = flopy.modflow.ModflowBas(ml, ibound=files, strt=hfiles)
# The aquifer properties (really only the hydraulic conductivity) are defined with the
# LPF package.
lpf = flopy.modflow.ModflowLpf(ml, hk=Kh)
# Finally, we need to specify the solver we want to use (PCG with default values), and the
# output control (using the default values). Then we are ready to write all MODFLOW input
# files and run MODFLOW.
pcg = flopy.modflow.ModflowPcg(ml)
oc = flopy.modflow.ModflowOc(ml)
ml.write_input()
ml.run_model()
# change back to the starting directory
os.chdir(cwdpth)
# Once the model has terminated normally, we can read the heads file. First, a link to the heads
# file is created with `HeadFile`. The link can then be accessed with the `get_data` function, by
# specifying, in this case, the step number and period number for which we want to retrieve data.
# A three-dimensional array is returned of size `nlay, nrow, ncol`. Matplotlib contouring functions
# are used to make contours of the layers or a cross-section.
hds = flopy.utils.HeadFile(os.path.join(workspace, f"{name}.hds"))
h = hds.get_data(kstpkper=(0, 0))
x = y = np.linspace(0, L, N)
c = plt.contour(x, y, h[0], np.arange(90, 100.1, 0.2))
plt.clabel(c, fmt="%2.1f")
plt.axis("scaled")
outfig = os.path.join(workspace, f"lake1.{fext}")
fig = plt.gcf()
fig.savefig(outfig, dpi=300)
print("created...", outfig)
x = y = np.linspace(0, L, N)
c = plt.contour(x, y, h[-1], np.arange(90, 100.1, 0.2))
plt.clabel(c, fmt="%1.1f")
plt.axis("scaled")
outfig = os.path.join(workspace, f"lake2.{fext}")
fig = plt.gcf()
fig.savefig(outfig, dpi=300)
print("created...", outfig)
z = np.linspace(-H / Nlay / 2, -H + H / Nlay / 2, Nlay)
c = plt.contour(x, z, h[:, 50, :], np.arange(90, 100.1, 0.2))
plt.axis("scaled")
outfig = os.path.join(workspace, f"lake3.{fext}")
fig = plt.gcf()
fig.savefig(outfig, dpi=300)
print("created...", outfig)
return 0
if __name__ == "__main__":
success = run()
|
10423
|
import os
from functools import partial
from io import BytesIO
import numpy as np
import PIL.Image
import scipy.misc
import tensorflow as tf
graph = tf.Graph()
sess = tf.InteractiveSession(graph=graph)
model_fn = "./models/tensorflow_inception_graph.pb"
with tf.gfile.FastGFile(model_fn, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
t_input = tf.placeholder(tf.float32, name="input")
imagenet_mean = 117.0
t_preprocessed = tf.expand_dims(t_input-imagenet_mean, 0)
tf.import_graph_def(graph_def, {"input": t_preprocessed})
def load_inception():
graph = tf.Graph()
sess = tf.InteractiveSession(graph=graph)
model_fn = "./models/tensorflow_inception_graph.pb"
with tf.gfile.FastGFile(model_fn, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
    # Define t_input as the placeholder for the input image
t_input = tf.placeholder(np.float32, name='input')
imagenet_mean = 117.0
    # The input image must be preprocessed before it is fed to the network:
    # expand_dims adds a batch dimension, turning [height, width, channel] into [1, height, width, channel];
    # t_input - imagenet_mean subtracts the mean pixel value
t_preprocessed = tf.expand_dims(t_input - imagenet_mean, 0)
tf.import_graph_def(graph_def, {'input': t_preprocessed})
    # Find all convolutional layers
layers = [op.name for op in graph.get_operations() if op.type ==
"Conv2D" and "import/" in op.name]
    # Print the number of convolutional layers
print('Number of layers', len(layers))
    # In particular, print the shape of mixed4d_3x3_bottleneck_pre_relu
name = 'mixed4d_3x3_bottleneck_pre_relu'
print('shape of %s: %s' %(name, str(graph.get_tensor_by_name('import/' + name + ':0').get_shape())))
def savearray(img_array, img_name):
scipy.misc.toimage(img_array).save(img_name)
print('img saved: %s' % img_name)
def visstd(a, s=0.1):
return (a-a.mean())/max(a.std(), 1e-4)*s+0.5
def resize_ratio(img, ratio):
min = img.min()
max = img.max()
img = (img - min) / (max - min) * 255
img = np.float32(scipy.misc.imresize(img, ratio))
img = img / 255 * (max - min) + min
return img
def resize(img, hw):
min = img.min()
max = img.max()
img = (img - min) / (max - min) * 255
img = np.float32(scipy.misc.imresize(img, hw))
img = img / 255 * (max - min) + min
return img
def calc_grad_tiled(img, t_grad, tile_size=512):
sz = tile_size
h, w = img.shape[:2]
sx, sy = np.random.randint(sz, size=2)
    img_shift = np.roll(np.roll(img, sx, 1), sy, 0)  # shift the whole image along rows first, then along columns
grad = np.zeros_like(img)
for y in range(0, max(h - sz // 2, sz), sz):
for x in range(0, max(w - sz // 2, sz), sz):
sub = img_shift[y:y + sz, x:x + sz]
g = sess.run(t_grad, {t_input: sub})
grad[y:y + sz, x:x + sz] = g
return np.roll(np.roll(grad, -sx, 1), -sy, 0)
k = np.float32([1, 4, 6, 4, 1])
k = np.outer(k, k)
k5x5 = k[:, :, None, None] / k.sum() * np.eye(3, dtype=np.float32)
# Merge a Laplacian pyramid back into the original image
def lap_merge(levels):
img = levels[0]
for hi in levels[1:]:
with tf.name_scope('merge'):
img = tf.nn.conv2d_transpose(img, k5x5 * 4, tf.shape(hi), [1, 2, 2, 1]) + hi
return img
# Normalize img (scale to unit standard deviation).
def normalize_std(img, eps=1e-10):
with tf.name_scope('normalize'):
std = tf.sqrt(tf.reduce_mean(tf.square(img)))
return img / tf.maximum(std, eps)
# Laplacian pyramid normalization
def lap_normalize(img, scale_n=4):
img = tf.expand_dims(img, 0)
tlevels = lap_split_n(img, scale_n)
    # apply normalize_std to every pyramid level
tlevels = list(map(normalize_std, tlevels))
out = lap_merge(tlevels)
return out[0, :, :, :]
# Split an image into low-frequency and high-frequency components
def lap_split(img):
with tf.name_scope('split'):
        # a convolution acts as a smoothing, so lo is the low-frequency component
lo = tf.nn.conv2d(img, k5x5, [1, 2, 2, 1], 'SAME')
        # upscale the low-frequency part back to the original size (lo2); subtracting lo2 from img gives the high-frequency part hi
lo2 = tf.nn.conv2d_transpose(lo, k5x5 * 4, tf.shape(img), [1, 2, 2, 1])
hi = img - lo2
return lo, hi
# Split the image img into an n-level Laplacian pyramid
def lap_split_n(img, n):
levels = []
for i in range(n):
        # lap_split separates the image into low- and high-frequency parts;
        # the high-frequency part is stored in levels,
        # and the low-frequency part is decomposed further
img, hi = lap_split(img)
levels.append(hi)
levels.append(img)
return levels[::-1]
def tffunc(*argtypes):
placeholders = list(map(tf.placeholder, argtypes))
def wrap(f):
out = f(*placeholders)
def wrapper(*args, **kw):
return out.eval(dict(zip(placeholders, args)), session=kw.get('session'))
return wrapper
return wrap
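# tffunc(*argtypes) converts a graph-building function that expects placeholders
# into a plain Python function on numpy arrays: wrap() builds the graph once and
# wrapper() evaluates it with a feed dict. render_deepdream below uses it as
#   lap_norm_func = tffunc(np.float32)(partial(lap_normalize, scale_n=lap_n))
# so that lap_norm_func(g) could be called directly on a float32 gradient array.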
def render_deepdream(img0, iter_n=10, step=1.5, octave_n=4, octave_scale=1.4):
name = 'mixed4d_3x3_bottleneck_pre_relu'
channel = 139
t_obj = graph.get_tensor_by_name("import/%s:0" % name)
t_score = tf.reduce_mean(t_obj)
t_grad = tf.gradients(t_score, t_input)[0]
lap_n=4
    # convert lap_normalize into a regular function on numpy arrays
lap_norm_func = tffunc(np.float32)(partial(lap_normalize, scale_n=lap_n))
img = img0
    # Likewise decompose the image into a pyramid.
    # Here extracting the high- and low-frequency parts is simpler: plain resizing is enough.
octaves = []
for i in range(octave_n-1):
hw = img.shape[:2]
lo = resize(img, np.int32(np.float32(hw) / octave_scale))
hi = img - resize(lo, hw)
img = lo
octaves.append(hi)
    # Generate the low-frequency image first, then successively upscale it and add back the high-frequency parts
for octave in range(octave_n):
if octave > 0:
hi = octaves[-octave]
img = resize(img, hi.shape[:2]) + hi
for i in range(iter_n):
g = calc_grad_tiled(img, t_grad)
img += g * (step / (np.abs(g).mean() + 1e-7))
            # The only difference would be to use lap_norm_func to normalize g:
# g = lap_norm_func(g)
# img += g * step
print('.', end=' ')
img = img.clip(0, 255)
savearray(img, './predict_img/deepdream.jpg')
if __name__ == '__main__':
img0 = PIL.Image.open('./images/test.jpg')
img0 = np.float32(img0)
render_deepdream(img0)
|
10470
|
from flask import Flask, current_app
from flask import render_template
from flask import jsonify
from jieba.analyse import extract_tags
import string
from DB import chinaSQL
from DB import worldSQL
app = Flask(__name__, template_folder='../../web', static_folder='../../static')
@app.route('/', methods=["get", "post"])
def hello_world():
return render_template("china.html")
@app.route('/china', methods=["get", "post"])
def china():
return render_template("china.html")
@app.route('/world', methods=["get", "post"])
def world():
return render_template("world.html")
@app.route('/favicon.ico')
def favicon():
return current_app.send_static_file('image/favicon-32x32-sun.ico')
@app.route("/time")
def time():
data = chinaSQL.time()
return str(data[0])
@app.route("/chinaEightNumber")
def chinaEightNumber():
data = chinaSQL.chinaEightNumber()
return jsonify({"confirmTotal": data[0],
"healTotal": data[1],
"deadTotal": data[2],
"nowConfirmTotal": data[3],
"suspectTotal": data[4],
"nowSevereTotal": data[5],
"importedCaseTotal": data[6],
"noInfectTotal": data[7],
"confirmAdd": data[8],
"healAdd": data[9],
"deadAdd": data[10],
"nowConfirmAdd": data[11],
"suspectAdd": data[12],
"nowSevereAdd": data[13],
"importedCaseAdd": data[14],
"noInfectAdd": data[15]
})
@app.route('/chinaMap', methods=['GET'])
def chinaMap():
data = chinaSQL.chinaMap()
confirmToday, nowConfirmTotal, confirmTotal, healTotal, deadTotal = [], [], [], [], []
for a, b, c, d, e, f in data:
confirmToday.append({"name": a, "value": b})
nowConfirmTotal.append({"name": a, "value": c})
confirmTotal.append({"name": a, "value": d})
healTotal.append({"name": a, "value": e})
deadTotal.append({"name": a, "value": f})
return jsonify({"confirmToday": confirmToday, "nowConfirmTotal": nowConfirmTotal,
"confirmTotal": confirmTotal, "healTotal": healTotal, "deadTotal": deadTotal})
@app.route('/chinaProvinceMap', methods=['GET'])
def chinaProvinceMap():
data = chinaSQL.chinaProvinceMap()
confirmToday, nowConfirmTotal, confirmTotal, healTotal, deadTotal = [], [], [], [], []
for a, b, c, d, e, f in data:
confirmToday.append({"name": a + "市", "value": b})
nowConfirmTotal.append({"name": a + "市", "value": c})
confirmTotal.append({"name": a + "市", "value": d})
healTotal.append({"name": a + "市", "value": e})
deadTotal.append({"name": a + "市", "value": f})
return jsonify({"confirmToday": confirmToday, "nowConfirmTotal": nowConfirmTotal,
"confirmTotal": confirmTotal, "healTotal": healTotal, "deadTotal": deadTotal})
@app.route("/nationalTotal")
def nationalTotal():
data = chinaSQL.nationalTotal()
day, \
confirmChinaDayList, \
healChinaDayList, \
deadChinaDayList, \
importedCaseChinaDayList = [], [], [], [], []
for a, b, c, d, e in data:
day.append(a.strftime("%m-%d"))
confirmChinaDayList.append(b)
healChinaDayList.append(c)
deadChinaDayList.append(d)
importedCaseChinaDayList.append(e)
return jsonify({"day": day,
"confirmChinaDayList": confirmChinaDayList,
"healChinaDayList": healChinaDayList,
"deadChinaDayList": deadChinaDayList,
"importedCaseChinaDayList": importedCaseChinaDayList
})
@app.route("/dailyAdditionsNationwide")
def dailyAdditionsNationwide():
data = chinaSQL.dailyAdditionsNationwide()
day, \
confirmChinaDayAddList, \
healChinaDayAddList, \
deadChinaDayAddList, \
importedCaseChinaDayAddList = [], [], [], [], []
for a, b, c, d, e in data[7:]:
day.append(a.strftime("%m-%d"))
confirmChinaDayAddList.append(b)
healChinaDayAddList.append(c)
deadChinaDayAddList.append(d)
importedCaseChinaDayAddList.append(e)
return jsonify({"day": day,
"confirmChinaDayAddList": confirmChinaDayAddList,
"healChinaDayAddList": healChinaDayAddList,
"deadChinaDayAddList": deadChinaDayAddList,
"importedCaseChinaDayAddList": importedCaseChinaDayAddList
})
@app.route("/dailyCasesNationwide")
def dailyCasesNationwide():
data = chinaSQL.dailyCasesNationwide()
day, \
suspectChinaDayList, \
noInfectChinaDayList, \
nowConfirmChinaDayList, \
nowSevereChinaDayList = [], [], [], [], []
for a, b, c, d, e in data[7:]:
day.append(a.strftime("%m-%d"))
suspectChinaDayList.append(b)
noInfectChinaDayList.append(c)
nowConfirmChinaDayList.append(d)
nowSevereChinaDayList.append(e)
return jsonify({"day": day,
"suspectChinaDayList": suspectChinaDayList,
"noInfectChinaDayList": noInfectChinaDayList,
"nowConfirmChinaDayList": nowConfirmChinaDayList,
"nowSevereChinaDayList": nowSevereChinaDayList
})
@app.route("/nationalCumulativeCureMortalityRate")
def nationalCumulativeCureMortalityRate():
data = chinaSQL.nationalCumulativeCureMortalityRate()
day, \
healRateChinaDayList, \
deadRateChinaDayList = [], [], []
for a, b, c in data[7:]:
day.append(a.strftime("%m-%d"))
healRateChinaDayList.append(b)
deadRateChinaDayList.append(c)
return jsonify({"day": day,
"healRateChinaDayList": healRateChinaDayList,
"deadRateChinaDayList": deadRateChinaDayList
})
@app.route("/detailedDataByProvince")
def detailedDataByProvince():
data = chinaSQL.detailedDataByProvince()
provinceName, \
confirmTotal, \
healTotal, \
deadTotal, \
healRateTotal, \
deadRateTotal = [], [], [], [], [], []
for a, b, c, d, e, f in data:
provinceName.append(a)
confirmTotal.append(b)
healTotal.append(c)
deadTotal.append(d)
healRateTotal.append(e)
deadRateTotal.append(f)
return jsonify({"provinceName": provinceName,
"confirmTotal": confirmTotal,
"healTotal": healTotal,
"deadTotal": deadTotal,
"healRateTotal": healRateTotal,
"deadRateTotal": deadRateTotal
})
@app.route("/cumulativeNumberOfConfirmedCasesInAllProvinces")
def cumulativeNumberOfConfirmedCasesInAllProvinces():
data = chinaSQL.cumulativeNumberOfConfirmedCasesInAllProvinces()
provincedetails = []
for provinceName, confirmTotal in data:
provincedetails.append({"name": provinceName, "value": confirmTotal})
return jsonify({"data": provincedetails})
@app.route("/currentConfirmedDataInAllProvinces")
def currentConfirmedDataInAllProvinces():
data = chinaSQL.currentConfirmedDataInAllProvinces()
provinceName, \
nowConfirmTotal, \
confirmToday, \
suspectTotal = [], [], [], []
for a, b, c, d in data:
provinceName.append(a)
nowConfirmTotal.append(b)
confirmToday.append(c)
suspectTotal.append(d)
return jsonify({"provinceName": provinceName,
"nowConfirmTotal": nowConfirmTotal,
"confirmToday": confirmToday,
"suspectTotal": suspectTotal
})
@app.route("/existingDiagnosticClassificationInChina")
def existingDiagnosticClassificationInChina():
data = chinaSQL.existingDiagnosticClassificationInChina()
nowconfirmstatis = []
nowconfirmstatis.append({"name": '港澳台现存确诊', "value": data[0][0]})
nowconfirmstatis.append({"name": '境外输入现存确诊', "value": data[0][1]})
nowconfirmstatis.append({"name": '31省本土现有确诊', "value": data[0][2]})
return jsonify({"data": nowconfirmstatis})
@app.route("/totalNumberOfOverseasImportsFromTop10Provinces")
def totalNumberOfOverseasImportsFromTop10Provinces():
data = chinaSQL.totalNumberOfOverseasImportsFromTop10Provinces()
importstatis = []
for province, importedCase in data:
importstatis.append({"name": province, "value": importedCase})
return jsonify({"data": importstatis})
@app.route("/eachProvinceComparesYesterdayData")
def eachProvinceComparesYesterdayData():
data = chinaSQL.eachProvinceComparesYesterdayData()
province, \
nowConfirm, \
confirmAdd, \
heal, \
dead, \
zero = [], [], [], [], [], []
for a, b, c, d, e, f in data:
province.append(a)
nowConfirm.append(b)
confirmAdd.append(c)
heal.append(d)
dead.append(e)
zero.append(f)
return jsonify({"province": province,
"nowConfirm": nowConfirm,
"confirmAdd": confirmAdd,
"heal": heal,
"dead": dead,
"zero": zero
})
@app.route("/hubeiNonHubeiNationalCumulativeData")
def hubeiNonHubeiNationalCumulativeData():
data = chinaSQL.hubeiNonHubeiNationalCumulativeData()
day, \
hubeiNowConfirm, \
hubeiHeal, \
hubeiDead, \
notHubeiNowConfirm, \
notHubeiHeal, \
notHubeiDead, \
countryNowConfirm, \
countryHeal, \
countryDead = [], [], [], [], [], [], [], [], [], []
for a, b, c, d, e, f, g, h, i, j in data:
day.append(a.strftime("%m-%d"))
hubeiNowConfirm.append(b)
hubeiHeal.append(c)
hubeiDead.append(d)
notHubeiNowConfirm.append(e)
notHubeiHeal.append(f)
notHubeiDead.append(g)
countryNowConfirm.append(h)
countryHeal.append(i)
countryDead.append(j)
return jsonify({"day": day,
"hubeiNowConfirm": hubeiNowConfirm,
"hubeiHeal": hubeiHeal,
"hubeiDead": hubeiDead,
"notHubeiNowConfirm": notHubeiNowConfirm,
"notHubeiHeal": notHubeiHeal,
"notHubeiDead": notHubeiDead,
"countryNowConfirm": countryNowConfirm,
"countryHeal": countryHeal,
"countryDead": countryDead
})
@app.route("/hubeiNonHubeiNationalCureMortalityRate")
def hubeiNonHubeiNationalCureMortalityRate():
data = chinaSQL.hubeiNonHubeiNationalCureMortalityRate()
day, \
hubeiHealRate, \
hubeiDeadRate, \
notHubeiHealRate, \
notHubeiDeadRate, \
countryHealRate, \
countryDeadRate = [], [], [], [], [], [], []
for a, b, c, d, e, f, g in data:
day.append(a.strftime("%m-%d"))
hubeiHealRate.append(b)
hubeiDeadRate.append(c)
notHubeiHealRate.append(d)
notHubeiDeadRate.append(e)
countryHealRate.append(f)
countryDeadRate.append(g)
return jsonify({"day": day,
"hubeiHealRate": hubeiHealRate,
"hubeiDeadRate": hubeiDeadRate,
"notHubeiHealRate": notHubeiHealRate,
"notHubeiDeadRate": notHubeiDeadRate,
"countryHealRate": countryHealRate,
"countryDeadRate": countryDeadRate
})
@app.route("/hubeiNonHubeiNationalDailyNew")
def hubeiNonHubeiNationalDailyNew():
data = chinaSQL.hubeiNonHubeiNationalDailyNew()
day, \
hubei, \
notHubei, \
country = [], [], [], []
for a, b, c, d in data[7:]:
day.append(a.strftime("%m-%d"))
hubei.append(b)
notHubei.append(c)
country.append(d)
return jsonify({"day": day,
"hubei": hubei,
"notHubei": notHubei,
"country": country
})
@app.route("/wuhanNotWuhanNotHubeiNewlyConfirmed")
def wuhanNotWuhanNotHubeiNewlyConfirmed():
data = chinaSQL.wuhanNotWuhanNotHubeiNewlyConfirmed()
day, \
wuhan, \
notWuhan, \
notHubei = [], [], [], []
for a, b, c, d in data:
day.append(a.strftime("%m-%d"))
wuhan.append(b)
notWuhan.append(c)
notHubei.append(d)
return jsonify({"day": day,
"wuhan": wuhan,
"notWuhan": notWuhan,
"notHubei": notHubei
})
@app.route("/totalConfirmedTop20UrbanAreas")
def totalConfirmedTop20UrbanAreas():
data = chinaSQL.totalConfirmedTop20UrbanAreas()
cityName, \
deadRateTotal, \
healRateTotal = [], [], []
for a, b, c in data:
cityName.append(a)
deadRateTotal.append(b)
healRateTotal.append(c)
return jsonify({"cityName": cityName,
"deadRateTotal": deadRateTotal,
"healRateTotal": healRateTotal
})
@app.route("/existingConfirmedTop20UrbanAreas")
def existingConfirmedTop20UrbanAreas():
data = chinaSQL.existingConfirmedTop20UrbanAreas()
cityName, \
nowConfirmTotal, \
confirmToday, \
suspectTotal = [], [], [], []
for a, b, c, d in data:
cityName.append(a)
nowConfirmTotal.append(b)
confirmToday.append(c)
suspectTotal.append(d)
return jsonify({"cityName": cityName,
"nowConfirmTotal": nowConfirmTotal,
"confirmToday": confirmToday,
"suspectTotal": suspectTotal
})
@app.route("/urbanDataOfHubeiProvince")
def urbanDataOfHubeiProvince():
data = chinaSQL.urbanDataOfHubeiProvince()
cityName, \
confirmTotal, \
healTotal, \
deadTotal = [], [], [], []
for a, b, c, d in data:
cityName.append(a)
confirmTotal.append(b)
healTotal.append(c)
deadTotal.append(d)
return jsonify({"cityName": cityName,
"confirmTotal": confirmTotal,
"healTotal": healTotal,
"deadTotal": deadTotal
})
@app.route("/accumulativeDataExceptHubeiProvince")
def accumulativeDataExceptHubeiProvince():
data = chinaSQL.accumulativeDataExceptHubeiProvince()
cityName, \
confirmTotal, \
healTotal, \
deadTotal = [], [], [], []
for a, b, c, d in data:
cityName.append(a)
confirmTotal.append(b)
healTotal.append(c)
deadTotal.append(d)
return jsonify({"cityName": cityName,
"confirmTotal": confirmTotal,
"healTotal": healTotal,
"deadTotal": deadTotal
})
@app.route("/provincesWithFatalCasesNationwide")
def provincesWithFatalCasesNationwide():
data = chinaSQL.provincesWithFatalCasesNationwide()
provincedetails = []
provincedetails.append({"name": "无死亡病例省份数量", "value": data[0][0]})
provincedetails.append({"name": "有死亡病例省份数量", "value": data[0][1]})
return jsonify({"data": provincedetails})
@app.route("/numberOfDeathsInCities")
def numberOfDeathsInCities():
data = chinaSQL.numberOfDeathsInCities()
dataCityCount = []
dataCityCount.append({"name": "无死亡病例城市数量", "value": data[0][0]})
dataCityCount.append({"name": "有死亡病例城市数量", "value": data[0][1]})
return jsonify({"data": dataCityCount})
@app.route("/outbreakOut")
def outbreakOut():
data = chinaSQL.outbreakOut()
d = []
for i in data:
k = i[0].rstrip(string.digits)
v = i[0][len(k):]
ks = extract_tags(k)
for j in ks:
if not j.isdigit():
d.append({"name": j, "value": v})
return jsonify({"kws": d})
@app.route("/worldFourNumber")
def worldFourNumber():
data = worldSQL.worldFourNumber()
return jsonify({"nowConfirm": data[0],
"confirm": data[1],
"heal": data[2],
"dead": data[3],
"nowConfirmAdd": data[4],
"confirmAdd": data[5],
"healAdd": data[6],
"deadAdd": data[7]
})
@app.route('/worldMapNoChina', methods=['GET'])
def worldMapNoChina():
data = worldSQL.worldMapNoChina()
nowConfirm, confirm, heal, dead = [], [], [], []
for a, b, c, d, e in data:
nowConfirm.append({"name": a, "value": b})
confirm.append({"name": a, "value": c})
heal.append({"name": a, "value": d})
dead.append({"name": a, "value": e})
data1 = worldSQL.worldMapChina()
nowConfirm.append({"name": "中国", "value": data1[0][0]})
confirm.append({"name": "中国", "value": data1[0][1]})
heal.append({"name": "中国", "value": data1[0][2]})
dead.append({"name": "中国", "value": data1[0][3]})
return jsonify({"nowConfirm": nowConfirm,
"confirm": confirm,
"heal": heal,
"dead": dead
})
@app.route("/globalCumulativeTrend")
def globalCumulativeTrend():
data = worldSQL.globalCumulativeTrend()
day, \
confirm, \
heal, \
dead, \
newAddConfirm = [], [], [], [], []
for a, b, c, d, e in data:
day.append(a.strftime("%m-%d"))
confirm.append(b)
heal.append(c)
dead.append(d)
newAddConfirm.append(e)
return jsonify({"day": day,
"confirm": confirm,
"heal": heal,
"dead": dead,
"newAddConfirm": newAddConfirm
})
@app.route("/globalCumulativeCureMortality")
def globalCumulativeCureMortality():
data = worldSQL.globalCumulativeCureMortality()
day, \
healRate, \
deadRate = [], [], []
for a, b, c in data:
day.append(a.strftime("%m-%d"))
healRate.append(b)
deadRate.append(c)
return jsonify({"day": day,
"healRate": healRate,
"deadRate": deadRate
})
@app.route("/foreignCumulativeDiagnosisTop10Countries")
def foreignCumulativeDiagnosisTop10Countries():
data = worldSQL.foreignCumulativeDiagnosisTop10Countries()
name, \
nowConfirm, \
confirm, \
heal, \
dead = [], [], [], [], []
for a, b, c, d, e in data:
name.append(a)
nowConfirm.append(b)
confirm.append(c)
heal.append(d)
dead.append(e)
return jsonify({"name": name,
"nowConfirm": nowConfirm,
"confirm": confirm,
"heal": heal,
"dead": dead
})
@app.route("/theTop10CountriesGrewFastestInSevenDays")
def theTop10CountriesGrewFastestInSevenDays():
data = worldSQL.theTop10CountriesGrewFastestInSevenDays()
nation, \
day7, \
day, \
rate = [], [], [], []
for a, b, c, d in data:
nation.append(a)
day7.append(b)
day.append(c)
rate.append(d)
return jsonify({"nation": nation,
"day7": day7,
"day0": day,
"rate": rate
})
@app.route("/overseasCountriesWithMoreThan10000ConfirmedCases")
def overseasCountriesWithMoreThan10000ConfirmedCases():
data = worldSQL.overseasCountriesWithMoreThan10000ConfirmedCases()
foreignlist = []
for name, confirm in data:
foreignlist.append({"name": name, "value": confirm})
return jsonify({"data": foreignlist})
@app.route("/overseasCountriesWithMoreThan10000HaveBeenConfirmedCases")
def overseasCountriesWithMoreThan10000HaveBeenConfirmedCases():
data = worldSQL.overseasCountriesWithMoreThan10000HaveBeenConfirmedCases()
foreignlist = []
for name, nowConfirm in data:
foreignlist.append({"name": name, "value": nowConfirm})
return jsonify({"data": foreignlist})
@app.route("/newCasesInTheTop10CountriesWithin24Hours")
def newCasesInTheTop10CountriesWithin24Hours():
data = worldSQL.newCasesInTheTop10CountriesWithin24Hours()
nationAddConfirm = []
for nation, addConfirm in data:
nationAddConfirm.append({"name": nation, "value": addConfirm})
return jsonify({"data": nationAddConfirm})
@app.route("/theNumberOfForeignCountriesWithConfirmedCases")
def theNumberOfForeignCountriesWithConfirmedCases():
data = worldSQL.theNumberOfForeignCountriesWithConfirmedCases()
foreignlist = []
for continent, count in data:
foreignlist.append({"name": continent, "value": count})
return jsonify({"data": foreignlist})
if __name__ == '__main__':
app.run()
|
10476
|
import os
def create_project(path):
dirs = ['configs', 'module', 'data']
dirs = [os.path.join(path, d) for d in dirs]
for d in dirs:
os.makedirs(d)
train_script = r"""
import ever as er
def train(trainer_name):
trainer = er.trainer.get_trainer(trainer_name)()
trainer.run()
"""
with open(os.path.join(path, 'train.py'), 'w') as f:
f.write(train_script)
print('created project in {}'.format(path))
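# Usage sketch (hypothetical path, not part of the original module): creates
# configs/, module/ and data/ under ./demo_project and writes a train.py stub.
# Note that os.makedirs raises if the directories already exist.
if __name__ == '__main__':
    create_project('./demo_project')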
|
10482
|
import json
import sys
def compatible_loads(json_data):
"""
    json.loads in Python 3.0 - 3.5 can't handle bytes, so this function handles it.
:param json_data:
:return: unicode (str if it's python 3)
"""
if isinstance(json_data, bytes) and (3, 0) <= sys.version_info < (3, 6):
json_data = json_data.decode("utf-8")
return json.loads(json_data)
def get_massage_from_io_error(error):
"""
:param: IOError
:return: error message
"""
if sys.version_info >= (3, 0):
return error.strerror
else:
return error.message
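# Usage sketch (illustrative only, not part of the original module): bytes and
# text inputs parse to the same object on any supported interpreter version.
if __name__ == "__main__":
    print(compatible_loads(b'{"answer": 42}'))  # {'answer': 42} (u'answer' under Python 2)
    print(compatible_loads('{"answer": 42}'))   # {'answer': 42}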
|
10502
|
import unittest
from stringsheet.parser import create_spreadsheet_values
from stringsheet.parser import create_language_sheet_values
from stringsheet.parser import parse_resources
class BaseTestCase(unittest.TestCase):
def setUp(self):
self.resources = parse_resources('test-resources/res')
class CreateSpreadsheetValuesTestCase(BaseTestCase):
def setUp(self):
super(CreateSpreadsheetValuesTestCase, self).setUp()
self.values = create_spreadsheet_values(self.resources)
def test_rows_are_valid(self):
rows = [
['id', 'comment', 'default', 'de', 'pl', 'zh-rCN', 'zh-rTW'],
['a_string', '', 'A string', '', '', '', ''],
['partly_added', '', 'Partly added', 'Partly added (de)', '', '',
''],
['string', 'String with comment', 'String', 'String (de)',
'String (pl)', 'String (zh-rCN)', 'String (zh-rTW)'],
['string_2', '', 'String 2', '', '', '', ''],
['array[0]', 'Item comment', 'First', '', '', '', ''],
['array[1]', '', 'Second', '', '', '', ''],
['array_comment[0]', 'Array comment', 'Some item', '', '', '', ''],
['array_comment[1]', 'Array comment', 'More items', '', '', '', ''],
['array_comment[2]', 'Comment', 'More', '', '', '', ''],
['plural{zero}', 'Parent comment', 'Other', '', '', '', ''],
['plural{one}', 'Parent comment', 'One', '', '', '', ''],
['plural{two}', 'Parent comment', 'Other', '', '', '', ''],
['plural{few}', 'Parent comment', 'Other', '', '', '', ''],
['plural{many}', 'Parent comment', 'Other', '', '', '', ''],
['plural{other}', 'Comment', 'Other', '', '', '', ''],
['plurals{zero}', 'Item comment', 'Zero', '', '', '', ''],
['plurals{one}', '', 'One', '', '', '', ''],
['plurals{two}', '', 'Two', '', '', '', ''],
['plurals{few}', '', 'Few', '', '', '', ''],
['plurals{many}', '', 'Many', '', '', '', ''],
['plurals{other}', '', 'Other', '', '', '', ''],
]
self.assertEqual(len(rows), len(self.values))
for index, row in enumerate(rows):
self.assertEqual(row, self.values[index])
class CreateLanguageSpreadsheetValuesTestCase(BaseTestCase):
def setUp(self):
super(CreateLanguageSpreadsheetValuesTestCase, self).setUp()
self.values = create_language_sheet_values(self.resources, 'de')
def test_rows_are_valid(self):
rows = [
['id', 'comment', 'default', 'de'],
['a_string', '', 'A string', ''],
['partly_added', '', 'Partly added', 'Partly added (de)'],
['string', 'String with comment', 'String', 'String (de)'],
['string_2', '', 'String 2', ''],
['array[0]', 'Item comment', 'First', ''],
['array[1]', '', 'Second', ''],
['array_comment[0]', 'Array comment', 'Some item', ''],
['array_comment[1]', 'Array comment', 'More items', ''],
['array_comment[2]', 'Comment', 'More', ''],
['plural{zero}', 'Parent comment', 'Other', ''],
['plural{one}', 'Parent comment', 'One', ''],
['plural{two}', 'Parent comment', 'Other', ''],
['plural{few}', 'Parent comment', 'Other', ''],
['plural{many}', 'Parent comment', 'Other', ''],
['plural{other}', 'Comment', 'Other', ''],
['plurals{zero}', 'Item comment', 'Zero', ''],
['plurals{one}', '', 'One', ''],
['plurals{two}', '', 'Two', ''],
['plurals{few}', '', 'Few', ''],
['plurals{many}', '', 'Many', ''],
['plurals{other}', '', 'Other', ''],
]
self.assertEqual(len(rows), len(self.values))
for index, row in enumerate(rows):
self.assertEqual(row, self.values[index])
class CreateTemplateSpreadsheetValuesTestCase(BaseTestCase):
def setUp(self):
super(CreateTemplateSpreadsheetValuesTestCase, self).setUp()
self.values = create_language_sheet_values(self.resources, 'Template')
def test_rows_are_valid(self):
rows = [
['id', 'comment', 'default', 'language-id'],
['a_string', '', 'A string', ''],
['partly_added', '', 'Partly added', ''],
['string', 'String with comment', 'String', ''],
['string_2', '', 'String 2', ''],
['array[0]', 'Item comment', 'First', ''],
['array[1]', '', 'Second', ''],
['array_comment[0]', 'Array comment', 'Some item', ''],
['array_comment[1]', 'Array comment', 'More items', ''],
['array_comment[2]', 'Comment', 'More', ''],
['plural{zero}', 'Parent comment', 'Other', ''],
['plural{one}', 'Parent comment', 'One', ''],
['plural{two}', 'Parent comment', 'Other', ''],
['plural{few}', 'Parent comment', 'Other', ''],
['plural{many}', 'Parent comment', 'Other', ''],
['plural{other}', 'Comment', 'Other', ''],
['plurals{zero}', 'Item comment', 'Zero', ''],
['plurals{one}', '', 'One', ''],
['plurals{two}', '', 'Two', ''],
['plurals{few}', '', 'Few', ''],
['plurals{many}', '', 'Many', ''],
['plurals{other}', '', 'Other', ''],
]
self.assertEqual(len(rows), len(self.values))
for index, row in enumerate(rows):
self.assertEqual(row, self.values[index])
if __name__ == '__main__':
unittest.main()
|
10504
|
import os, sys
import numpy as np
from sedflow import obs as Obs
from sedflow import train as Train
from provabgs import infer as Infer
from provabgs import models as Models
####################################################
# input
####################################################
sample = sys.argv[1]
itrain = int(sys.argv[2])
nhidden = int(sys.argv[3])
nblocks = int(sys.argv[4])
niter = int(sys.argv[5])
i0 = int(sys.argv[6])
i1 = int(sys.argv[7])
####################################################
# compile NSA failures
####################################################
# u, g, r, i, z, sigma_u, sigma_g, sigma_r, sigma_i, sigma_z, redshift
y_nsa = Obs.load_nsa_data(test_set=False)
igals = np.load('/scratch/network/chhahn/sedflow/nsa_fail/fail.igals.npy')
# convert to flux
y_flux = Train.mag2flux(y_nsa[:,:5])
y_ivar = Train.sigma_mag2flux(y_nsa[:,5:10], y_nsa[:,:5])**-2
y_zred = y_nsa[:,-1]
####################################################
# setup inference
####################################################
# SPS parameter priors
prior_sps = Infer.load_priors([
Infer.UniformPrior(7., 12.5, label='sed'),
Infer.FlatDirichletPrior(4, label='sed'), # flat dirichilet priors
Infer.UniformPrior(0., 1., label='sed'), # burst fraction
Infer.UniformPrior(1e-2, 13.27, label='sed'), # tburst
Infer.LogUniformPrior(4.5e-5, 1.5e-2, label='sed'), # log uniform priors on ZH coeff
Infer.LogUniformPrior(4.5e-5, 1.5e-2, label='sed'), # log uniform priors on ZH coeff
Infer.UniformPrior(0., 3., label='sed'), # uniform priors on dust1
Infer.UniformPrior(0., 3., label='sed'), # uniform priors on dust2
Infer.UniformPrior(-2., 1., label='sed') # uniform priors on dust_index
])
# SPS model
m_sps = Models.NMF(burst=True, emulator=True)
def run_mcmc(i_obs):
# desi MCMC object
nsa_mcmc = Infer.nsaMCMC(model=m_sps, prior=prior_sps)
fmcmc = os.path.join('/scratch/network/chhahn/sedflow/nsa_fail',
'mcmc.nsa.%i.hdf5' % i_obs)
if not os.path.isfile(fmcmc):
print('%s running' % os.path.basename(fmcmc))
if not np.all(np.isfinite(y_flux[i_obs])):
print('NaN photometry', y_flux[i_obs])
return None
if not np.all(np.isfinite(y_ivar[i_obs])):
print('NaN ivar', y_ivar[i_obs])
return None
# run MCMC
zeus_chain = nsa_mcmc.run(
bands='sdss', # u, g, r, i, z
photo_obs=y_flux[i_obs],
photo_ivar_obs=y_ivar[i_obs],
zred=y_zred[i_obs],
vdisp=0.,
sampler='zeus',
nwalkers=30,
burnin=0,
opt_maxiter=2000,
niter=niter,
progress=True,
writeout=fmcmc)
else:
print('%s already exists' % os.path.basename(fmcmc))
return None
for i in range(i0, i1+1):
run_mcmc(igals[i])
|
10515
|
import os
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
def read_dataset_from_npy(path):
""" Read dataset from .npy file
"""
data = np.load(path, allow_pickle=True)
return data[()]['X'], data[()]['y'], data[()]['train_idx'], data[()]['test_idx']
def read_dataset(ucr_root_dir, dataset_name, shot):
""" Read univariate dataset from UCR
"""
dataset_dir = os.path.join(ucr_root_dir, dataset_name)
df_train = pd.read_csv(os.path.join(dataset_dir, dataset_name+'_TRAIN.tsv'), sep='\t', header=None)
df_test = pd.read_csv(os.path.join(dataset_dir, dataset_name+'_TEST.tsv'), sep='\t', header=None)
y_train = df_train.values[:, 0].astype(np.int64)
y_test = df_test.values[:, 0].astype(np.int64)
y = np.concatenate((y_train, y_test))
le = LabelEncoder()
le.fit(y)
y = le.transform(y)
X_train = df_train.drop(columns=[0]).astype(np.float32)
X_test = df_test.drop(columns=[0]).astype(np.float32)
X_train.columns = range(X_train.shape[1])
X_test.columns = range(X_test.shape[1])
X_train = X_train.values
X_test = X_test.values
X = np.concatenate((X_train, X_test))
idx = np.array([i for i in range(len(X))])
np.random.shuffle(idx)
train_idx, test_idx = idx[:int(len(idx)*0.8)], idx[int(len(idx)*0.8):]
tmp = [[] for _ in range(len(np.unique(y)))]
for i in train_idx:
tmp[y[i]].append(i)
train_idx = []
for _tmp in tmp:
train_idx.extend(_tmp[:shot])
# znorm
X[np.isnan(X)] = 0
std_ = X.std(axis=1, keepdims=True)
std_[std_ == 0] = 1.0
X = (X - X.mean(axis=1, keepdims=True)) / std_
# add a dimension to make it multivariate with one dimension
X = X.reshape((X.shape[0], 1, X.shape[1]))
return X, y, train_idx, test_idx
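# Usage sketch (hypothetical root path and dataset name; assumes the standard UCR
# layout with <name>_TRAIN.tsv and <name>_TEST.tsv files):
# X, y, train_idx, test_idx = read_dataset('./UCR', 'Coffee', shot=1)
# X.shape is (n_samples, 1, series_length); train_idx keeps `shot` indices per class.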
def read_multivariate_dataset(root_dir, dataset_name, shot):
""" Read multivariate dataset
"""
X = np.load(os.path.join(root_dir, dataset_name+".npy"), allow_pickle=True)
y = np.loadtxt(os.path.join(root_dir, dataset_name+'_label.txt'))
y = y.astype(np.int64)
dim = X[0].shape[0]
max_length = 0
for _X in X:
if _X.shape[1] > max_length:
max_length = _X.shape[1]
X_list = []
for i in range(len(X)):
_X = np.zeros((dim, max_length))
_X[:, :X[i].shape[1]] = X[i]
X_list.append(_X)
X = np.array(X_list, dtype=np.float32)
le = LabelEncoder()
le.fit(y)
y = le.transform(y)
idx = np.array([i for i in range(len(X))])
np.random.shuffle(idx)
train_idx, test_idx = idx[:int(len(idx)*0.8)], idx[int(len(idx)*0.8):]
tmp = [[] for _ in range(len(np.unique(y)))]
for i in train_idx:
tmp[y[i]].append(i)
train_idx = []
for _tmp in tmp:
train_idx.extend(_tmp[:shot])
# znorm
std_ = X.std(axis=2, keepdims=True)
std_[std_ == 0] = 1.0
X = (X - X.mean(axis=2, keepdims=True)) / std_
return X, y, train_idx, test_idx
def read_X(ucr_root_dir, dataset_name):
""" Read the raw time-series
"""
dataset_dir = os.path.join(ucr_root_dir, dataset_name)
df_train = pd.read_csv(os.path.join(dataset_dir, dataset_name+'_TRAIN.tsv'), sep='\t', header=None)
df_test = pd.read_csv(os.path.join(dataset_dir, dataset_name+'_TEST.tsv'), sep='\t', header=None)
X_train = df_train.drop(columns=[0]).astype(np.float32)
X_test = df_test.drop(columns=[0]).astype(np.float32)
X_train.columns = range(X_train.shape[1])
X_test.columns = range(X_test.shape[1])
X_train = X_train.values
X_test = X_test.values
X = np.concatenate((X_train, X_test), axis=0)
return X
class Logger:
def __init__(self, f):
self.f = f
def log(self, content):
print(content)
self.f.write(content + '\n')
self.f.flush()
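# Minimal sketch of the Logger helper (the log path is illustrative):
# with open('train.log', 'w') as f:
#     logger = Logger(f)
#     logger.log('epoch 1 done')   # printed to stdout and flushed to the file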
|
10546
|
from ctypes import c_uint32, c_void_p, string_at
from rotypes.idldsl import define_winrt_com_method, GUID
from rotypes.inspectable import IInspectable, IUnknown
@GUID('905a0fef-bc53-11df-8c49-001e4fc686da')
class IBufferByteAccess(IUnknown):
pass
@GUID('905A0FE0-BC53-11DF-8C49-001E4FC686DA')
class IBuffer(IInspectable):
def __len__(self):
return self.Length
def __bytes__(self):
byteaccess = self.astype(IBufferByteAccess)
ptr = byteaccess.Buffer()
return string_at(ptr, len(self))
define_winrt_com_method(IBufferByteAccess, 'Buffer', retval=c_void_p)
define_winrt_com_method(IBuffer, 'get_Capacity', propget=c_uint32)
define_winrt_com_method(IBuffer, 'get_Length', propget=c_uint32)
define_winrt_com_method(IBuffer, 'put_Length', propput=c_uint32)
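# Usage sketch: given an IBuffer instance obtained from some WinRT call (not shown
# here), the helpers above expose it as plain Python data:
# buf_len = len(some_ibuffer)   # via the Length property declared with propget above
# data = bytes(some_ibuffer)    # copies Buffer() contents through IBufferByteAccess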
|
10550
|
import csv
import json
import pickle
import logging
import re
import pandas
import gzip
import os
import numpy as np
from random import randint, random
from tqdm import tqdm
from retriever.dense_retriever import DenseRetriever
from models.tokenization import tokenize
from typing import Union, List
class InputExample:
"""
Structure for one input example with texts, the label and a unique id
"""
def __init__(self, guid: str, texts: List[str], label: Union[int, float]):
"""
Creates one InputExample with the given texts, guid and label
str.strip() is called on both texts.
:param guid
id for the example
:param texts
the texts for the example
:param label
the label for the example
"""
self.guid = guid
self.texts = [text.strip() for text in texts]
self.label = label
def get_texts(self):
return self.texts
def get_label(self):
return self.label
class LoggingHandler(logging.Handler):
def __init__(self, level=logging.NOTSET):
super().__init__(level)
def emit(self, record):
try:
msg = self.format(record)
tqdm.write(msg)
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
def get_examples(filename, max_examples=0):
examples = []
id = 0
with open(filename, encoding='utf8') as file:
for j, line in enumerate(file):
line = line.rstrip('\n')
sample = json.loads(line)
label = sample['label']
guid = "%s-%d" % (filename, id)
id += 1
if label == 'entailment':
label = 0
elif label == 'contradiction':
label = 1
else:
label = 2
examples.append(InputExample(guid=guid,
texts=[sample['s1'], sample['s2']],
label=label))
if 0 < max_examples <= len(examples):
break
return examples
def get_qa_examples(filename, max_examples=0, dev=False):
examples = []
id = 0
with open(filename, encoding='utf8') as file:
for j, line in enumerate(file):
line = line.rstrip('\n')
sample = json.loads(line)
label = sample['relevant']
guid = "%s-%d" % (filename, id)
id += 1
examples.append(InputExample(guid=guid,
texts=[sample['question'], sample['answer']],
label=label))
if not dev:
if label == 1:
for _ in range(13):
examples.append(InputExample(guid=guid,
texts=[sample['question'], sample['answer']],
label=label))
if 0 < max_examples <= len(examples):
break
return examples
def map_label(label):
labels = {"relevant": 0, "irrelevant": 1}
return labels[label.strip().lower()]
def get_qar_examples(filename, max_examples=0):
examples = []
id = 0
with open(filename, encoding='utf8') as file:
for j, line in enumerate(file):
line = line.rstrip('\n')
sample = json.loads(line)
guid = "%s-%d" % (filename, id)
id += 1
examples.append(InputExample(guid=guid,
texts=[sample['question'], sample['answer']],
label=1.0))
if 0 < max_examples <= len(examples):
break
return examples
def get_qar_artificial_examples():
examples = []
id = 0
print('Loading passages...')
passages = []
file = open('data/msmarco/collection.tsv', 'r', encoding='utf8')
while True:
line = file.readline()
if not line:
break
line = line.rstrip('\n').split('\t')
passages.append(line[1])
print('Loaded passages')
with open('data/qar/qar_artificial_queries.csv') as f:
for i, line in enumerate(f):
queries = line.rstrip('\n').split('|')
for query in queries:
guid = "%s-%d" % ('', id)
id += 1
examples.append(InputExample(guid=guid,
texts=[query, passages[i]],
label=1.0))
return examples
def get_single_examples(filename, max_examples=0):
examples = []
id = 0
with open(filename, encoding='utf8') as file:
for j, line in enumerate(file):
line = line.rstrip('\n')
sample = json.loads(line)
guid = "%s-%d" % (filename, id)
id += 1
examples.append(InputExample(guid=guid,
texts=[sample['text']],
label=1))
if 0 < max_examples <= len(examples):
break
return examples
def get_qnli_examples(filename, max_examples=0, no_contradictions=False, fever_only=False):
examples = []
id = 0
with open(filename, encoding='utf8') as file:
for j, line in enumerate(file):
line = line.rstrip('\n')
sample = json.loads(line)
label = sample['label']
if label == 'contradiction' and no_contradictions:
continue
if sample['evidence'] == '':
continue
if fever_only and sample['source'] != 'fever':
continue
guid = "%s-%d" % (filename, id)
id += 1
examples.append(InputExample(guid=guid,
texts=[sample['statement'].strip(), sample['evidence'].strip()],
label=1.0))
if 0 < max_examples <= len(examples):
break
return examples
def get_retrieval_examples(filename, negative_corpus='data/msmarco/collection.tsv', max_examples=0, no_statements=True,
encoder_model=None, negative_samples_num=4):
examples = []
queries = []
passages = []
negative_passages = []
id = 0
with open(filename, encoding='utf8') as file:
for j, line in enumerate(file):
line = line.rstrip('\n')
sample = json.loads(line)
if 'evidence' in sample and sample['evidence'] == '':
continue
guid = "%s-%d" % (filename, id)
id += 1
if sample['type'] == 'question':
query = sample['question']
passage = sample['answer']
else:
query = sample['statement']
passage = sample['evidence']
query = query.strip()
passage = passage.strip()
if sample['type'] == 'statement' and no_statements:
continue
queries.append(query)
passages.append(passage)
if sample['source'] == 'natural-questions':
negative_passages.append(passage)
if max_examples == len(passages):
break
if encoder_model is not None:
# Load MSMARCO passages
        logging.info('Loading MSMARCO passages...')
with open(negative_corpus) as file:
for line in file:
p = line.rstrip('\n').split('\t')[1]
negative_passages.append(p)
logging.info('Building ANN index...')
dense_retriever = DenseRetriever(model=encoder_model, batch_size=1024, use_gpu=True)
dense_retriever.create_index_from_documents(negative_passages)
results = dense_retriever.search(queries=queries, limit=100, probes=256)
negative_samples = [
[negative_passages[p[0]] for p in r if negative_passages[p[0]] != passages[i]][:negative_samples_num]
for i, r in enumerate(results)
]
# print(queries[0])
# print(negative_samples[0][0])
for i in range(len(queries)):
texts = [queries[i], passages[i]] + negative_samples[i]
examples.append(InputExample(guid=guid,
texts=texts,
label=1.0))
else:
for i in range(len(queries)):
texts = [queries[i], passages[i]]
examples.append(InputExample(guid=guid,
texts=texts,
label=1.0))
return examples
def get_pair_input(tokenizer, sent1, sent2, max_len=256):
text = "[CLS] {} [SEP] {} [SEP]".format(sent1, sent2)
tokenized_text = tokenizer.tokenize(text)[:max_len]
indexed_tokens = tokenizer.encode(text)[:max_len]
segments_ids = []
sep_flag = False
for i in range(len(tokenized_text)):
if tokenized_text[i] == '[SEP]' and not sep_flag:
segments_ids.append(0)
sep_flag = True
elif sep_flag:
segments_ids.append(1)
else:
segments_ids.append(0)
return indexed_tokens, segments_ids
def build_batch(tokenizer, text_list, max_len=256):
token_id_list = []
segment_list = []
attention_masks = []
longest = -1
for pair in text_list:
sent1, sent2 = pair
ids, segs = get_pair_input(tokenizer, sent1, sent2, max_len=max_len)
if ids is None or segs is None:
continue
token_id_list.append(ids)
segment_list.append(segs)
attention_masks.append([1] * len(ids))
if len(ids) > longest:
longest = len(ids)
if len(token_id_list) == 0:
return None, None, None
# padding
assert (len(token_id_list) == len(segment_list))
for ii in range(len(token_id_list)):
token_id_list[ii] += [0] * (longest - len(token_id_list[ii]))
        attention_masks[ii] += [0] * (longest - len(attention_masks[ii]))  # mask out padded positions
segment_list[ii] += [1] * (longest - len(segment_list[ii]))
return token_id_list, segment_list, attention_masks
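# Sketch of build_batch with a BERT-style tokenizer; `transformers` is an assumed
# extra dependency here and is not imported by this module:
# from transformers import BertTokenizer
# tok = BertTokenizer.from_pretrained('bert-base-uncased')
# ids, segs, masks = build_batch(tok, [('what is the capital of france?',
#                                       'paris is the capital of france.')])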
def load_unsupervised_dataset(dataset_file):
print('Loading dataset...')
x = pickle.load(open(dataset_file, "rb"))
print('Done')
return x, len(x[0])
def load_supervised_dataset(dataset_file):
print('Loading dataset...')
d = pickle.load(open(dataset_file, "rb"))
print('Done')
return d[0], d[1]
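# Usage sketch for the example loaders (the path is hypothetical; the JSONL fields
# 's1', 's2' and 'label' follow the parsing in get_examples above):
# nli_examples = get_examples('data/nli/train.jsonl', max_examples=1000)
# print(nli_examples[0].get_texts(), nli_examples[0].get_label())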
|
10559
|
import typing as t
from gradient_boosting_model.config.core import config
import numpy as np
import pandas as pd
from marshmallow import fields, Schema, ValidationError
class HouseDataInputSchema(Schema):
Alley = fields.Str(allow_none=True)
BedroomAbvGr = fields.Integer()
BldgType = fields.Str()
BsmtCond = fields.Str(allow_none=True)
BsmtExposure = fields.Str(allow_none=True)
BsmtFinSF1 = fields.Float(allow_none=True)
BsmtFinSF2 = fields.Float(allow_none=True)
BsmtFinType1 = fields.Str(allow_none=True)
BsmtFinType2 = fields.Str(allow_none=True)
BsmtFullBath = fields.Float(allow_none=True)
BsmtHalfBath = fields.Float(allow_none=True)
BsmtQual = fields.Str(allow_none=True)
BsmtUnfSF = fields.Float()
CentralAir = fields.Str()
Condition1 = fields.Str()
Condition2 = fields.Str()
Electrical = fields.Str(allow_none=True)
EnclosedPorch = fields.Integer()
ExterCond = fields.Str()
ExterQual = fields.Str()
Exterior1st = fields.Str(allow_none=True)
Exterior2nd = fields.Str(allow_none=True)
Fence = fields.Str(allow_none=True)
FireplaceQu = fields.Str(allow_none=True)
Fireplaces = fields.Integer()
Foundation = fields.Str()
FullBath = fields.Integer()
Functional = fields.Str(allow_none=True)
GarageArea = fields.Float()
GarageCars = fields.Float()
GarageCond = fields.Str(allow_none=True)
GarageFinish = fields.Str(allow_none=True)
GarageQual = fields.Str(allow_none=True)
GarageType = fields.Str(allow_none=True)
GarageYrBlt = fields.Float(allow_none=True)
GrLivArea = fields.Integer()
HalfBath = fields.Integer()
Heating = fields.Str()
HeatingQC = fields.Str()
HouseStyle = fields.Str()
Id = fields.Integer()
KitchenAbvGr = fields.Integer()
KitchenQual = fields.Str(allow_none=True)
LandContour = fields.Str()
LandSlope = fields.Str()
LotArea = fields.Integer()
LotConfig = fields.Str()
LotFrontage = fields.Float(allow_none=True)
LotShape = fields.Str()
LowQualFinSF = fields.Integer()
MSSubClass = fields.Integer()
MSZoning = fields.Str(allow_none=True)
MasVnrArea = fields.Float(allow_none=True)
MasVnrType = fields.Str(allow_none=True)
MiscFeature = fields.Str(allow_none=True)
MiscVal = fields.Integer()
MoSold = fields.Integer()
Neighborhood = fields.Str()
OpenPorchSF = fields.Integer()
OverallCond = fields.Integer()
OverallQual = fields.Integer()
PavedDrive = fields.Str()
PoolArea = fields.Integer()
PoolQC = fields.Str(allow_none=True)
RoofMatl = fields.Str()
RoofStyle = fields.Str()
SaleCondition = fields.Str()
SaleType = fields.Str(allow_none=True)
ScreenPorch = fields.Integer()
Street = fields.Str()
TotRmsAbvGrd = fields.Integer()
TotalBsmtSF = fields.Float()
Utilities = fields.Str(allow_none=True)
WoodDeckSF = fields.Integer()
YearBuilt = fields.Integer()
YearRemodAdd = fields.Integer()
YrSold = fields.Integer()
FirstFlrSF = fields.Integer()
SecondFlrSF = fields.Integer()
ThreeSsnPortch = fields.Integer()
def drop_na_inputs(*, input_data: pd.DataFrame) -> pd.DataFrame:
"""Check model inputs for na values and filter."""
validated_data = input_data.copy()
if input_data[config.model_config.numerical_na_not_allowed].isnull().any().any():
validated_data = validated_data.dropna(
axis=0, subset=config.model_config.numerical_na_not_allowed
)
return validated_data
def validate_inputs(
*, input_data: pd.DataFrame
) -> t.Tuple[pd.DataFrame, t.Optional[dict]]:
"""Check model inputs for unprocessable values."""
# convert syntax error field names (beginning with numbers)
input_data.rename(columns=config.model_config.variables_to_rename, inplace=True)
validated_data = drop_na_inputs(input_data=input_data)
# set many=True to allow passing in a list
schema = HouseDataInputSchema(many=True)
errors = None
try:
# replace numpy nans so that Marshmallow can validate
schema.load(validated_data.replace({np.nan: None}).to_dict(orient="records"))
except ValidationError as exc:
errors = exc.messages
return validated_data, errors
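# Usage sketch (the frame must carry the columns declared in HouseDataInputSchema;
# the CSV path is illustrative):
# df = pd.read_csv('test.csv')
# validated, errors = validate_inputs(input_data=df)
# if errors:
#     print(errors)   # per-row field errors reported by Marshmallow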
|
10561
|
import os
from rlbot.agents.base_agent import BOT_CONFIG_AGENT_HEADER
from rlbot.agents.base_dotnet_agent import BaseDotNetAgent
from rlbot.parsing.custom_config import ConfigHeader, ConfigObject
class DotNetBot(BaseDotNetAgent):
def get_port_file_path(self):
# Look for a port.cfg file in the same directory as THIS python file.
return os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__), 'port.cfg'))
def load_config(self, config_header: ConfigHeader):
self.dotnet_executable_path = config_header.getpath('dotnet_executable_path')
self.logger.info(".NET executable is configured as {}".format(self.dotnet_executable_path))
@staticmethod
def create_agent_configurations(config: ConfigObject):
params = config.get_header(BOT_CONFIG_AGENT_HEADER)
params.add_value('dotnet_executable_path', str, default=None,
description='Relative path to the executable that runs the .NET executable.')
|
10594
|
import itertools
from typing import Sequence, Iterator
# Source: https://github.com/Cog-Creators/Red-DiscordBot/blob/V3/develop/redbot/core/utils/chat_formatting.py
def error(text: str) -> str:
"""Get text prefixed with an error emoji.
Returns
-------
str
The new message.
"""
return "\N{NO ENTRY SIGN} {}".format(text)
def warning(text: str) -> str:
"""Get text prefixed with a warning emoji.
Returns
-------
str
The new message.
"""
return "\N{WARNING SIGN} {}".format(text)
def info(text: str) -> str:
"""Get text prefixed with an info emoji.
Returns
-------
str
The new message.
"""
return "\N{INFORMATION SOURCE} {}".format(text)
def question(text: str) -> str:
"""Get text prefixed with a question emoji.
Returns
-------
str
The new message.
"""
return "\N{BLACK QUESTION MARK ORNAMENT} {}".format(text)
def bold(text: str) -> str:
"""Get the given text in bold.
Parameters
----------
text : str
The text to be marked up.
Returns
-------
str
The marked up text.
"""
return "**{}**".format(text)
def box(text: str, lang: str = "") -> str:
"""Get the given text in a code block.
Parameters
----------
text : str
The text to be marked up.
lang : `str`, optional
The syntax highlighting language for the codeblock.
Returns
-------
str
The marked up text.
"""
ret = "```{}\n{}\n```".format(lang, text)
return ret
def inline(text: str) -> str:
"""Get the given text as inline code.
Parameters
----------
text : str
The text to be marked up.
Returns
-------
str
The marked up text.
"""
return "`{}`".format(text)
def italics(text: str) -> str:
"""Get the given text in italics.
Parameters
----------
text : str
The text to be marked up.
Returns
-------
str
The marked up text.
"""
return "*{}*".format(text)
def bordered(*columns: Sequence[str], ascii_border: bool = False) -> str:
"""Get two blocks of text in a borders.
Note
----
This will only work with a monospaced font.
Parameters
----------
*columns : `sequence` of `str`
The columns of text, each being a list of lines in that column.
ascii_border : bool
Whether or not the border should be pure ASCII.
Returns
-------
str
The bordered text.
"""
borders = {
"TL": "-" if ascii_border else "┌", # Top-left
"TR": "-" if ascii_border else "┐", # Top-right
"BL": "-" if ascii_border else "└", # Bottom-left
"BR": "-" if ascii_border else "┘", # Bottom-right
"HZ": "-" if ascii_border else "─", # Horizontal
"VT": "|" if ascii_border else "│", # Vertical
}
sep = " " * 4 # Separator between boxes
widths = tuple(
max(len(row) for row in column) + 9 for column in columns
) # width of each col
colsdone = [False] * len(columns) # whether or not each column is done
lines = [sep.join("{TL}" + "{HZ}" * width + "{TR}" for width in widths)]
for line in itertools.zip_longest(*columns):
row = []
for colidx, column in enumerate(line):
width = widths[colidx]
done = colsdone[colidx]
if column is None:
if not done:
# bottom border of column
column = "{HZ}" * width
row.append("{BL}" + column + "{BR}")
colsdone[colidx] = True # mark column as done
else:
# leave empty
row.append(" " * (width + 2))
else:
column += " " * (width - len(column)) # append padded spaces
row.append("{VT}" + column + "{VT}")
lines.append(sep.join(row))
final_row = []
for width, done in zip(widths, colsdone):
if not done:
final_row.append("{BL}" + "{HZ}" * width + "{BR}")
else:
final_row.append(" " * (width + 2))
lines.append(sep.join(final_row))
return "\n".join(lines).format(**borders)
def pagify(
text: str,
delims: Sequence[str] = ["\n"],
*,
priority: bool = False,
escape_mass_mentions: bool = True,
shorten_by: int = 8,
page_length: int = 2000
) -> Iterator[str]:
"""Generate multiple pages from the given text.
Note
----
This does not respect code blocks or inline code.
Parameters
----------
text : str
The content to pagify and send.
delims : `sequence` of `str`, optional
Characters where page breaks will occur. If no delimiters are found
in a page, the page will break after ``page_length`` characters.
By default this only contains the newline.
Other Parameters
----------------
priority : `bool`
Set to :code:`True` to choose the page break delimiter based on the
order of ``delims``. Otherwise, the page will always break at the
last possible delimiter.
escape_mass_mentions : `bool`
If :code:`True`, any mass mentions (here or everyone) will be
silenced.
shorten_by : `int`
How much to shorten each page by. Defaults to 8.
page_length : `int`
The maximum length of each page. Defaults to 2000.
Yields
------
`str`
Pages of the given text.
"""
in_text = text
page_length -= shorten_by
while len(in_text) > page_length:
this_page_len = page_length
if escape_mass_mentions:
this_page_len -= in_text.count("@here", 0, page_length) + in_text.count(
"@everyone", 0, page_length
)
closest_delim = (in_text.rfind(d, 1, this_page_len) for d in delims)
if priority:
closest_delim = next((x for x in closest_delim if x > 0), -1)
else:
closest_delim = max(closest_delim)
closest_delim = closest_delim if closest_delim != -1 else this_page_len
if escape_mass_mentions:
to_send = escape(in_text[:closest_delim], mass_mentions=True)
else:
to_send = in_text[:closest_delim]
        if to_send.strip():
            yield to_send
in_text = in_text[closest_delim:]
    if in_text.strip():
if escape_mass_mentions:
yield escape(in_text, mass_mentions=True)
else:
yield in_text
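# Example: split a long message into Discord-sized chunks. The send call is a
# sketch that would live inside an async command handler:
# for page in pagify(long_text, delims=["\n", " "], page_length=1900):
#     await ctx.send(page)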
def strikethrough(text: str) -> str:
"""Get the given text with a strikethrough.
Parameters
----------
text : str
The text to be marked up.
Returns
-------
str
The marked up text.
"""
return "~~{}~~".format(text)
def underline(text: str) -> str:
"""Get the given text with an underline.
Parameters
----------
text : str
The text to be marked up.
Returns
-------
str
The marked up text.
"""
return "__{}__".format(text)
def escape(text: str, *, mass_mentions: bool = False, formatting: bool = False) -> str:
"""Get text with all mass mentions or markdown escaped.
Parameters
----------
text : str
The text to be escaped.
mass_mentions : `bool`, optional
Set to :code:`True` to escape mass mentions in the text.
formatting : `bool`, optional
        Set to :code:`True` to escape any markdown formatting in the text.
Returns
-------
str
The escaped text.
"""
if mass_mentions:
text = text.replace("@everyone", "@\u200beveryone")
text = text.replace("@here", "@\u200bhere")
if formatting:
text = (
text.replace("`", "\\`")
.replace("*", "\\*")
.replace("_", "\\_")
.replace("~", "\\~")
)
return text
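# Example:
# escape("@everyone *hi*", mass_mentions=True, formatting=True)
# returns "@\u200beveryone \*hi\*" -- a zero-width space breaks the mention and the
# asterisks are backslash-escaped so they render literally.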
|
10617
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from modules import Conv, ResBlock
class Wavenet_Student(nn.Module):
def __init__(self, num_blocks_student=[1, 1, 1, 1, 1, 1], num_layers=10,
front_channels=32, residual_channels=64, gate_channels=128, skip_channels=64,
kernel_size=3, cin_channels=80, causal=True):
super(Wavenet_Student, self).__init__()
self.num_blocks = num_blocks_student
self.num_flow = len(self.num_blocks)
self.num_layers = num_layers
self.iafs = nn.ModuleList()
for i in range(self.num_flow):
self.iafs.append(Wavenet_Flow(out_channels=2,
num_blocks=self.num_blocks[i], num_layers=self.num_layers,
front_channels=front_channels, residual_channels=residual_channels,
gate_channels=gate_channels, skip_channels=skip_channels,
kernel_size=kernel_size, cin_channels=cin_channels, causal=causal))
def forward(self, z, c):
return self.iaf(z, c)
def iaf(self, z, c_up):
mu_tot, logs_tot = 0., 0.
for i, iaf in enumerate(self.iafs):
mu_logs = iaf(z, c_up)
mu = mu_logs[:, 0:1, :-1]
logs = mu_logs[:, 1:, :-1]
mu_tot = mu_tot * torch.exp(logs) + mu
logs_tot = logs_tot + logs
z = z[:, :, 1:] * torch.exp(logs) + mu
z = F.pad(z, pad=(1, 0), mode='constant', value=0)
return z, mu_tot, logs_tot
def receptive_field(self):
receptive_field = 1
for iaf in self.iafs:
receptive_field += iaf.receptive_field_size() - 1
return receptive_field
def generate(self, z, c_up):
x, _, _ = self.iaf(z, c_up)
return x
def remove_weight_norm(self):
for iaf in self.iafs:
iaf.remove_weight_norm()
class Wavenet_Flow(nn.Module):
def __init__(self, out_channels=1, num_blocks=1, num_layers=10,
front_channels=32, residual_channels=64, gate_channels=32, skip_channels=None,
kernel_size=3, cin_channels=80, causal=True):
        super(Wavenet_Flow, self).__init__()
self.causal = causal
self.num_blocks = num_blocks
self.num_layers = num_layers
self.front_channels = front_channels
self.out_channels = out_channels
self.gate_channels = gate_channels
self.residual_channels = residual_channels
self.skip_channels = skip_channels
self.cin_channels = cin_channels
self.kernel_size = kernel_size
self.front_conv = nn.Sequential(
Conv(1, self.residual_channels, self.front_channels, causal=self.causal),
nn.ReLU()
)
self.res_blocks = nn.ModuleList()
self.res_blocks_fast = nn.ModuleList()
for b in range(self.num_blocks):
for n in range(self.num_layers):
self.res_blocks.append(ResBlock(self.residual_channels, self.gate_channels, self.skip_channels,
self.kernel_size, dilation=2**n,
cin_channels=self.cin_channels, local_conditioning=True,
causal=self.causal, mode='SAME'))
self.final_conv = nn.Sequential(
nn.ReLU(),
Conv(self.skip_channels, self.skip_channels, 1, causal=self.causal),
nn.ReLU(),
Conv(self.skip_channels, self.out_channels, 1, causal=self.causal)
)
def forward(self, x, c):
return self.wavenet(x, c)
def wavenet(self, tensor, c=None):
h = self.front_conv(tensor)
skip = 0
for i, f in enumerate(self.res_blocks):
h, s = f(h, c)
skip += s
out = self.final_conv(skip)
return out
def receptive_field_size(self):
num_dir = 1 if self.causal else 2
dilations = [2 ** (i % self.num_layers) for i in range(self.num_layers * self.num_blocks)]
return num_dir * (self.kernel_size - 1) * sum(dilations) + 1 + (self.front_channels - 1)
def remove_weight_norm(self):
for f in self.res_blocks:
f.remove_weight_norm()
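# Shape sketch (values are illustrative; assumes modules.Conv and ResBlock behave
# as used above):
# student = Wavenet_Student(cin_channels=80)
# z = torch.randn(1, 1, 16000)       # white-noise input signal
# c_up = torch.randn(1, 80, 16000)   # upsampled local conditioning (e.g. mel)
# x, mu_tot, logs_tot = student(z, c_up)   # x has the same length as z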
|
10618
|
try:
from gevent import monkey
monkey.patch_all()
except ImportError:
# fine if no gevent is available
pass
import base64
import logging
from unittest.mock import Mock
from flask.app import Flask
from flask_testing import TestCase
from openbrokerapi.api import BrokerCredentials
from openbrokerapi.log_util import basic_config
class BrokerTestCase(TestCase):
auth_header = 'Basic ' + base64.b64encode(b":").decode("ascii")
def create_app(self):
from openbrokerapi.api import get_blueprint
app = Flask(__name__)
self.broker = Mock()
app.register_blueprint(
get_blueprint(self.broker,
BrokerCredentials("", ""),
basic_config(level=logging.WARN)
)
)
return app
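# Sketch of a concrete test on top of this base class. The /v2/catalog route and
# X-Broker-Api-Version header follow the Open Service Broker API; since the exact
# rejection code for a missing Authorization header depends on openbrokerapi's
# check order, the sketch only asserts the request is not accepted:
# class CatalogTest(BrokerTestCase):
#     def test_catalog_rejects_missing_auth(self):
#         response = self.client.get('/v2/catalog',
#                                    headers={'X-Broker-Api-Version': '2.13'})
#         self.assertNotEqual(response.status_code, 200)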
|
10652
|
import openmoc
import openmoc.log as log
import openmoc.plotter as plotter
import openmoc.materialize as materialize
log.set_log_level('NORMAL')
###############################################################################
########################### Creating Materials ############################
###############################################################################
log.py_printf('NORMAL', 'Importing materials data from HDF5...')
materials = openmoc.materialize.load_from_hdf5('c5g7-mgxs.h5', '../')
###############################################################################
########################### Creating Surfaces #############################
###############################################################################
log.py_printf('NORMAL', 'Creating surfaces...')
xmin = openmoc.XPlane(x=-5.0, name='xmin')
xmax = openmoc.XPlane(x= 5.0, name='xmax')
ymin = openmoc.YPlane(y=-5.0, name='ymin')
ymax = openmoc.YPlane(y= 5.0, name='ymax')
zmin = openmoc.ZPlane(z=-5.0, name='zmin')
zmax = openmoc.ZPlane(z= 5.0, name='zmax')
xmin.setBoundaryType(openmoc.REFLECTIVE)
xmax.setBoundaryType(openmoc.REFLECTIVE)
ymin.setBoundaryType(openmoc.REFLECTIVE)
ymax.setBoundaryType(openmoc.REFLECTIVE)
zmin.setBoundaryType(openmoc.REFLECTIVE)
zmax.setBoundaryType(openmoc.REFLECTIVE)
###############################################################################
############################# Creating Cells ##############################
###############################################################################
log.py_printf('NORMAL', 'Creating cells...')
fuel = openmoc.Cell(name='fuel')
fuel.setFill(materials['UO2'])
moderator = openmoc.Cell(name='moderator')
moderator.setFill(materials['Water'])
root_cell = openmoc.Cell(name='root cell')
root_cell.addSurface(halfspace=+1, surface=xmin)
root_cell.addSurface(halfspace=-1, surface=xmax)
root_cell.addSurface(halfspace=+1, surface=ymin)
root_cell.addSurface(halfspace=-1, surface=ymax)
root_cell.addSurface(halfspace=+1, surface=zmin)
root_cell.addSurface(halfspace=-1, surface=zmax)
###############################################################################
########################### Creating Universes ############################
###############################################################################
log.py_printf('NORMAL', 'Creating universes...')
fue_univ = openmoc.Universe(name='homogeneous fue cell')
fue_univ.addCell(fuel)
mod_univ = openmoc.Universe(name='homogeneous mod cell')
mod_univ.addCell(moderator)
root_universe = openmoc.Universe(name='root universe')
root_universe.addCell(root_cell)
###############################################################################
########################### Creating Lattices #############################
###############################################################################
log.py_printf('NORMAL', 'Creating simple 10 x 10 lattice...')
f = fue_univ
lattice = openmoc.Lattice(name='10x10 lattice')
lattice.setWidth(width_x=1.0, width_y=1.0, width_z=1.0)
lattice.setUniverses([[[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f]],
[[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f]],
[[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f]],
[[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f]],
[[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f]],
[[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f]],
[[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f]],
[[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f]],
[[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f]],
[[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f],
[f, f, f, f, f, f, f, f, f, f]]])
root_cell.setFill(lattice)
###############################################################################
########################## Creating the Geometry ##########################
###############################################################################
log.py_printf('NORMAL', 'Creating geometry...')
geometry = openmoc.Geometry()
geometry.setRootUniverse(root_universe)
geometry.initializeFlatSourceRegions()
|