ext | sha | content
---|---|---|
py | 1a4c0c6fb28104e271573f1e507c84394d19bf0a | import numpy as np
import matplotlib.pyplot as plt
from tatpulsar.pulsar_timing.utils import *
# Transfer to OOP
#__all__ = ['resampling_profile',
# "norm_profile",
# "phihist"]
__all__ = ['Profile',
"phihist"]
class Profile():
"""
Profile class
"""
def __init__(self, counts, cycles=1):
'''
Initialize the Profile.

Parameters
-----------
counts : array
the counts in each phase bin of the profile
cycles : int
the number of period cycles of the input profile (default is 1).
If cycles=2, the phase of the profile is np.linspace(0, 2, size_of_Profile+1)[:-1]
'''
if type(cycles) != int:
raise TypeError("The cycles of profile should be int")
if cycles > 2:
raise IOError("Why do you have to setup so many cycles? 2 cycles is enough.")
if cycles == 2:
self.counts = np.append(counts, counts)
else:
self.counts = counts
self.phase = np.linspace(0, cycles, self.size+1)[:-1]
@property
def size(self):
return self.counts.size
def resample(self, sample_num=1, kind='poisson'):
'''
resampling the profile
Parameters
-----------
sample_num : int, optional
number of the resamplings for the profile, the default number is 1
kind : str, optional
The sampling distribution, default is 'poisson'.
One of ('poisson', 'gaussian'), referring to the Poisson and Gaussian distributions.
Returns
-----------
resampled_profile : array or ndarray
if sample_num == 1, return a one dimensional array
if sample_num >1 , return a multi-dimensional array
'''
if sample_num <= 0:
raise IOError("The number of samplings must be a positive integer")
raw_profile = np.array(self.counts.tolist()*sample_num)
if kind == "poisson":
resampled_profile = np.random.poisson(raw_profile)
elif kind == "gaussian":
raise NotImplementedError("Gaussian resampling is not implemented yet") #TODO
resampled_profile = resampled_profile.reshape(int(resampled_profile.size/self.size),
int(self.size))
return resampled_profile
def norm(self, yerr=None, method=0):
'''
normalize the profile
method = 0 : normalization = (profile-min)/(max-min)
method = 1 : normalization = (profile-min)/mean(profile)
'''
#TODO: normalize with error
if method == 0:
return (self.counts-np.min(self.counts))/(np.max(self.counts)-np.min(self.counts))
elif method == 1:
return (self.counts-np.min(self.counts))/(np.mean(self.counts))
def phihist(phi, bin_profile, **kwargs):
'''
Compute the phase histogram (pulse profile) of the events.
The inputs are the pulse phases (phi) of the events and the number of profile bins.
The counts in each bin are obtained by histogramming phi over [0, 1] split into bin_profile bins.
Parameters
-----------
phi : array
a set of phase value of events.
bin_profile : int
the number of bins of profile
Return
----------
Profile : object
return the object of Profile
'''
x = np.linspace(0, 1, bin_profile + 1)
counts, phase = np.histogram(phi, x)
profile_object = Profile(counts, **kwargs)
return profile_object
def resampling_profile(profile, sample_num=1, kind='poisson'):
'''
resampling the profile
Parameters
-----------
profile : array
The un-normalized profile
sample_num : int, optional
number of the resamplings for the profile, the default number is 1
kind : str, optional
The sampling distribution, default is 'poisson'.
One of ('poisson', 'gaussian'), referring to the Poisson and Gaussian distributions.
Returns
-----------
resampled_profile : array or ndarray
if sample_num == 1, return a one dimensional array
if sample_num >1 , return a multi-dimensional array
'''
if sample_num <= 0:
raise IOError("The number of samplings must be a positive integer")
raw_profile = np.array(profile.tolist()*sample_num)
if kind == "poisson":
resampled_profile = np.random.poisson(raw_profile)
elif kind == "gaussian":
raise NotImplementedError("Gaussian resampling is not implemented yet") #TODO
resampled_profile = resampled_profile.reshape(int(len(resampled_profile)/len(profile)),
int(len(profile)))
return resampled_profile
def norm_profile(profile, yerr=None):
return (profile-np.min(profile))/(np.max(profile)-np.min(profile))
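# Illustrative usage sketch (not part of the original module). Assumes `phases` is an
# array of event phases folded into [0, 1):
#
#     phases = np.random.uniform(0, 1, size=10000)
#     prof = phihist(phases, bin_profile=32, cycles=2)      # build a 2-cycle profile
#     boot = prof.resample(sample_num=100, kind='poisson')  # 100 Poisson resamplings
#     normed = prof.norm(method=0)                          # min-max normalized counts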
|
py | 1a4c0cc3d9d315a36af10ebbf4105726ad64c3ef | from __future__ import print_function, division
from sympy import (
Basic,
sympify,
symbols,
Dummy,
Lambda,
summation,
Piecewise,
S,
cacheit,
Sum,
exp,
I,
Ne,
Eq,
poly,
series,
factorial,
And,
)
from sympy.polys.polyerrors import PolynomialError
from sympy.solvers.solveset import solveset
from sympy.stats.crv import reduce_rational_inequalities_wrap
from sympy.stats.rv import (
NamedArgsMixin,
SinglePSpace,
SingleDomain,
random_symbols,
PSpace,
ConditionalDomain,
RandomDomain,
ProductDomain,
)
from sympy.stats.symbolic_probability import Probability
from sympy.functions.elementary.integers import floor
from sympy.sets.fancysets import Range, FiniteSet
from sympy.sets.sets import Union
from sympy.sets.contains import Contains
from sympy.utilities import filldedent
from sympy.core.sympify import _sympify
import random
class DiscreteDistribution(Basic):
def __call__(self, *args):
return self.pdf(*args)
class SingleDiscreteDistribution(DiscreteDistribution, NamedArgsMixin):
""" Discrete distribution of a single variable
Serves as superclass for PoissonDistribution etc....
Provides methods for pdf, cdf, and sampling
See Also:
sympy.stats.crv_types.*
"""
set = S.Integers
def __new__(cls, *args):
args = list(map(sympify, args))
return Basic.__new__(cls, *args)
@staticmethod
def check(*args):
pass
def sample(self):
""" A random realization from the distribution """
icdf = self._inverse_cdf_expression()
while True:
sample_ = floor(list(icdf(random.uniform(0, 1)))[0])
if sample_ >= self.set.inf:
return sample_
@cacheit
def _inverse_cdf_expression(self):
""" Inverse of the CDF
Used by sample
"""
x = Dummy("x", positive=True, integer=True)
z = Dummy("z", positive=True)
cdf_temp = self.cdf(x)
# Invert CDF
try:
inverse_cdf = solveset(cdf_temp - z, x, domain=S.Reals)
except NotImplementedError:
inverse_cdf = None
if not inverse_cdf or len(inverse_cdf.free_symbols) != 1:
raise NotImplementedError("Could not invert CDF")
return Lambda(z, inverse_cdf)
@cacheit
def compute_cdf(self, **kwargs):
""" Compute the CDF from the PDF
Returns a Lambda
"""
x, z = symbols("x, z", integer=True, cls=Dummy)
left_bound = self.set.inf
# CDF is the sum of the PDF from the left bound up to z
pdf = self.pdf(x)
cdf = summation(pdf, (x, left_bound, z), **kwargs)
# Ensure that the CDF left of left_bound is zero
cdf = Piecewise((cdf, z >= left_bound), (0, True))
return Lambda(z, cdf)
def _cdf(self, x):
return None
def cdf(self, x, **kwargs):
""" Cumulative density function """
if not kwargs:
cdf = self._cdf(x)
if cdf is not None:
return cdf
return self.compute_cdf(**kwargs)(x)
@cacheit
def compute_characteristic_function(self, **kwargs):
""" Compute the characteristic function from the PDF
Returns a Lambda
"""
x, t = symbols("x, t", real=True, cls=Dummy)
pdf = self.pdf(x)
cf = summation(exp(I * t * x) * pdf, (x, self.set.inf, self.set.sup))
return Lambda(t, cf)
def _characteristic_function(self, t):
return None
def characteristic_function(self, t, **kwargs):
""" Characteristic function """
if not kwargs:
cf = self._characteristic_function(t)
if cf is not None:
return cf
return self.compute_characteristic_function(**kwargs)(t)
@cacheit
def compute_moment_generating_function(self, **kwargs):
t = Dummy("t", real=True)
x = Dummy("x", integer=True)
pdf = self.pdf(x)
mgf = summation(exp(t * x) * pdf, (x, self.set.inf, self.set.sup))
return Lambda(t, mgf)
def _moment_generating_function(self, t):
return None
def moment_generating_function(self, t, **kwargs):
if not kwargs:
mgf = self._moment_generating_function(t)
if mgf is not None:
return mgf
return self.compute_moment_generating_function(**kwargs)(t)
@cacheit
def compute_quantile(self, **kwargs):
""" Compute the Quantile from the PDF
Returns a Lambda
"""
x = Dummy("x", integer=True)
p = Dummy("p", real=True)
left_bound = self.set.inf
pdf = self.pdf(x)
cdf = summation(pdf, (x, left_bound, x), **kwargs)
set = ((x, p <= cdf),)
return Lambda(p, Piecewise(*set))
def _quantile(self, x):
return None
def quantile(self, x, **kwargs):
""" Cumulative density function """
if not kwargs:
quantile = self._quantile(x)
if quantile is not None:
return quantile
return self.compute_quantile(**kwargs)(x)
def expectation(self, expr, var, evaluate=True, **kwargs):
""" Expectation of expression over distribution """
# TODO: support discrete sets with non integer stepsizes
if evaluate:
try:
p = poly(expr, var)
t = Dummy("t", real=True)
mgf = self.moment_generating_function(t)
deg = p.degree()
taylor = poly(series(mgf, t, 0, deg + 1).removeO(), t)
result = 0
for k in range(deg + 1):
result += (
p.coeff_monomial(var ** k)
* taylor.coeff_monomial(t ** k)
* factorial(k)
)
return result
except PolynomialError:
return summation(
expr * self.pdf(var), (var, self.set.inf, self.set.sup), **kwargs
)
else:
return Sum(
expr * self.pdf(var), (var, self.set.inf, self.set.sup), **kwargs
)
def __call__(self, *args):
return self.pdf(*args)
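# Illustrative note (not part of sympy's source): expectation() above evaluates E[p(X)]
# for a polynomial p by reading Taylor coefficients of the moment generating function,
# using E[X**k] = k! * [t**k] M(t). Through the public sympy.stats API this machinery is
# exercised as, e.g.:
#
#     from sympy import Symbol
#     from sympy.stats import Poisson, E
#     lam = Symbol("lambda", positive=True)
#     X = Poisson("X", lam)
#     E(X**2)   # lam**2 + lam, obtained via the MGF shortcut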
class DiscreteDistributionHandmade(SingleDiscreteDistribution):
_argnames = ("pdf",)
@property
def set(self):
return self.args[1]
def __new__(cls, pdf, set=S.Integers):
return Basic.__new__(cls, pdf, set)
class DiscreteDomain(RandomDomain):
"""
A domain with discrete support with step size one.
Represented using symbols and Range.
"""
is_Discrete = True
class SingleDiscreteDomain(DiscreteDomain, SingleDomain):
def as_boolean(self):
return Contains(self.symbol, self.set)
class ConditionalDiscreteDomain(DiscreteDomain, ConditionalDomain):
"""
Domain with discrete support of step size one, that is restricted by
some condition.
"""
@property
def set(self):
rv = self.symbols
if len(self.symbols) > 1:
raise NotImplementedError(
filldedent(
"""
Multivariate conditional domains are not yet implemented."""
)
)
rv = list(rv)[0]
return reduce_rational_inequalities_wrap(self.condition, rv).intersect(
self.fulldomain.set
)
class DiscretePSpace(PSpace):
is_real = True
is_Discrete = True
@property
def pdf(self):
return self.density(*self.symbols)
def where(self, condition):
rvs = random_symbols(condition)
assert all(r.symbol in self.symbols for r in rvs)
if len(rvs) > 1:
raise NotImplementedError(
filldedent(
"""Multivariate discrete
random variables are not yet supported."""
)
)
conditional_domain = reduce_rational_inequalities_wrap(condition, rvs[0])
conditional_domain = conditional_domain.intersect(self.domain.set)
return SingleDiscreteDomain(rvs[0].symbol, conditional_domain)
def probability(self, condition):
complement = isinstance(condition, Ne)
if complement:
condition = Eq(condition.args[0], condition.args[1])
try:
_domain = self.where(condition).set
if condition == False or _domain is S.EmptySet:
return S.Zero
if condition == True or _domain == self.domain.set:
return S.One
prob = self.eval_prob(_domain)
except NotImplementedError:
from sympy.stats.rv import density
expr = condition.lhs - condition.rhs
dens = density(expr)
if not isinstance(dens, DiscreteDistribution):
dens = DiscreteDistributionHandmade(dens)
z = Dummy("z", real=True)
space = SingleDiscretePSpace(z, dens)
prob = space.probability(condition.__class__(space.value, 0))
if prob is None:
prob = Probability(condition)
return prob if not complement else S.One - prob
def eval_prob(self, _domain):
sym = list(self.symbols)[0]
if isinstance(_domain, Range):
n = symbols("n", integer=True)
inf, sup, step = (r for r in _domain.args)
summand = (self.pdf).replace(sym, n * step)
rv = summation(summand, (n, inf / step, (sup) / step - 1)).doit()
return rv
elif isinstance(_domain, FiniteSet):
pdf = Lambda(sym, self.pdf)
rv = sum(pdf(x) for x in _domain)
return rv
elif isinstance(_domain, Union):
rv = sum(self.eval_prob(x) for x in _domain.args)
return rv
def conditional_space(self, condition):
# XXX: Converting from set to tuple. The order matters to Lambda
# though so we should be starting with a set...
density = Lambda(tuple(self.symbols), self.pdf / self.probability(condition))
condition = condition.xreplace(dict((rv, rv.symbol) for rv in self.values))
domain = ConditionalDiscreteDomain(self.domain, condition)
return DiscretePSpace(domain, density)
class ProductDiscreteDomain(ProductDomain, DiscreteDomain):
def as_boolean(self):
return And(*[domain.as_boolean() for domain in self.domains])
class SingleDiscretePSpace(DiscretePSpace, SinglePSpace):
""" Discrete probability space over a single univariate variable """
is_real = True
@property
def set(self):
return self.distribution.set
@property
def domain(self):
return SingleDiscreteDomain(self.symbol, self.set)
def sample(self):
"""
Internal sample method
Returns dictionary mapping RandomSymbol to realization value.
"""
return {self.value: self.distribution.sample()}
def compute_expectation(self, expr, rvs=None, evaluate=True, **kwargs):
rvs = rvs or (self.value,)
if self.value not in rvs:
return expr
expr = _sympify(expr)
expr = expr.xreplace(dict((rv, rv.symbol) for rv in rvs))
x = self.value.symbol
try:
return self.distribution.expectation(expr, x, evaluate=evaluate, **kwargs)
except NotImplementedError:
return Sum(expr * self.pdf, (x, self.set.inf, self.set.sup), **kwargs)
def compute_cdf(self, expr, **kwargs):
if expr == self.value:
x = Dummy("x", real=True)
return Lambda(x, self.distribution.cdf(x, **kwargs))
else:
raise NotImplementedError()
def compute_density(self, expr, **kwargs):
if expr == self.value:
return self.distribution
raise NotImplementedError()
def compute_characteristic_function(self, expr, **kwargs):
if expr == self.value:
t = Dummy("t", real=True)
return Lambda(t, self.distribution.characteristic_function(t, **kwargs))
else:
raise NotImplementedError()
def compute_moment_generating_function(self, expr, **kwargs):
if expr == self.value:
t = Dummy("t", real=True)
return Lambda(t, self.distribution.moment_generating_function(t, **kwargs))
else:
raise NotImplementedError()
def compute_quantile(self, expr, **kwargs):
if expr == self.value:
p = Dummy("p", real=True)
return Lambda(p, self.distribution.quantile(p, **kwargs))
else:
raise NotImplementedError()
|
py | 1a4c0d8e1e6ebc9430bf6a012ea31ca9737fd495 | from unittest.mock import patch
from django.core.management import call_command
from django.db.utils import OperationalError
from django.test import TestCase
class CommandTests(TestCase):
def test_wait_for_db_ready(self):
"""Test waiting for db when db is available"""
with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
gi.return_value = True
call_command('wait_for_db')
self.assertEqual(gi.call_count, 1)
@patch('time.sleep', return_value=True)
def test_wait_for_db(self, ts):
"""Test eaiting for db"""
with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
gi.side_effect = [OperationalError] * 5 + [True]
call_command('wait_for_db')
self.assertEqual(gi.call_count, 6)
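# A minimal sketch of the management command exercised by these tests (an assumption,
# not part of this file) would live in a management/commands/wait_for_db.py module:
#
#     import time
#     from django.db import connections
#     from django.db.utils import OperationalError
#     from django.core.management.base import BaseCommand
#
#     class Command(BaseCommand):
#         """Pause execution until the database connection is available."""
#         def handle(self, *args, **options):
#             self.stdout.write('Waiting for database...')
#             db_conn = None
#             while not db_conn:
#                 try:
#                     db_conn = connections['default']   # triggers ConnectionHandler.__getitem__
#                 except OperationalError:
#                     self.stdout.write('Database unavailable, waiting 1 second...')
#                     time.sleep(1)
#             self.stdout.write(self.style.SUCCESS('Database available!'))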
|
py | 1a4c0f9549f45bc7f44dfd3f3b57a97e5f12a599 | # Copyright 2020 ByteDance Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from neurst.data.data_pipelines.transcript_data_pipeline import TranscriptDataPipeline
from neurst.metrics import register_metric
from neurst.metrics.metric import Metric
def _wer(ref, hypo):
errors = np.zeros([len(ref) + 1, len(hypo) + 1, 3])
errors[0, :, 1] = np.arange(len(hypo) + 1)
errors[:, 0, 2] = np.arange(len(ref) + 1)
substitution = np.array([1, 0, 0])
insertion = np.array([0, 1, 0])
deletion = np.array([0, 0, 1])
for r, ref in enumerate(ref):
for d, dec in enumerate(hypo):
errors[r + 1, d + 1] = min((
errors[r, d] + (ref != dec) * substitution,
errors[r + 1, d] + insertion,
errors[r, d + 1] + deletion), key=np.sum)
return tuple(errors[-1, -1])
@register_metric
class Wer(Metric):
def __init__(self, language="en", *args, **kwargs):
_ = args
_ = kwargs
self._language = language
super(Wer, self).__init__()
def set_groundtruth(self, groundtruth):
""" Setup inside groundtruth.
Args:
groundtruth: A list of references,
[sent0_ref, sent1_ref, ...]
"""
self._references = [TranscriptDataPipeline.cleanup_transcript(
self._language, x, lowercase=True, remove_punctuation=True) for x in groundtruth]
def greater_or_eq(self, result1, result2):
return self.get_value(result1) <= self.get_value(result2)
def get_value(self, result):
if isinstance(result, (float, np.float32, np.float64)):
return result
return result["WER"]
def call(self, hypothesis, groundtruth=None):
""" Calculate wer
Args:
hypothesis: A list of hypothesis texts.
groundtruth: A list of reference texts.
Returns:
A dict with keys "WER", "WER-substitutions", "WER-insertions" and "WER-deletions", all in percent.
"""
if groundtruth is None:
groundtruth = self._references
else:
groundtruth = [TranscriptDataPipeline.cleanup_transcript(
self._language, x, lowercase=True, remove_punctuation=True) for x in groundtruth]
hypothesis = [TranscriptDataPipeline.cleanup_transcript(
self._language, x, lowercase=True, remove_punctuation=True) for x in hypothesis]
substitutions = 0
insertions = 0
deletions = 0
numwords = 0
for lref, lout in zip(groundtruth, hypothesis):
# read the reference and output
reftext, output = lref.strip().split(), lout.strip().split()
# compare output to reference
s, i, d = _wer(reftext, output)
substitutions += s
insertions += i
deletions += d
numwords += len(reftext)
substitutions /= numwords
deletions /= numwords
insertions /= numwords
error = substitutions + deletions + insertions
return {
"WER": error * 100.,
"WER-substitutions": substitutions * 100.,
"WER-insertions": insertions * 100.,
"WER-deletions": deletions * 100.
}
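# Minimal usage sketch (illustrative, not part of the original module); assumes English
# transcripts so that cleanup_transcript applies the standard normalization:
#
#     metric = Wer(language="en")
#     metric.set_groundtruth(["the quick brown fox"])
#     metric.call(["the quick brown box"])
#     # one substitution out of four reference words -> {"WER": 25.0, ...}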
|
py | 1a4c10dd268c18ea7f80bb6c81bca827718cd899 |
def test_list_labels(man):
errors = []
G = man.setGraph("swapi")
resp = G.listLabels()
print(resp)
if len(resp["vertex_labels"]) != 6:
errors.append("listLabels returned an unexpected number of vertex labels; %d != 2" % (len(resp["vertex_labels"])))
if sorted(resp["vertex_labels"]) != ["Character", "Film", "Planet", "Species", "Starship", "Vehicle"]:
errors.append("listLabels returned unexpected vertex labels")
if len(resp["edge_labels"]) != 10:
errors.append("listLabels returned an unexpected number of edge labels; %d != 10" % (len(resp["edge_labels"])))
if sorted(resp["edge_labels"]) != ["characters", "films", "homeworld", "people", "pilots", "planets", "residents", "species", "starships", "vehicles"]:
errors.append("listLabels returned unexpected edge labels")
return errors
|
py | 1a4c10e57be9c1ea98e7ba154fb2b42115d10876 | import json
from django.db import connection
from elasticsearch import Elasticsearch
from jobs.models import Job
es_client = Elasticsearch('http://localhost:9200')
def run():
# Create Index
es_client.indices.create(index='jobs')
# Put Mapping
with open("jobs/job.json", "r") as fp:
es_client.indices.put_mapping(index='jobs', doc_type='job', body=json.load(fp))
# Start Indexing
job_ids = Job.objects.values_list('id', flat=True)
db_cursor = connection.cursor()
for job_id in job_ids:
query = "SELECT get_job_data({});".format(job_id)
db_cursor.execute(query)
result = db_cursor.fetchone()
es_client.index(index='jobs', doc_type='job', body=result[0])
print("Indexed job {}".format(job_id))
|
py | 1a4c123434fe0dd4fd4d4283d27fac64f378eac5 | from pandas import DataFrame
excluded = [
'01 Buster',
'838 Spyder',
'Aqua Blaster',
'B.O.X.',
'B.R.1.C.K',
'CHMP',
'Droid Ravager',
'Drumstick',
'Grumpii',
'HBB Renegade',
'MegaBoidz',
'Meta',
'Order 66',
'Puff Boxer',
'R.E.X. 02',
'Red Steel',
'SB Skyhammer',
'T.I.G.E.R.Zero',
'WAT 51',
'| | | | | | | | | | | | | | | |',
]
def unique_filter(df: DataFrame):
return df[~df['Front'].isin(excluded)]
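# Illustrative usage (the sample card names below are hypothetical):
#     cards = DataFrame({'Front': ['Meta', 'Blue Falcon'], 'Back': ['x', 'y']})
#     unique_filter(cards)   # keeps only the 'Blue Falcon' row, since 'Meta' is excluded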
|
py | 1a4c1319c0e769b64abde7618edf6952dec7bc71 | # -*- coding: utf-8 -*-
"""
Editor: Zhao Xinlu
School: BUPT
Date: 2018-03-01
Algorithm idea: search range in a binary search tree.
Implemented with a helper function that collects results in a list.
"""
"""
Definition of TreeNode:
"""
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
class Solution:
"""
@param: root: param root: The root of the binary search tree
@param: k1: An integer
@param: k2: An integer
@return: return: Return all keys that k1<=key<=k2 in ascending order
"""
def searchRange(self, root, k1, k2):
# write your code here
result = []
self.helper(root, k1, k2, result)
return result
def helper(self, root, k1, k2, result):
if root == None:
return
elif root.val >= k1 and root.val <= k2:
self.helper(root.left, k1, k2, result)
result.append(root.val)
self.helper(root.right, k1, k2, result)
elif root.val < k1:
self.helper(root.right, k1, k2, result)
else:
self.helper(root.left, k1, k2, result)
def searchRange_2(self, root, k1, k2):
if root == None:
return []
elif root.val >= k1 and root.val <= k2:
left = self.searchRange_2(root.left, k1, k2)
result = [root.val]
right = self.searchRange_2(root.right, k1, k2)
return left+result+right
elif root.val < k1:
right = self.searchRange_2(root.right, k1, k2)
return right
else:
left = self.searchRange_2(root.left, k1, k2)
return left
if __name__ == '__main__':
root = TreeNode(20)
node8 = TreeNode(8)
node22 = TreeNode(22)
root.left = node8
root.right = node22
node4 = TreeNode(4)
node12 = TreeNode(12)
node8.left = node4
node8.right = node12
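# Both calls below should print [12, 20, 22]: the keys of the tree above that fall in [10, 22].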
print(Solution().searchRange(root, 10, 22))
print(Solution().searchRange_2(root, 10, 22))
|
py | 1a4c136f29156c0b254012e92f83e11168ebe5f7 | # Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
import copy
import textwrap
import unittest
from collections import namedtuple
from typing import Any, Dict # noqa: F401
from unittest.mock import MagicMock, patch
import neobolt
from amundsen_common.entity.resource_type import ResourceType
from amundsen_common.models.api import health_check
from amundsen_common.models.dashboard import DashboardSummary
from amundsen_common.models.feature import Feature, FeatureWatermark
from amundsen_common.models.generation_code import GenerationCode
from amundsen_common.models.lineage import Lineage, LineageItem
from amundsen_common.models.popular_table import PopularTable
from amundsen_common.models.table import (Application, Badge, Column,
ProgrammaticDescription,
ResourceReport, Source, SqlJoin,
SqlWhere, Stat, Table, TableSummary,
Tag, User, Watermark)
from amundsen_common.models.user import User as UserModel
from neo4j import GraphDatabase
from metadata_service import create_app
from metadata_service.entity.dashboard_detail import DashboardDetail
from metadata_service.entity.dashboard_query import DashboardQuery
from metadata_service.entity.tag_detail import TagDetail
from metadata_service.exception import NotFoundException
from metadata_service.proxy.neo4j_proxy import Neo4jProxy
from metadata_service.util import UserResourceRel
class TestNeo4jProxy(unittest.TestCase):
def setUp(self) -> None:
self.app = create_app(config_module_class='metadata_service.config.LocalConfig')
self.app_context = self.app.app_context()
self.app_context.push()
table_entry = {'db': {'name': 'hive'},
'clstr': {
'name': 'gold'},
'schema': {
'name': 'foo_schema'},
'tbl': {
'name': 'foo_table'},
'tbl_dscrpt': {
'description': 'foo description'}
}
col1 = copy.deepcopy(table_entry) # type: Dict[Any, Any]
col1['col'] = {'name': 'bar_id_1',
'col_type': 'varchar',
'sort_order': 0}
col1['col_dscrpt'] = {'description': 'bar col description'}
col1['col_stats'] = [{'stat_type': 'avg', 'start_epoch': 1, 'end_epoch': 1, 'stat_val': '1'}]
col1['col_badges'] = []
col2 = copy.deepcopy(table_entry) # type: Dict[Any, Any]
col2['col'] = {'name': 'bar_id_2',
'col_type': 'bigint',
'sort_order': 1}
col2['col_dscrpt'] = {'description': 'bar col2 description'}
col2['col_stats'] = [{'stat_type': 'avg', 'start_epoch': 2, 'end_epoch': 2, 'stat_val': '2'}]
col2['col_badges'] = [{'key': 'primary key', 'category': 'column'}]
app1 = {
'application_url': 'url1',
'name': 'Airflow',
'id': 'id1',
}
app2 = {
'application_url': 'url2',
'name': 'Airflow',
'id': 'id2',
}
table_level_results = MagicMock()
table_level_results.single.return_value = {
'wmk_records': [
{
'key': 'hive://gold.test_schema/test_table/high_watermark/',
'partition_key': 'ds',
'partition_value': 'fake_value',
'create_time': 'fake_time',
},
{
'key': 'hive://gold.test_schema/test_table/low_watermark/',
'partition_key': 'ds',
'partition_value': 'fake_value',
'create_time': 'fake_time',
}
],
'producing_apps': [app1],
'consuming_apps': [app2],
'resource_reports': [
{
'name': 'test_report',
'url': 'https://test.report/index.html'
}
],
'last_updated_timestamp': 1,
'owner_records': [
{
'key': '[email protected]',
'email': '[email protected]',
'updated_at': 0,
}
],
'tag_records': [
{
'key': 'test',
'tag_type': 'default'
}
],
'badge_records': [
{
'key': 'golden',
'category': 'table_status'
}
],
'src': {
'source': '/source_file_loc',
'key': 'some key',
'source_type': 'github'
},
'prog_descriptions': [
{
'description_source': 's3_crawler',
'description': 'Test Test Test'
},
{
'description_source': 'quality_report',
'description': 'Test Test'
}
]
}
table_common_usage = MagicMock()
table_common_usage.single.return_value = {
'joins': [
{
'join_exec_cnt': 2,
'join': {
'join_sql': (
'statewide_cases cases '
'join statewide_testing tests on cases.newcountconfirmed <= tests.tested'
),
'join_type': 'inner join',
'joined_on_column': 'newcountconfirmed',
'joined_on_table': {
'schema': 'open_data',
'cluster': 'ca_covid',
'database': 'snowflake',
'name': 'statewide_testing'
},
'column': 'newcountconfirmed'
}
}
],
'filters': [
{
'where_clause': 'b.countnewestcases <= 15',
'where_exec_cnt': 2
}
]
}
last_updated_timestamp = '01'
self.col_usage_return_value = [
col1,
col2
]
self.table_level_return_value = table_level_results
self.app_producing, self.app_consuming = app1, app2
self.last_updated_timestamp = last_updated_timestamp
self.table_common_usage = table_common_usage
def tearDown(self) -> None:
pass
def test_health_neo4j(self) -> None:
# Test health when the enterprise version is used
with patch.object(GraphDatabase, 'driver'), patch.object(Neo4jProxy, '_execute_cypher_query') as mock_execute:
mock_result = MagicMock()
mock_result.single.return_value = {'status': 'check'}
mock_execute.side_effect = [
mock_result
]
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
health_actual = neo4j_proxy.health()
expected_checks = {'Neo4jProxy:connection': {'status': 'check', 'overview_enabled': True}}
health_expected = health_check.HealthCheck(status='ok', checks=expected_checks)
self.assertEqual(health_actual.status, health_expected.status)
self.assertDictEqual(health_actual.checks, health_expected.checks)
# Test health when the open source version is used
with patch.object(GraphDatabase, 'driver'), patch.object(Neo4jProxy, '_execute_cypher_query') as mock_execute:
mock_execute.side_effect = neobolt.exceptions.ClientError()
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
health_actual = neo4j_proxy.health()
expected_checks = {'Neo4jProxy:connection': {'overview_enabled': False}}
health_expected = health_check.HealthCheck(status='ok', checks=expected_checks)
self.assertEqual(health_actual.status, health_expected.status)
self.assertDictEqual(health_actual.checks, health_expected.checks)
# Test health failure (e.g. any other error)
with patch.object(GraphDatabase, 'driver'), patch.object(Neo4jProxy, '_execute_cypher_query') as mock_execute:
mock_execute.side_effect = Exception()
health_actual = neo4j_proxy.health()
expected_checks = {'Neo4jProxy:connection': {}}
health_expected = health_check.HealthCheck(status='fail', checks=expected_checks)
self.assertEqual(health_actual.status, health_expected.status)
self.assertDictEqual(health_actual.checks, health_expected.checks)
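# Note (descriptive comment only): the patching pattern above, combining GraphDatabase.driver
# with Neo4jProxy._execute_cypher_query, is reused throughout this suite; the driver patch
# avoids opening a real Bolt connection, while the query patch feeds canned Neo4j records
# to the proxy under test.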
def test_get_table(self) -> None:
with patch.object(GraphDatabase, 'driver'), patch.object(Neo4jProxy, '_execute_cypher_query') as mock_execute:
mock_execute.side_effect = [
self.col_usage_return_value,
[],
self.table_level_return_value,
self.table_common_usage,
[]
]
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
table = neo4j_proxy.get_table(table_uri='dummy_uri')
expected = Table(database='hive', cluster='gold', schema='foo_schema', name='foo_table',
tags=[Tag(tag_name='test', tag_type='default')],
badges=[Badge(badge_name='golden', category='table_status')],
table_readers=[], description='foo description',
watermarks=[Watermark(watermark_type='high_watermark',
partition_key='ds',
partition_value='fake_value',
create_time='fake_time'),
Watermark(watermark_type='low_watermark',
partition_key='ds',
partition_value='fake_value',
create_time='fake_time')],
columns=[Column(name='bar_id_1', description='bar col description', col_type='varchar',
sort_order=0, stats=[Stat(start_epoch=1,
end_epoch=1,
stat_type='avg',
stat_val='1')], badges=[]),
Column(name='bar_id_2', description='bar col2 description', col_type='bigint',
sort_order=1, stats=[Stat(start_epoch=2,
end_epoch=2,
stat_type='avg',
stat_val='2')],
badges=[Badge(badge_name='primary key', category='column')])],
owners=[User(email='[email protected]', user_id='[email protected]')],
table_writer=Application(**self.app_producing, kind='Producing'),
table_apps=[
Application(**self.app_producing, kind='Producing'),
Application(**self.app_consuming, kind='Consuming')
],
last_updated_timestamp=1,
source=Source(source='/source_file_loc',
source_type='github'),
is_view=False,
programmatic_descriptions=[
ProgrammaticDescription(source='quality_report',
text='Test Test'),
ProgrammaticDescription(source='s3_crawler',
text='Test Test Test')
],
resource_reports=[
ResourceReport(name='test_report', url='https://test.report/index.html')
],
common_joins=[
SqlJoin(
join_sql=(
'statewide_cases cases '
'join statewide_testing tests on cases.newcountconfirmed <= tests.tested'
),
join_type='inner join',
joined_on_column='newcountconfirmed',
joined_on_table=TableSummary(
schema='open_data',
cluster='ca_covid',
database='snowflake',
name='statewide_testing'
),
column='newcountconfirmed'
)
],
common_filters=[
SqlWhere(where_clause='b.countnewestcases <= 15')
])
self.assertEqual(str(expected), str(table))
def test_get_table_view_only(self) -> None:
col_usage_return_value = copy.deepcopy(self.col_usage_return_value)
for col in col_usage_return_value:
col['tbl']['is_view'] = True
with patch.object(GraphDatabase, 'driver'), patch.object(Neo4jProxy, '_execute_cypher_query') as mock_execute:
mock_execute.side_effect = [
col_usage_return_value,
[],
self.table_level_return_value,
self.table_common_usage,
[]
]
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
table = neo4j_proxy.get_table(table_uri='dummy_uri')
expected = Table(database='hive', cluster='gold', schema='foo_schema', name='foo_table',
tags=[Tag(tag_name='test', tag_type='default')],
badges=[Badge(badge_name='golden', category='table_status')],
table_readers=[], description='foo description',
watermarks=[Watermark(watermark_type='high_watermark',
partition_key='ds',
partition_value='fake_value',
create_time='fake_time'),
Watermark(watermark_type='low_watermark',
partition_key='ds',
partition_value='fake_value',
create_time='fake_time')],
columns=[Column(name='bar_id_1', description='bar col description', col_type='varchar',
sort_order=0, stats=[Stat(start_epoch=1,
end_epoch=1,
stat_type='avg',
stat_val='1')], badges=[]),
Column(name='bar_id_2', description='bar col2 description', col_type='bigint',
sort_order=1, stats=[Stat(start_epoch=2,
end_epoch=2,
stat_type='avg',
stat_val='2')],
badges=[Badge(badge_name='primary key', category='column')])],
owners=[User(email='[email protected]', user_id='[email protected]')],
table_writer=Application(**self.app_producing, kind='Producing'),
table_apps=[
Application(**self.app_producing, kind='Producing'),
Application(**self.app_consuming, kind='Consuming')
],
last_updated_timestamp=1,
source=Source(source='/source_file_loc',
source_type='github'),
is_view=True,
programmatic_descriptions=[
ProgrammaticDescription(source='quality_report',
text='Test Test'),
ProgrammaticDescription(source='s3_crawler',
text='Test Test Test')
],
resource_reports=[
ResourceReport(name='test_report', url='https://test.report/index.html')
],
common_joins=[
SqlJoin(
join_sql=(
'statewide_cases cases '
'join statewide_testing tests on cases.newcountconfirmed <= tests.tested'
),
join_type='inner join',
joined_on_column='newcountconfirmed',
joined_on_table=TableSummary(
schema='open_data',
cluster='ca_covid',
database='snowflake',
name='statewide_testing'
),
column='newcountconfirmed'
)
],
common_filters=[
SqlWhere(where_clause='b.countnewestcases <= 15')
])
self.assertEqual(str(expected), str(table))
def test_get_table_with_valid_description(self) -> None:
"""
Test description is returned for table
:return:
"""
with patch.object(GraphDatabase, 'driver'), patch.object(Neo4jProxy, '_execute_cypher_query') as mock_execute:
mock_execute.return_value.single.return_value = dict(description='sample description')
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
table_description = neo4j_proxy.get_table_description(table_uri='test_table')
table_description_query = textwrap.dedent("""
MATCH (n:Table {key: $key})-[:DESCRIPTION]->(d:Description)
RETURN d.description AS description;
""")
mock_execute.assert_called_with(statement=table_description_query,
param_dict={'key': 'test_table'})
self.assertEqual(table_description, 'sample description')
def test_get_table_with_no_description(self) -> None:
"""
Test None is returned for table with no description
:return:
"""
with patch.object(GraphDatabase, 'driver'), patch.object(Neo4jProxy, '_execute_cypher_query') as mock_execute:
mock_execute.return_value.single.return_value = None
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
table_description = neo4j_proxy.get_table_description(table_uri='test_table')
table_description_query = textwrap.dedent("""
MATCH (n:Table {key: $key})-[:DESCRIPTION]->(d:Description)
RETURN d.description AS description;
""")
mock_execute.assert_called_with(statement=table_description_query,
param_dict={'key': 'test_table'})
self.assertIsNone(table_description)
def test_put_table_description(self) -> None:
"""
Test updating table description
:return:
"""
with patch.object(GraphDatabase, 'driver') as mock_driver:
mock_session = MagicMock()
mock_driver.return_value.session.return_value = mock_session
mock_transaction = MagicMock()
mock_session.begin_transaction.return_value = mock_transaction
mock_run = MagicMock()
mock_transaction.run = mock_run
mock_commit = MagicMock()
mock_transaction.commit = mock_commit
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
neo4j_proxy.put_table_description(table_uri='test_table',
description='test_description')
self.assertEqual(mock_run.call_count, 2)
self.assertEqual(mock_commit.call_count, 1)
def test_get_column_with_valid_description(self) -> None:
"""
Test description is returned for column
:return:
"""
with patch.object(GraphDatabase, 'driver'), patch.object(Neo4jProxy, '_execute_cypher_query') as mock_execute:
mock_execute.return_value.single.return_value = dict(description='sample description')
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
col_description = neo4j_proxy.get_column_description(table_uri='test_table',
column_name='test_column')
column_description_query = textwrap.dedent("""
MATCH (tbl:Table {key: $tbl_key})-[:COLUMN]->(c:Column {name: $column_name})-[:DESCRIPTION]->(d:Description)
RETURN d.description AS description;
""")
mock_execute.assert_called_with(statement=column_description_query,
param_dict={'tbl_key': 'test_table',
'column_name': 'test_column'})
self.assertEqual(col_description, 'sample description')
def test_get_column_with_no_description(self) -> None:
"""
Test None is returned for column with no description
:return:
"""
with patch.object(GraphDatabase, 'driver'), patch.object(Neo4jProxy, '_execute_cypher_query') as mock_execute:
mock_execute.return_value.single.return_value = None
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
col_description = neo4j_proxy.get_column_description(table_uri='test_table',
column_name='test_column')
column_description_query = textwrap.dedent("""
MATCH (tbl:Table {key: $tbl_key})-[:COLUMN]->(c:Column {name: $column_name})-[:DESCRIPTION]->(d:Description)
RETURN d.description AS description;
""")
mock_execute.assert_called_with(statement=column_description_query,
param_dict={'tbl_key': 'test_table',
'column_name': 'test_column'})
self.assertIsNone(col_description)
def test_put_column_description(self) -> None:
"""
Test updating column description
:return:
"""
with patch.object(GraphDatabase, 'driver') as mock_driver:
mock_session = MagicMock()
mock_driver.return_value.session.return_value = mock_session
mock_transaction = MagicMock()
mock_session.begin_transaction.return_value = mock_transaction
mock_run = MagicMock()
mock_transaction.run = mock_run
mock_commit = MagicMock()
mock_transaction.commit = mock_commit
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
neo4j_proxy.put_column_description(table_uri='test_table',
column_name='test_column',
description='test_description')
self.assertEqual(mock_run.call_count, 2)
self.assertEqual(mock_commit.call_count, 1)
def test_add_owner(self) -> None:
with patch.object(GraphDatabase, 'driver') as mock_driver:
mock_session = MagicMock()
mock_driver.return_value.session.return_value = mock_session
mock_transaction = MagicMock()
mock_session.begin_transaction.return_value = mock_transaction
mock_run = MagicMock()
mock_transaction.run = mock_run
mock_commit = MagicMock()
mock_transaction.commit = mock_commit
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
neo4j_proxy.add_owner(table_uri='dummy_uri',
owner='tester')
# we call neo4j twice in add_owner call
self.assertEqual(mock_run.call_count, 2)
self.assertEqual(mock_commit.call_count, 1)
def test_delete_owner(self) -> None:
with patch.object(GraphDatabase, 'driver') as mock_driver:
mock_session = MagicMock()
mock_driver.return_value.session.return_value = mock_session
mock_transaction = MagicMock()
mock_session.begin_transaction.return_value = mock_transaction
mock_run = MagicMock()
mock_transaction.run = mock_run
mock_commit = MagicMock()
mock_transaction.commit = mock_commit
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
neo4j_proxy.delete_owner(table_uri='dummy_uri',
owner='tester')
# we only call neo4j once in delete_owner call
self.assertEqual(mock_run.call_count, 1)
self.assertEqual(mock_commit.call_count, 1)
def test_add_table_badge(self) -> None:
with patch.object(GraphDatabase, 'driver') as mock_driver:
mock_session = MagicMock()
mock_driver.return_value.session.return_value = mock_session
mock_transaction = MagicMock()
mock_session.begin_transaction.return_value = mock_transaction
mock_run = MagicMock()
mock_transaction.run = mock_run
mock_commit = MagicMock()
mock_transaction.commit = mock_commit
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
neo4j_proxy.add_badge(id='dummy_uri',
badge_name='hive',
resource_type=ResourceType.Table)
# we call neo4j three times in add_badge call
self.assertEqual(mock_run.call_count, 3)
self.assertEqual(mock_commit.call_count, 1)
def test_add_column_badge(self) -> None:
with patch.object(GraphDatabase, 'driver') as mock_driver:
mock_session = MagicMock()
mock_driver.return_value.session.return_value = mock_session
mock_transaction = MagicMock()
mock_session.begin_transaction.return_value = mock_transaction
mock_run = MagicMock()
mock_transaction.run = mock_run
mock_commit = MagicMock()
mock_transaction.commit = mock_commit
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
neo4j_proxy.add_badge(id='dummy_uri/dummy_column',
badge_name='hive',
resource_type=ResourceType.Column)
# we call neo4j three times in add_badge call
self.assertEqual(mock_run.call_count, 3)
self.assertEqual(mock_commit.call_count, 1)
def test_add_tag(self) -> None:
with patch.object(GraphDatabase, 'driver') as mock_driver:
mock_session = MagicMock()
mock_driver.return_value.session.return_value = mock_session
mock_transaction = MagicMock()
mock_session.begin_transaction.return_value = mock_transaction
mock_run = MagicMock()
mock_transaction.run = mock_run
mock_commit = MagicMock()
mock_transaction.commit = mock_commit
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
neo4j_proxy.add_tag(id='dummy_uri',
tag='hive')
# we call neo4j three times in add_tag call
self.assertEqual(mock_run.call_count, 3)
self.assertEqual(mock_commit.call_count, 1)
def test_delete_tag(self) -> None:
with patch.object(GraphDatabase, 'driver') as mock_driver:
mock_session = MagicMock()
mock_driver.return_value.session.return_value = mock_session
mock_transaction = MagicMock()
mock_session.begin_transaction.return_value = mock_transaction
mock_run = MagicMock()
mock_transaction.run = mock_run
mock_commit = MagicMock()
mock_transaction.commit = mock_commit
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
neo4j_proxy.delete_tag(id='dummy_uri',
tag='hive')
# we only call neo4j once in delete_tag call
self.assertEqual(mock_run.call_count, 1)
self.assertEqual(mock_commit.call_count, 1)
def test_get_tags(self) -> None:
with patch.object(GraphDatabase, 'driver'), patch.object(Neo4jProxy, '_execute_cypher_query') as mock_execute:
mock_execute.return_value = [
{'tag_name': {'key': 'tag1'}, 'tag_count': 2},
{'tag_name': {'key': 'tag2'}, 'tag_count': 1}
]
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
actual = neo4j_proxy.get_tags()
expected = [
TagDetail(tag_name='tag1', tag_count=2),
TagDetail(tag_name='tag2', tag_count=1),
]
self.assertEqual(actual.__repr__(), expected.__repr__())
def test_get_neo4j_latest_updated_ts(self) -> None:
with patch.object(GraphDatabase, 'driver'), patch.object(Neo4jProxy, '_execute_cypher_query') as mock_execute:
mock_execute.return_value.single.return_value = {
'ts': {
'latest_timestamp': '1000'
}
}
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
neo4j_last_updated_ts = neo4j_proxy.get_latest_updated_ts()
self.assertEqual(neo4j_last_updated_ts, '1000')
mock_execute.return_value.single.return_value = {
'ts': {
}
}
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
neo4j_last_updated_ts = neo4j_proxy.get_latest_updated_ts()
self.assertEqual(neo4j_last_updated_ts, 0)
mock_execute.return_value.single.return_value = None
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
neo4j_last_updated_ts = neo4j_proxy.get_latest_updated_ts()
self.assertIsNone(neo4j_last_updated_ts)
def test_get_statistics(self) -> None:
with patch.object(GraphDatabase, 'driver'), patch.object(Neo4jProxy, '_execute_cypher_query') as mock_execute:
mock_execute.return_value = [
{'number_of_tables': '2', 'number_of_documented_tables': '1', 'number_of_documented_cols': '1',
'number_of_owners': '1', 'number_of_tables_with_owners': '1',
'number_of_documented_and_owned_tables': '1'}]
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
neo4j_statistics = neo4j_proxy.get_statistics()
self.assertEqual(neo4j_statistics, {'number_of_tables': '2', 'number_of_documented_tables': '1',
'number_of_documented_cols': '1', 'number_of_owners': '1',
'number_of_tables_with_owners': '1',
'number_of_documented_and_owned_tables': '1'})
def test_get_popular_tables(self) -> None:
# Test cache hit for global popular tables
with patch.object(GraphDatabase, 'driver'), patch.object(Neo4jProxy, '_execute_cypher_query') as mock_execute:
mock_execute.return_value = [{'resource_key': 'foo'}, {'resource_key': 'bar'}]
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
self.assertEqual(neo4j_proxy._get_global_popular_resources_uris(2), ['foo', 'bar'])
self.assertEqual(neo4j_proxy._get_global_popular_resources_uris(2), ['foo', 'bar'])
self.assertEqual(neo4j_proxy._get_global_popular_resources_uris(2), ['foo', 'bar'])
self.assertEqual(mock_execute.call_count, 1)
# Test cache hit for personal popular tables
with patch.object(GraphDatabase, 'driver'), patch.object(Neo4jProxy, '_execute_cypher_query') as mock_execute:
mock_execute.return_value = [{'resource_key': 'foo'}, {'resource_key': 'bar'}]
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
self.assertEqual(neo4j_proxy._get_personal_popular_resources_uris(2, 'test_id'), ['foo', 'bar'])
self.assertEqual(neo4j_proxy._get_personal_popular_resources_uris(2, 'test_id'), ['foo', 'bar'])
self.assertEqual(neo4j_proxy._get_personal_popular_resources_uris(2, 'other_id'), ['foo', 'bar'])
self.assertEqual(mock_execute.call_count, 2)
with patch.object(GraphDatabase, 'driver'), patch.object(Neo4jProxy, '_execute_cypher_query') as mock_execute:
mock_execute.return_value = [
{'database_name': 'db', 'cluster_name': 'clstr', 'schema_name': 'sch', 'table_name': 'foo',
'table_description': 'test description'},
{'database_name': 'db', 'cluster_name': 'clstr', 'schema_name': 'sch', 'table_name': 'bar'}
]
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
actual = neo4j_proxy.get_popular_tables(num_entries=2)
expected = [
PopularTable(database='db', cluster='clstr', schema='sch', name='foo', description='test description'),
PopularTable(database='db', cluster='clstr', schema='sch', name='bar'),
]
self.assertEqual(actual.__repr__(), expected.__repr__())
def test_get_popular_resources_table(self) -> None:
with patch.object(GraphDatabase, 'driver'), patch.object(Neo4jProxy, '_get_popular_tables') as mock_execute:
mock_execute.return_value = [
TableSummary(**{'database': 'db', 'cluster': 'clstr', 'schema': 'sch', 'name': 'foo',
'description': 'test description'}),
TableSummary(**{'database': 'db', 'cluster': 'clstr', 'schema': 'sch', 'name': 'bar'})
]
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
actual = neo4j_proxy.get_popular_resources(num_entries=2, resource_types=["table"])
expected = {
ResourceType.Table.name: [
TableSummary(database='db', cluster='clstr', schema='sch', name='foo',
description='test description'),
TableSummary(database='db', cluster='clstr', schema='sch', name='bar')
]
}
self.assertEqual(expected.__repr__(), actual.__repr__())
def test_get_popular_resources_table_dashboard(self) -> None:
with patch.object(GraphDatabase, 'driver'), patch.object(Neo4jProxy, '_get_popular_tables') as mock_execute:
mock_execute.return_value = [
TableSummary(**{'database': 'db', 'cluster': 'clstr', 'schema': 'sch', 'name': 'foo',
'description': 'test description'}),
TableSummary(**{'database': 'db', 'cluster': 'clstr', 'schema': 'sch', 'name': 'bar'})
]
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
actual = neo4j_proxy.get_popular_resources(num_entries=2, resource_types=["table", "dashboard"])
expected = {
ResourceType.Table.name: [
TableSummary(database='db', cluster='clstr', schema='sch', name='foo',
description='test description'),
TableSummary(database='db', cluster='clstr', schema='sch', name='bar')
],
ResourceType.Dashboard.name: []
}
self.assertEqual(expected.__repr__(), actual.__repr__())
def test_get_user(self) -> None:
with patch.object(GraphDatabase, 'driver'), patch.object(Neo4jProxy, '_execute_cypher_query') as mock_execute:
mock_execute.return_value.single.return_value = {
'user_record': {
'employee_type': 'teamMember',
'full_name': 'test_full_name',
'is_active': 'True',
'profile_url': 'test_profile',
'github_username': 'test-github',
'slack_id': 'test_id',
'last_name': 'test_last_name',
'first_name': 'test_first_name',
'team_name': 'test_team',
'email': 'test_email',
},
'manager_record': {
'full_name': 'test_manager_fullname'
}
}
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
neo4j_user = neo4j_proxy.get_user(id='test_email')
self.assertEqual(neo4j_user.email, 'test_email')
def test_get_user_other_key_values(self) -> None:
with patch.object(GraphDatabase, 'driver'), patch.object(Neo4jProxy, '_execute_cypher_query') as mock_execute:
mock_execute.return_value.single.return_value = {
'user_record': {
'employee_type': 'teamMember',
'full_name': 'test_full_name',
'is_active': 'True',
'profile_url': 'test_profile',
'github_username': 'test-github',
'slack_id': 'test_id',
'last_name': 'test_last_name',
'first_name': 'test_first_name',
'team_name': 'test_team',
'email': 'test_email',
'mode_user_id': 'mode_foo_bar',
'etc': 'etc_foo_bar',
},
'manager_record': {
'full_name': 'test_manager_fullname'
}
}
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
neo4j_user = neo4j_proxy.get_user(id='test_email')
self.assertEqual(neo4j_user.other_key_values, {'mode_user_id': 'mode_foo_bar'})
def test_put_user_new_user(self) -> None:
"""
Test creating a new user
:return:
"""
with patch.object(GraphDatabase, 'driver') as mock_driver:
mock_transaction = mock_driver.return_value.session.return_value.begin_transaction.return_value
mock_run = mock_transaction.run
mock_commit = mock_transaction.commit
test_user = MagicMock()
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
neo4j_proxy.create_update_user(user=test_user)
self.assertEqual(mock_run.call_count, 1)
self.assertEqual(mock_commit.call_count, 1)
def test_get_users(self) -> None:
with patch.object(GraphDatabase, 'driver'), patch.object(Neo4jProxy, '_execute_cypher_query') as mock_execute:
test_user = {
'employee_type': 'teamMember',
'full_name': 'test_full_name',
'is_active': True,
'profile_url': 'test_profile',
'github_username': 'test-github',
'slack_id': 'test_id',
'last_name': 'test_last_name',
'first_name': 'test_first_name',
'team_name': 'test_team',
'email': 'test_email',
'manager_fullname': 'test_manager',
}
test_user_obj = UserModel(email='test_email',
first_name='test_first_name',
last_name='test_last_name',
full_name='test_full_name',
is_active=True,
profile_url='test_profile',
github_username='test-github',
team_name='test_team',
slack_id='test_id',
employee_type='teamMember',
manager_fullname='test_manager')
# TODO: Add frequent_used, bookmarked, & owned resources
mock_execute.return_value.single.return_value = {'users': [test_user]}
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
users = neo4j_proxy.get_users()
actual_data = [test_user_obj]
for attr in ['employee_type',
'full_name',
'is_active',
'profile_url',
'github_username',
'slack_id',
'last_name',
'first_name',
'team_name',
'email',
'manager_fullname']:
self.assertEqual(getattr(users[0], attr),
getattr(actual_data[0], attr))
def test_get_table_by_user_relation(self) -> None:
with patch.object(GraphDatabase, 'driver'), patch.object(Neo4jProxy, '_execute_cypher_query') as mock_execute:
mock_execute.return_value = [
{
'resource': {
'name': 'table_name'
},
'db': {
'name': 'db_name'
},
'clstr': {
'name': 'cluster'
},
'schema': {
'name': 'schema'
},
}
]
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
result = neo4j_proxy.get_table_by_user_relation(user_email='test_user',
relation_type=UserResourceRel.follow)
self.assertEqual(len(result['table']), 1)
self.assertEqual(result['table'][0].name, 'table_name')
self.assertEqual(result['table'][0].database, 'db_name')
self.assertEqual(result['table'][0].cluster, 'cluster')
self.assertEqual(result['table'][0].schema, 'schema')
def test_get_dashboard_by_user_relation(self) -> None:
with patch.object(GraphDatabase, 'driver'), patch.object(Neo4jProxy, '_execute_cypher_query') as mock_execute:
mock_execute.return_value = [
{
'uri': 'dashboard_uri',
'cluster_name': 'cluster',
'dg_name': 'dashboard_group',
'dg_url': 'http://foo.bar/group',
'product': 'foobar',
'name': 'dashboard',
'url': 'http://foo.bar/dashboard',
'description': 'description',
'last_successful_run_timestamp': 1234567890
}
]
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
result = neo4j_proxy.get_dashboard_by_user_relation(user_email='test_user',
relation_type=UserResourceRel.follow)
expected = DashboardSummary(uri='dashboard_uri',
cluster='cluster',
group_name='dashboard_group',
group_url='http://foo.bar/group',
product='foobar',
name='dashboard',
url='http://foo.bar/dashboard',
description='description',
last_successful_run_timestamp=1234567890)
self.assertEqual(len(result['dashboard']), 1)
self.assertEqual(expected, result['dashboard'][0])
def test_add_resource_relation_by_user(self) -> None:
with patch.object(GraphDatabase, 'driver') as mock_driver:
mock_session = MagicMock()
mock_driver.return_value.session.return_value = mock_session
mock_transaction = MagicMock()
mock_session.begin_transaction.return_value = mock_transaction
mock_run = MagicMock()
mock_transaction.run = mock_run
mock_commit = MagicMock()
mock_transaction.commit = mock_commit
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
neo4j_proxy.add_resource_relation_by_user(id='dummy_uri',
user_id='tester',
relation_type=UserResourceRel.follow,
resource_type=ResourceType.Table)
self.assertEqual(mock_run.call_count, 2)
self.assertEqual(mock_commit.call_count, 1)
def test_delete_resource_relation_by_user(self) -> None:
with patch.object(GraphDatabase, 'driver') as mock_driver:
mock_session = MagicMock()
mock_driver.return_value.session.return_value = mock_session
mock_transaction = MagicMock()
mock_session.begin_transaction.return_value = mock_transaction
mock_run = MagicMock()
mock_transaction.run = mock_run
mock_commit = MagicMock()
mock_transaction.commit = mock_commit
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
neo4j_proxy.delete_resource_relation_by_user(id='dummy_uri',
user_id='tester',
relation_type=UserResourceRel.follow,
resource_type=ResourceType.Table)
self.assertEqual(mock_run.call_count, 1)
self.assertEqual(mock_commit.call_count, 1)
def test_get_invalid_user(self) -> None:
with patch.object(GraphDatabase, 'driver'), patch.object(Neo4jProxy, '_execute_cypher_query') as mock_execute:
mock_execute.return_value.single.return_value = None
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
self.assertRaises(NotFoundException, neo4j_proxy.get_user, id='invalid_email')
def test_get_dashboard(self) -> None:
with patch.object(GraphDatabase, 'driver'), patch.object(Neo4jProxy, '_execute_cypher_query') as mock_execute:
mock_execute.return_value.single.side_effect = [
{
'cluster_name': 'cluster_name',
'uri': 'foo_dashboard://gold.bar/dashboard_id',
'url': 'http://www.foo.bar/dashboard_id',
'product': 'foobar',
'name': 'dashboard name',
'created_timestamp': 123456789,
'description': 'description',
'group_name': 'group_name',
'group_url': 'http://www.group_url.com',
'last_successful_run_timestamp': 9876543210,
'last_run_timestamp': 987654321,
'last_run_state': 'good_state',
'updated_timestamp': 123456654321,
'recent_view_count': 100,
'owners': [
{
'employee_type': 'teamMember',
'full_name': 'test_full_name',
'is_active': True,
'profile_url': 'test_profile',
'github_username': 'test-github',
'slack_id': 'test_id',
'last_name': 'test_last_name',
'first_name': 'test_first_name',
'team_name': 'test_team',
'email': 'test_email',
},
{
'employee_type': 'teamMember',
'full_name': 'test_full_name2',
'is_active': True,
'profile_url': 'test_profile',
'github_username': 'test-github2',
'slack_id': 'test_id2',
'last_name': 'test_last_name2',
'first_name': 'test_first_name2',
'team_name': 'test_team2',
'email': 'test_email2',
}
],
'tags': [
{
'key': 'tag_key1',
'tag_type': 'tag_type1'
},
{
'key': 'tag_key2',
'tag_type': 'tag_type2'
}
],
'badges': [
{
'key': 'golden',
'category': 'table_status'
}
],
'charts': [{'name': 'chart1'}, {'name': 'chart2'}],
'queries': [{'name': 'query1'}, {'name': 'query2', 'url': 'http://foo.bar/query',
'query_text': 'SELECT * FROM foo.bar'}],
'tables': [
{
'database': 'db1',
'name': 'table1',
'description': 'table description 1',
'cluster': 'cluster1',
'schema': 'schema1'
},
{
'database': 'db2',
'name': 'table2',
'description': None,
'cluster': 'cluster2',
'schema': 'schema2'
}
]
},
{
'cluster_name': 'cluster_name',
'uri': 'foo_dashboard://gold.bar/dashboard_id',
'url': 'http://www.foo.bar/dashboard_id',
'product': 'foobar',
'name': 'dashboard name',
'created_timestamp': 123456789,
'description': None,
'group_name': 'group_name',
'group_url': 'http://www.group_url.com',
'last_run_timestamp': None,
'last_run_state': None,
'updated_timestamp': None,
'recent_view_count': 0,
'owners': [],
'tags': [],
'badges': [],
'charts': [],
'queries': [],
'tables': []
}
]
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
dashboard = neo4j_proxy.get_dashboard(id='dashboard_id')
expected = DashboardDetail(uri='foo_dashboard://gold.bar/dashboard_id', cluster='cluster_name',
group_name='group_name', group_url='http://www.group_url.com',
product='foobar',
name='dashboard name', url='http://www.foo.bar/dashboard_id',
description='description', created_timestamp=123456789,
last_successful_run_timestamp=9876543210,
updated_timestamp=123456654321, last_run_timestamp=987654321,
last_run_state='good_state',
owners=[User(email='test_email', user_id='test_email',
first_name='test_first_name',
last_name='test_last_name',
full_name='test_full_name', is_active=True,
profile_url='test_profile',
github_username='test-github',
team_name='test_team', slack_id='test_id',
employee_type='teamMember', manager_fullname=None),
User(email='test_email2', user_id='test_email2',
first_name='test_first_name2',
last_name='test_last_name2',
full_name='test_full_name2', is_active=True,
profile_url='test_profile',
github_username='test-github2',
team_name='test_team2', slack_id='test_id2',
employee_type='teamMember', manager_fullname=None)],
frequent_users=[], chart_names=['chart1', 'chart2'],
query_names=['query1', 'query2'],
queries=[DashboardQuery(name='query1'),
DashboardQuery(name='query2', url='http://foo.bar/query',
query_text='SELECT * FROM foo.bar')],
tables=[
PopularTable(database='db1',
name='table1',
description='table description 1',
cluster='cluster1',
schema='schema1'),
PopularTable(database='db2',
name='table2',
cluster='cluster2',
schema='schema2'),
],
tags=[Tag(tag_type='tag_type1', tag_name='tag_key1'),
Tag(tag_type='tag_type2', tag_name='tag_key2')],
badges=[Badge(badge_name='golden', category='table_status')],
recent_view_count=100)
self.assertEqual(expected, dashboard)
dashboard2 = neo4j_proxy.get_dashboard(id='dashboard_id')
expected2 = DashboardDetail(uri='foo_dashboard://gold.bar/dashboard_id', cluster='cluster_name',
group_name='group_name', group_url='http://www.group_url.com',
product='foobar', name='dashboard name',
url='http://www.foo.bar/dashboard_id', description=None,
created_timestamp=123456789, updated_timestamp=None, last_run_timestamp=None,
last_run_state=None, owners=[], frequent_users=[], chart_names=[],
query_names=[], tables=[], tags=[], badges=[],
last_successful_run_timestamp=None, recent_view_count=0)
self.assertEqual(expected2, dashboard2)
def test_get_dashboard_with_valid_description(self) -> None:
"""
Test description is returned for dashboard
:return:
"""
with patch.object(GraphDatabase, 'driver'), patch.object(Neo4jProxy, '_execute_cypher_query') as mock_execute:
mock_execute.return_value.single.return_value = dict(description='sample description')
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
table_description = neo4j_proxy.get_dashboard_description(id='test_dashboard')
dashboard_description_query = textwrap.dedent("""
MATCH (n:Dashboard {key: $key})-[:DESCRIPTION]->(d:Description)
RETURN d.description AS description;
""")
mock_execute.assert_called_with(statement=dashboard_description_query,
param_dict={'key': 'test_dashboard'})
self.assertEqual(table_description.description, 'sample description')
def test_get_dashboard_with_no_description(self) -> None:
"""
        Test None is returned for dashboard with no description
:return:
"""
with patch.object(GraphDatabase, 'driver'), patch.object(Neo4jProxy, '_execute_cypher_query') as mock_execute:
mock_execute.return_value.single.return_value = None
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
table_description = neo4j_proxy.get_dashboard_description(id='test_dashboard')
dashboard_description_query = textwrap.dedent("""
MATCH (n:Dashboard {key: $key})-[:DESCRIPTION]->(d:Description)
RETURN d.description AS description;
""")
mock_execute.assert_called_with(statement=dashboard_description_query,
param_dict={'key': 'test_dashboard'})
self.assertIsNone(table_description.description)
def test_put_dashboard_description(self) -> None:
"""
        Test updating dashboard description
:return:
"""
with patch.object(GraphDatabase, 'driver') as mock_driver:
mock_session = MagicMock()
mock_driver.return_value.session.return_value = mock_session
mock_transaction = MagicMock()
mock_session.begin_transaction.return_value = mock_transaction
mock_run = MagicMock()
mock_transaction.run = mock_run
mock_commit = MagicMock()
mock_transaction.commit = mock_commit
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
neo4j_proxy.put_dashboard_description(id='test_dashboard',
description='test_description')
self.assertEqual(mock_run.call_count, 2)
self.assertEqual(mock_commit.call_count, 1)
expected_stmt = textwrap.dedent("""
MATCH (n1:Description {key: $desc_key}), (n2:Dashboard {key: $key})
MERGE (n2)-[r2:DESCRIPTION]->(n1)
RETURN n1.key, n2.key
""")
mock_run.assert_called_with(expected_stmt, {'desc_key': 'test_dashboard/_description',
'key': 'test_dashboard'})
def test_user_resource_relation_clause(self) -> None:
with patch.object(GraphDatabase, 'driver'):
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
actual = neo4j_proxy._get_user_resource_relationship_clause(UserResourceRel.follow,
id='foo',
user_key='bar',
resource_type=ResourceType.Table)
expected = '(resource:Table {key: $resource_key})-[r1:FOLLOWED_BY]->(usr:User {key: $user_key})-' \
'[r2:FOLLOW]->(resource:Table {key: $resource_key})'
self.assertEqual(expected, actual)
actual = neo4j_proxy._get_user_resource_relationship_clause(UserResourceRel.read,
id='foo',
user_key='bar',
resource_type=ResourceType.Table)
expected = '(resource:Table {key: $resource_key})-[r1:READ_BY]->(usr:User {key: $user_key})-[r2:READ]->' \
'(resource:Table {key: $resource_key})'
self.assertEqual(expected, actual)
actual = neo4j_proxy._get_user_resource_relationship_clause(UserResourceRel.own,
id='foo',
user_key='bar',
resource_type=ResourceType.Table)
expected = '(resource:Table {key: $resource_key})-[r1:OWNER]->(usr:User {key: $user_key})-[r2:OWNER_OF]->' \
'(resource:Table {key: $resource_key})'
self.assertEqual(expected, actual)
actual = neo4j_proxy._get_user_resource_relationship_clause(UserResourceRel.follow,
id='foo',
user_key='bar',
resource_type=ResourceType.Dashboard)
expected = '(resource:Dashboard {key: $resource_key})-[r1:FOLLOWED_BY]->(usr:User {key: $user_key})-' \
'[r2:FOLLOW]->(resource:Dashboard {key: $resource_key})'
self.assertEqual(expected, actual)
def test_get_lineage_no_lineage_information(self) -> None:
with patch.object(GraphDatabase, 'driver'), patch.object(Neo4jProxy, '_execute_cypher_query') as mock_execute:
key = "alpha"
mock_execute.return_value.single.side_effect = [{}]
expected = Lineage(
key=key,
upstream_entities=[],
downstream_entities=[],
direction="both",
depth=1
)
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
actual = neo4j_proxy.get_lineage(id=key, resource_type=ResourceType.Table, direction="both", depth=1)
self.assertEqual(expected, actual)
def test_get_lineage_success(self) -> None:
with patch.object(GraphDatabase, 'driver'), patch.object(Neo4jProxy, '_execute_cypher_query') as mock_execute:
key = "alpha"
mock_execute.return_value.single.side_effect = [{
"upstream_entities": [
{"key": "beta", "source": "gold", "level": 1, "badges": [], "usage":100, "parent": None},
{"key": "gamma", "source": "dyno", "level": 1,
"badges":
[
{"key": "badge1", "category": "default"},
{"key": "badge2", "category": "default"},
],
"usage": 200, "parent": None},
],
"downstream_entities": [
{"key": "delta", "source": "gold", "level": 1, "badges": [], "usage": 50, "parent": None},
]
}]
expected = Lineage(
key=key,
upstream_entities=[
LineageItem(**{"key": "beta", "source": "gold", "level": 1, "badges": [], "usage":100}),
LineageItem(**{"key": "gamma", "source": "dyno", "level": 1,
"badges":
[
Badge(**{"badge_name": "badge1", "category": "default"}),
Badge(**{"badge_name": "badge2", "category": "default"})
],
"usage": 200}),
],
downstream_entities=[
LineageItem(**{"key": "delta", "source": "gold", "level": 1, "badges": [], "usage": 50})
],
direction="both",
depth=1
)
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
actual = neo4j_proxy.get_lineage(id=key, resource_type=ResourceType.Table, direction="both", depth=1)
self.assertEqual(expected.__repr__(), actual.__repr__())
def test_get_feature_success(self) -> None:
with patch.object(GraphDatabase, 'driver'), patch.object(Neo4jProxy, '_execute_cypher_query') as mock_execute:
mock_execute.return_value.single.side_effect = [{
'wmk_records': [
{
'key': 'test_feature_group/test_feature_name/1.2.3/high_watermark',
'time': 'fake_time',
},
{
'key': 'test_feature_group/test_feature_name/1.2.3/low_watermark',
'time': 'fake_time',
}
],
'availability_records': [
{
'name': 'hive',
'publisher_last_updated_epoch_ms': 1621250037268,
'published_tag': '2021-05-16',
'key': 'database://hive'
},
{
'name': 'dynamodb',
'publisher_last_updated_epoch_ms': 1621250037268,
'published_tag': '2021-05-16',
'key': 'database://dynamodb'
}
],
'prog_descriptions': [
{
'description_source': 'quality_report',
'description': 'Test Test'
}
],
'owner_records': [
{
'key': '[email protected]',
'email': '[email protected]'
}
],
'badge_records': [
{
'key': 'pii',
'category': 'data'
}
],
'tag_records': [
{
'tag_type': 'default', 'key': 'test'
},
],
'desc': {
'description': 'test feature description',
'key': 'test_feature_group/test_feature_name/1.2.3/_description',
'description_source': 'description'
},
'feat': {
'last_updated_timestamp': 1,
'data_type': 'bigint',
'name': 'test_feature_name',
'created_timestamp': 1,
'version': '1.2.3',
'key': 'test_feature_group/test_feature_name/1.2.3',
'status': 'active',
'entity': 'test_entity'
},
'fg': {
'name': 'test_feature_group',
}
}]
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
feature = neo4j_proxy.get_feature(feature_uri='dummy_uri')
expected = Feature(key='test_feature_group/test_feature_name/1.2.3',
name='test_feature_name',
version='1.2.3', status='active',
feature_group='test_feature_group', entity='test_entity',
data_type='bigint', availability=['hive', 'dynamodb'],
description='test feature description',
owners=[User(email='[email protected]')],
badges=[Badge(badge_name='pii', category='data')],
tags=[Tag(tag_name='test', tag_type='default')],
programmatic_descriptions=[
ProgrammaticDescription(source='quality_report',
text='Test Test'),
],
watermarks=[FeatureWatermark(
key='test_feature_group/test_feature_name/1.2.3/high_watermark',
watermark_type='high_watermark',
time='fake_time'),
FeatureWatermark(
key='test_feature_group/test_feature_name/1.2.3/low_watermark',
watermark_type='low_watermark',
time='fake_time')],
last_updated_timestamp=1,
created_timestamp=1,
)
self.assertEqual(str(expected), str(feature))
def test_get_feature_not_found(self) -> None:
with patch.object(GraphDatabase, 'driver'), patch.object(Neo4jProxy, '_execute_cypher_query') as mock_execute:
mock_execute.return_value = None
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
self.assertRaises(NotFoundException, neo4j_proxy._exec_feature_query, feature_key='invalid_feat_uri')
self.assertRaises(NotFoundException, neo4j_proxy.get_feature, feature_uri='invalid_feat_uri')
def test_get_resource_generation_code_success(self) -> None:
with patch.object(GraphDatabase, 'driver'), patch.object(Neo4jProxy, '_execute_cypher_query') as mock_execute:
mock_execute.return_value.single.side_effect = [
{'query_records': {
'key': 'test_feature_group/test_feature_name/1.2.3/_generation_code',
'text': 'SELECT * FROM test_table',
'source': 'test_source'}}]
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
gen_code = neo4j_proxy.get_resource_generation_code(uri='dummy_uri',
resource_type=ResourceType.Feature)
expected = GenerationCode(key='test_feature_group/test_feature_name/1.2.3/_generation_code',
text='SELECT * FROM test_table',
source='test_source')
self.assertEqual(str(expected), str(gen_code))
def test_get_resource_generation_code_not_found(self) -> None:
with patch.object(GraphDatabase, 'driver'), patch.object(Neo4jProxy, '_execute_cypher_query') as mock_execute:
mock_execute.return_value = None
neo4j_proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
self.assertRaises(NotFoundException,
neo4j_proxy.get_resource_generation_code,
uri='invalid_feat_uri',
resource_type=ResourceType.Feature)
class TestNeo4jProxyHelpers:
CreateAppsTestCase = namedtuple('CreateAppsTestCase',
['input_producing', 'input_consuming', 'table_writer', 'table_apps'])
def test_create_apps(self) -> None:
def _get_test_record(app_id: str) -> dict:
return {'name': 'SomeApp', 'application_url': 'https://foo.bar', 'id': app_id}
test_cases = [
self.CreateAppsTestCase(
input_producing=[],
input_consuming=[],
table_writer=None,
table_apps=[],
),
self.CreateAppsTestCase(
input_producing=[_get_test_record('1')],
input_consuming=[],
table_writer=Application(**_get_test_record('1'), kind='Producing'),
table_apps=[
Application(**_get_test_record('1'), kind='Producing'),
],
),
self.CreateAppsTestCase(
input_producing=[_get_test_record('1'), _get_test_record('2')],
input_consuming=[_get_test_record('3')],
table_writer=Application(**_get_test_record('1'), kind='Producing'),
table_apps=[
Application(**_get_test_record('1'), kind='Producing'),
Application(**_get_test_record('2'), kind='Producing'),
Application(**_get_test_record('3'), kind='Consuming'),
],
),
self.CreateAppsTestCase(
input_producing=[],
input_consuming=[_get_test_record('3')],
table_writer=None,
table_apps=[
Application(**_get_test_record('3'), kind='Consuming'),
],
),
self.CreateAppsTestCase(
input_producing=[_get_test_record('1')],
input_consuming=[_get_test_record('1'), _get_test_record('2')],
table_writer=Application(**_get_test_record('1'), kind='Producing'),
table_apps=[
Application(**_get_test_record('1'), kind='Producing'),
Application(**_get_test_record('2'), kind='Consuming'),
],
)
]
with patch.object(GraphDatabase, 'driver'):
proxy = Neo4jProxy(host='DOES_NOT_MATTER', port=0000)
for tc in test_cases:
actual_table_writer, actual_table_apps = proxy._create_apps(tc.input_producing, tc.input_consuming)
assert (actual_table_writer, actual_table_apps) == (tc.table_writer, tc.table_apps)
if __name__ == '__main__':
unittest.main()
|
py | 1a4c14d518e506ac69f16d20d781c79a7d701cf0 | # generic object factory
from .builder import Builder
class ObjectFactory:
"""
    Generic object factory that leverages the generic Builder interface to create any kind of object
see : https://realpython.com/factory-method-python/
"""
    def __init__(self) -> None:
"""
Constructs an instance of the ObjectFactory
"""
self._builders = {}
def register_builder(self, key: str, builder: Builder) -> None:
"""
        Adds a builder to the internal builder dictionary.
        When a builder is later invoked via create(), it is looked up in this dictionary by key.
:param key: The name of the builder (key of the dictionary)
:param builder: The builder object that creates an instance of an object
"""
self._builders[key] = builder
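    # Usage sketch (illustrative, not part of the original module), assuming a
    # hypothetical ChairBuilder callable registered under the key "chair";
    # see create() below:
    #
    #     factory = ObjectFactory()
    #     factory.register_builder("chair", ChairBuilder())
    #     chair = factory.create("chair", legs=4)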
def create(self, key, **kwargs):
"""
        Returns an instance built by the builder registered under `key`, using the constructor arguments in kwargs
:param key: The name of the builder registered as key in the _builder dict
:param kwargs: The keyword arguments needed by the builder specified by key
:return: A concrete object built by the builder specified with key
"""
        builder = self._builders.get(key)
        if not builder:
            raise ValueError(key)
return builder(**kwargs) |
py | 1a4c14f08164a9674bccb2b2ba8f4e1ed78ce839 | #!/usr/bin/env python3
# Copyright (c) 2020 The Bitcoin Unlimited developers
import asyncio
import time
from test_framework.util import assert_raises_async, waitFor
from test_framework.test_framework import BitcoinTestFramework
from test_framework.loginit import logging
from test_framework.electrumutil import (ElectrumConnection,
address_to_scripthash, bitcoind_electrum_args)
from test_framework.connectrum.exc import ElectrumErrorResponse
MAX_RPC_CONNECTIONS = 5
MAX_SCRIPTHASH_SUBSCRIPTIONS = 5
SCRIPTHASH_ALIAS_BYTES_LIMIT = 54 * 2 # two bitcoin cash addresses
class ElectrumDoSLimitTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 1
max_args = [
"-electrum.rawarg=--scripthash-subscription-limit={}".format(MAX_SCRIPTHASH_SUBSCRIPTIONS),
"-electrum.rawarg=--scripthash-alias-bytes-limit={}".format(SCRIPTHASH_ALIAS_BYTES_LIMIT),
"-electrum.rawarg=--rpc-max-connections={}".format(MAX_RPC_CONNECTIONS)
]
self.extra_args = [bitcoind_electrum_args() + max_args]
def run_test(self):
n = self.nodes[0]
n.generate(1)
async def async_tests(loop):
await self.test_connection_limit(loop)
await self.test_subscribe_limit(n)
await self.test_scripthash_alias_limit(n)
loop = asyncio.get_event_loop()
loop.run_until_complete(async_tests(loop))
async def test_subscribe_limit(self, n):
cli = ElectrumConnection()
await cli.connect()
logging.info("Testing scripthash subscription limit.")
# Subscribe up to limit
scripthashes = []
for i in range(0, MAX_SCRIPTHASH_SUBSCRIPTIONS):
s = address_to_scripthash(n.getnewaddress())
await cli.subscribe('blockchain.scripthash.subscribe', s)
scripthashes.append(s)
# Next subscription should fail
s = address_to_scripthash(n.getnewaddress())
await assert_raises_async(
ElectrumErrorResponse,
cli.call,
"blockchain.scripthash.subscribe", s)
try:
await cli.call("blockchain.scripthash.subscribe", s)
except ElectrumErrorResponse as e:
error_code = "-32600"
assert error_code in str(e)
assert "subscriptions limit reached" in str(e)
# Subscribing to an existing subscription should not affect the limit.
await cli.subscribe('blockchain.scripthash.subscribe', scripthashes[0])
# Unsubscribing should allow for a new subscription
ok = await cli.call('blockchain.scripthash.unsubscribe', scripthashes[0])
assert(ok)
await cli.subscribe('blockchain.scripthash.subscribe', s)
# ... and also enforce the limit again
await assert_raises_async(ElectrumErrorResponse, cli.call,
'blockchain.scripthash.subscribe',
address_to_scripthash(n.getnewaddress()))
        cli.disconnect()
async def test_scripthash_alias_limit(self, n):
cli = ElectrumConnection()
await cli.connect()
addresses = ["bitcoincash:ppwk8u8cg8cthr3jg0czzays6hsnysykes9amw07kv",
"bitcoincash:qrsrvtc95gg8rrag7dge3jlnfs4j9pe0ugrmeml950"]
# Alias limit allows to subscribe to two addresses.
for a in addresses:
await cli.subscribe('blockchain.address.subscribe', a)
# Third address should fail
third = n.getnewaddress()
await assert_raises_async(
ElectrumErrorResponse,
cli.call,
"blockchain.address.subscribe", third)
try:
await cli.call("blockchain.address.subscribe", third)
except ElectrumErrorResponse as e:
error_code = "-32600"
assert error_code in str(e)
assert "alias subscriptions limit reached" in str(e)
# Unsubscribing should allow for a new subscription
ok = await cli.call('blockchain.address.unsubscribe', addresses[0])
assert(ok)
await cli.subscribe('blockchain.address.subscribe', third)
# ... and also enforce the limit again
await assert_raises_async(ElectrumErrorResponse, cli.call,
'blockchain.address.subscribe', n.getnewaddress())
        cli.disconnect()
async def test_connection_limit(self, loop):
connections = []
for i in range(MAX_RPC_CONNECTIONS):
c = ElectrumConnection()
await c.connect()
connections.append(c)
# Exceed limit, we should get disconnected.
extra_connection = ElectrumConnection()
await extra_connection.connect()
try:
await asyncio.wait_for(extra_connection.call("server.ping"), timeout = 5)
assert(False)
except asyncio.TimeoutError:
# We expect this to timeout
pass
waitFor(5, lambda: not extra_connection.is_connected())
# Drop one connection
connections[0].disconnect()
# New connection should be accepted now.
extra_connection2 = ElectrumConnection()
        await extra_connection2.connect()
await asyncio.wait_for(extra_connection2.call("server.ping"), timeout = 5)
for c in connections[1:] + [extra_connection2]:
c.disconnect()
if __name__ == '__main__':
ElectrumDoSLimitTest().main()
|
py | 1a4c15c766f1a7672d4e12e6a74c78400c452cd0 | from datetime import datetime, timedelta
from http import HTTPStatus
from backend.extensions import db
from backend.models import User, JWTToken
from backend.serializers.login_serializer import LoginSchema
from flask import request
from flask_jwt_extended import (
create_access_token,
create_refresh_token,
decode_token,
get_jwt_identity,
jwt_refresh_token_required,
jwt_required,
jwt_optional,
get_raw_jwt,
)
from flask_restful import Resource
from marshmallow import ValidationError
class UserLogin(Resource):
@jwt_optional
def post(self):
current_user = get_jwt_identity()
if current_user:
return (
{"msg": f"User already logged in as {current_user}"},
HTTPStatus.UNAUTHORIZED,
)
if not request.is_json:
return {"msg": "No input data provided"}, HTTPStatus.BAD_REQUEST
schema = LoginSchema()
try:
result = schema.load(request.json)
except ValidationError as error:
return (
{"msg": "Wrong input data", "errors": error.messages},
HTTPStatus.BAD_REQUEST,
)
username = result["username"]
password = result["password"]
if not (username and password):
return ({"msg": "Username and password required"}, HTTPStatus.BAD_REQUEST)
user = User.query.filter_by(username=username).first()
if user and user.check_password(password):
access_token = create_access_token(
identity=username, expires_delta=timedelta(minutes=60)
)
refresh_token = create_refresh_token(
identity=username, expires_delta=timedelta(weeks=1)
)
ret = {"access_token": access_token, "refresh_token": refresh_token}
add_token_to_database(access_token)
add_token_to_database(refresh_token)
return ret, HTTPStatus.CREATED
else:
return {"msg": "Not authorized"}, HTTPStatus.UNAUTHORIZED
class UserLogout(Resource):
@jwt_required
def delete(self):
jti = get_raw_jwt()["jti"]
token = JWTToken.query.filter_by(jti=jti).one()
token.revoked = True
db.session.commit()
return {"msg": "Successfully logged out"}, HTTPStatus.OK
class RefreshAccessToken(Resource):
@jwt_refresh_token_required
def post(self):
current_user = get_jwt_identity()
access_token = create_access_token(
identity=current_user, expires_delta=timedelta(minutes=60)
)
add_token_to_database(access_token)
return {"access_token": access_token}, HTTPStatus.CREATED
class RefreshToken(Resource):
@jwt_refresh_token_required
def delete(self):
jti = get_raw_jwt()["jti"]
token = JWTToken.query.filter_by(jti=jti).one()
token.revoked = True
db.session.commit()
return {"msg": "Refresh token successfully revoked"}, HTTPStatus.OK
def add_token_to_database(encoded_token):
"""
Adds a new token to the database. It is not revoked when it is added.
    :param encoded_token: the encoded JWT to decode and store
"""
decoded_token = decode_token(encoded_token)
jti = decoded_token["jti"]
token_type = decoded_token["type"]
user_identity = decoded_token["identity"]
expires = datetime.fromtimestamp(decoded_token["exp"])
revoked = False
db_token = JWTToken(
jti=jti,
token_type=token_type,
user_identity=user_identity,
expires=expires,
revoked=revoked,
)
db.session.add(db_token)
db.session.commit()
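# Wiring sketch (illustrative, not part of this module): these resources are
# typically registered on a flask_restful Api instance elsewhere in the app.
# The URL paths below are assumptions, not the project's actual routes.
#
#     api.add_resource(UserLogin, "/auth/login")
#     api.add_resource(UserLogout, "/auth/logout")
#     api.add_resource(RefreshAccessToken, "/auth/refresh")
#     api.add_resource(RefreshToken, "/auth/refresh-revocation")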
|
py | 1a4c16191328f101644c3fa7621906a01fd22593 | from __future__ import division
import torch
from onmt.translate import penalties
class Beam(object):
"""
Class for managing the internals of the beam search process.
Takes care of beams, back pointers, and scores.
Args:
beam_size (int): Number of beams to use.
pad (int): Magic integer in output vocab.
bos (int): Magic integer in output vocab.
eos (int): Magic integer in output vocab.
n_best (int): Don't stop until at least this many beams have
reached EOS.
cuda (bool): use gpu
global_scorer (onmt.translate.GNMTGlobalScorer): Scorer instance.
min_length (int): Shortest acceptable generation, not counting
begin-of-sentence or end-of-sentence.
stepwise_penalty (bool): Apply coverage penalty at every step.
block_ngram_repeat (int): Block beams where
``block_ngram_repeat``-grams repeat.
exclusion_tokens (set[str]): If a gram contains any of these
tokens, it may repeat.
"""
def __init__(self, size, pad, bos, eos,
n_best=1, cuda=False,
global_scorer=None,
min_length=0,
stepwise_penalty=False,
block_ngram_repeat=0,
exclusion_tokens=set()):
self.size = size
self.tt = torch.cuda if cuda else torch
# The score for each translation on the beam.
self.scores = self.tt.FloatTensor(size).zero_()
self.all_scores = []
# The backpointers at each time-step.
self.prev_ks = []
# The outputs at each time-step.
self.next_ys = [self.tt.LongTensor(size)
.fill_(pad)]
self.next_ys[0][0] = bos
# Has EOS topped the beam yet.
self._eos = eos
self.eos_top = False
# The attentions (matrix) for each time.
self.attn = []
# Time and k pair for finished.
self.finished = []
self.n_best = n_best
# Information for global scoring.
self.global_scorer = global_scorer
self.global_state = {}
# Minimum prediction length
self.min_length = min_length
# Apply Penalty at every step
self.stepwise_penalty = stepwise_penalty
self.block_ngram_repeat = block_ngram_repeat
self.exclusion_tokens = exclusion_tokens
@property
def current_predictions(self):
return self.next_ys[-1]
@property
def current_origin(self):
"""Get the backpointers for the current timestep."""
return self.prev_ks[-1]
def advance(self, word_probs, attn_out):
"""
        Given prob over words for every last beam `word_probs` and attention
        `attn_out`: Compute and update the beam search.
Parameters:
* `word_probs`- probs of advancing from the last step (K x words)
* `attn_out`- attention at the last step
Returns: True if beam search is complete.
"""
num_words = word_probs.size(1)
if self.stepwise_penalty:
self.global_scorer.update_score(self, attn_out)
# force the output to be longer than self.min_length
cur_len = len(self.next_ys)
if cur_len <= self.min_length:
# assumes there are len(word_probs) predictions OTHER
# than EOS that are greater than -1e20
for k in range(len(word_probs)):
word_probs[k][self._eos] = -1e20
# Sum the previous scores.
if len(self.prev_ks) > 0:
beam_scores = word_probs + self.scores.unsqueeze(1)
# Don't let EOS have children.
for i in range(self.next_ys[-1].size(0)):
if self.next_ys[-1][i] == self._eos:
beam_scores[i] = -1e20
# Block ngram repeats
if self.block_ngram_repeat > 0:
le = len(self.next_ys)
for j in range(self.next_ys[-1].size(0)):
hyp, _ = self.get_hyp(le - 1, j)
ngrams = set()
fail = False
gram = []
for i in range(le - 1):
# Last n tokens, n = block_ngram_repeat
gram = (gram +
[hyp[i].item()])[-self.block_ngram_repeat:]
# Skip the blocking if it is in the exclusion list
if set(gram) & self.exclusion_tokens:
continue
if tuple(gram) in ngrams:
fail = True
ngrams.add(tuple(gram))
if fail:
beam_scores[j] = -10e20
else:
beam_scores = word_probs[0]
flat_beam_scores = beam_scores.view(-1)
best_scores, best_scores_id = flat_beam_scores.topk(self.size, 0,
True, True)
self.all_scores.append(self.scores)
self.scores = best_scores
# best_scores_id is flattened beam x word array, so calculate which
# word and beam each score came from
        prev_k = best_scores_id // num_words
self.prev_ks.append(prev_k)
self.next_ys.append((best_scores_id - prev_k * num_words))
self.attn.append(attn_out.index_select(0, prev_k))
self.global_scorer.update_global_state(self)
for i in range(self.next_ys[-1].size(0)):
if self.next_ys[-1][i] == self._eos:
global_scores = self.global_scorer.score(self, self.scores)
s = global_scores[i]
self.finished.append((s, len(self.next_ys) - 1, i))
# End condition is when top-of-beam is EOS and no global score.
if self.next_ys[-1][0] == self._eos:
self.all_scores.append(self.scores)
self.eos_top = True
@property
def done(self):
return self.eos_top and len(self.finished) >= self.n_best
def sort_finished(self, minimum=None):
if minimum is not None:
i = 0
# Add from beam until we have minimum outputs.
while len(self.finished) < minimum:
global_scores = self.global_scorer.score(self, self.scores)
s = global_scores[i]
self.finished.append((s, len(self.next_ys) - 1, i))
i += 1
self.finished.sort(key=lambda a: -a[0])
scores = [sc for sc, _, _ in self.finished]
ks = [(t, k) for _, t, k in self.finished]
return scores, ks
def get_hyp(self, timestep, k):
"""
Walk back to construct the full hypothesis.
"""
hyp, attn = [], []
for j in range(len(self.prev_ks[:timestep]) - 1, -1, -1):
hyp.append(self.next_ys[j + 1][k])
attn.append(self.attn[j][k])
k = self.prev_ks[j][k]
return hyp[::-1], torch.stack(attn[::-1])
class GNMTGlobalScorer(object):
"""NMT re-ranking.
Args:
alpha (float): Length parameter.
beta (float): Coverage parameter.
length_penalty (str): Length penalty strategy.
coverage_penalty (str): Coverage penalty strategy.
Attributes:
alpha (float): See above.
beta (float): See above.
length_penalty (callable): See :class:`penalties.PenaltyBuilder`.
coverage_penalty (callable): See :class:`penalties.PenaltyBuilder`.
"""
@classmethod
def from_opt(cls, opt):
return cls(
opt.alpha,
opt.beta,
opt.length_penalty,
opt.coverage_penalty)
def __init__(self, alpha, beta, length_penalty, coverage_penalty):
self.alpha = alpha
self.beta = beta
penalty_builder = penalties.PenaltyBuilder(coverage_penalty,
length_penalty)
# Term will be subtracted from probability
self.cov_penalty = penalty_builder.coverage_penalty()
# Probability will be divided by this
self.length_penalty = penalty_builder.length_penalty()
def score(self, beam, logprobs):
"""
Rescores a prediction based on penalty functions
"""
len_pen = self.length_penalty(len(beam.next_ys), self.alpha)
normalized_probs = logprobs / len_pen
if not beam.stepwise_penalty:
penalty = self.cov_penalty(beam.global_state["coverage"],
self.beta)
normalized_probs -= penalty
return normalized_probs
def update_score(self, beam, attn):
"""
Function to update scores of a Beam that is not finished
"""
if "prev_penalty" in beam.global_state.keys():
beam.scores.add_(beam.global_state["prev_penalty"])
penalty = self.cov_penalty(beam.global_state["coverage"] + attn,
self.beta)
beam.scores.sub_(penalty)
def update_global_state(self, beam):
"Keeps the coverage vector as sum of attentions"
if len(beam.prev_ks) == 1:
beam.global_state["prev_penalty"] = beam.scores.clone().fill_(0.0)
beam.global_state["coverage"] = beam.attn[-1]
self.cov_total = beam.attn[-1].sum(1)
else:
self.cov_total += torch.min(beam.attn[-1],
beam.global_state['coverage']).sum(1)
beam.global_state["coverage"] = beam.global_state["coverage"] \
.index_select(0, beam.prev_ks[-1]).add(beam.attn[-1])
prev_penalty = self.cov_penalty(beam.global_state["coverage"],
self.beta)
beam.global_state["prev_penalty"] = prev_penalty
|
py | 1a4c165f6c615fb2a9c5f6c21a20c58d71ef7c02 | from .BaseRequest import BaseRequest
class UpdateDataAlertRequest(BaseRequest):
"""
    Update data-driven alert request for generating API requests to Tableau Server.
:param ts_connection: The Tableau Server connection object.
:type ts_connection: class
:param subject: (Optional) The string to set as the new subject of the alert.
:type subject: string
:param frequency: (Optional) The frequency of the data-driven alert: once, frequently, hourly,
daily, or weekly.
:type frequency: string
:param alert_owner_id: (Optional) The ID of the user to assign as owner of the data-driven alert.
:type alert_owner_id: string
:param is_public_flag: (Optional) Boolean flag.
Determines the visibility of the data-driven alert. If the flag is True,
users with access to the view containing the alert can see the alert and add
themselves as recipients. If the flag is False, then the alert is only visible
to the owner, site or server administrators, and specific users they add as recipients.
:type is_public_flag: boolean
"""
def __init__(self,
ts_connection,
subject=None,
frequency=None,
alert_owner_id=None,
is_public_flag=None):
super().__init__(ts_connection)
self._subject = subject
self._frequency = frequency
self._alert_owner_id = alert_owner_id
self._is_public_flag = is_public_flag
self.base_update_alert_request
@property
def optional_alert_param_keys(self):
return [
'subject',
'frequency',
'public'
]
@property
def optional_owner_param_keys(self):
return ['id']
@property
def optional_alert_param_values(self):
return [
self._subject,
self._frequency,
self._is_public_flag
]
@property
def optional_owner_param_values(self):
return [self._alert_owner_id]
@property
def base_update_alert_request(self):
self._request_body.update({'dataAlert': {}})
return self._request_body
@property
def modified_update_alert_request(self):
self._request_body['dataAlert'].update(
self._get_parameters_dict(
self.optional_alert_param_keys,
self.optional_alert_param_values))
if self._alert_owner_id:
self._request_body['dataAlert'].update({'owner': {}})
self._request_body['dataAlert']['owner'].update(
self._get_parameters_dict(
self.optional_owner_param_keys,
self.optional_owner_param_values))
return self._request_body
def get_request(self):
return self.modified_update_alert_request
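# Usage sketch (illustrative, not part of this module), assuming `conn` is an
# already-signed-in Tableau Server connection object as expected by BaseRequest;
# the subject and frequency values are made up:
#
#     request_body = UpdateDataAlertRequest(conn,
#                                           subject='Sales dipped below target',
#                                           frequency='daily',
#                                           is_public_flag=True).get_request()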
|
py | 1a4c16925d0b63b96a7cde954792b27af3a9b9f0 | from typing import Dict, List, Optional
import shap
import pandas as pd
from sklearn.base import BaseEstimator
def _create_explainer(
clf: BaseEstimator,
X_train: pd.DataFrame,
**kwargs
):
'''
    Creates a kernel explainer from SHAP.
    This is the most broadly applicable explainer in terms of supported model types.
'''
return shap.KernelExplainer(
model=clf.predict_proba,
data=X_train,
**kwargs
)
def explain_local(
clf: BaseEstimator,
X_train: pd.DataFrame,
instance: pd.Series,
class_names: List,
sample_size: Optional[int] = 100,
explainer_kwargs: Optional[Dict] = {},
explanation_kwargs: Optional[Dict] = {}
):
'''
Creates an explainer and explains the given instance using SHAP.
Args:
clf : Fitted classifier from sklearn
X_train: data that was used to train the classifier
instance: instance to explain
class_names: names of class labels
sample_size: how many data points are used to create the SHAP values
explainer_kwargs: Keyword args passed during explainer initialization
explanation_kwargs: Keyword args passed for explanation
Returns:
Enriched SHAP explanation including figure
'''
explainer = _create_explainer(
clf=clf,
X_train=X_train,
**explainer_kwargs
)
shap_values = explainer.shap_values(instance, nsamples=sample_size)
figure = shap.force_plot(
base_value=explainer.expected_value[0],
shap_values=shap_values[0],
features=instance,
out_names=class_names,
matplotlib=True,
show=False,
**explanation_kwargs
)
return {
'explainer': explainer,
'shap_values': shap_values,
'figure': figure
}
def explain_global(
clf: BaseEstimator,
X_train: pd.DataFrame,
X_test: pd.DataFrame,
class_names: List,
sample_size: Optional[int] = 100,
explainer_kwargs: Optional[Dict] = {},
explanation_kwargs: Optional[Dict] = {}
):
'''
Creates an explainer and explanations for a given dataset using SHAP.
Args:
clf : Fitted classifier from sklearn
X_train: data that was used to train the classifier
X_test: data that should be explained
class_names: names of class labels
sample_size: how many data points are used to create the SHAP values
explainer_kwargs: Keyword args passed during explainer initialization
explanation_kwargs: Keyword args passed for explanation
Returns:
Enriched SHAP explanation including interactive figure
'''
explainer = _create_explainer(
clf=clf,
X_train=X_train,
**explainer_kwargs
)
shap_values = explainer.shap_values(X_test, nsamples=sample_size)
figure = shap.force_plot(
base_value=explainer.expected_value[0],
shap_values=shap_values[0],
features=X_test,
out_names=class_names,
show=False,
**explanation_kwargs
)
return {
'explainer': explainer,
'shap_values': shap_values,
'figure': figure
}
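# Minimal usage sketch (not part of this module): explain a single prediction of
# a small scikit-learn classifier on the iris dataset. The model choice, the
# 25-row background sample, and sample_size=50 are illustrative, not recommended
# defaults.
if __name__ == '__main__':
    from sklearn.datasets import load_iris
    from sklearn.ensemble import RandomForestClassifier
    iris = load_iris(as_frame=True)
    X, y = iris.data, iris.target
    clf = RandomForestClassifier(n_estimators=10, random_state=0).fit(X, y)
    local_result = explain_local(
        clf=clf,
        X_train=X.sample(25, random_state=0),  # small background keeps KernelExplainer fast
        instance=X.iloc[0],
        class_names=list(iris.target_names),
        sample_size=50,
    )
    print(local_result['shap_values'][0])  # per-feature SHAP values for class 0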
|
py | 1a4c17696cfe77e1e45bf851675c805c2b0ae028 | import pyodbc
driver = '{Microsoft Access Driver(*.mdb,*.accdb)}'
filepath = r'C:\Users\weidongc\Desktop\Booking\2020\2020 CN Ads Booking v12.accdb'
myDataSource = pyodbc.dataSources()
access_drive = myDataSource['MS Access Database']
cnxn = pyodbc.connect(driver=access_drive,dbq=filepath,autocommit=True)
crsr = cnxn.cursor()
#grab all the tables
table_list = list(crsr.tables())
# for i in table_list:
# print(i)
table_name = 'wbr'
query = 'select * from {}'.format(table_name)
crsr.execute(query)
result = crsr.fetchall()
crsr.close()
cnxn.close()
#print(result)
# df = pd.DataFrame()
#
# df.append(query)
# one_row = crsr.fetchall()
|
py | 1a4c17ff00184457b786b4b2720dca60293ae53f | # -*- coding: utf-8 -*-
from .uricli import main
main()
|
py | 1a4c1810ae6c01adc18980d87a1e858601d9440b | """ ConViT Model
@article{d2021convit,
title={ConViT: Improving Vision Transformers with Soft Convolutional Inductive Biases},
author={d'Ascoli, St{\'e}phane and Touvron, Hugo and Leavitt, Matthew and Morcos, Ari and Biroli, Giulio and Sagun, Levent},
journal={arXiv preprint arXiv:2103.10697},
year={2021}
}
Paper link: https://arxiv.org/abs/2103.10697
Original code: https://github.com/facebookresearch/convit, original copyright below
Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman
"""
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
'''These modules are adapted from those of timm, see
https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
'''
import torch
import torch.nn as nn
from functools import partial
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from .helpers import build_model_with_cfg
from .layers import DropPath, to_2tuple, trunc_normal_, PatchEmbed, Mlp
from .registry import register_model
from .vision_transformer_hybrid import HybridEmbed
from .fx_features import register_notrace_module
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'fixed_input_size': True,
'first_conv': 'patch_embed.proj', 'classifier': 'head',
**kwargs
}
default_cfgs = {
# ConViT
'convit_tiny': _cfg(
url="https://dl.fbaipublicfiles.com/convit/convit_tiny.pth"),
'convit_small': _cfg(
url="https://dl.fbaipublicfiles.com/convit/convit_small.pth"),
'convit_base': _cfg(
url="https://dl.fbaipublicfiles.com/convit/convit_base.pth")
}
@register_notrace_module # reason: FX can't symbolically trace control flow in forward method
class GPSA(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.,
locality_strength=1.):
super().__init__()
self.num_heads = num_heads
self.dim = dim
head_dim = dim // num_heads
self.scale = head_dim ** -0.5
self.locality_strength = locality_strength
self.qk = nn.Linear(dim, dim * 2, bias=qkv_bias)
self.v = nn.Linear(dim, dim, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.pos_proj = nn.Linear(3, num_heads)
self.proj_drop = nn.Dropout(proj_drop)
self.gating_param = nn.Parameter(torch.ones(self.num_heads))
self.rel_indices: torch.Tensor = torch.zeros(1, 1, 1, 3) # silly torchscript hack, won't work with None
def forward(self, x):
B, N, C = x.shape
if self.rel_indices is None or self.rel_indices.shape[1] != N:
self.rel_indices = self.get_rel_indices(N)
attn = self.get_attention(x)
v = self.v(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
def get_attention(self, x):
B, N, C = x.shape
qk = self.qk(x).reshape(B, N, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k = qk[0], qk[1]
pos_score = self.rel_indices.expand(B, -1, -1, -1)
pos_score = self.pos_proj(pos_score).permute(0, 3, 1, 2)
patch_score = (q @ k.transpose(-2, -1)) * self.scale
patch_score = patch_score.softmax(dim=-1)
pos_score = pos_score.softmax(dim=-1)
gating = self.gating_param.view(1, -1, 1, 1)
attn = (1. - torch.sigmoid(gating)) * patch_score + torch.sigmoid(gating) * pos_score
attn /= attn.sum(dim=-1).unsqueeze(-1)
attn = self.attn_drop(attn)
return attn
def get_attention_map(self, x, return_map=False):
attn_map = self.get_attention(x).mean(0) # average over batch
distances = self.rel_indices.squeeze()[:, :, -1] ** .5
dist = torch.einsum('nm,hnm->h', (distances, attn_map)) / distances.size(0)
if return_map:
return dist, attn_map
else:
return dist
def local_init(self):
self.v.weight.data.copy_(torch.eye(self.dim))
locality_distance = 1 # max(1,1/locality_strength**.5)
kernel_size = int(self.num_heads ** .5)
center = (kernel_size - 1) / 2 if kernel_size % 2 == 0 else kernel_size // 2
for h1 in range(kernel_size):
for h2 in range(kernel_size):
position = h1 + kernel_size * h2
self.pos_proj.weight.data[position, 2] = -1
self.pos_proj.weight.data[position, 1] = 2 * (h1 - center) * locality_distance
self.pos_proj.weight.data[position, 0] = 2 * (h2 - center) * locality_distance
self.pos_proj.weight.data *= self.locality_strength
def get_rel_indices(self, num_patches: int) -> torch.Tensor:
img_size = int(num_patches ** .5)
rel_indices = torch.zeros(1, num_patches, num_patches, 3)
ind = torch.arange(img_size).view(1, -1) - torch.arange(img_size).view(-1, 1)
indx = ind.repeat(img_size, img_size)
indy = ind.repeat_interleave(img_size, dim=0).repeat_interleave(img_size, dim=1)
indd = indx ** 2 + indy ** 2
rel_indices[:, :, :, 2] = indd.unsqueeze(0)
rel_indices[:, :, :, 1] = indy.unsqueeze(0)
rel_indices[:, :, :, 0] = indx.unsqueeze(0)
device = self.qk.weight.device
return rel_indices.to(device)
class MHSA(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def get_attention_map(self, x, return_map=False):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
attn_map = (q @ k.transpose(-2, -1)) * self.scale
attn_map = attn_map.softmax(dim=-1).mean(0)
img_size = int(N ** .5)
ind = torch.arange(img_size).view(1, -1) - torch.arange(img_size).view(-1, 1)
indx = ind.repeat(img_size, img_size)
indy = ind.repeat_interleave(img_size, dim=0).repeat_interleave(img_size, dim=1)
indd = indx ** 2 + indy ** 2
distances = indd ** .5
distances = distances.to('cuda')
dist = torch.einsum('nm,hnm->h', (distances, attn_map)) / N
if return_map:
return dist, attn_map
else:
return dist
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, use_gpsa=True, **kwargs):
super().__init__()
self.norm1 = norm_layer(dim)
self.use_gpsa = use_gpsa
if self.use_gpsa:
self.attn = GPSA(
dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop, **kwargs)
else:
self.attn = MHSA(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
def forward(self, x):
x = x + self.drop_path(self.attn(self.norm1(x)))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class ConViT(nn.Module):
""" Vision Transformer with support for patch or hybrid CNN input stage
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
num_heads=12, mlp_ratio=4., qkv_bias=False, drop_rate=0., attn_drop_rate=0.,
drop_path_rate=0., hybrid_backbone=None, norm_layer=nn.LayerNorm, global_pool=None,
local_up_to_layer=3, locality_strength=1., use_pos_embed=True):
super().__init__()
embed_dim *= num_heads
self.num_classes = num_classes
self.local_up_to_layer = local_up_to_layer
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
self.locality_strength = locality_strength
self.use_pos_embed = use_pos_embed
if hybrid_backbone is not None:
self.patch_embed = HybridEmbed(
hybrid_backbone, img_size=img_size, in_chans=in_chans, embed_dim=embed_dim)
else:
self.patch_embed = PatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
num_patches = self.patch_embed.num_patches
self.num_patches = num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_drop = nn.Dropout(p=drop_rate)
if self.use_pos_embed:
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
trunc_normal_(self.pos_embed, std=.02)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
use_gpsa=True,
locality_strength=locality_strength)
if i < local_up_to_layer else
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
use_gpsa=False)
for i in range(depth)])
self.norm = norm_layer(embed_dim)
# Classifier head
self.feature_info = [dict(num_chs=embed_dim, reduction=0, module='head')]
self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
for n, m in self.named_modules():
if hasattr(m, 'local_init'):
m.local_init()
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token'}
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=''):
self.num_classes = num_classes
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
B = x.shape[0]
x = self.patch_embed(x)
cls_tokens = self.cls_token.expand(B, -1, -1)
if self.use_pos_embed:
x = x + self.pos_embed
x = self.pos_drop(x)
for u, blk in enumerate(self.blocks):
if u == self.local_up_to_layer:
x = torch.cat((cls_tokens, x), dim=1)
x = blk(x)
x = self.norm(x)
return x[:, 0]
def forward(self, x):
x = self.forward_features(x)
x = self.head(x)
return x
def _create_convit(variant, pretrained=False, **kwargs):
if kwargs.get('features_only', None):
raise RuntimeError('features_only not implemented for Vision Transformer models.')
return build_model_with_cfg(
ConViT, variant, pretrained,
default_cfg=default_cfgs[variant],
**kwargs)
@register_model
def convit_tiny(pretrained=False, **kwargs):
model_args = dict(
local_up_to_layer=10, locality_strength=1.0, embed_dim=48,
num_heads=4, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model = _create_convit(variant='convit_tiny', pretrained=pretrained, **model_args)
return model
@register_model
def convit_small(pretrained=False, **kwargs):
model_args = dict(
local_up_to_layer=10, locality_strength=1.0, embed_dim=48,
num_heads=9, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model = _create_convit(variant='convit_small', pretrained=pretrained, **model_args)
return model
@register_model
def convit_base(pretrained=False, **kwargs):
model_args = dict(
local_up_to_layer=10, locality_strength=1.0, embed_dim=48,
num_heads=16, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model = _create_convit(variant='convit_base', pretrained=pretrained, **model_args)
return model
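# Usage sketch (illustrative): build a variant through the timm registry and run
# a dummy batch through it. Weights are random here (pretrained=False) and the
# num_classes value is just an example.
#
#     import torch
#     from timm import create_model
#     model = create_model('convit_small', pretrained=False, num_classes=10)
#     logits = model(torch.randn(2, 3, 224, 224))   # -> shape (2, 10)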
|
py | 1a4c18221a7da1a3f087e6bb1cf586634a45874f | # -*- coding: utf-8 -*-
# Copyright (c) 2021 Brian Scholer (@briantist)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# this file must define the "adapter" fixture at a minimum,
# and anything else that it needs or depends on that isn't already defined in the test files themselves.
# Keep in mind that this one is for module_utils and so it cannot depend on or import any controller-side code.
from ansible_collections.community.hashi_vault.plugins.module_utils._hashi_vault_common import HashiVaultOptionAdapter
import pytest
class FakeAnsibleModule:
'''HashiVaultOptionAdapter.from_ansible_module() only cares about the AnsibleModule.params dict'''
def __init__(self, params):
self.params = params
@pytest.fixture
def ansible_module(sample_dict):
return FakeAnsibleModule(sample_dict)
@pytest.fixture
def adapter_from_ansible_module(ansible_module):
def _create_adapter_from_ansible_module():
return HashiVaultOptionAdapter.from_ansible_module(ansible_module)
return _create_adapter_from_ansible_module
@pytest.fixture(params=['dict', 'dict_defaults', 'ansible_module'])
def adapter(request, adapter_from_dict, adapter_from_dict_defaults, adapter_from_ansible_module):
return {
'dict': adapter_from_dict,
'dict_defaults': adapter_from_dict_defaults,
'ansible_module': adapter_from_ansible_module,
}[request.param]()
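# Usage sketch (illustrative, not part of this conftest): a test that takes the
# parametrized `adapter` fixture runs once per adapter flavor. The option name
# and the get_option() call are assumptions about the adapter's interface, not
# something this file defines.
#
#     def test_option_roundtrip(adapter):
#         assert adapter.get_option('url') is not None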
|
py | 1a4c18b648db74e7bc0297f1fa14d0c6a8d5776b | import collections
import datetime
import logging
from celery import shared_task
from dateutil.relativedelta import relativedelta
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from django.db import transaction
from django.db.models import Q, Sum
from django.utils import timezone
from rest_framework import status
from waldur_core.core import utils as core_utils
from waldur_core.structure import models as structure_models
from waldur_mastermind.common.utils import create_request
from waldur_mastermind.invoices import models as invoices_models
from waldur_mastermind.invoices import utils as invoice_utils
from . import exceptions, models, utils, views
logger = logging.getLogger(__name__)
User = get_user_model()
def approve_order(order, user):
order.approve()
order.approved_by = user
order.approved_at = timezone.now()
order.save()
serialized_order = core_utils.serialize_instance(order)
serialized_user = core_utils.serialize_instance(user)
transaction.on_commit(
lambda: process_order.delay(serialized_order, serialized_user)
)
@shared_task
def process_order(serialized_order, serialized_user):
    # Skip the remote plugin because it is going to be processed
# only after it gets approved by service provider
from waldur_mastermind.marketplace_remote import PLUGIN_NAME as REMOTE_PLUGIN_NAME
order = core_utils.deserialize_instance(serialized_order)
user = core_utils.deserialize_instance(serialized_user)
for item in order.items.exclude(offering__type=REMOTE_PLUGIN_NAME):
item.set_state_executing()
item.save(update_fields=['state'])
utils.process_order_item(item, user)
@shared_task
def process_order_item(serialized_order_item, serialized_user):
order_item = core_utils.deserialize_instance(serialized_order_item)
user = core_utils.deserialize_instance(serialized_user)
utils.process_order_item(order_item, user)
@shared_task
def create_screenshot_thumbnail(uuid):
screenshot = models.Screenshot.objects.get(uuid=uuid)
utils.create_screenshot_thumbnail(screenshot)
@shared_task
def notify_order_approvers(uuid):
order = models.Order.objects.get(uuid=uuid)
users = order.get_approvers()
emails = [u.email for u in users if u.email]
link = core_utils.format_homeport_link(
'projects/{project_uuid}/marketplace-order-list/',
project_uuid=order.project.uuid,
)
context = {
'order_url': link,
'order': order,
'site_name': settings.WALDUR_CORE['SITE_NAME'],
}
core_utils.broadcast_mail('marketplace', 'notification_approval', context, emails)
@shared_task
def notify_about_resource_change(event_type, context, resource_uuid):
resource = models.Resource.objects.get(uuid=resource_uuid)
project = structure_models.Project.all_objects.get(id=resource.project_id)
emails = project.get_users().values_list('email', flat=True)
core_utils.broadcast_mail('marketplace', event_type, context, emails)
def filter_aggregate_by_scope(queryset, scope):
scope_path = None
if isinstance(scope, structure_models.Project):
scope_path = 'resource__project'
if isinstance(scope, structure_models.Customer):
scope_path = 'resource__project__customer'
if scope_path:
queryset = queryset.filter(**{scope_path: scope})
return queryset
def aggregate_reported_usage(start, end, scope):
queryset = models.ComponentUsage.objects.filter(
date__gte=start, date__lte=end
).exclude(component__parent=None)
queryset = filter_aggregate_by_scope(queryset, scope)
queryset = queryset.values('component__parent_id').annotate(total=Sum('usage'))
return {row['component__parent_id']: row['total'] for row in queryset}
def aggregate_fixed_usage(start, end, scope):
queryset = models.ResourcePlanPeriod.objects.filter(
# Resource has been active during billing period
Q(start__gte=start, end__lte=end)
| Q(end__isnull=True) # Resource is still active
| Q(
end__gte=start, end__lte=end
) # Resource has been launched in previous billing period and stopped in current
)
queryset = filter_aggregate_by_scope(queryset, scope)
queryset = queryset.values('plan__components__component__parent_id').annotate(
total=Sum('plan__components__amount')
)
return {
row['plan__components__component__parent_id']: row['total'] for row in queryset
}
def calculate_usage_for_scope(start, end, scope):
reported_usage = aggregate_reported_usage(start, end, scope)
fixed_usage = aggregate_fixed_usage(start, end, scope)
    # Handle the case where a key is None, because OfferingComponent.parent can be None.
fixed_usage.pop(None, None)
components = set(reported_usage.keys()) | set(fixed_usage.keys())
content_type = ContentType.objects.get_for_model(scope)
for component_id in components:
models.CategoryComponentUsage.objects.update_or_create(
content_type=content_type,
object_id=scope.id,
component_id=component_id,
date=start,
defaults={
'reported_usage': reported_usage.get(component_id),
'fixed_usage': fixed_usage.get(component_id),
},
)
@shared_task(name='waldur_mastermind.marketplace.calculate_usage_for_current_month')
def calculate_usage_for_current_month():
start = invoice_utils.get_current_month_start()
end = invoice_utils.get_current_month_end()
scopes = []
for customer in structure_models.Customer.objects.all():
scopes.append(customer)
for project in customer.projects.all():
scopes.append(project)
for scope in scopes:
calculate_usage_for_scope(start, end, scope)
@shared_task(name='waldur_mastermind.marketplace.send_notifications_about_usages')
def send_notifications_about_usages():
for warning in utils.get_info_about_missing_usage_reports():
customer = warning['customer']
emails = [owner.email for owner in customer.get_owners()]
warning['public_resources_url'] = utils.get_public_resources_url(customer)
if customer.serviceprovider.enable_notifications and emails:
core_utils.broadcast_mail(
'marketplace', 'notification_usages', warning, emails
)
@shared_task
def terminate_resource(serialized_resource, serialized_user):
resource = core_utils.deserialize_instance(serialized_resource)
user = core_utils.deserialize_instance(serialized_user)
view = views.ResourceViewSet.as_view({'post': 'terminate'})
response = create_request(view, user, {}, uuid=resource.uuid.hex)
if response.status_code != status.HTTP_200_OK:
raise exceptions.ResourceTerminateException(response.rendered_content)
@shared_task(
name='waldur_mastermind.marketplace.terminate_resources_if_project_end_date_has_been_reached'
)
def terminate_resources_if_project_end_date_has_been_reached():
expired_projects = structure_models.Project.objects.exclude(
end_date__isnull=True
).filter(end_date__lte=timezone.datetime.today())
for project in expired_projects:
resources = models.Resource.objects.filter(project=project).filter(
state__in=(models.Resource.States.OK, models.Resource.States.ERRED)
)
if resources:
utils.schedule_resources_termination(resources)
else:
project.delete()
@shared_task(name='waldur_mastermind.marketplace.notify_about_stale_resource')
def notify_about_stale_resource():
if not settings.WALDUR_MARKETPLACE['ENABLE_STALE_RESOURCE_NOTIFICATIONS']:
return
today = datetime.datetime.today()
prev_1 = today - relativedelta(months=1)
prev_2 = today - relativedelta(months=2)
items = invoices_models.InvoiceItem.objects.filter(
Q(invoice__month=today.month, invoice__year=today.year,)
| Q(invoice__month=prev_1.month, invoice__year=prev_1.year)
| Q(invoice__month=prev_2.month, invoice__year=prev_2.year)
)
actual_resources_ids = []
for item in items:
if item.price:
actual_resources_ids.append(item.resource.id)
resources = (
models.Resource.objects.exclude(id__in=actual_resources_ids)
.exclude(
Q(state=models.Resource.States.TERMINATED)
| Q(state=models.Resource.States.TERMINATING)
| Q(state=models.Resource.States.CREATING)
)
.exclude(offering__billable=False)
)
user_resources = collections.defaultdict(list)
for resource in resources:
owners = resource.project.customer.get_owners().exclude(email='')
resource_url = core_utils.format_homeport_link(
'/projects/{project_uuid}/marketplace-project-resource-details/{resource_uuid}/',
project_uuid=resource.project.uuid.hex,
resource_uuid=resource.uuid.hex,
)
for user in owners:
user_resources[user.email].append(
{'resource': resource, 'resource_url': resource_url}
)
for key, value in user_resources.items():
core_utils.broadcast_mail(
'marketplace',
'notification_about_stale_resources',
{'resources': value},
[key],
)
@shared_task(
name='waldur_mastermind.marketplace.terminate_resource_if_its_end_date_has_been_reached'
)
def terminate_resource_if_its_end_date_has_been_reached():
expired_resources = models.Resource.objects.exclude(
end_date__isnull=True,
state__in=(
models.Resource.States.TERMINATED,
models.Resource.States.TERMINATING,
),
).filter(end_date__lte=timezone.datetime.today())
utils.schedule_resources_termination(expired_resources)
@shared_task
def notify_about_resource_termination(resource_uuid, user_uuid, is_staff_action=None):
resource = models.Resource.objects.get(uuid=resource_uuid)
user = User.objects.get(uuid=user_uuid)
admin_emails = set(
resource.project.get_users(structure_models.ProjectRole.ADMINISTRATOR)
.exclude(email='')
.values_list('email', flat=True)
)
manager_emails = set(
resource.project.get_users(structure_models.ProjectRole.MANAGER)
.exclude(email='')
.values_list('email', flat=True)
)
emails = admin_emails | manager_emails
resource_url = core_utils.format_homeport_link(
'/projects/{project_uuid}/marketplace-project-resource-details/{resource_uuid}/',
project_uuid=resource.project.uuid.hex,
resource_uuid=resource.uuid.hex,
)
context = {'resource': resource, 'user': user, 'resource_url': resource_url}
if is_staff_action:
core_utils.broadcast_mail(
'marketplace',
'marketplace_resource_terminatate_scheduled_staff',
context,
emails,
)
else:
core_utils.broadcast_mail(
'marketplace', 'marketplace_resource_terminatate_scheduled', context, emails
)
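# Usage sketch (illustrative, not part of the original module): tasks decorated
# with @shared_task are meant to be queued through Celery rather than called
# directly, e.g.
#   notify_about_resource_termination.delay(resource.uuid.hex, user.uuid.hex, is_staff_action=False)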
|
py | 1a4c1bd3598edaab07b725b5386ce11b5f998ebe | #
# Copyright (C) [2020] Futurewei Technologies, Inc.
#
# FORCE-RISCV is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the License for the specific language governing permissions and
# limitations under the License.
#
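# The dictionaries below describe RISC-V floating-point CSRs for the register
# database: `fcsr` (index 0x3) packs the accrued exception flags in bits 0-4 and
# the rounding mode FRM in bits 5-7, while `fflags` (0x1) and `frm` (0x2) expose
# the same bits as separate registers.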
new_registers = [
{
"target": "system",
"register": "fcsr",
"size": 64,
"physical_register": "fcsr",
"index": "0x3",
"fields": [
{"field": "WPRI_VAR", "shift": 8, "size": 56},
{"field": "FRM", "shift": 5, "size": 3},
{"field": "NZ", "shift": 4, "size": 1},
{"field": "DZ", "shift": 3, "size": 1},
{"field": "OF", "shift": 2, "size": 1},
{"field": "UF", "shift": 1, "size": 1},
{"field": "NX", "shift": 0, "size": 1},
],
"choice": {
"name": "fcsr",
"value": "0x3",
"weight": "0",
"description": "URW; Floating-point control and "
"status register.",
},
}
]
changed_registers = [
{
"target": "system",
"register": "fflags",
"size": 64,
"physical_register": "fflags",
"index": "0x1",
"fields": [
{"field": "WPRI_VAR", "shift": 5, "size": 59},
{"field": "NZ", "shift": 4, "size": 1},
{"field": "DZ", "shift": 3, "size": 1},
{"field": "OF", "shift": 2, "size": 1},
{"field": "UF", "shift": 1, "size": 1},
{"field": "NX", "shift": 0, "size": 1},
],
},
{
"target": "system",
"register": "frm",
"size": 64,
"physical_register": "frm",
"index": "0x2",
"fields": [
{"field": "WPRI_VAR", "shift": 8, "size": 56},
{"field": "WPRI_VAR", "shift": 0, "size": 5},
{"field": "FRM", "shift": 5, "size": 3},
],
},
{
"target": "system",
"register": "mscratch",
"size": 64,
"physical_register": "mscratch",
"index": "0x340",
"fields": [{"field": "MSCRATCH", "shift": 0, "size": 64}],
},
]
delete_register_choices = [{"name": "mstatus_hyp"}]
|
py | 1a4c1bffc43d00d5572716b32b9b650d7b9f40cc | import requests
from django.contrib.gis.geos import Point
from georiviere.observations.models import Station, StationProfile, Parameter, ParameterTracking, Unit
from . import BaseImportCommand
class Command(BaseImportCommand):
help = "Import physico-chemical quality stations from Hub'Eau API"
api_url = "https://hubeau.eaufrance.fr/api/v1/qualite_rivieres/station_pc"
api_analyse_pc_url = "https://hubeau.eaufrance.fr/api/v1/qualite_rivieres/analyse_pc"
def create_or_update_stations(self, results, verbosity, with_parameters=False):
"""Create or update stations from results"""
station_profile, station_profile_created = StationProfile.objects.get_or_create(
code='PCQUAL'
)
if verbosity >= 2:
if station_profile_created:
self.stdout.write('Created station profile {0}'.format(station_profile))
for station in results:
station_obj, station_created = Station.objects.update_or_create(
code=station['code_station'],
defaults={
'label': station['libelle_station'] or "",
'station_uri': station['uri_station'] or "",
'geom': Point(
station['coordonnee_x'],
station['coordonnee_y'],
srid=2154  # EPSG:2154 (Lambert-93); the SRID must be an int, not a string
),
'hardness': station['durete'],
}
)
station_obj.station_profiles.add(station_profile)
if verbosity >= 2:
if station_created:
self.stdout.write('Created station {0}'.format(station_obj))
else:
self.stdout.write('Updated station {0}'.format(station_obj))
if with_parameters:
# Get parameters from analyse_pc API endpoint
payload = {
'format': 'json',
'size': 50,
'code_station': station_obj.code,
}
response = requests.get(self.api_analyse_pc_url, params=payload)
response_content = response.json()
analysepc_data = response_content['data']
for measure in analysepc_data:
# Create the Unit and Parameter records for this measure
unit_obj, unit_created = Unit.objects.get_or_create(
code=measure['code_unite'],
defaults={
'label': measure['symbole_unite'],
'symbol': measure['symbole_unite'],
}
)
parameter_obj, parameter_created = Parameter.objects.get_or_create(
code=measure['code_parametre'],
defaults={
'label': measure['libelle_parametre'],
'unit': unit_obj,
}
)
parameter_tracking, parameter_tracking_created = ParameterTracking.objects.get_or_create(
station=station_obj,
parameter=parameter_obj,
defaults={
'label': measure['libelle_parametre'],
'measure_frequency': "",
'transmission_frequency': "",
'data_availability': ParameterTracking.DataAvailabilityChoice.ONLINE,
}
)
if verbosity >= 2 and parameter_tracking_created:
self.stdout.write('Added parameter {0}'.format(parameter_tracking))
|
py | 1a4c1c48b78fb43dcc9e8199465106c3d6f3fc1a | #!/usr/bin/env python3
"""
REQUIRES simplejson
"""
import datetime
import simplejson as json
from .LogPrimFactory import LogPrimFactory
def datetimeconverter(o):
if isinstance(o, datetime.datetime):
return o.__str__()
class SimpleJSONLogPrimFactory(LogPrimFactory):
"""
Simple JSON Log Primitive Factory.
Handles datetime objects, Decimals and Iterables
Log Format: JSON (from base dictionary object)
Functions
* logObj - json dump the base logObj output
* ... LogPrimFactories
"""
def logObj(self, *args, **kwargs):
"""
Take the base class output (a dictionary) and use json.dumps to pass back stringified JSON
"""
return json.dumps(
super().logObj(*args, **kwargs)
, default=datetimeconverter
, use_decimal=True
, iterable_as_array=True
)
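# Illustrative sketch (not part of the original module), assuming the base
# logObj() returns a plain dict: the simplejson options above accept values the
# stdlib json module rejects, e.g.
#   json.dumps({"ts": datetime.datetime(2020, 1, 1), "tags": {"a"}},
#              default=datetimeconverter, iterable_as_array=True)
#   -> '{"ts": "2020-01-01 00:00:00", "tags": ["a"]}'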
|
py | 1a4c1c61ed0b8d524e17e03d38d5fc4801cfbf31 | """Doby import build testing"""
import logging
from doby import utils
def build_import(config):
"""Get the required packages and make requirements and import lists"""
if "requirements" in config.keys():
requirements = config["requirements"]
else:
requirements = {}
logging.info("Generating imports: %i", len(requirements))
# Create arrays
requirement_list = ["requests"]
import_list = ["import logging", "import requests"]
for requirement in requirements:
# Add each import into file imports
logging.info("Adding %s to imports", requirement)
import_list.append(f"import {requirement}")
if utils.key_exists_get_value("builtin", requirements[requirement]):
# Built in don't need adding to requirements.txt
# TODO allow requests version pinning
logging.info("%s is built in", requirement)
else:
# Combine all the strings (if not None)
if all([
utils.key_exists("operator", requirements[requirement]),
utils.key_exists("version", requirements[requirement]),
]):
pinned_requirement = (requirement +
requirements[requirement]["operator"] +
requirements[requirement]["version"])
logging.info("Adding pinned dependency %s to requirements",
pinned_requirement)
requirement_list.append(pinned_requirement)
else:
# Add just the name
logging.info("Adding dependency %s to requirements",
requirement)
requirement_list.append(requirement)
# Add extra line for formatting
import_list.append("")
return requirement_list, import_list
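# Hypothetical example (names invented for illustration) of how build_import
# interprets a config, assuming utils.key_exists/key_exists_get_value behave as
# their names suggest:
#   config = {"requirements": {"json": {"builtin": True},
#                              "pandas": {"operator": "==", "version": "1.3.0"}}}
#   build_import(config)
#   -> (["requests", "pandas==1.3.0"],
#       ["import logging", "import requests", "import json", "import pandas", ""])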
def get_clean_non_built_ins(config):
"""Returns a list of non built in functions"""
logging.info("Generating clean non built-in imports")
# Create arrays
requirement_list = ["reqrest"]
for requirement in config["requirements"]:
if not utils.key_exists_get_value("builtin",
config["requirements"][requirement]):
# Not built in
requirement_list.append(requirement)
return requirement_list
|
py | 1a4c1d16775aefd039a1cdc8bd0d476610cf57cf | from . import contest_problem
from . import contest_ranklist
from . import contest_statistic
from . import contest_submissions
from . import contest
from . import contestadd
from . import contestlist
from . import db
from . import home
from . import login
from . import modules
from . import newsubmit
from . import problem_ranklist
from . import problem_statistic
from . import problem
from . import problemadd
from . import problemedit
from . import problemdel
from . import problemlist
from . import record
from . import register
from . import status
from . import useredit
from . import userhome
from . import tool_total_ac_submit_recalculate
|
py | 1a4c1d5517a747e7f18d6b25b42002be24fa7914 | # Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
Methods for generating QAOA cost Hamiltonians corresponding to
different optimization problems.
"""
from typing import Iterable, Union
import networkx as nx
import retworkx as rx
import pennylane as qml
from pennylane import qaoa
########################
# Hamiltonian components
def bit_driver(wires: Union[Iterable, qaoa.Wires], b: int):
r"""Returns the bit-driver cost Hamiltonian.
This Hamiltonian is defined as:
.. math:: H \ = \ (-1)^{b + 1} \displaystyle\sum_{i} Z_i
where :math:`Z_i` is the Pauli-Z operator acting on the
:math:`i`-th wire and :math:`b \ \in \ \{0, \ 1\}`. This Hamiltonian is often used when
constructing larger QAOA cost Hamiltonians.
Args:
wires (Iterable or Wires): The wires on which the Hamiltonian acts
b (int): Either :math:`0` or :math:`1`. Determines whether the Hamiltonian assigns
lower energies to bitstrings with a majority of bits being :math:`0` or
a majority of bits being :math:`1`, respectively.
Returns:
.Hamiltonian:
**Example**
>>> wires = range(3)
>>> hamiltonian = qaoa.bit_driver(wires, 1)
>>> print(hamiltonian)
(1) [Z0]
+ (1) [Z1]
+ (1) [Z2]
"""
if b == 0:
coeffs = [-1 for _ in wires]
elif b == 1:
coeffs = [1 for _ in wires]
else:
raise ValueError(f"'b' must be either 0 or 1, got {b}")
ops = [qml.PauliZ(w) for w in wires]
return qml.Hamiltonian(coeffs, ops)
def edge_driver(graph: Union[nx.Graph, rx.PyGraph], reward: list):
r"""Returns the edge-driver cost Hamiltonian.
Given some graph, :math:`G` with each node representing a wire, and a binary
colouring where each node/wire is assigned either :math:`|0\rangle` or :math:`|1\rangle`, the edge driver
cost Hamiltonian will assign a lower energy to edges represented by qubit states with endpoint colourings
supplied in ``reward``.
For instance, if ``reward`` is ``["11"]``, then edges
with both endpoints coloured as ``1`` (the state :math:`|11\rangle`) will be assigned a lower energy, while
the other colourings (``"00"``, ``"10"``, and ``"01"`` corresponding to states
:math:`|00\rangle`, :math:`|10\rangle`, and :math:`|01\rangle`, respectively) will be assigned a higher energy.
See usage details for more information.
Args:
graph (nx.Graph or rx.PyGraph): The graph on which the Hamiltonian is defined
reward (list[str]): The list of two-bit bitstrings that are assigned a lower energy by the Hamiltonian
Returns:
.Hamiltonian:
**Example**
>>> import networkx as nx
>>> graph = nx.Graph([(0, 1), (1, 2)])
>>> hamiltonian = qaoa.edge_driver(graph, ["11", "10", "01"])
>>> print(hamiltonian)
(0.25) [Z0]
+ (0.25) [Z1]
+ (0.25) [Z1]
+ (0.25) [Z2]
+ (0.25) [Z0 Z1]
+ (0.25) [Z1 Z2]
>>> import retworkx as rx
>>> graph = rx.PyGraph()
>>> graph.add_nodes_from([0, 1, 2])
>>> graph.add_edges_from([(0, 1,""), (1,2,"")])
>>> hamiltonian = qaoa.edge_driver(graph, ["11", "10", "01"])
>>> print(hamiltonian)
(0.25) [Z0]
+ (0.25) [Z1]
+ (0.25) [Z1]
+ (0.25) [Z2]
+ (0.25) [Z0 Z1]
+ (0.25) [Z1 Z2]
In the above example, ``"11"``, ``"10"``, and ``"01"`` are assigned a lower
energy than ``"00"``. For example, a quick calculation of expectation values gives us:
.. math:: \langle 000 | H | 000 \rangle \ = \ 1.5
.. math:: \langle 100 | H | 100 \rangle \ = \ 0.5
.. math:: \langle 110 | H | 110\rangle \ = \ -0.5
In the first example, both vertex pairs are not in ``reward``. In the second example, one pair is in ``reward`` and
the other is not. Finally, in the third example, both pairs are in ``reward``.
.. details::
:title: Usage Details
The goal of many combinatorial problems that can be solved with QAOA is to
find a `Graph colouring <https://en.wikipedia.org/wiki/Graph_coloring>`__ of some supplied
graph :math:`G`, that minimizes some cost function. With QAOA, it is natural to consider the class
of graph colouring problems that only admit two colours, as we can easily encode these two colours
using the :math:`|1\rangle` and :math:`|0\rangle` states of qubits. Therefore, given
some graph :math:`G`, each edge of the graph can be described by a pair of qubits, :math:`|00\rangle`,
:math:`|01\rangle`, :math:`|10\rangle`, or :math:`|11\rangle`, corresponding to the colourings of its endpoints.
When constructing QAOA cost functions, one must "penalize" certain states of the graph, and "reward"
others, by assigning higher and lower energies to these respective configurations. Given a set of vertex-colour
pairs (which each describe a possible state of a graph edge), the ``edge_driver()``
function outputs a Hamiltonian that rewards the pairs in the set, and penalizes the others.
For example, given the reward set: :math:`\{|00\rangle, \ |01\rangle, \ |10\rangle\}` and the graph :math:`G`,
the ``edge_driver()`` function will output the following Hamiltonian:
.. math:: H \ = \ \frac{1}{4} \displaystyle\sum_{(i, j) \in E(G)} \big( Z_{i} Z_{j} \ - \ Z_{i} \ - \ Z_{j} \big)
where :math:`E(G)` is the set of edges of :math:`G`, and :math:`Z_i` is the Pauli-Z operator acting on the
:math:`i`-th wire. As can be checked, this Hamiltonian assigns an energy of :math:`-1/4` to the states
:math:`|00\rangle`, :math:`|01\rangle` and :math:`|10\rangle`, and an energy of :math:`3/4` to the state
:math:`|11\rangle`.
.. Note::
``reward`` must always contain both :math:`|01\rangle` and :math:`|10\rangle`, or neither of the two.
Within an undirected graph, there is no notion of "order"
of edge endpoints, so these two states are effectively the same. Therefore, there is no well-defined way to
penalize one and reward the other.
.. Note::
The absolute difference in energy between colourings in ``reward`` and colourings in its
complement is always :math:`1`.
"""
allowed = ["00", "01", "10", "11"]
if not all(e in allowed for e in reward):
raise ValueError("Encountered invalid entry in 'reward', expected 2-bit bitstrings.")
if "01" in reward and "10" not in reward or "10" in reward and "01" not in reward:
raise ValueError(
"'reward' cannot contain either '10' or '01', must contain neither or both."
)
if not isinstance(graph, (nx.Graph, rx.PyGraph)):
raise ValueError(
f"Input graph must be a nx.Graph or rx.PyGraph, got {type(graph).__name__}"
)
coeffs = []
ops = []
is_rx = isinstance(graph, rx.PyGraph)
graph_nodes = graph.nodes()
graph_edges = sorted(graph.edge_list()) if is_rx else graph.edges
# In RX each node is assigned to an integer index starting from 0;
# thus, we use the following lambda function to get node-values.
get_nvalue = lambda i: graph_nodes[i] if is_rx else i
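# The reward set is reduced to a single representative colouring below: "01" is
# dropped (it is equivalent to "10" on an undirected edge), and if two colourings
# remain, the complementary colouring is penalized instead by flipping the sign
# of the coefficients.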
if len(reward) == 0 or len(reward) == 4:
coeffs = [1 for _ in graph_nodes]
ops = [qml.Identity(v) for v in graph_nodes]
else:
reward = list(set(reward) - {"01"})
sign = -1
if len(reward) == 2:
reward = list({"00", "10", "11"} - set(reward))
sign = 1
reward = reward[0]
if reward == "00":
for e in graph_edges:
coeffs.extend([0.25 * sign, 0.25 * sign, 0.25 * sign])
ops.extend(
[
qml.PauliZ(get_nvalue(e[0])) @ qml.PauliZ(get_nvalue(e[1])),
qml.PauliZ(get_nvalue(e[0])),
qml.PauliZ(get_nvalue(e[1])),
]
)
if reward == "10":
for e in graph_edges:
coeffs.append(-0.5 * sign)
ops.append(qml.PauliZ(get_nvalue(e[0])) @ qml.PauliZ(get_nvalue(e[1])))
if reward == "11":
for e in graph_edges:
coeffs.extend([0.25 * sign, -0.25 * sign, -0.25 * sign])
ops.extend(
[
qml.PauliZ(get_nvalue(e[0])) @ qml.PauliZ(get_nvalue(e[1])),
qml.PauliZ(get_nvalue(e[0])),
qml.PauliZ(get_nvalue(e[1])),
]
)
return qml.Hamiltonian(coeffs, ops)
#######################
# Optimization problems
def maxcut(graph: Union[nx.Graph, rx.PyGraph]):
r"""Returns the QAOA cost Hamiltonian and the recommended mixer corresponding to the
MaxCut problem, for a given graph.
The goal of the MaxCut problem for a particular graph is to find a partition of nodes into two sets,
such that the number of edges in the graph with endpoints in different sets is maximized. Formally,
we wish to find the `cut of the graph <https://en.wikipedia.org/wiki/Cut_(graph_theory)>`__ such
that the number of edges crossing the cut is maximized.
The MaxCut cost Hamiltonian is defined as:
.. math:: H_C \ = \ \frac{1}{2} \displaystyle\sum_{(i, j) \in E(G)} \big( Z_i Z_j \ - \ \mathbb{I} \big),
where :math:`G` is a graph, :math:`\mathbb{I}` is the identity, and :math:`Z_i` and :math:`Z_j` are
the Pauli-Z operators on the :math:`i`-th and :math:`j`-th wire respectively.
The mixer Hamiltonian returned from :func:`~qaoa.maxcut` is :func:`~qaoa.x_mixer` applied to all wires.
.. note::
**Recommended initialization circuit:**
Even superposition over all basis states
Args:
graph (nx.Graph or rx.PyGraph): a graph defining the pairs of wires on which each term of the Hamiltonian acts
Returns:
(.Hamiltonian, .Hamiltonian): The cost and mixer Hamiltonians
**Example**
>>> import networkx as nx
>>> graph = nx.Graph([(0, 1), (1, 2)])
>>> cost_h, mixer_h = qml.qaoa.maxcut(graph)
>>> print(cost_h)
(-1.0) [I0]
+ (0.5) [Z0 Z1]
+ (0.5) [Z1 Z2]
>>> print(mixer_h)
(1) [X0]
+ (1) [X1]
+ (1) [X2]
>>> import retworkx as rx
>>> graph = rx.PyGraph()
>>> graph.add_nodes_from([0, 1, 2])
>>> graph.add_edges_from([(0, 1,""), (1,2,"")])
>>> cost_h, mixer_h = qml.qaoa.maxcut(graph)
>>> print(cost_h)
(-1.0) [I0]
+ (0.5) [Z0 Z1]
+ (0.5) [Z1 Z2]
>>> print(mixer_h)
(1) [X0]
+ (1) [X1]
+ (1) [X2]
"""
if not isinstance(graph, (nx.Graph, rx.PyGraph)):
raise ValueError(
f"Input graph must be a nx.Graph or rx.PyGraph, got {type(graph).__name__}"
)
is_rx = isinstance(graph, rx.PyGraph)
graph_nodes = graph.nodes()
graph_edges = sorted(graph.edge_list()) if is_rx else graph.edges
# In RX each node is assigned to an integer index starting from 0;
# thus, we use the following lambda function to get node-values.
get_nvalue = lambda i: graph_nodes[i] if is_rx else i
identity_h = qml.Hamiltonian(
[-0.5 for e in graph_edges],
[qml.Identity(get_nvalue(e[0])) @ qml.Identity(get_nvalue(e[1])) for e in graph_edges],
)
H = edge_driver(graph, ["10", "01"]) + identity_h
# store the valuable information that all observables are in one commuting group
H.grouping_indices = [list(range(len(H.ops)))]
return (H, qaoa.x_mixer(graph_nodes))
def max_independent_set(graph: Union[nx.Graph, rx.PyGraph], constrained: bool = True):
r"""For a given graph, returns the QAOA cost Hamiltonian and the recommended mixer corresponding to the Maximum Independent Set problem.
Given some graph :math:`G`, an independent set is a set of vertices such that no pair of vertices in the set
share a common edge. The Maximum Independent Set problem, is the problem of finding the largest such set.
Args:
graph (nx.Graph or rx.PyGraph): a graph whose edges define the pairs of vertices on which each term of the Hamiltonian acts
constrained (bool): specifies the variant of QAOA that is performed (constrained or unconstrained)
Returns:
(.Hamiltonian, .Hamiltonian): The cost and mixer Hamiltonians
.. details::
:title: Usage Details
There are two variations of QAOA for this problem, constrained and unconstrained:
**Constrained**
.. note::
This method of constrained QAOA was introduced by
`Hadfield, Wang, Gorman, Rieffel, Venturelli, and Biswas (2019) <https://doi.org/10.3390/a12020034>`__.
The Maximum Independent Set cost Hamiltonian for constrained QAOA is defined as:
.. math:: H_C \ = \ \displaystyle\sum_{v \in V(G)} Z_{v},
where :math:`V(G)` is the set of vertices of the input graph, and :math:`Z_i` is the Pauli-Z
operator applied to the :math:`i`-th vertex.
The returned mixer Hamiltonian is :func:`~qaoa.bit_flip_mixer` applied to :math:`G`.
.. note::
**Recommended initialization circuit:**
Each wire in the :math:`|0\rangle` state.
**Unconstrained**
The Maximum Independent Set cost Hamiltonian for unconstrained QAOA is defined as:
.. math:: H_C \ = \ 3 \sum_{(i, j) \in E(G)} (Z_i Z_j \ - \ Z_i \ - \ Z_j) \ + \
\displaystyle\sum_{i \in V(G)} Z_i
where :math:`E(G)` is the set of edges of :math:`G`, :math:`V(G)` is the set of vertices,
and :math:`Z_i` is the Pauli-Z operator acting on the :math:`i`-th vertex.
The returned mixer Hamiltonian is :func:`~qaoa.x_mixer` applied to all wires.
.. note::
**Recommended initialization circuit:**
Even superposition over all basis states.
"""
if not isinstance(graph, (nx.Graph, rx.PyGraph)):
raise ValueError(
f"Input graph must be a nx.Graph or rx.PyGraph, got {type(graph).__name__}"
)
graph_nodes = graph.nodes()
if constrained:
cost_h = bit_driver(graph_nodes, 1)
cost_h.grouping_indices = [list(range(len(cost_h.ops)))]
return (cost_h, qaoa.bit_flip_mixer(graph, 0))
cost_h = 3 * edge_driver(graph, ["10", "01", "00"]) + bit_driver(graph_nodes, 1)
mixer_h = qaoa.x_mixer(graph_nodes)
# store the valuable information that all observables are in one commuting group
cost_h.grouping_indices = [list(range(len(cost_h.ops)))]
return (cost_h, mixer_h)
def min_vertex_cover(graph: Union[nx.Graph, rx.PyGraph], constrained: bool = True):
r"""Returns the QAOA cost Hamiltonian and the recommended mixer corresponding to the Minimum Vertex Cover problem,
for a given graph.
To solve the Minimum Vertex Cover problem, we attempt to find the smallest
`vertex cover <https://en.wikipedia.org/wiki/Vertex_cover>`__ of a graph --- a collection of vertices such that
every edge in the graph has one of the vertices as an endpoint.
Args:
graph (nx.Graph or rx.PyGraph): a graph whose edges define the pairs of vertices on which each term of the Hamiltonian acts
constrained (bool): specifies the variant of QAOA that is performed (constrained or unconstrained)
Returns:
(.Hamiltonian, .Hamiltonian): The cost and mixer Hamiltonians
.. details::
:title: Usage Details
There are two variations of QAOA for this problem, constrained and unconstrained:
**Constrained**
.. note::
This method of constrained QAOA was introduced by Hadfield, Wang, Gorman, Rieffel, Venturelli, and Biswas
in arXiv:1709.03489.
The Minimum Vertex Cover cost Hamiltonian for constrained QAOA is defined as:
.. math:: H_C \ = \ - \displaystyle\sum_{v \in V(G)} Z_{v},
where :math:`V(G)` is the set of vertices of the input graph, and :math:`Z_i` is the Pauli-Z operator
applied to the :math:`i`-th vertex.
The returned mixer Hamiltonian is :func:`~qaoa.bit_flip_mixer` applied to :math:`G`.
.. note::
**Recommended initialization circuit:**
Each wire in the :math:`|1\rangle` state.
**Unconstrained**
The Minimum Vertex Cover cost Hamiltonian for unconstrained QAOA is defined as:
.. math:: H_C \ = \ 3 \sum_{(i, j) \in E(G)} (Z_i Z_j \ + \ Z_i \ + \ Z_j) \ - \
\displaystyle\sum_{i \in V(G)} Z_i
where :math:`E(G)` is the set of edges of :math:`G`, :math:`V(G)` is the set of vertices,
and :math:`Z_i` is the Pauli-Z operator acting on the :math:`i`-th vertex.
The returned mixer Hamiltonian is :func:`~qaoa.x_mixer` applied to all wires.
.. note::
**Recommended initialization circuit:**
Even superposition over all basis states.
"""
if not isinstance(graph, (nx.Graph, rx.PyGraph)):
raise ValueError(
f"Input graph must be a nx.Graph or rx.PyGraph, got {type(graph).__name__}"
)
graph_nodes = graph.nodes()
if constrained:
cost_h = bit_driver(graph_nodes, 0)
cost_h.grouping_indices = [list(range(len(cost_h.ops)))]
return (cost_h, qaoa.bit_flip_mixer(graph, 1))
cost_h = 3 * edge_driver(graph, ["11", "10", "01"]) + bit_driver(graph_nodes, 0)
mixer_h = qaoa.x_mixer(graph_nodes)
# store the valuable information that all observables are in one commuting group
cost_h.grouping_indices = [list(range(len(cost_h.ops)))]
return (cost_h, mixer_h)
def max_clique(graph: Union[nx.Graph, rx.PyGraph], constrained: bool = True):
r"""Returns the QAOA cost Hamiltonian and the recommended mixer corresponding to the Maximum Clique problem,
for a given graph.
The goal of Maximum Clique is to find the largest `clique <https://en.wikipedia.org/wiki/Clique_(graph_theory)>`__ of a
graph --- the largest subgraph such that all vertices are connected by an edge.
Args:
graph (nx.Graph or rx.PyGraph): a graph whose edges define the pairs of vertices on which each term of the Hamiltonian acts
constrained (bool): specifies the variant of QAOA that is performed (constrained or unconstrained)
Returns:
(.Hamiltonian, .Hamiltonian): The cost and mixer Hamiltonians
.. details::
:title: Usage Details
There are two variations of QAOA for this problem, constrained and unconstrained:
**Constrained**
.. note::
This method of constrained QAOA was introduced by Hadfield, Wang, Gorman, Rieffel, Venturelli, and Biswas
in arXiv:1709.03489.
The Maximum Clique cost Hamiltonian for constrained QAOA is defined as:
.. math:: H_C \ = \ \displaystyle\sum_{v \in V(G)} Z_{v},
where :math:`V(G)` is the set of vertices of the input graph, and :math:`Z_i` is the Pauli-Z operator
applied to the :math:`i`-th
vertex.
The returned mixer Hamiltonian is :func:`~qaoa.bit_flip_mixer` applied to :math:`\bar{G}`,
the complement of the graph.
.. note::
**Recommended initialization circuit:**
Each wire in the :math:`|0\rangle` state.
**Unconstrained**
The Maximum Clique cost Hamiltonian for unconstrained QAOA is defined as:
.. math:: H_C \ = \ 3 \sum_{(i, j) \in E(\bar{G})}
(Z_i Z_j \ - \ Z_i \ - \ Z_j) \ + \ \displaystyle\sum_{i \in V(G)} Z_i
where :math:`V(G)` is the set of vertices of the input graph :math:`G`, :math:`E(\bar{G})` is the set of
edges of the complement of :math:`G`, and :math:`Z_i` is the Pauli-Z operator applied to the
:math:`i`-th vertex.
The returned mixer Hamiltonian is :func:`~qaoa.x_mixer` applied to all wires.
.. note::
**Recommended initialization circuit:**
Even superposition over all basis states.
"""
if not isinstance(graph, (nx.Graph, rx.PyGraph)):
raise ValueError(
f"Input graph must be a nx.Graph or rx.PyGraph, got {type(graph).__name__}"
)
graph_nodes = graph.nodes()
graph_complement = (
rx.complement(graph) if isinstance(graph, rx.PyGraph) else nx.complement(graph)
)
if constrained:
cost_h = bit_driver(graph_nodes, 1)
cost_h.grouping_indices = [list(range(len(cost_h.ops)))]
return (cost_h, qaoa.bit_flip_mixer(graph_complement, 0))
cost_h = 3 * edge_driver(graph_complement, ["10", "01", "00"]) + bit_driver(graph_nodes, 1)
mixer_h = qaoa.x_mixer(graph_nodes)
# store the valuable information that all observables are in one commuting group
cost_h.grouping_indices = [list(range(len(cost_h.ops)))]
return (cost_h, mixer_h)
def max_weight_cycle(graph: Union[nx.Graph, rx.PyGraph, rx.PyDiGraph], constrained: bool = True):
r"""Returns the QAOA cost Hamiltonian and the recommended mixer corresponding to the
maximum-weighted cycle problem, for a given graph.
The maximum-weighted cycle problem is defined in the following way (see
`here <https://1qbit.com/whitepaper/arbitrage/>`__ for more details).
The product of weights of a subset of edges in a graph is given by
.. math:: P = \prod_{(i, j) \in E} [(c_{ij} - 1)x_{ij} + 1]
where :math:`E` are the edges of the graph, :math:`x_{ij}` is a binary number that selects
whether to include the edge :math:`(i, j)` and :math:`c_{ij}` is the corresponding edge weight.
Our objective is to maximize :math:`P`, subject to selecting the :math:`x_{ij}` so that
our subset of edges composes a `cycle <https://en.wikipedia.org/wiki/Cycle_(graph_theory)>`__.
Args:
graph (nx.Graph or rx.PyGraph or rx.PyDiGraph): the directed graph on which the Hamiltonians are defined
constrained (bool): specifies the variant of QAOA that is performed (constrained or unconstrained)
Returns:
(.Hamiltonian, .Hamiltonian, dict): The cost and mixer Hamiltonians, as well as a dictionary
mapping from wires to the graph's edges
.. details::
:title: Usage Details
There are two variations of QAOA for this problem, constrained and unconstrained:
**Constrained**
.. note::
This method of constrained QAOA was introduced by Hadfield, Wang, Gorman, Rieffel,
Venturelli, and Biswas in `arXiv:1709.03489 <https://arxiv.org/abs/1709.03489>`__.
The maximum weighted cycle cost Hamiltonian for constrained QAOA is
.. math:: H_C = H_{\rm loss}.
Here, :math:`H_{\rm loss}` is a loss Hamiltonian:
.. math:: H_{\rm loss} = \sum_{(i, j) \in E} Z_{ij}\log c_{ij}
where :math:`E` are the edges of the graph and :math:`Z_{ij}` is a qubit Pauli-Z matrix
acting upon the wire specified by the edge :math:`(i, j)` (see :func:`~.loss_hamiltonian`
for more details).
The returned mixer Hamiltonian is :func:`~.cycle_mixer` given by
.. math:: H_M = \frac{1}{4}\sum_{(i, j)\in E}
\left(\sum_{k \in V, k\neq i, k\neq j, (i, k) \in E, (k, j) \in E}
\left[X_{ij}X_{ik}X_{kj} +Y_{ij}Y_{ik}X_{kj} + Y_{ij}X_{ik}Y_{kj} - X_{ij}Y_{ik}Y_{kj}\right]
\right).
This mixer provides transitions between collections of cycles, i.e., any subset of edges
in :math:`E` such that all the graph's nodes :math:`V` have zero net flow
(see the :func:`~.net_flow_constraint` function).
.. note::
**Recommended initialization circuit:**
Your circuit must prepare a state that corresponds to a cycle (or a superposition
of cycles). Follow the example code below to see how this is done.
**Unconstrained**
The maximum weighted cycle cost Hamiltonian for unconstrained QAOA is defined as:
.. math:: H_C \ = H_{\rm loss} + 3 H_{\rm netflow} + 3 H_{\rm outflow}.
The netflow constraint Hamiltonian :func:`~.net_flow_constraint` is given by
.. math:: H_{\rm netflow} = \sum_{i \in V} \left((d_{i}^{\rm out} - d_{i}^{\rm in})\mathbb{I} -
\sum_{j, (i, j) \in E} Z_{ij} + \sum_{j, (j, i) \in E} Z_{ji} \right)^{2},
where :math:`d_{i}^{\rm out}` and :math:`d_{i}^{\rm in}` are
the outdegree and indegree, respectively, of node :math:`i`. It is minimized whenever a
subset of edges in :math:`E` results in zero net flow from each node in :math:`V`.
The outflow constraint Hamiltonian :func:`~.out_flow_constraint` is given by
.. math:: H_{\rm outflow} = \sum_{i\in V}\left(d_{i}^{out}(d_{i}^{out} - 2)\mathbb{I}
- 2(d_{i}^{out}-1)\sum_{j,(i,j)\in E}\hat{Z}_{ij} +
\left( \sum_{j,(i,j)\in E}\hat{Z}_{ij} \right)^{2}\right).
It is minimized whenever a subset of edges in :math:`E` results in an outflow of at most one
from each node in :math:`V`.
The returned mixer Hamiltonian is :func:`~.x_mixer` applied to all wires.
.. note::
**Recommended initialization circuit:**
Even superposition over all basis states.
**Example**
First set up a simple graph:
.. code-block:: python
import pennylane as qml
import numpy as np
import networkx as nx
a = np.random.random((4, 4))
np.fill_diagonal(a, 0)
g = nx.DiGraph(a)
The cost and mixer Hamiltonian as well as the mapping from wires to edges can be loaded
using:
>>> cost, mixer, mapping = qml.qaoa.max_weight_cycle(g, constrained=True)
Since we are using ``constrained=True``, we must ensure that the input state to the QAOA
algorithm corresponds to a cycle. Consider the mapping:
>>> mapping
{0: (0, 1),
1: (0, 2),
2: (0, 3),
3: (1, 0),
4: (1, 2),
5: (1, 3),
6: (2, 0),
7: (2, 1),
8: (2, 3),
9: (3, 0),
10: (3, 1),
11: (3, 2)}
A simple cycle is given by the edges ``(0, 1)`` and ``(1, 0)`` and corresponding wires
``0`` and ``3``. Hence, the state :math:`|100100000000\rangle` corresponds to a cycle and
can be prepared using :class:`~.BasisState` or simple :class:`~.PauliX` rotations on the
``0`` and ``3`` wires.
"""
if not isinstance(graph, (nx.Graph, rx.PyGraph, rx.PyDiGraph)):
raise ValueError(
f"Input graph must be a nx.Graph or rx.PyGraph or rx.PyDiGraph, got {type(graph).__name__}"
)
mapping = qaoa.cycle.wires_to_edges(graph)
if constrained:
cost_h = qaoa.cycle.loss_hamiltonian(graph)
cost_h.grouping_indices = [list(range(len(cost_h.ops)))]
return (cost_h, qaoa.cycle.cycle_mixer(graph), mapping)
cost_h = qaoa.cycle.loss_hamiltonian(graph) + 3 * (
qaoa.cycle.net_flow_constraint(graph) + qaoa.cycle.out_flow_constraint(graph)
)
mixer_h = qaoa.x_mixer(mapping.keys())
return (cost_h, mixer_h, mapping)
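# Minimal usage sketch (not part of the original module): the Hamiltonians built
# above plug into the standard QAOA layer pattern, e.g.
#   cost_h, mixer_h = maxcut(nx.Graph([(0, 1), (1, 2)]))
#   def qaoa_layer(gamma, alpha):
#       qaoa.cost_layer(gamma, cost_h)
#       qaoa.mixer_layer(alpha, mixer_h)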
|
py | 1a4c1dc4e889cd18c1e17546956797eefa2131a1 | # CSC 321, Assignment 4
#
# This is the main training file for the CycleGAN part of the assignment.
#
# Usage:
# ======
# To train with the default hyperparamters (saves results to samples_cyclegan/):
# python cycle_gan.py
#
# To train with cycle consistency loss (saves results to samples_cyclegan_cycle/):
# python cycle_gan.py --use_cycle_consistency_loss
#
#
# For optional experimentation:
# -----------------------------
# If you have a powerful computer (ideally with a GPU), then you can obtain better results by
# increasing the number of filters used in the generator and/or discriminator, as follows:
# python cycle_gan.py --g_conv_dim=64 --d_conv_dim=64
import os
import pdb
import pickle
import argparse
import warnings
warnings.filterwarnings("ignore")
# Torch imports
import torch
import torch.nn as nn
import torch.optim as optim
# Numpy & Scipy imports
import numpy as np
import scipy
import scipy.misc
# Local imports
import utils
from data_loader import get_emoji_loader
from models import CycleGenerator, DCDiscriminator
SEED = 11
# Set the random seed manually for reproducibility.
np.random.seed(SEED)
torch.manual_seed(SEED)
if torch.cuda.is_available():
torch.cuda.manual_seed(SEED)
def print_models(G_XtoY, G_YtoX, D_X, D_Y):
"""Prints model information for the generators and discriminators.
"""
print(" G_XtoY ")
print("---------------------------------------")
print(G_XtoY)
print("---------------------------------------")
print(" G_YtoX ")
print("---------------------------------------")
print(G_YtoX)
print("---------------------------------------")
print(" D_X ")
print("---------------------------------------")
print(D_X)
print("---------------------------------------")
print(" D_Y ")
print("---------------------------------------")
print(D_Y)
print("---------------------------------------")
def create_model(opts):
"""Builds the generators and discriminators.
"""
G_XtoY = CycleGenerator(conv_dim=opts.g_conv_dim, init_zero_weights=opts.init_zero_weights, batch_norm=not opts.disable_bn)
G_YtoX = CycleGenerator(conv_dim=opts.g_conv_dim, init_zero_weights=opts.init_zero_weights, batch_norm=not opts.disable_bn)
D_X = DCDiscriminator(conv_dim=opts.d_conv_dim)
D_Y = DCDiscriminator(conv_dim=opts.d_conv_dim)
print_models(G_XtoY, G_YtoX, D_X, D_Y)
if torch.cuda.is_available():
G_XtoY.cuda()
G_YtoX.cuda()
D_X.cuda()
D_Y.cuda()
print('Models moved to GPU.')
return G_XtoY, G_YtoX, D_X, D_Y
def checkpoint(iteration, G_XtoY, G_YtoX, D_X, D_Y, opts):
"""Saves the parameters of both generators G_YtoX, G_XtoY and discriminators D_X, D_Y.
"""
G_XtoY_path = os.path.join(opts.checkpoint_dir, 'G_XtoY.pkl')
G_YtoX_path = os.path.join(opts.checkpoint_dir, 'G_YtoX.pkl')
D_X_path = os.path.join(opts.checkpoint_dir, 'D_X.pkl')
D_Y_path = os.path.join(opts.checkpoint_dir, 'D_Y.pkl')
torch.save(G_XtoY.state_dict(), G_XtoY_path)
torch.save(G_YtoX.state_dict(), G_YtoX_path)
torch.save(D_X.state_dict(), D_X_path)
torch.save(D_Y.state_dict(), D_Y_path)
def load_checkpoint(opts):
"""Loads the generator and discriminator models from checkpoints.
"""
G_XtoY_path = os.path.join(opts.load, 'G_XtoY.pkl')
G_YtoX_path = os.path.join(opts.load, 'G_YtoX.pkl')
D_X_path = os.path.join(opts.load, 'D_X.pkl')
D_Y_path = os.path.join(opts.load, 'D_Y.pkl')
G_XtoY = CycleGenerator(conv_dim=opts.g_conv_dim, init_zero_weights=opts.init_zero_weights)
G_YtoX = CycleGenerator(conv_dim=opts.g_conv_dim, init_zero_weights=opts.init_zero_weights)
D_X = DCDiscriminator(conv_dim=opts.d_conv_dim)
D_Y = DCDiscriminator(conv_dim=opts.d_conv_dim)
G_XtoY.load_state_dict(torch.load(G_XtoY_path, map_location=lambda storage, loc: storage))
G_YtoX.load_state_dict(torch.load(G_YtoX_path, map_location=lambda storage, loc: storage))
D_X.load_state_dict(torch.load(D_X_path, map_location=lambda storage, loc: storage))
D_Y.load_state_dict(torch.load(D_Y_path, map_location=lambda storage, loc: storage))
if torch.cuda.is_available():
G_XtoY.cuda()
G_YtoX.cuda()
D_X.cuda()
D_Y.cuda()
print('Models moved to GPU.')
return G_XtoY, G_YtoX, D_X, D_Y
def merge_images(sources, targets, opts, k=10):
"""Creates a grid consisting of pairs of columns, where the first column in
each pair contains images source images and the second column in each pair
contains images generated by the CycleGAN from the corresponding images in
the first column.
"""
_, _, h, w = sources.shape
row = int(np.sqrt(opts.batch_size))
merged = np.zeros([3, row*h, row*w*2])
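# Note: the grid arithmetic assumes square images (h == w) and a batch size that
# is a perfect square; the column offsets below reuse h as the image width.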
for idx, (s, t) in enumerate(zip(sources, targets)):
i = idx // row
j = idx % row
merged[:, i*h:(i+1)*h, (j*2)*h:(j*2+1)*h] = s
merged[:, i*h:(i+1)*h, (j*2+1)*h:(j*2+2)*h] = t
return merged.transpose(1, 2, 0)
def save_samples(iteration, fixed_Y, fixed_X, G_YtoX, G_XtoY, opts):
"""Saves samples from both generators X->Y and Y->X.
"""
fake_X = G_YtoX(fixed_Y)
fake_Y = G_XtoY(fixed_X)
X, fake_X = utils.to_data(fixed_X), utils.to_data(fake_X)
Y, fake_Y = utils.to_data(fixed_Y), utils.to_data(fake_Y)
merged = merge_images(X, fake_Y, opts)
path = os.path.join(opts.sample_dir, 'sample-{:06d}-X-Y.png'.format(iteration))
scipy.misc.imsave(path, merged)
print('Saved {}'.format(path))
merged = merge_images(Y, fake_X, opts)
path = os.path.join(opts.sample_dir, 'sample-{:06d}-Y-X.png'.format(iteration))
scipy.misc.imsave(path, merged)
print('Saved {}'.format(path))
def training_loop(dataloader_X, dataloader_Y, test_dataloader_X, test_dataloader_Y, opts):
"""Runs the training loop.
* Saves checkpoint every opts.checkpoint_every iterations
* Saves generated samples every opts.sample_every iterations
"""
# Create generators and discriminators
if opts.load:
G_XtoY, G_YtoX, D_X, D_Y = load_checkpoint(opts)
else:
G_XtoY, G_YtoX, D_X, D_Y = create_model(opts)
g_params = list(G_XtoY.parameters()) + list(G_YtoX.parameters()) # Get generator parameters
d_params = list(D_X.parameters()) + list(D_Y.parameters()) # Get discriminator parameters
# Create optimizers for the generators and discriminators
g_optimizer = optim.Adam(g_params, opts.lr, [opts.beta1, opts.beta2])
d_optimizer = optim.Adam(d_params, opts.lr, [opts.beta1, opts.beta2])
iter_X = iter(dataloader_X)
iter_Y = iter(dataloader_Y)
test_iter_X = iter(test_dataloader_X)
test_iter_Y = iter(test_dataloader_Y)
# Get some fixed data from domains X and Y for sampling. These are images that are held
# constant throughout training, that allow us to inspect the model's performance.
fixed_X = utils.to_var(test_iter_X.next()[0])
fixed_Y = utils.to_var(test_iter_Y.next()[0])
iter_per_epoch = min(len(iter_X), len(iter_Y))
for iteration in range(1, opts.train_iters+1):
# Reset data_iter for each epoch
if iteration % iter_per_epoch == 0:
iter_X = iter(dataloader_X)
iter_Y = iter(dataloader_Y)
images_X, labels_X = iter_X.next()
images_X, labels_X = utils.to_var(images_X), utils.to_var(labels_X).long().squeeze()
images_Y, labels_Y = iter_Y.next()
images_Y, labels_Y = utils.to_var(images_Y), utils.to_var(labels_Y).long().squeeze()
# ============================================
# TRAIN THE DISCRIMINATORS
# ============================================
#########################################
## FILL THIS IN ##
#########################################
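# The discriminator objective below is the least-squares GAN loss: real images
# are pushed towards an output of 1 and generated images towards 0, i.e.
# D_X_loss = mean((D_X(x) - 1)^2) on real X and mean(D_X(G_YtoX(y))^2) on fakes.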
# Train with real images
d_optimizer.zero_grad()
# 1. Compute the discriminator losses on real images
#print(images_X.size()[0])
#print(images_X.size().shape)
inv_m = 1 / images_X.size()[0]
inv_n = 1 / images_Y.size()[0]
D_X_loss = torch.sum((D_X(images_X) - 1)**2) * inv_m
D_Y_loss = torch.sum((D_Y(images_Y) - 1)**2) * inv_n
d_real_loss = D_X_loss + D_Y_loss
d_real_loss.backward()
d_optimizer.step()
# Train with fake images
d_optimizer.zero_grad()
# 2. Generate fake images that look like domain X based on real images in domain Y
fake_X = G_YtoX(images_Y)
# 3. Compute the loss for D_X
D_X_loss = inv_n * torch.sum(D_X(fake_X)**2)
# 4. Generate fake images that look like domain Y based on real images in domain X
fake_Y = G_XtoY(images_X)
# 5. Compute the loss for D_Y
D_Y_loss = inv_m * torch.sum(D_Y(fake_Y)**2)
d_fake_loss = D_X_loss + D_Y_loss
d_fake_loss.backward()
d_optimizer.step()
# =========================================
# TRAIN THE GENERATORS
# =========================================
#########################################
## FILL THIS IN: Y--X-->Y CYCLE ##
#########################################
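# Generator objective: least-squares loss pushing D_X(fake_X) towards 1, plus an
# optional cycle-consistency term ||Y - G_XtoY(G_YtoX(Y))||^2 that penalizes
# reconstructions which drift from the original images.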
g_optimizer.zero_grad()
# 1. Generate fake images that look like domain X based on real images in domain Y
fake_X = G_YtoX(images_Y)
# 2. Compute the generator loss based on domain X
g_loss = inv_n * torch.sum((D_X(fake_X) - 1)**2)
if opts.use_cycle_consistency_loss:
reconstructed_Y = G_XtoY(fake_X)
# 3. Compute the cycle consistency loss (the reconstruction loss)
cycle_consistency_loss = inv_n * torch.sum((images_Y - reconstructed_Y)**2)
g_loss += cycle_consistency_loss
g_loss.backward()
g_optimizer.step()
#########################################
## FILL THIS IN: X--Y-->X CYCLE ##
#########################################
g_optimizer.zero_grad()
# 1. Generate fake images that look like domain Y based on real images in domain X
fake_Y = G_XtoY(images_X)
# 2. Compute the generator loss based on domain Y
g_loss = inv_m * torch.sum((D_Y(fake_Y) - 1)**2)
if opts.use_cycle_consistency_loss:
reconstructed_X = G_YtoX(fake_Y)
# 3. Compute the cycle consistency loss (the reconstruction loss)
cycle_consistency_loss = inv_m * torch.sum((images_X - reconstructed_X)**2)
g_loss += cycle_consistency_loss
g_loss.backward()
g_optimizer.step()
# Print the log info
if iteration % opts.log_step == 0:
print('Iteration [{:5d}/{:5d}] | d_real_loss: {:6.4f} | d_Y_loss: {:6.4f} | d_X_loss: {:6.4f} | '
'd_fake_loss: {:6.4f} | g_loss: {:6.4f}'.format(
iteration, opts.train_iters, d_real_loss.data[0], D_Y_loss.data[0],
D_X_loss.data[0], d_fake_loss.data[0], g_loss.data[0]))
# Save the generated samples
if iteration % opts.sample_every == 0:
save_samples(iteration, fixed_Y, fixed_X, G_YtoX, G_XtoY, opts)
# Save the model parameters
if iteration % opts.checkpoint_every == 0:
checkpoint(iteration, G_XtoY, G_YtoX, D_X, D_Y, opts)
def main(opts):
"""Loads the data, creates checkpoint and sample directories, and starts the training loop.
"""
# Create train and test dataloaders for images from the two domains X and Y
dataloader_X, test_dataloader_X = get_emoji_loader(emoji_type=opts.X, opts=opts)
dataloader_Y, test_dataloader_Y = get_emoji_loader(emoji_type=opts.Y, opts=opts)
# Create checkpoint and sample directories
utils.create_dir(opts.checkpoint_dir)
utils.create_dir(opts.sample_dir)
# Start training
training_loop(dataloader_X, dataloader_Y, test_dataloader_X, test_dataloader_Y, opts)
def print_opts(opts):
"""Prints the values of all command-line arguments.
"""
print('=' * 80)
print('Opts'.center(80))
print('-' * 80)
for key in opts.__dict__:
if opts.__dict__[key]:
print('{:>30}: {:<30}'.format(key, opts.__dict__[key]).center(80))
print('=' * 80)
def create_parser():
"""Creates a parser for command-line arguments.
"""
parser = argparse.ArgumentParser()
# Model hyper-parameters
parser.add_argument('--image_size', type=int, default=32, help='The side length N to convert images to NxN.')
parser.add_argument('--g_conv_dim', type=int, default=32)
parser.add_argument('--d_conv_dim', type=int, default=32)
parser.add_argument('--use_cycle_consistency_loss', action='store_true', default=False, help='Choose whether to include the cycle consistency term in the loss.')
parser.add_argument('--init_zero_weights', action='store_true', default=False, help='Choose whether to initialize the generator conv weights to 0 (implements the identity function).')
parser.add_argument('--disable_bn', action='store_true', help='Disable Batch Normalization(BN)')
# Training hyper-parameters
parser.add_argument('--train_iters', type=int, default=600, help='The number of training iterations to run (you can Ctrl-C out earlier if you want).')
parser.add_argument('--batch_size', type=int, default=16, help='The number of images in a batch.')
parser.add_argument('--num_workers', type=int, default=0, help='The number of threads to use for the DataLoader.')
parser.add_argument('--lr', type=float, default=0.0003, help='The learning rate (default 0.0003)')
parser.add_argument('--beta1', type=float, default=0.5)
parser.add_argument('--beta2', type=float, default=0.999)
# Data sources
parser.add_argument('--X', type=str, default='Apple', choices=['Apple', 'Windows'], help='Choose the type of images for domain X.')
parser.add_argument('--Y', type=str, default='Windows', choices=['Apple', 'Windows'], help='Choose the type of images for domain Y.')
# Saving directories and checkpoint/sample iterations
parser.add_argument('--checkpoint_dir', type=str, default='checkpoints_cyclegan')
parser.add_argument('--sample_dir', type=str, default='samples_cyclegan')
parser.add_argument('--load', type=str, default=None)
parser.add_argument('--log_step', type=int , default=10)
parser.add_argument('--sample_every', type=int , default=100)
parser.add_argument('--checkpoint_every', type=int , default=800)
return parser
if __name__ == '__main__':
parser = create_parser()
opts = parser.parse_args()
if opts.use_cycle_consistency_loss:
opts.sample_dir = 'samples_cyclegan_cycle'
if opts.load:
opts.sample_dir = '{}_pretrained'.format(opts.sample_dir)
opts.sample_every = 20
print_opts(opts)
main(opts)
|
py | 1a4c1eec6fc8390399a397fb35b89a98ebe90aa7 | import os
import flask
from flask import send_from_directory
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
from backend.utils import CustomJsonEncoder
app = flask.Flask(__name__)
app.json_encoder = CustomJsonEncoder
app.config["DEBUG"] = os.environ.get("DEBUG")
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get("DATABASE_URL")
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
migrate = Migrate(app, db)
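# The catch-all route below serves the built frontend: any path that does not
# resolve to a static file falls back to index.html so client-side routing works.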
# noinspection PyBroadException
@app.route("/<path:filename>")
def fallback(filename):
public_dir = os.path.abspath("../frontend/dist/")
try:
return send_from_directory(public_dir, path=filename)
except Exception:
return send_from_directory(public_dir, path="index.html")
|
py | 1a4c20347c0f54a991f0613a7bfc6588b8beddc8 | from __future__ import (absolute_import, division, print_function)
import os
import os.path
import sys
import pipes
from ansible.errors import AnsibleError
from ansible.plugins.connection.ssh import Connection as SSHConnection
from contextlib import contextmanager
__metaclass__ = type
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
DOCUMENTATION = '''
connection: sshlxd
short_description: connect via ssh client binary to lxd container
description:
- This connection plugin allows ansible to communicate to the target machines via normal ssh command line.
author: aizquierdo, forked from Austin Hyde (@austinhyde)
version_added: historical
options:
host:
description: Hostname/ip to connect to.
default: inventory_hostname
vars:
- name: ansible_host
- name: ansible_ssh_host
host_key_checking:
description: Determines if ssh should check host keys
type: boolean
ini:
- section: defaults
key: 'host_key_checking'
- section: ssh_connection
key: 'host_key_checking'
version_added: '2.5'
env:
- name: ANSIBLE_HOST_KEY_CHECKING
- name: ANSIBLE_SSH_HOST_KEY_CHECKING
version_added: '2.5'
vars:
- name: ansible_host_key_checking
version_added: '2.5'
- name: ansible_ssh_host_key_checking
version_added: '2.5'
password:
description: Authentication password for the C(remote_user). Can be supplied as CLI option.
vars:
- name: ansible_password
- name: ansible_ssh_pass
ssh_args:
description: Arguments to pass to all ssh cli tools
default: '-C -o ControlMaster=auto -o ControlPersist=60s'
ini:
- section: 'ssh_connection'
key: 'ssh_args'
env:
- name: ANSIBLE_SSH_ARGS
ssh_common_args:
description: Common extra args for all ssh CLI tools
vars:
- name: ansible_ssh_common_args
ssh_executable:
default: ssh
description:
- This defines the location of the ssh binary. It defaults to `ssh` which will use the first ssh binary available in $PATH.
- This option is usually not required, it might be useful when access to system ssh is restricted,
or when using ssh wrappers to connect to remote hosts.
env: [{name: ANSIBLE_SSH_EXECUTABLE}]
ini:
- {key: ssh_executable, section: ssh_connection}
#const: ANSIBLE_SSH_EXECUTABLE
version_added: "2.2"
sftp_executable:
default: sftp
description:
- This defines the location of the sftp binary. It defaults to `sftp` which will use the first binary available in $PATH.
env: [{name: ANSIBLE_SFTP_EXECUTABLE}]
ini:
- {key: sftp_executable, section: ssh_connection}
version_added: "2.6"
scp_executable:
default: scp
description:
- This defines the location of the scp binary. It defaults to `scp` which will use the first binary available in $PATH.
env: [{name: ANSIBLE_SCP_EXECUTABLE}]
ini:
- {key: scp_executable, section: ssh_connection}
version_added: "2.6"
scp_extra_args:
description: Extra exclusive to the 'scp' CLI
vars:
- name: ansible_scp_extra_args
sftp_extra_args:
description: Extra exclusive to the 'sftp' CLI
vars:
- name: ansible_sftp_extra_args
ssh_extra_args:
description: Extra exclusive to the 'ssh' CLI
vars:
- name: ansible_ssh_extra_args
retries:
# constant: ANSIBLE_SSH_RETRIES
description: Number of attempts to connect.
default: 3
type: integer
env:
- name: ANSIBLE_SSH_RETRIES
ini:
- section: connection
key: retries
- section: ssh_connection
key: retries
port:
description: Remote port to connect to.
type: int
default: 22
ini:
- section: defaults
key: remote_port
env:
- name: ANSIBLE_REMOTE_PORT
vars:
- name: ansible_port
- name: ansible_ssh_port
remote_user:
description:
- User name with which to login to the remote server, normally set by the remote_user keyword.
- If no user is supplied, Ansible will let the ssh client binary choose the user as it normally does.
ini:
- section: defaults
key: remote_user
env:
- name: ANSIBLE_REMOTE_USER
vars:
- name: ansible_user
- name: ansible_ssh_user
pipelining:
default: ANSIBLE_PIPELINING
description:
- Pipelining reduces the number of SSH operations required to execute a module on the remote server,
by executing many Ansible modules without actual file transfer.
- This can result in a very significant performance improvement when enabled.
- However this conflicts with privilege escalation (become).
For example, when using sudo operations you must first disable 'requiretty' in the sudoers file for the target hosts,
which is why this feature is disabled by default.
env:
- name: ANSIBLE_PIPELINING
#- name: ANSIBLE_SSH_PIPELINING
ini:
- section: defaults
key: pipelining
#- section: ssh_connection
# key: pipelining
type: boolean
vars:
- name: ansible_pipelining
- name: ansible_ssh_pipelining
private_key_file:
description:
- Path to private key file to use for authentication
ini:
- section: defaults
key: private_key_file
env:
- name: ANSIBLE_PRIVATE_KEY_FILE
vars:
- name: ansible_private_key_file
- name: ansible_ssh_private_key_file
control_path:
description:
- This is the location to save ssh's ControlPath sockets, it uses ssh's variable substitution.
- Since 2.3, if null, ansible will generate a unique hash. Use `%(directory)s` to indicate where to use the control dir path setting.
env:
- name: ANSIBLE_SSH_CONTROL_PATH
ini:
- key: control_path
section: ssh_connection
control_path_dir:
default: ~/.ansible/cp
description:
- This sets the directory to use for ssh control path if the control path setting is null.
- Also, provides the `%(directory)s` variable for the control path setting.
env:
- name: ANSIBLE_SSH_CONTROL_PATH_DIR
ini:
- section: ssh_connection
key: control_path_dir
sftp_batch_mode:
default: 'yes'
description: 'TODO: write it'
env: [{name: ANSIBLE_SFTP_BATCH_MODE}]
ini:
- {key: sftp_batch_mode, section: ssh_connection}
type: bool
scp_if_ssh:
default: smart
description:
- "Prefered method to use when transfering files over ssh"
- When set to smart, Ansible will try them until one succeeds or they all fail
- If set to True, it will force 'scp', if False it will use 'sftp'
env: [{name: ANSIBLE_SCP_IF_SSH}]
ini:
- {key: scp_if_ssh, section: ssh_connection}
use_tty:
version_added: '2.5'
default: 'yes'
description: add -tt to ssh commands to force tty allocation
env: [{name: ANSIBLE_SSH_USETTY}]
ini:
- {key: usetty, section: ssh_connection}
type: bool
yaml: {key: connection.usetty}
'''
class ConnectionBase(SSHConnection):
pass
class Connection(ConnectionBase):
''' ssh based connections '''
transport = 'sshlxc'
def __init__(self, *args, **kwargs):
super(Connection, self).__init__(*args, **kwargs)
# self.host == containername@containerhost
self.inventory_hostname = self.host
self.containerspec, self.host = self.host.split('@', 1)
# self.containerspec == containername
# self.host == containerhost
# this way SSHConnection parent class uses the containerhost as the SSH remote host
self.connector = None
# logging.warning(self._play_context.connection)
def get_container_id(self):
return self.containerspec
def get_container_connector(self):
return 'lxc'
def _strip_sudo(self, executable, cmd):
# Get the command without sudo
sudoless = cmd.rsplit(executable + ' -c ', 1)[1]
# Get the quotes
quotes = sudoless.partition('echo')[0]
# Get the string between the quotes
cmd = sudoless[len(quotes):-len(quotes+'?')]
# Drop the first command becasue we don't need it
#cmd = cmd.split('; ', 1)[1]
return cmd
def host_command(self, cmd, do_become=False):
# if self._play_context.become and do_become:
# cmd = self._play_context.make_become_cmd(cmd)
return super(Connection, self).exec_command(cmd, in_data=None, sudoable=True)
def exec_command(self, cmd, in_data=None, executable='/bin/sh', sudoable=True):
''' run a command in the container '''
# if self._play_context.become:
# # display.debug("_low_level_execute_command(): using become for this command")
# cmd = self._play_context.make_become_cmd(cmd)
cmd = '%s exec %s -- %s' % (self.get_container_connector(), self.get_container_id(), cmd)
# display.vvv("CONTAINER (%s) %s" % (local_cmd), host=self.host)
return super(Connection, self).exec_command(cmd, in_data, True)
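# Example of the wrapping performed above (illustrative): a module command such
# as /bin/sh -c 'echo ~' is executed on the container host over SSH as
#   lxc exec <container> -- /bin/sh -c 'echo ~'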
def container_path(self, path):
return self.get_container_id() + path
@contextmanager
def tempfile(self):
code, stdout, stderr = self.host_command('mktemp')
if sys.version_info.major == 3:
stdout = stdout.decode('utf-8')
if code != 0:
raise AnsibleError("failed to make temp file:\n%s\n%s" % (stdout, stderr))
tmp = stdout.strip().split('\n')[-1]
yield tmp
code, stdout, stderr = self.host_command(' '.join(['rm', tmp]))
if code != 0:
raise AnsibleError("failed to remove temp file %s:\n%s\n%s" % (tmp, stdout, stderr))
def put_file(self, in_path, out_path):
''' transfer a file from local to remote container '''
with self.tempfile() as tmp:
super(Connection, self).put_file(in_path, tmp)
self.host_command(' '.join(['lxc', 'exec', self.get_container_id(), '--', 'mkdir', '-p', os.path.dirname(out_path)]), do_become=True)
self.host_command(' '.join(['lxc', 'file', 'push', '--debug', tmp, self.container_path(out_path)]), do_become=True)
def fetch_file(self, in_path, out_path):
''' fetch a file from remote to local '''
with self.tempfile() as tmp:
self.host_command(' '.join(['lxc', 'file', 'pull', self.container_path(in_path), tmp]), do_become=True)
super(Connection, self).fetch_file(tmp, out_path)
def close(self):
''' Close the connection, nothing to do for us '''
super(Connection, self).close()
|
py | 1a4c21b583ff19912cced53613dad886d6cbfeb3 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import sys
from telemetry.core import util
def Run(project_config, no_browser=False,
disable_cloud_storage_io_during_test=False):
args = sys.argv[1:]
assert '--top-level-dir' not in args, (
'Top level directory for running tests should be specified through '
'the instance of telemetry.project_config.ProjectConfig.')
assert '--client-config' not in args, (
'Client config file to be used for telemetry should be specified through '
'the instance of telemetry.project_config.ProjectConfig.')
assert project_config.top_level_dir, 'Must specify top level dir for project'
args.extend(['--top-level-dir', project_config.top_level_dir])
for c in project_config.client_configs:
args.extend(['--client-config', c])
if no_browser and not '--no-browser' in args:
args.extend(['--no-browser'])
if project_config.default_chrome_root and not '--chrome-root' in args:
args.extend(['--chrome-root', project_config.default_chrome_root])
if disable_cloud_storage_io_during_test:
args.extend(['--disable-cloud-storage-io'])
env = os.environ.copy()
telemetry_dir = util.GetTelemetryDir()
if 'PYTHONPATH' in env:
env['PYTHONPATH'] = os.pathsep.join([env['PYTHONPATH'], telemetry_dir])
else:
env['PYTHONPATH'] = telemetry_dir
path_to_run_tests = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'run_tests.py')
return subprocess.call([sys.executable, path_to_run_tests] + args, env=env)
|
py | 1a4c222406efad0e01ae1c41dedd1bb85a81fece | import argparse
import shutil
from pathlib import Path
import time
from pyimzml.ImzMLParser import ImzMLParser
import numpy as np
from matplotlib import pyplot as plt
from sm.browser import utils, mz_search, split_sort
TMP_LOCAL_PATH = Path("/tmp/imzml-browser")
TMP_LOCAL_PATH.mkdir(parents=True, exist_ok=True)
def log(start, message):
elapsed = time.time() - start
print(f"{elapsed:.2f}s: {message}")
def preprocess_dataset_peaks(full_dataset_s3_path: str):
assert full_dataset_s3_path.startswith("s3")
start = time.time()
log(start, "Initialization")
ds = utils.DatasetFiles(full_dataset_s3_path, TMP_LOCAL_PATH)
log(start, f"downloading dataset files from {full_dataset_s3_path} to {ds.ds_path}")
ds.download_imzml()
log(start, f"parsing imzml at {ds.imzml_path}")
imzml_reader = split_sort.ImzMLReader(ds.imzml_path) # replace with ImzmlParser
imzml_reader.add_stream(ds.ibd_path.open("rb"))
log(start, f"segmenting dataset by mz at {ds.segments_path}")
ibd_size_mb = ds.ibd_path.stat().st_size / 1024 ** 2
split_sort.segment_dataset(imzml_reader, ibd_size_mb, ds.segments_path)
log(start, f"sorting, merging, saving segments at {ds.sorted_peaks_path}")
split_sort.sort_merge_segments(ds.segments_path, ds.sorted_peaks_path)
log(start, f"saving dataset coordinates at {ds.ds_coordinates_path}")
np.array(imzml_reader.coordinates, dtype="i").tofile(ds.ds_coordinates_path.open("wb"))
log(start, f"building and saving mz index at {ds.mz_index_path}")
mz_index = mz_search.build_mz_index(ds.sorted_peaks_path)
mz_index.tofile(ds.mz_index_path)
log(start, f"uploading dataset files from {ds.ds_path} to {ds.full_ds_s3_path}")
ds.upload_sorted_mz()
log(start, f"removing {ds.segments_path}")
shutil.rmtree(ds.segments_path, ignore_errors=True)
log(start, f"done")
class DatasetBrowser:
def __init__(
self, full_dataset_s3_path: str,
):
start = time.time()
log(start, f"fetching and initializing mz index files from {full_dataset_s3_path}")
ds = utils.DatasetFiles(full_dataset_s3_path, TMP_LOCAL_PATH)
log(start, f"parsing imzml at {ds.imzml_path}")
self.imzml_path = ds.imzml_path
self.coordinates = np.frombuffer(ds.read_coordinates(), dtype="i").reshape(-1, 2)
self.mz_index = np.frombuffer(ds.read_mz_index(), dtype="f")
self.sorted_peaks_s3_file = ds.make_sorted_peaks_s3_file()
log(start, f"done")
def search(self, mz_lo: int, mz_hi: int) -> np.ndarray:
start = time.time()
log(start, "searching mz image")
mz_peaks = mz_search.search_and_fetch_mz_peaks(
self.sorted_peaks_s3_file, self.mz_index, mz_lo, mz_hi
)
mz_image, alpha, mz_max, mz_min = mz_search.create_mz_image(mz_peaks, self.coordinates)
rgba_image = plt.get_cmap("gray")(mz_image)
rgba_image[:, :, 3] = alpha
log(start, "done")
return rgba_image
def search_pixel(self, x: int, y: int) -> np.ndarray:
start = time.time()
log(start, f"pixel parsing imzml at {self.imzml_path}")
p = ImzMLParser(self.imzml_path)
n = 0
coordinate_x = p.coordinates[n][0]
coordinate_y = p.coordinates[n][1]
if((x, y, 1) in p.coordinates):
n = p.coordinates.index((x, y, 1))
coordinate_x = p.coordinates[n][0]
coordinate_y = p.coordinates[n][1]
mzs, ints = p.getspectrum(n)
log(start, "done")
return dict({'mzs': mzs.tolist(), 'ints': ints.tolist(), 'x': coordinate_x, 'y': coordinate_y})
if __name__ == "__main__":
parser = argparse.ArgumentParser("Build mz search index and search random mz images")
parser.add_argument("--s3-path", type=str, required=True)
parser.add_argument("--sort-peaks", action="store_true")
parser.add_argument("--mz-search", action="store_true")
parser.add_argument("--mz", type=float)
parser.add_argument("--ppm", type=int)
args = parser.parse_args()
if args.sort_peaks:
preprocess_dataset_peaks(args.s3_path)
elif args.mz_search:
dataset_browser = DatasetBrowser(args.s3_path)
mz_lo, mz_hi = utils.mz_ppm_bin(mz=args.mz, ppm=args.ppm)
mz_image = dataset_browser.search(mz_lo, mz_hi)
plt.imshow(mz_image)
plt.show()
|
py | 1a4c227cca9ab885a589d6979bf9a7f174bf34c9 | #!/usr/bin/python
"""
Python Markdown, the Command Line Script
========================================
This is the command line script for Python Markdown.
Basic use from the command line:
python markdown.py source.txt > destination.html
Run "python markdown.py --help" to see more options.
See markdown/__init__.py for information on using Python Markdown as a module.
## Authors and License
Started by [Manfred Stienstra](http://www.dwerg.net/). Continued and
maintained by [Yuri Takhteyev](http://www.freewisdom.org), [Waylan
Limberg](http://achinghead.com/) and [Artem Yunusov](http://blog.splyer.com).
Contact: [email protected]
Copyright 2007, 2008 The Python Markdown Project (v. 1.7 and later)
Copyright 200? Django Software Foundation (OrderedDict implementation)
Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
Copyright 2004 Manfred Stienstra (the original version)
License: BSD (see docs/LICENSE for details).
"""
import sys, os
if sys.platform == 'win32':
# We have to remove the Scripts dir from path on windows.
# If we don't, it will try to import itself rather than markdown lib.
# This appears to *not* be a problem on *nix systems, only Windows.
try:
sys.path.remove(os.path.dirname(__file__))
except (ValueError, NameError):
pass
# Now we can import the markdown lib.
import logging
from markdown import COMMAND_LINE_LOGGING_LEVEL
from markdown import commandline
# Setup a logger manually for compatibility with Python 2.3
logger = logging.getLogger('MARKDOWN')
logger.setLevel(COMMAND_LINE_LOGGING_LEVEL)
logger.addHandler(logging.StreamHandler())
if __name__ == '__main__':
commandline.run()
|
py | 1a4c233448c9106367e6bd37fb1ff9519dfe98b3 | from enum import Enum, Flag, IntEnum, IntFlag
class MyEnum(Enum):
a = "letter a"
b = "letter b"
class MyStrEnum(str, Enum):
a = "letter a"
b = "letter b"
class MyIntEnum(IntEnum):
a = 1
b = 2
class MyFlag(Flag):
a = 1
b = 2
class MyIntFlag(IntFlag):
a = 1
b = 2
|
py | 1a4c2400b96bd82d5b9004f85555460cb72e95c0 | #30 min with 52cpus in LMEM1
#the script uses a maximum of 40GB mem
#%reset -f
import numpy as np
import matplotlib.pyplot as plt
import xarray as xr
import dask as da
import glob
import time
from tqdm import tqdm #to see progressbar for loops
from scipy.interpolate import interp1d #1d interp
import xesmf as xe #for spatial interpolation in projected or lon-lat coords
#for projections
from pyproj import Proj, transform, Transformer
#run this cell just once----
#from dask_jobqueue import SLURMCluster
from dask.distributed import Client, LocalCluster
#this seems to be working---
client = Client(processes=False,n_workers=1,threads_per_worker=52,memory_limit='120GB')
#this produce memory limit problems:
#client = Client(processes=False,n_workers=12,threads_per_worker=1,memory_limit='4GB')
#
#this seems to be working, but with lots of warnings related to mem issues?---
#this produces the same result as Client, but we cannot see progress nor the stream
#client = LocalCluster(processes=False,n_workers=1,threads_per_worker=24,memory_limit='48GB')
#
#this is not calling any partition, it just runs with 1 core on the node we are currently on---
#cluster = SLURMCluster(queue='normal',cores=24,memory='48GB',processes=1,interface="lo")
#cluster = SLURMCluster(queue='LMEM1',cores=2,memory='1GB',project='test',interface='lo',scheduler_options={'interface': 'lo'})
#cluster = SLURMCluster(queue='LMEM1',cores=4,memory='2GB',processes=1, interface='lo')
#cluster = SLURMCluster()
#cluster.scale(jobs=2)
#cluster.scale(memory='2GB')
#cluster.adapt(maximum_jobs=2)
#print(cluster.job_script())
#client = Client(cluster)
#
# open dashboard with this if link doesn't work
# http://localhost:8787/status
#----------
home_dir="/export/lv4/user/jfajardourbina/"
ml_dir=f"{home_dir}dws_ulf_getm_2D_depth_avg/experiments_post_proc/lagrangian_simulation_36years/machine_learning_github/Lagrangian_ML/"
dir_wind=f"{home_dir}dws_ulf_getm_2D_depth_avg/data/atmosphere/" #winds
dir_displacement="net_displacement/"
dir_topo=f"{home_dir}dws_ulf_getm_2D_depth_avg/experiments_post_proc/analysis_eulerian_data_36years/data_bathy_grid/" #topo data
file_topo="DWS200m.2012.v03.nc"
file_wind0="UERRA.2009.nc4" #any wind file
#
savee='everyM2' #saving track data every m2
deploy='everyM2'#deploy set of particles every m2
minTsim=60 #minimum time of simulation (days)
maxTsim=91 #maximum time of simulation (days)
dir_tracks = f"{home_dir}dws_ulf_getm_2D_depth_avg/experiments_post_proc/lagrangian_simulation_36years/exp-deployHighVolume_coords-xcyc_save-{savee}_deploy-{deploy}_Tsim-{minTsim}-{maxTsim}d/tracks/"
#
npa_per_dep=12967 #number of particles per deployment
m2=int(12.42*3600+2) #period in seconds
nt_interp=283*2 #interpolate wind data every 9.43 min from 1h original data (factor of m2=44714)
ref_time=np.datetime64("1980-01-01") #reference time for time interpolation, could be any value
dx=400/1e3;dy=400/1e3 #particle grid resolution
#
#paths for output data
dir_post_proc_data=f"{ml_dir}post_proc_data/" #to save wind interp files
dir_interp_wind="wind/"
file_interp_wind_root="wind_avg_std_during_1M2_and_interp_to_particle_grid_for_convlstm.nc"
#--------
dsw=xr.open_dataset(dir_wind+file_wind0) #open any wind data
dsw.close()
dsto=xr.open_dataset(dir_topo+file_topo) #topo file
xct0=dsto.xc.min().values/1e3; yct0=dsto.yc.min().values/1e3 #=(0,0)
#--------
#open grid of displacements (use for convlstm)---
file_displacement=sorted(glob.glob(f'{dir_post_proc_data}{dir_displacement}*.nc',recursive=True))[0]
ds_dis=xr.open_dataset(file_displacement); ds_dis.close()
xcdis0,ycdis0=ds_dis.x,ds_dis.y; del ds_dis
xcdis,ycdis=np.meshgrid(xcdis0,ycdis0)
#
#or build it---
#xmin=x0.min();xmax=x0.max();ymin=y0.min();ymax=y0.max()
#extend_grid=10 #so from particle min max positions extend grid 10*dx (to not have problems with convolution)
#xgrid=np.arange(xmin-dx*1e3*extend_grid,xmax+dx*1e3*(extend_grid+1),dx*1e3,dtype='float32')
#ygrid=np.arange(ymin-dy*1e3*extend_grid,ymax+dy*1e3*(extend_grid+1),dy*1e3,dtype='float32')
#xgrid,ygrid=np.meshgrid(xgrid,ygrid)
#define the transformations----------
#1)
#from epgs:28992(DWS) to epgs:4326(LatLon with WGS84 datum used by GPS and Google Earth)
proj = Transformer.from_crs('epsg:28992','epsg:4326',always_xy=True)
#2)
#from epgs:4326(LatLon with WGS84) to epgs:28992(DWS)
inproj = Transformer.from_crs('epsg:4326','epsg:28992',always_xy=True)
#inproj_old=Proj("EPSG:28992") #old method (has errors 10-20m when contrast with the rotated coords)
#lon,lat to 28992(DWS)-projection--------------------
#bathymetry--------
xct=dsto.lonc.values; yct=dsto.latc.values #lon,lat units
xctp,yctp,z = inproj.transform(xct,yct,xct*0.)
#[xctp,yctp] = inproj_old(xct,yct) #old method
xctp=(xctp)/1e3; yctp=(yctp)/1e3
#first projected point to correct the coordinates of model local meter units
xctp0=xctp[0,0]; yctp0=yctp[0,0]
#local meter model units to 28992(DWS)-projection and lon-lat--------------
#matrix rotation -17degrees-----
ang=-17*np.pi/180
angs=np.ones((2,2))
angs[0,0]=np.cos(ang); angs[0,1]=np.sin(ang)
angs[1,0]=-np.sin(ang); angs[1,1]=np.cos(ang)
#bathymetry----
#original topo points in meter
xct2,yct2=np.meshgrid(dsto.xc.values,dsto.yc.values)
xy=np.array([xct2.flatten(),yct2.flatten()]).T
#rotate
xyp=np.matmul(angs,xy.T).T/1e3
xyp0=xyp[0,:] #the first rotated point in the topo data in meter =0,0
#correction from rotation to projection:
#1) subtract the first rotated topo point in meters (which gives the same as xyp0=[0,0])
#2) add the first projected point of the (lon,lat model units to projection) case
xyp=xyp-xyp0
xyp[:,0]=xyp[:,0]+xctp0; xyp[:,1]=xyp[:,1]+yctp0
xyp=np.reshape(xyp,(len(dsto.yc.values),len(dsto.xc.values),2))
xctp2=xyp[...,0]; yctp2=xyp[...,1] #km
#
#contrast projections (lon,lat model units to meter) with rotated case
#around 0 meter diff with new method
#10 meter difference in average and maximum of 20 with old method
a=xctp-xctp2; b=yctp-yctp2
print(np.abs(a).max()*1e3, np.abs(b).max()*1e3, np.abs(a).mean()*1e3, np.abs(b).mean()*1e3)
#particle grid of displacements (use for convlstm)------
xy=np.array([xcdis.flatten(),ycdis.flatten()]).T
ny,nx=xcdis.shape
#rotate
xyp=np.matmul(angs,xy.T).T/1e3
#correction from rotation to projection:
#1) subtract the first rotated topo point in meters (which gives the same as xyp0=[0,0])
#2) add the first projected point of the (lon,lat model units to meter) case
xyp=xyp-xyp0
xyp[:,0]=xyp[:,0]+xctp0; xyp[:,1]=xyp[:,1]+yctp0
xyp=np.reshape(xyp,(ny,nx,2))
xcdisp=xyp[...,0]; ycdisp=xyp[...,1] #km
#
#get coordinates in lon-lat units (WGS84 )
xcdisp_lon, ycdisp_lat, _ = proj.transform(xcdisp*1e3,ycdisp*1e3, ycdisp*0.)
#for spatial interpolation using lon-lat-----
#build the input grid (lon-lat of original wind file)---
ds_in = xr.Dataset()
ds_in.coords["lon"] = dsw.lon.astype('float32')
ds_in["lon"].attrs['long_name'] = 'longitude'
ds_in.coords["lat"] = dsw.lat.astype('float32')
ds_in["lat"].attrs['long_name'] = 'latidude'
print(ds_in)
print()
#build the output grid (lon-lat of particle displacement)---
#this grid is used for the interpolation
ds_out = xr.Dataset()
ds_out.coords["lon"] = (("yc","xc"),xcdisp_lon.astype('float32'))
ds_out["lon"].attrs['long_name'] = 'longitude'
ds_out.coords["lat"] = (("yc","xc"),ycdisp_lat.astype('float32'))
ds_out["lat"].attrs['long_name'] = 'latidude'
#ds_out=ds_out.drop(["xc","yc"])
print(ds_out)
#regridder-----
#only need to run once
regridder = xe.Regridder(ds_in,ds_out,"patch") #special smooth iterpolator from this package
#regridder_bilinear = xe.Regridder(ds_in,ds_out,"bilinear")
#regridder_nearest = xe.Regridder(ds_in,ds_out,"nearest_s2d") #classical nearest
#for temporal interpolation-----
def interp1d_fun(x,tin,tout):
f = interp1d(tin,x,axis=-1,kind='linear')
return f(tout)
def xr_interp1d(x,tin,tout,idim,odim):
#x: xarray with chunks
#idim: input coordinate that will be changed by output odim
#odim: output coordinate
ds_interp1d = xr.apply_ufunc(
interp1d_fun,x,
input_core_dims=[[idim]],
output_core_dims=[[odim]],
output_dtypes=[np.float32],
dask_gufunc_kwargs={'output_sizes':{odim:len(tout)}},
kwargs={'tin':tin,'tout':tout}, #input to the above function
dask='parallelized',
#vectorize=True,
)
return ds_interp1d
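# Note: xr.apply_ufunc with dask='parallelized' applies interp1d_fun lazily to each dask chunk
# (the core dimension `idim` must be un-chunked); dask_gufunc_kwargs' output_sizes declares the
# length of the new `odim` dimension so the dask graph can be built without computing anything yet.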
#rotate wind from projection to model coordinates---
def projection_to_model_local_coords(x,y,ang=17*np.pi/180):
return np.cos(ang)*x + np.sin(ang)*y, -np.sin(ang)*x + np.cos(ang)*y
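# (This is the standard 2-D rotation u' = u*cos(a) + v*sin(a), v' = -u*sin(a) + v*cos(a)
#  with a = +17 degrees, i.e. the inverse of the -17 degree rotation applied to the grid coordinates above.)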
#-----
files_wind=sorted(glob.glob(f'{dir_wind}/**/*.nc4',recursive=True))
for file_wind in tqdm(files_wind):
year=int(str(file_wind)[-8:-4])
print(year)
#open wind data------
dsw=xr.open_dataset(file_wind,chunks={'time':-1,'lon':-1,'lat':-1})[["u10","v10"]];dsw.close() #winds
tw = dsw.time.values #contains data for the full 1st day of the next year
#del these long attributes
del dsw.attrs["history_of_appended_files"], dsw.attrs["history"]
#spatial interpolation-----
dsw_int = regridder(dsw)
#temporal interpolation-----
#first track of this year---
month_sim=1
file_track=f'tracks_{year}{month_sim:02d}_coords-xcyc_save-{savee}_deploy-{deploy}_Tsim-{minTsim}-{maxTsim}d.nc'
file_track_path=f'{dir_tracks}{year}/{file_track}'
dst=xr.open_dataset(file_track_path)
t0=dst.time.isel(traj=0,obs=0).values
x0=dst.x.isel(traj=range(npa_per_dep),obs=0).values
y0=dst.y.isel(traj=range(npa_per_dep),obs=0).values
dst.close(); del dst
#
#first track of the following year---
if file_wind!=files_wind[-1]:
file_track=f'tracks_{year+1}{month_sim:02d}_coords-xcyc_save-{savee}_deploy-{deploy}_Tsim-{minTsim}-{maxTsim}d.nc'
file_track_path=f'{dir_tracks}{year+1}/{file_track}'
t1=xr.open_dataset(file_track_path).time.isel(traj=0,obs=0).values
#last track of this year (for the final simulated month)---
else:
#for the final year we can not open the next year simulation
#we only have tracks until october, so we can get the wind for the last interval of displacement
last_year_tracks=sorted(glob.glob(f'{dir_tracks}{year}/*.nc',recursive=True))
end_month=len(last_year_tracks)
file_track=f'tracks_{year}{end_month:02d}_coords-xcyc_save-{savee}_deploy-{deploy}_Tsim-{minTsim}-{maxTsim}d.nc'
file_track_path=f'{dir_tracks}{year}/{file_track}'
t1=xr.open_dataset(file_track_path).time.isel(traj=-1,obs=0).values + np.timedelta64(m2,'s')
#
#times to get wind data for this year---
#however if we can not find a factor "nt_interp" of m2, use 10min
    #we won't have the same amount of interp data every m2, but it is better to skip 10 min of 1 sample than 1 h (original data)
#nt_interp=283*2 #interpolate wind data every 9.43 min from 1h original data (factor of m2=44714)
#t_interp:
# - high reolution times to compute avg and std during the interval of net displacement
# - the last data could be close to the beginning of next year, or the same year for the final month (October) of the simulation
#t_dep:
# - times of displacement for the current year, referenced to the initial time of the m2 interval.
t_interp=np.arange(t0,t1+np.timedelta64(1,'s'),nt_interp,dtype='datetime64[s]')
t_dep=np.arange(t0,t1,m2,dtype='datetime64[s]') #only for this year
#1d interp----
#reference with respect to ref_time (so convert timedelta64 to float)
t_interp0=(t_interp-ref_time) / np.timedelta64(1,'s') #dates after interpolation (factor of m2)
tw0=(tw-ref_time) / np.timedelta64(1,'s') #dates of original winds (every 1h)
#
dsw_int=xr_interp1d(dsw_int.chunk({'time':-1,'xc':10,'yc':10}),tw0,t_interp0,idim='time',odim='time_int').transpose("time_int","yc","xc")
#add time, xc and yc coords
dsw_int.coords["time_int"]=t_interp
dsw_int.coords["xc"] = ("xc",xcdis0.values.astype('float32')) #model coords in m
dsw_int.coords["yc"] = ("yc",ycdis0.values.astype('float32'))
#reshape with xarray---
#
#check time dimensions
nt_interval=int(m2/nt_interp) #points in the m2 interval (right border of interval open)
nt_dep=(len(t_interp)-1)//nt_interval #=len(t_dep), final shape after mean or std in the m2 interval. "-1" because we also don't consider the right border of the last interval in the avg
    #times after avg or std are referenced to the date of deployment (the beginning of the m2 interval of the displacement)
print("check times:",nt_interval,nt_dep,len(t_dep),nt_interval*nt_dep,len(dsw_int.time_int)-1)
#
#https://stackoverflow.com/questions/59504320/how-do-i-subdivide-refine-a-dimension-in-an-xarray-dataset
#steps:
# - assign_coords: create coords time_dep and time_interval
    # - stack: create a coord and index called multi_time which is related to the original temporal size of the data,
    #   and now matches a 2d-MultiIndex(nt_dep,nt_interval) which is defined using the new time_dep and time_interval coords,
    #   and will order the above coords keeping time_dep constant within every time_interval(0:78); which is consistent with how dsw_t_interp was created.
    # - reset_index().rename: delete the old time coord, and rename the time index as multi_time to remove the old time index.
    # - unstack(): use the above 2d-MultiIndex to reshape the original 1d time data into time_dep, time_interval;
    #   however, the new dimensions are sent by default to the last index,
    # - transpose: to fix the above issue for the dimensions of the variables; however, it cannot fix the order in which dims are shown after Dimensions:
#
dsw_int=dsw_int.isel(time_int=slice(0,-1)
).assign_coords(time_dep=t_dep,time_interval=range(nt_interval)
).stack(multi_time=("time_dep","time_interval")
).reset_index("time_int",drop=True).rename(time_int="multi_time"
).unstack(dim="multi_time").transpose("time_dep","time_interval","yc","xc")
    dsw_int #note: the reshaped time dims may still be listed last in the printed Dimensions
#
#instead of above we could also try resample of xarray---
#and then perform avg, std, but not working well
#res=int(nt_interp+m2)
#dsout_m2_avg=dsout.resample(time=f'{res}s',closed="right")#.mean(dim='time');
#print(t_dep[:5])
#for i in dsout_m2_avg: print(i)
#rotate wind from projection to model coordinates---
dsw_int["u10"],dsw_int["v10"]=projection_to_model_local_coords(dsw_int.u10,dsw_int.v10)
#compute wind speed, direction,... (mean and std) based on Farrugia and Micallef (2017)---
wd = np.arctan2(dsw_int.v10,dsw_int.u10) #wd for the interp times
ws = (dsw_int.u10**2 + dsw_int.v10**2)**.5 #ws for the interp times
u10_vec = dsw_int.u10.mean(dim='time_interval')
v10_vec = dsw_int.v10.mean(dim='time_interval')
#
dsw_int["wd_mean"] = np.arctan2(v10_vec,u10_vec)
dsw_int["ws_mean"] = ws.mean(dim='time_interval')
dsw_int["ws_mean_vec"] = (u10_vec**2 + v10_vec**2)**.5
dsw_int["wd_std"] = ( (ws*(2*np.arctan(np.tan(0.5*(wd-dsw_int["wd_mean"]))))**2).mean(dim='time_interval') / dsw_int["ws_mean"] )**.5
#use abs because there is 1 case with very small negative value -1e-7
dsw_int["ws_std"] = ( abs(((ws*np.cos(wd-dsw_int["wd_mean"]))**2).mean(dim='time_interval') - dsw_int["ws_mean_vec"]**2) )**.5
#
#del u10 and v10
del dsw_int["u10"], dsw_int["v10"], dsw_int["time_interval"]
#call computations---
dsw_int=dsw_int.compute()
#save data---
dsw_int=dsw_int.rename(time_dep="time") #rename dim time_dep
#global coords and attrs---
dsw_int["time"].attrs['description'] = 'initial date of the M2 interval of the net particle displacement'
dsw_int["yc"].attrs['long_name'] = 'yc'
dsw_int["yc"].attrs['description'] = 'the same as the net particle displacement grid y-axis'
dsw_int["yc"].attrs['units'] = 'm'
dsw_int["xc"].attrs['long_name'] = 'xc'
dsw_int["xc"].attrs['description'] = 'the same as the net particle displacement grid x-axis'
dsw_int["xc"].attrs['units'] = 'm'
#
dsw_int.attrs["spatial_info"] = "1) xESMF (method: patch) was used to interpolate wind components to the net displacement particle-grid (using lon-lat coords). 2) Then the wind was projected (rotated) to the local model axes."
dsw_int.attrs["temporal_info"] = f"Wind components were linearly interpolated to {nt_interp}s (factor of M2={m2}s), and then the avg and std in the M2 interval of the net displacement were computed."
dsw_int.attrs["std of wind speed and direction"] = "Based on Farrugia and Micallef (2017)."
#
#variables---
#
dsw_int["wd_mean"].attrs['long_name'] = 'M2-mean wind direction'
dsw_int["wd_mean"].attrs['units'] = 'radian'
dsw_int["wd_mean"].attrs['description'] = 'Farrugia and Micallef (2017): eq(7)'
#
dsw_int["ws_mean"].attrs['long_name'] = 'M2-mean wind speed'
dsw_int["ws_mean"].attrs['units'] = 'm/s'
dsw_int["ws_mean"].attrs['description'] = 'eq(9)'
#
dsw_int["ws_mean_vec"].attrs['long_name'] = 'M2-mean wind speed with vectorial method'
dsw_int["ws_mean_vec"].attrs['units'] = 'm/s'
dsw_int["ws_mean_vec"].attrs['description'] = 'eq(8)'
#
dsw_int["wd_std"].attrs['long_name'] = 'M2-std of wind direction'
dsw_int["wd_std"].attrs['units'] = 'radian'
dsw_int["wd_std"].attrs['description'] = 'eq(18): square root of along wind variance'
#
dsw_int["ws_std"].attrs['long_name'] = 'M2-std of wind speed'
dsw_int["ws_std"].attrs['units'] = 'm/s'
dsw_int["ws_std"].attrs['description'] = 'eq(25)'
#
file_out_nc=f"{year}_{file_interp_wind_root}"
dir_out_nc=dir_post_proc_data+dir_interp_wind
dsw_int.to_netcdf(dir_out_nc+file_out_nc)
dsw_int.close(); del dsw_int; del dsw
client.close() |
py | 1a4c24230db8d6b8974c12f6ea636cf557edb538 | import matplotlib.pyplot as plt
import h5py
import glob
from natsort import natsorted
import os
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--input-path',type=str,help='input for .h5')
parser.add_argument('--output-path',type=str,help='output for png')
args = parser.parse_args()
image_list = []
im_cntr1 = 0
im_cntr2_list = []
input_path = args.input_path
output_path = args.output_path
#input_path = "./path2svs/"
#output_path = "/path2png/"
output_path = os.path.join(output_path,"png_patches/testA/")
print(output_path)
os.makedirs(output_path)
h5_counter = 0
exception_list = []
for filem in natsorted(glob.glob(input_path+"*.h5")):
print("h5 count",h5_counter)
h5_counter+=1
print(filem)
try:
png_cntr = 0
hdf = h5py.File(filem)
for i in list(hdf['imgs']):
plt.imsave(output_path+filem.split("/")[-1]+"_"+str(png_cntr) +".png",i)
png_cntr+=1
print(png_cntr)
except:
exception_list.append(filem.split("/")[-1])
print("Exception occured!!!")
pass
#im_counter = 0
#for image in sorted(glob.glob(filename_list+"/*")):
#print(image.split("/")[-1])
#if domain_type in image:
#imagename = "/a"+str(im_counter)
#shutil.copy(image,output_folder_name+"/"+image.split("/")[-1])
#im_counter += 1
|
py | 1a4c255c45355fea7bcb66c07eae0ea260d5164e | # -*- coding: utf-8 -*-
# =============================================================================
# Copyright (c) 2020 NVIDIA. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from collections.abc import MutableMapping
from typing import Any, Dict, List, Optional
from frozendict import frozendict
from nemo.utils import logging
from nemo.utils.neural_graph.connection import StepModulePort
class GraphOutput(object):
""" A helper class represenging a single bound output. """
def __init__(self, ntype: "NeuralType", producer_step_module_port: StepModulePort):
"""
Initializes object.
Args:
ntype: a NeuralType object.
producer_step_module_port: a producer StepModulePort tuple (step number (module name), port name).
"""
self._ntype = ntype
self._producer_step_module_port = producer_step_module_port
@property
def ntype(self) -> "NeuralType":
"""
Returns:
NeuralType of a given output.
"""
return self._ntype
@property
def producer_step_module_port(self) -> StepModulePort:
""" Returns producer step port (step number (module), port name) tuple. """
return self._producer_step_module_port
class GraphOutputs(MutableMapping):
'''
A specialized dictionary that contains bound outputs of a Neural Graph.
In fact stores two lists of "outputs":
- "default" outputs with default keys taken from outputs of modules, and
- "manual" used for specifying the subset of outputs.
When accessing the outputs, it returns the one of those two lists following the rule:
return "manual" outputs if they were define (at least one manual output defined by the user),
otherwise return the "default" outputs.
'''
def __init__(self, tensors_ref):
"""
Initializes two (empty) dictionaries.
Args:
tensors_ref - reference to neural graph's tensor (dict of dict).
"""
# Tensors[step][output_port_name] passed from the external neural graph object.
self._tensors_ref = tensors_ref
# This dictionary stores the output tensors collected during the "default" tensor recording.
# As they are using the default port names, the second/next tensor published on the same port
# will generate a new unique name following the (step_number.module.port_name) pattern.
self._default_outputs = {}
        # This dictionary stores the list of output tensors of modules "manually" indicated by the user.
        # In this case, trying to overwrite the existing ports with new tensors is forbidden (raises an exception).
self._manual_outputs = {}
def __setitem__(self, key: str, value: "NmTensor"):
"""
This method is used to set the manual output - creates a GraphOutput item and adds it to the list.
Args:
key: The name of the output (port).
value: NmTensor that will be used to create a given GraphOutput.
"""
# Make sure that user passed a NmTensor.
if type(value).__name__ != "NmTensor":
raise TypeError("Port `{}` definition must be must be set using a NmTensor".format(key))
if key in self._manual_outputs.keys():
raise KeyError("Overwriting of a port `{}` that was previously manually bound is not allowed".format(key))
        # Ok, set the "manual" output.
self._manual_outputs[key] = GraphOutput(value.ntype, value.producer_step_module_port)
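    # Hypothetical usage (names are illustrative only): binding a manual output from within a
    # graph definition would look like `graph.outputs["logits"] = logits_tensor`.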
def __getitem__(self, key: str) -> GraphOutput:
"""
Returns the bound output associated with the given key.
Uses default or manual dict depending whether there are some manual outputs or not.
Args:
key: Name of the bound input.
"""
if len(self._manual_outputs) > 0:
return self._manual_outputs[key]
else: # Use default dict.
return self._default_outputs[key]
def __delitem__(self, key: str):
"""
Raises:
TypeError as deletion of a bound input port is not allowed.
"""
raise TypeError("Deleting a bound output is not allowed")
def __iter__(self):
"""
Returns:
Iterator over the outputs - depending whether there are some manual outputs or not.
"""
if len(self._manual_outputs) > 0:
return iter(self._manual_outputs)
else: # Use default dict.
return iter(self._default_outputs)
def __len__(self) -> int:
"""
Returns:
The number of outputs - depending whether there are some manual outputs or not.
"""
if len(self._manual_outputs) > 0:
return len(self._manual_outputs)
else: # Use default dict.
return len(self._default_outputs)
def bind(self, tensors_ref: List["NmTensor"], port_names: Optional[str] = None):
"""
Binds the "default" outputs.
Args:
tensors_ref: List of tensors to be added.
port_names: List of port names (visible outside). If None: using internal tensor "output port names".
"""
# Set names.
if port_names is None:
port_names = [tensor.name for tensor in tensors_ref]
for name, tensor in zip(port_names, tensors_ref):
# Check the presence of the port name in "default" dictionary.
if name in self._default_outputs.keys():
# Name present - use the name being combination of producer and port names.
name = (
str(tensor.producer_step_number) + "_" + tensor.producer_name + "_" + tensor.name
) # last = port name
logging.debug(
"Setting unigue name of the default output port `{}` produced in step {} by `{}` to `{}`".format(
tensor.name, tensor.producer_step_number, tensor.producer_name, name
)
)
# Store the output.
self._default_outputs[name] = GraphOutput(tensor.ntype, tensor.producer_step_module_port)
@property
def definitions(self) -> Dict[str, GraphOutput]:
"""
Property returns definitions of the output ports by extracting them on the fly from the bound outputs.
..info:
This property actually returns a FrozenDict containing port definitions to indicate that
port definitions SHOULD not be used during the actual binding.
Returns:
Dictionary of neural types associated with bound outputs.
"""
# Get the right output dictionary.
d = self._manual_outputs if len(self._manual_outputs) > 0 else self._default_outputs
# Extract port definitions (Neural Types) and return an immutable dictionary,
# so the user won't be able to modify its content by an accident!
return frozendict({k: v.ntype for k, v in d.items()})
@property
def tensors(self) -> Dict[str, "NmTensor"]:
"""
Property returns output tensors by extracting them on the fly from the bound outputs.
Returns:
Dictionary of tensors in the format (output-name: tensor).
"""
# Get the right output dictionary.
d = self._manual_outputs if len(self._manual_outputs) > 0 else self._default_outputs
output_tensors = {}
        # Get tensors by accessing the producer-ports.
        # At that point all keys (k) are unique - we made sure of that during binding/__setitem__.
for k, v in d.items():
producer_step = v.producer_step_module_port.step_number
producer_port_name = v.producer_step_module_port.port_name
# Find the right output tensor.
tensor = self._tensors_ref[producer_step][producer_port_name]
# Add it to the dictionary.
output_tensors[k] = tensor
# Return the result as an immutable dictionary,
        # so the user won't be able to modify its content by accident!
return frozendict(output_tensors)
@property
def tensor_list(self) -> List["NmTensor"]:
"""
Property returns output tensors by extracting them on the fly from the bound outputs.
Returns:
List of tensors.
"""
# Get the right output dictionary.
d = self._manual_outputs if len(self._manual_outputs) > 0 else self._default_outputs
output_tensor_list = []
        # Get tensors by accessing the producer-ports.
for k, v in d.items():
producer_step = v.producer_step_module_port.step_number
producer_port_name = v.producer_step_module_port.port_name
# Find the right output tensor.
tensor = self._tensors_ref[producer_step][producer_port_name]
# Add it to the list.
output_tensor_list.append(tensor)
# Return the result.
return output_tensor_list
def serialize(self) -> Dict[str, Any]:
""" Method responsible for serialization of the graph outputs.
Returns:
List containing mappings (step.module.output_port -> output | ntype).
"""
serialized_outputs = {"mappings": []}
# Get the right output dictionary.
if len(self._manual_outputs) > 0:
serialized_outputs["type"] = "manual"
d = self._manual_outputs
else:
serialized_outputs["type"] = "default"
d = self._default_outputs
# Iterate through "bindings" (GraphOutputs).
for key, binding in d.items():
# Serialize: step.module.port -> output | ntype.
smp = binding.producer_step_module_port
source = str(smp.step_number) + "." + smp.module_name + "." + smp.port_name
# Get type.
ntype_str = str(binding.ntype)
# Serialize!
serialized_outputs["mappings"].append(source + "->" + key + " | " + ntype_str)
# Return the result.
return serialized_outputs
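    # Hypothetical example of a single serialized mapping entry (module/port names are made up):
    #   "0.encoder.logits->logits | <ntype string>"
    # i.e. step_number.module_name.port_name -> bound key | neural type.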
def deserialize(self, serialized_outputs: Dict[str, Any], modules: Dict[str, 'NeuralModule']):
"""
Method responsible for deserialization of graph outputs.
Args:
serialized_outputs: A list of serialized outputs in the form of ("step.module.output_port->key | ntype")
modules: List of modules required for neural type copying/checking.
"""
# Check type.
if serialized_outputs["type"] == "default":
# We still need to deserialize.
# Use-case: deserialization of a graph with nested graph with bound output.
d = self._default_outputs
else:
d = self._manual_outputs
# Iterate through serialized inputs one by one.
for i in serialized_outputs["mappings"]:
# Deserialize!
[producer, key_ntype] = i.split("->")
[key, ntype_str] = key_ntype.split(" | ")
[step_number, producer_name, producer_port_name] = producer.split(".")
# Get neural type from module output port definition.
ntype = modules[producer_name].output_ports[producer_port_name]
# Make sure the graph bound port type matches the deserialized type.
assert ntype_str == str(ntype)
# Create a new input.
go = GraphOutput(ntype, StepModulePort(int(step_number), producer_name, producer_port_name))
d[key] = go
# Done.
|
py | 1a4c26379ba797b0b90e07fc006a57085d44daf7 | #!/usr/bin/env python
DESC = """
____ _ _ __ __ __ ____ _____
| __ ) ___ __ _ _ _| |_(_)/ _|_ _| \/ | _ \___ /
| _ \ / _ \/ _` | | | | __| | |_| | | | |\/| | |_) ||_ \
| |_) | __/ (_| | |_| | |_| | _| |_| | | | | __/___) |
|____/ \___|\__,_|\__,_|\__|_|_| \__, |_| |_|_| |____/
|___/
______________________________________________________________
| |
| Edit Metadata of MP3 files based on file name |
|____________________________________________________________|
"""
import sys
import shutil
import os
from os import chdir, listdir, rename, walk, path, environ
from os.path import basename, dirname, realpath
import spotipy
import argparse
import configparser
import spotipy.oauth2 as oauth2
import re
from titlecase import titlecase
import requests
from bs4 import BeautifulSoup
import eyed3
import argparse
def setup_config():
'''
read api keys from config.ini file
'''
global CONFIG, GENIUS_KEY, SP_SECRET, SP_ID, config_path
CONFIG = configparser.ConfigParser()
config_path = realpath(__file__).replace(basename(__file__), '')
config_path = config_path + 'config.ini'
CONFIG.read(config_path)
GENIUS_KEY = CONFIG['keys']['genius_key']
SP_SECRET = CONFIG['keys']['spotify_client_secret']
SP_ID = CONFIG['keys']['spotify_client_id']
if GENIUS_KEY == '<insert genius key here>':
print('Warning, you are missing Genius key. Add it using --config\n\n')
if SP_SECRET == '<insert spotify client secret here>':
print('Warning, you are missing Spotify Client Secret. Add it using --config\n\n')
if SP_ID == '<insert spotify client id here>':
print('Warning, you are missing Spotify Client ID. Add it using --config\n\n')
def add_config_keys():
'''
Adds configuration keys in the config.ini file
'''
GENIUS_KEY = CONFIG['keys']['genius_key']
SP_SECRET = CONFIG['keys']['spotify_client_secret']
SP_ID = CONFIG['keys']['spotify_client_id']
if GENIUS_KEY == '<insert genius key here>':
genius_key = input('Enter Genius Client Access token : ')
CONFIG['keys']['genius_key'] = str(genius_key)
if SP_SECRET == '<insert spotify client secret here>':
sp_secret = input('Enter Spotify Secret token : ')
CONFIG['keys']['spotify_client_secret'] = str(sp_secret)
if SP_ID == '<insert spotify client id here>':
sp_id = input('Enter Spotify Client ID : ')
CONFIG['keys']['spotify_client_id'] = str(sp_id)
with open(config_path, 'w') as configfile:
CONFIG.write(configfile)
def improve_song_name(song):
'''
    Removes all unwanted words and numbers from the file name so that the Spotify search results can be improved:
    strip leading numbers, then all punctuation marks, then the words in word_filters, then any redundant whitespace.
'''
audiofile = eyed3.load(song)
tag = audiofile.tag
artist = tag.artist.split(";", 1)[0]
song = artist + ' - ' + tag.title
char_filters = "()[]{}-:_/=!+\"\'"
word_filters = ('lyrics', 'lyric', 'by', 'video', 'official', 'hd', 'dirty', 'with', 'lyrics', 'original', 'mix',
'www', 'com', '.', 'mp3', 'audio', 'full', 'feat', 'version', 'music', 'hq', 'uploaded', 'explicit')
reg_exp = 's/^\d\d //'
song = song.strip()
song = song.lstrip("0123456789.- ")
# re.sub(reg_exp, '', song)
song = song[0:-4]
song = ''.join(
map(lambda c: " " if c in char_filters else c, song))
song = re.sub('|'.join(re.escape(key) for key in word_filters),
"", song, flags=re.IGNORECASE)
song = ' '.join(song.split()).strip()
print(song)
return song
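# Hypothetical illustration (names made up): tags like artist "Some Artist" / title
# "Some Song (Official Video)" are roughly reduced to "Some Artist Some Song"
# before being sent to the Spotify search.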
def get_song_name(title, artist):
'''
return search query for spotify api call
'''
return title + ' - ' + artist
def get_metadata_spotify(spotify, song_name):
'''
call spotify.com api to get the metadata required, as much as possible
'''
print("trying to find data on Spotify...")
metadata = {}
try:
meta_tags = spotify.search(song_name, limit=1)['tracks']['items'][0]
except IndexError:
print("Could not find the song on Spotify")
return []
metadata['title'] = meta_tags['name']
metadata['artist'] = meta_tags['artists'][0]['name']
metadata['album'] = meta_tags['album']['name']
metadata['album_artist'] = meta_tags['album']['artists'][0]['name']
album_id = meta_tags['album']['id']
album_meta_tags = spotify.album(album_id)
metadata['release_date'] = album_meta_tags['release_date']
print(album_meta_tags['genres'])
try:
metadata['genre'] = titlecase(album_meta_tags['genres'][0])
#genre = "; ".join((album_meta_tags['genres']))
#metadata['genre'] = titlecase(genre)
except IndexError:
try:
artist_id = meta_tags['artists'][0]['id']
artist_meta_tags = spotify.artist(artist_id)
#metadata['genre'] = titlecase(artist_meta_tags['genres'][0])
genre = "; ".join((artist_meta_tags['genres']))
metadata['genre'] = titlecase(genre)
except IndexError:
print("song genre could not be found.")
pass
metadata['track_num'] = meta_tags['track_number']
metadata['disc_num'] = meta_tags['disc_number']
print()
return metadata
def list_files():
'''
list all files in current directory with extension .mp3
'''
    return [f for f in listdir('.') if f.endswith('.mp3')]
def set_metadata(file_name, metadata):
'''
call eyed3 module to set mp3 song metadata as received from spotify
'''
print("setting metadata for " + file_name)
print()
audiofile = eyed3.load(file_name)
tag = audiofile.tag
if 'genre' in metadata:
#tag.genre = metadata['genre']
#tag.comments.set = metadata['genre']
tag.comments.set(metadata['genre'])
tag.save(version=(2, 3, 0))
#tag.save()
# if not norename:
# song_title = rename_format.format(
# title=metadata['title'] + ' -',
# artist=metadata['artist'] + ' -',
# album=metadata['album'] + ' -')
# song_title = song_title[:-1] if song_title.endswith('-') else song_title
# song_title = ' '.join(song_title.split()).strip()
# print("renaming " + file_name + "to " + song_title)
# new_path = path.dirname(file_name) + '{}.mp3'.format(song_title)
# rename(file_name, new_path)
print()
return
def fix_music_file(spotify, file_name, norename, rename_format):
print("------------------------------------------------------------------------")
print()
print()
print("Currently processing " + file_name)
metadata = get_metadata_spotify(spotify, improve_song_name(file_name))
if not metadata:
is_improvemet_needed = True
return is_improvemet_needed
else:
set_metadata(file_name, metadata)
is_improvemet_needed = False
rename_file = rename_to_format(
file_name, norename, rename_format, metadata)
shutil.move(rename_file, 'Organized')
return is_improvemet_needed
def rename_to_format(file_name, norename, rename_format, metadata):
# if not norename:
# song_title = rename_format.format(
# title=metadata['title'] + ' -',
# artist=metadata['artist'] + ' -',
# album=metadata['album'] + ' -')
# song_title = song_title[:-1] if song_title.endswith('-') else song_title
# song_title = ' '.join(song_title.split()).strip()
song_title = file_name
print("renaming " + file_name + "to " + song_title)
new_path = path.dirname(file_name) + '{}.mp3'.format(song_title)
rename(file_name, new_path)
return new_path
def fix_music_files(spotify, files, norename, rename_format):
need_to_improve = []
for file_name in files:
response = fix_music_file(spotify, file_name, norename, rename_format)
if response is True:
need_to_improve.append(file_name)
("------------------------------------------------------------------------")
print()
print()
return need_to_improve
def main():
'''
    Deals with arguments and calls other functions
'''
setup_config()
parser = argparse.ArgumentParser(
description="{}".format(DESC), formatter_class=argparse.RawDescriptionHelpFormatter
)
# group = parser.add_mutually_exclusive_group(required=True)
parser.add_argument('-d', '--dir', action="store", dest='repair_directory',
help='give path of music files\' directory', default=os.getcwd())
parser.add_argument('-s', '--song', action='store', dest='song_name',
help='Only fix metadata of the file specified', default=None)
parser.add_argument('-c', '--config', action='store_true', dest='config',
help="Add API Keys to config\n\n")
parser.add_argument('-n', '--norename', action='store_true',
help='Does not rename files to song title\n\n')
parser.add_argument('-f', '--format', action='store', dest='rename_format', help='''Specify the Name format used in renaming,
Valid Keywords are:
{title}{artist}{album}\n\n)''')
args = parser.parse_args()
repair_directory = args.repair_directory or '.'
song_name = args.song_name or None
norename = args.norename or False
rename_format = args.rename_format or '{title}'
config = args.config
if config:
add_config_keys()
auth = oauth2.SpotifyClientCredentials(
client_id="622a0e16a4914e3eadc2a37b4a134f1e", client_secret="6fe008a8b7754954a58a9849fa3172df")
token = auth.get_access_token()
spotify = spotipy.Spotify(auth=token)
files = []
if song_name is not None:
need_to_improve = fix_music_file(
spotify, song_name, norename, rename_format)
if need_to_improve is True:
print(song_name)
elif repair_directory:
chdir(repair_directory or '.')
if not os.path.exists("Organized"):
os.makedirs("Organized")
files = list_files()
need_to_improve = fix_music_files(
spotify, files, norename, rename_format)
print(need_to_improve)
if __name__ == "__main__":
main()
|
py | 1a4c26f8d530003c066a4961eceda8fde5b9e730 | #!/usr/bin/python
import urllib, inflect, string, json, sys, Algorithmia
# tests
# python n7.py '{"h2t":"http://slashdot.org", "auth":"API_KEY"}'
# python n7.py '{"url":"http://derstandard.at"}'
# python n7.py '{"text":"life is a miracle"}'
# initialize
p = inflect.engine()
text = ""
offset = 7
start_line = -1
end_line = -1
new_text = []
new_line = []
table = string.maketrans("", "")
dict_url = "https://raw.githubusercontent.com/fabianekc/n7/master/nounlist.txt"
# parse input; sample URL: 'http://www.gutenberg.org/cache/epub/97/pg97.txt'
input = json.loads(str(sys.argv[1]))
if 'url' in input:
text = urllib.urlopen(input['url']).read()
elif 'h2t' in input:
if 'auth' in input:
client = Algorithmia.client(input['auth'])
text = client.algo('util/Html2Text/0.1.3').pipe(input['h2t'])
else:
print("Error: provide authentication when using the html2text preprocessing from Algorithmia")
sys.exit()
elif 'text' in input:
text = input['text']
else:
text = urllib.urlopen(input).read()
if 'offset' in input:
offset = input['offset']
if 'dict' in input:
dict_url = input['dict']
if 'start' in input:
start_line = input['start']
if 'end' in input:
end_line = input['end']
if text == "":
print("Error: no input text provided")
sys.exit()
if isinstance(text, str):
text = text.decode('utf-8')
text = text.encode('ascii', 'replace')
text_split = text.split('\n')
if end_line > -1:
text_split = text_split[0:end_line]
if start_line > -1:
text_split = text_split[start_line:]
dict = urllib.urlopen(dict_url).read().split()
ld = len(dict)
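# N+7 (here N+offset) substitution: every noun found in the noun list is replaced by the noun
# `offset` entries further down the list, wrapping around via the modulo by ld.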
# iterate over text
for line in text_split:
for word in line.split():
# when replacing words we need to take care for
# - punc: punctuation
# - sipl: singular / plural
# - new vs final: uppercase / capitalize / lowercase
punc = word.translate(table, string.punctuation)
sipl = p.singular_noun(punc)
if sipl:
new = sipl
else:
new = punc
if (new.lower() in dict):
if punc == word:
if sipl:
final = p.plural(dict[(dict.index(new.lower())+offset)%ld])
else:
                    final = dict[(dict.index(new.lower())+offset)%ld]
else:
if sipl:
final = word.replace(punc, p.plural(dict[(dict.index(new.lower())+offset)%ld]))
else:
final = word.replace(punc, dict[(dict.index(new.lower())+offset)%ld])
if new.lower() != new:
if new.upper() == new:
final = final.upper()
else:
final = final.capitalize()
else:
final = word
new_line.append(final)
new_text.append(" ".join(new_line))
new_line = []
print "\n".join(new_text)
|
py | 1a4c27fb09cb0987b7fdb9b9752a6372e2ae7630 | #!python
import string
# Hint: Use these string constants to ignore capitalization and/or punctuation
# string.ascii_lowercase is 'abcdefghijklmnopqrstuvwxyz'
# string.ascii_uppercase is 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# string.ascii_letters is ascii_lowercase + ascii_uppercase
chars = list(string.ascii_lowercase)
def is_palindrome(text):
"""A string of characters is a palindrome if it reads the same forwards and
backwards, ignoring punctuation, whitespace, and letter casing."""
# implement is_palindrome_iterative and is_palindrome_recursive below, then
# change this to call your implementation to verify it passes all tests
assert isinstance(text, str), 'input is not a string: {}'.format(text)
return is_palindrome_recursive(text)
# return is_palindrome_recursive(text)
def is_palindrome_iterative(text):
# TODO: implement the is_palindrome function iteratively here
if len(text) < 1:
return True
pal = text.lower()
left = 0
right = len(text) - 1
is_pal = True
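    # Two-pointer scan: move `left` forward and `right` backward past non-letters,
    # compare the letters at both ends, and stop once the pointers meet or cross.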
while is_pal: # worst case O(n) when the string is a palindrome or if the string has no aplha chars
while pal[left] not in chars: # O(n) in case where the string has no alpha chars
left += 1
if left >= right:
return True
while pal[right] not in chars: # O(n) in case where the string has no alpha chars
right -= 1
if right <= left:
return True
if left >= right:
return True
if pal[left] != pal[right]:
return False
left += 1
right -= 1
# once implemented, change is_palindrome to call is_palindrome_iterative
# to verify that your iterative implementation passes all tests
def is_palindrome_recursive(text, left=None, right=None):# worst case O(n) when the string is a palindrome or if the string has no aplha chars
# TODO: implement the is_palindrome function recursively here
if len(text) < 1:
return True
if left is None:
left = 0
right = len(text) - 1
if left >= right:
return True
pal = text.lower()
if pal[left] not in chars:# O(n) in case where the string has no alpha chars
return is_palindrome_recursive(text, left+1, right)
    if pal[right] not in chars:# O(n) in case where the string has no alpha chars
        return is_palindrome_recursive(text, left, right-1)
if pal[left] != pal[right]:
return False
return is_palindrome_recursive(text, left+1, right-1)
# once implemented, change is_palindrome to call is_palindrome_recursive
# to verify that your recursive implementation passes all tests
def main():
import sys
args = sys.argv[1:] # Ignore script file name
if len(args) > 0:
for arg in args:
is_pal = is_palindrome(arg)
result = 'PASS' if is_pal else 'FAIL'
is_str = 'is' if is_pal else 'is not'
print('{}: {} {} a palindrome'.format(result, repr(arg), is_str))
else:
print('Usage: {} string1 string2 ... stringN'.format(sys.argv[0]))
print(' checks if each argument given is a palindrome')
if __name__ == '__main__':
main()
# print(is_palindrome_recursive("it was a cat I saw?"))
|
py | 1a4c288fcf89efa19779af6a23c792de08629f23 | # Copyright 2010-2012 the SGC project developers.
# See the LICENSE file at the top-level directory of this distribution
# and at http://program.sambull.org/sgc/license.html.
import warnings
import pygame
from pygame.locals import *
from .._locals import *
class SelectableText:
_text = ""
_text_offset = _text_pos = 0
__blink = True
_blink_time = 0
_chars = ((0,0),)
_repeat_key = None
_repeat_time = 0
_select = None # Starting point of selection
__cursor_pos = 0
@property
def _blink(self):
"""Always return False when a selection is made."""
return self.__blink and not bool(self._select)
@_blink.setter
def _blink(self, value):
self.__blink = value
def _select_fix(self):
"""
Returns the selection area corrected, so if the selection is
right-to-left it returns the positions reversed.
"""
if self._select > self._cursor_pos:
return (self._cursor_pos, self._select)
else:
return (self._select, self._cursor_pos)
def _calc_chars(self):
"""
Calculates the position and size of each character.
Stores the results in self._chars as a tuple of (pos, width) tuples.
"""
p = self._settings["font"].size(self._text[0])[0]
chars = [(0,p)]
for c in range(len(self._text)):
w = self._settings["font"].size(self._text[:c+2])[0]
xmax, advance = self._settings["font"].metrics(
self._text[c])[0][1::3]
if xmax > advance: # Adjust for overhang
chars.append((p - (xmax - advance), w - p))
else:
chars.append((p, w - p))
p = w
self._chars = tuple(chars)
def _mouse_cursor(self, mouse_pos):
"""Return the text cursor position of the mouse."""
pos = mouse_pos[0] - self.rect_abs.x - self._text_pos
for index, (p,w) in enumerate(self._chars):
if pos <= p + w/2:
break
return index
def _update_select_text(self, time):
"""
Update text stuff for selectable text.
Should be called from widget's update() method.
"""
# Repeat key if held down
if self._repeat_key:
self._repeat_time += time
while self._repeat_time > self._settings["repeat_begin"]:
self._repeat_time -= self._settings["repeat_interval"]
self._event(self._repeat_key)
def _event_select_text(self, event):
"""
Handles events for selectable text.
Call from widget's _event() method.
"""
if event.type == MOUSEBUTTONDOWN and event.button == 1:
# Begin drawing selection
if pygame.key.get_mods() & KMOD_SHIFT and self._select is None:
self._select = self._cursor_pos
self._cursor_pos = self._mouse_cursor(event.pos)
if not pygame.key.get_mods() & KMOD_SHIFT:
self._select = self._cursor_pos
elif event.type == MOUSEMOTION and event.buttons[0]:
# Continue drawing selection while mouse held down
self._cursor_pos = self._mouse_cursor(event.pos)
elif event.type == MOUSEBUTTONUP:
# Set cursor position with mouse click
self._cursor_pos = self._mouse_cursor(event.pos)
if self._select == self._cursor_pos:
self._select = None
elif event.type == KEYDOWN:
# Save last key press for repeat
if self._repeat_key != event:
self._repeat_key = event
self._repeat_time = 0
if event.key == K_ESCAPE:
self._select = None
elif event.key == K_LEFT:
if not event.mod & KMOD_SHIFT:
self._select = None # Break selection
elif self._select is None:
# Reset selection if not selecting
self._select = self._cursor_pos
self._cursor_pos -= 1
# Remove selection when cursor is at same position
if self._select == self._cursor_pos:
self._select = None
elif event.key == K_RIGHT:
if not event.mod & KMOD_SHIFT:
self._select = None # Break selection
elif self._select is None:
self._select = self._cursor_pos
self._cursor_pos += 1
if self._select == self._cursor_pos:
self._select = None
elif event.key == K_HOME:
if not event.mod & KMOD_SHIFT:
self._select = None
elif self._select is None:
self._select = self._cursor_pos
self._cursor_pos = 0
if self._select == self._cursor_pos:
self._select = None
elif event.key == K_END:
if not event.mod & KMOD_SHIFT:
self._select = None
elif self._select is None:
self._select = self._cursor_pos
self._cursor_pos = len(self._text)
if self._select == self._cursor_pos:
self._select = None
'''elif event.mod & KMOD_CTRL:
if event.key == K_a: # Select all
self._select = 0
self._cursor_pos = len(self._text)
elif event.key == K_c and self._select is not None: # Copy
select = self._select_fix()
string = "".join(self._text[select[0]:select[1]])
try:
pygame.scrap.put(SCRAP_TEXT, string)
except pygame.error:
warnings.warn("Please run 'pygame.scrap.init()'"
" to use the clipboard.", RuntimeWarning)'''
elif event.type == KEYUP:
if self._repeat_key and self._repeat_key.key == event.key:
self._repeat_key = None # Stop repeat
def _update_modify_text(self, time):
"""
Update text stuff for editable text (e.g. input box).
Should be called from widget's update() method.
"""
# If enough time has passed, blink cursor
self._blink_time += time
if self._blink_time > self._settings["blink_interval"]:
self._blink_time -= self._settings["blink_interval"]
self._blink = not self._blink
def _event_modify_text(self, event):
"""
Handles events for editable text (e.g. input box).
Should be called from widget's _event() method.
Will typically be used alongside `_event_select_text()`.
"""
if event.type == KEYDOWN:
# Reset cursor blink when typing
self._blink_time = 0
self._blink = True
if event.key in (9,K_RETURN,K_ESCAPE,K_KP_ENTER): # Keys to ignore
return
if event.key == K_BACKSPACE:
if self._select is not None:
self._delete_selection()
elif self._cursor_pos > 0:
self._cursor_pos -= 1
self._text.pop(self._cursor_pos)
self._calc_chars()
elif event.key == K_DELETE:
if self._select is not None:
self._delete_selection()
elif self._cursor_pos < len(self._text):
self._text.pop(self._cursor_pos)
self._calc_chars()
elif event.unicode:
if event.mod & KMOD_CTRL:
if event.key == K_v: # Paste
text = None#pygame.scrap.get(SCRAP_TEXT)
'''if text:
if self._select is not None:
sel = self._select_fix()
self._select = None
else:
sel = (self._cursor_pos, self._cursor_pos)
# Get list of text to insert into input_text
text = [unicode(char) for char in text]
self._text[sel[0]:sel[1]] = text
self._calc_chars()
self._cursor_pos = sel[0] + len(text)'''
elif event.key == K_x and self._select is not None: # Cut
select = None#self._select_fix()
'''string = "".join(self._text[select[0]:select[1]])
try:
pygame.scrap.put(SCRAP_TEXT, string)
except pygame.error:
warnings.warn("Please run 'pygame.scrap.init()'"
" to use the clipboard",
RuntimeWarning)
self._delete_selection()'''
else:
# Delete selection
if self._select is not None:
self._delete_selection()
# Insert new character
if len(self._text) < self._settings["max_chars"]:
self._text.insert(self._cursor_pos, event.unicode)
self._calc_chars()
self._cursor_pos += 1
def _delete_selection(self):
"""Delete the current selection of text."""
select = self._select_fix()
del self._text[select[0]:select[1]]
self._select = None
self._cursor_pos = select[0]
self._calc_chars()
def _draw_selection(self, image, y, height):
"""Draw selection onto image. Does nothing if no selection."""
if self._select is None:
return
select = self._select_fix()
# Semi-transparent selection rectangle
w = self._chars[select[1]][0] - self._chars[select[0]][0]
x = self._chars[select[0]][0] + self._text_pos - 1
r = Rect((x,y), (w+2,height))
selection = pygame.surface.Surface(r.size, flags=SRCALPHA)
selection.fill(self._settings["col_selection"] + (100,))
image.blit(selection, r)
# Border around selection rectangle
pygame.draw.rect(image, self._settings["col_selection"], r, 1)
@property
def _cursor_pos(self):
"""
The cursor position in characters. Will ensure cursor is always in
valid location when set.
"""
return self.__cursor_pos
@_cursor_pos.setter
def _cursor_pos(self, value):
# Keep cursor position within text
self.__cursor_pos = min(max(value, 0), len(self._text))
# Ensure text is visible when less than full width
if self._chars[-1][0] < self.rect.w - self._text_offset:
self._text_pos = self._text_offset
else:
# Scroll text in input box when it's too long
pos = self._chars[self._cursor_pos][0]
if pos > (self.rect.w - self._text_pos):
self._text_pos = -(pos - self.rect.w + self._text_offset)
elif pos < (self._text_offset - self._text_pos):
self._text_pos = self._text_offset - pos
# Ensure no unnecessary space is left at right-edge
right_edge = self._chars[-1][0] - self.rect.w + self._text_offset
if right_edge > 0:
self._text_pos = max(-right_edge, self._text_pos)
|
py | 1a4c2adbc69f8b23eebe02cfd97b91d95be447c9 | #!/usr/bin/env python3
# ==============================================================================
# Copyright 2019 - Philip Paquette
#
# NOTICE: Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# ==============================================================================
""" Renders a same tournament game
Argument: File path to .json in history folder
"""
import argparse
import os
import multiprocessing
import shutil
from diplomacy import Game
import ujson as json
from diplomacy_research.proto.diplomacy_proto.game_pb2 import SavedGame as SavedGameProto
from diplomacy_research.utils.proto import proto_to_dict, read_next_proto
def render_saved_game(saved_game, output_dir, prefix=''):
""" Renders a specific saved game
:param saved_game: The saved game to render
:param output_dir: The output directory where to save the rendering
:param prefix: An optional prefix to add before the game id
"""
if prefix:
output_dir = os.path.join(output_dir, prefix + '_' + saved_game['id'])
else:
output_dir = os.path.join(output_dir, saved_game['id'])
nb_phases = len(saved_game['phases'])
svg_count = 0
# Checking if already generated
# Otherwise, regenerating completely
if os.path.exists(output_dir):
nb_svg = len([os.path.join(output_dir, file) for file in os.listdir(output_dir) if file[-4:] == '.svg'])
if nb_svg == 2 * nb_phases:
print('Rendered {} (Skipped)'.format(saved_game['id']))
return
shutil.rmtree(output_dir, ignore_errors=True)
os.makedirs(output_dir, exist_ok=True)
# Creating a Game to replay all orders, and a new Game object per phase to validate
entire_game = Game()
if saved_game['phases']:
entire_game.set_state(saved_game['phases'][0]['state'])
# Rendering
for phase in saved_game['phases']:
phase_game = Game()
# Setting state
state = phase['state']
phase_game.set_state(state)
entire_game.note = phase_game.note
# Setting orders
phase_game.clear_orders()
orders = phase['orders']
for power_name in orders:
phase_game.set_orders(power_name, orders[power_name])
entire_game.set_orders(power_name, orders[power_name])
# Validating that we are at the same place
for power_name in orders:
assert sorted(phase_game.get_units(power_name)) == sorted(entire_game.get_units(power_name))
assert sorted(phase_game.get_centers(power_name)) == sorted(entire_game.get_centers(power_name))
# Rendering with and without orders
with open(os.path.join(output_dir, '%03d%s' % (svg_count, '.svg')), 'w') as file:
file.write(entire_game.render(incl_orders=False))
svg_count += 1
with open(os.path.join(output_dir, '%03d%s' % (svg_count, '.svg')), 'w') as file:
file.write(entire_game.render(incl_orders=True))
# Processing (for entire game)
svg_count += 1
entire_game.process()
print('Rendered {}'.format(saved_game['id']))
# =========================================
# ------- JSON RENDERING ----------
# =========================================
def render_json(file_path):
""" Renders a specific json file
:param file_path: The full path to the json file
:return: Nothing, but creates a directory (file_path without '.json') containing the rendered images
"""
dir_path = os.path.dirname(file_path)
# Aborting if file doesn't exist
if not os.path.exists(file_path):
print('File {} does not exist.'.format(file_path))
return
# Loading saved game
file_content = open(file_path, 'r').read()
saved_game = json.loads(file_content)
# Rendering
render_saved_game(saved_game, dir_path)
def render_multi_json_per_folder(history_dir, nb_json_per_folder):
""" Finds all subfolders under history and renders 'nb_jsons' games in each subfolder found
:param history_dir: The full path to the history folder
:param nb_json_per_folder: The number of jsons to render per subfolder
:return: Nothing
"""
jsons_to_render = []
# Finding files to render
subfolders = [os.path.join(history_dir, path)
for path in os.listdir(history_dir)
if os.path.isdir(os.path.join(history_dir, path))]
for folder in subfolders:
json_games = sorted([os.path.join(folder, json_filename)
for json_filename in os.listdir(folder)
if json_filename[-5:] == '.json'])
json_games = json_games[:nb_json_per_folder]
for json_path in json_games:
jsons_to_render += [json_path]
# Running over multiple processes
nb_cores = multiprocessing.cpu_count()
with multiprocessing.Pool(nb_cores) as pool:
pool.map(render_json, jsons_to_render)
# =========================================
# ------- PROTO RENDERING ----------
# =========================================
def render_saved_game_proto(saved_game_proto, output_dir, prefix='', json_only=False):
""" Renders a saved game proto
:param saved_game_proto: A `.proto.game.SavedGame` object
:param output_dir: The output directory where the save the renderings
:param prefix: An optional prefix to add before the game id
:param json_only: Indicates we only want to extract the underlying JSON
"""
saved_game = proto_to_dict(saved_game_proto)
if json_only:
os.makedirs(os.path.join(output_dir, 'json'), exist_ok=True)
output_path = os.path.join(output_dir, 'json', prefix + '_' + saved_game['id'] + '.json')
with open(output_path, 'w') as file:
file.write(json.dumps(saved_game))
print('Saved JSON for {}'.format(saved_game['id']))
else:
render_saved_game(saved_game, output_dir, prefix)
def render_proto_file(file_path, args, compressed=True):
""" Renders all saved game proto in a proto file
:param file_path: The path to the proto file
:param args: The parsed command line arguments
:param compressed: Boolean that indicates if compression was used.
"""
dir_path = os.path.dirname(file_path)
game_count = 0
# Aborting if file doesn't exist
if not os.path.exists(file_path):
print('File {} does not exist.'.format(file_path))
return
# Processing filter
games_to_render = []
if args.filter:
for part in args.filter.split(','):
if '-' in part:
start, stop = part.split('-')
games_to_render += list(range(int(start), int(stop) + 1))
elif ':' in part:
start, stop, step = part.split(':')
games_to_render += list(range(int(start), int(stop) + 1, int(step)))
else:
games_to_render += [int(part)]
# Rendering each game in the proto file
with open(file_path, 'rb') as file:
while True:
saved_game_proto = read_next_proto(SavedGameProto, file, compressed)
if saved_game_proto is None:
break
game_count += 1
if game_count in games_to_render or (not games_to_render and not args.count):
print('(Game #%d) ' % game_count, end='')
render_saved_game_proto(saved_game_proto, dir_path, prefix='%05d' % game_count, json_only=args.json)
if game_count % 100 == 0 and args.count:
print('... %d games found so far.' % game_count)
# Printing the number of games in the proto file
if args.count:
print('Found %d games in the proto file.' % game_count)
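# Illustrative sketch (not used by this script): the --filter grammar above accepts
# comma-separated single indices, dash ranges and colon ranges with a step.  The
# helper below mirrors the inline parsing so the grammar can be tested in isolation;
# "_parse_filter" is a hypothetical name, not part of the original module.
def _parse_filter(spec):
    """ e.g. _parse_filter('1-3,6,10:16:2') == [1, 2, 3, 6, 10, 12, 14, 16] """
    games = []
    for part in spec.split(','):
        if '-' in part:
            start, stop = part.split('-')
            games += list(range(int(start), int(stop) + 1))
        elif ':' in part:
            start, stop, step = part.split(':')
            games += list(range(int(start), int(stop) + 1, int(step)))
        else:
            games += [int(part)]
    return games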
# =========================================
if __name__ == '__main__':
PARSER = argparse.ArgumentParser(description='Render some saved games.')
PARSER.add_argument('--count', action='store_true', help='Count the number of games in the file')
PARSER.add_argument('--json', action='store_true', help='Only extract jsons without rendering the games')
PARSER.add_argument('--filter', help='Only render some games e.g. 1-5,6,8,10:100:2')
PARSER.add_argument('--nb_per_folder', type=int, default=0, help='The number of games per folder to generate')
PARSER.add_argument('file_path', help='The file path containing the saved games.')
ARGS = PARSER.parse_args()
# Rendering a single JSON
# Syntax: render.py <json path>
if ARGS.file_path[-5:] == '.json':
render_json(ARGS.file_path)
exit(0)
# Render a series of game in a .pb file
# Syntax: render.py <pb path>
if ARGS.file_path[-3:] == '.pb':
render_proto_file(ARGS.file_path, ARGS, compressed=False)
exit(0)
if ARGS.file_path[-4:] == '.pbz':
render_proto_file(ARGS.file_path, ARGS, compressed=True)
exit(0)
# Rendering a certain number of JSON per folder
# Syntax: render.py <history/> --nb_per_folder <# of json per folder to generate>
if os.path.exists(ARGS.file_path) and ARGS.nb_per_folder:
render_multi_json_per_folder(ARGS.file_path, ARGS.nb_per_folder)
exit(0)
# Invalid syntax
PARSER.print_help()
exit(-1)
|
py | 1a4c2cb44ae5710879e3c8cddb1dbb5d0ebd0e86 | """
Copyright (c) IBM 2015-2017. All Rights Reserved.
Project name: c4-high-availability
This project is licensed under the MIT License, see LICENSE
"""
import sys
from setuptools import setup, find_packages
import versioneer
needs_pytest = {"pytest", "test", "ptr", "coverage"}.intersection(sys.argv)
pytest_runner = ["pytest-runner"] if needs_pytest else []
setup(
name = "c4-high-availability",
version = versioneer.get_version(),
cmdclass = versioneer.get_cmdclass(),
packages = find_packages(),
install_requires = ["c4-systemmanager"],
setup_requires = [] + pytest_runner,
tests_require = ["pytest", "pytest-cov"],
author = "IBM",
author_email = "",
description = "This is a high availability implementation based on device managers",
license = "MIT",
keywords = "python c4 ha",
url = "",
)
|
py | 1a4c2ee39c7f828825a601d257dec7db72ebd15d | # Generated by Django 3.1.5 on 2021-01-19 14:38
import django.contrib.auth.models
import django.contrib.auth.validators
import django.utils.timezone
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
("auth", "0012_alter_user_first_name_max_length"),
]
operations = [
migrations.CreateModel(
name="User",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"password",
models.CharField(max_length=128, verbose_name="password"),
),
(
"last_login",
models.DateTimeField(
blank=True, null=True, verbose_name="last login"
),
),
(
"is_superuser",
models.BooleanField(
default=False,
help_text="Designates that this user has all permissions without explicitly assigning them.",
verbose_name="superuser status",
),
),
(
"username",
models.CharField(
error_messages={
"unique": "A user with that username already exists."
},
help_text="Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.",
max_length=150,
unique=True,
validators=[
django.contrib.auth.validators.UnicodeUsernameValidator()
],
verbose_name="username",
),
),
(
"first_name",
models.CharField(
blank=True, max_length=150, verbose_name="first name"
),
),
(
"last_name",
models.CharField(
blank=True, max_length=150, verbose_name="last name"
),
),
(
"email",
models.EmailField(
blank=True,
max_length=254,
verbose_name="email address",
),
),
(
"is_staff",
models.BooleanField(
default=False,
help_text="Designates whether the user can log into this admin site.",
verbose_name="staff status",
),
),
(
"is_active",
models.BooleanField(
default=True,
help_text="Designates whether this user should be treated as active. Unselect this instead of deleting accounts.",
verbose_name="active",
),
),
(
"date_joined",
models.DateTimeField(
default=django.utils.timezone.now,
verbose_name="date joined",
),
),
(
"groups",
models.ManyToManyField(
blank=True,
help_text="The groups this user belongs to. A user will get all permissions granted to each of their groups.",
related_name="user_set",
related_query_name="user",
to="auth.Group",
verbose_name="groups",
),
),
(
"user_permissions",
models.ManyToManyField(
blank=True,
help_text="Specific permissions for this user.",
related_name="user_set",
related_query_name="user",
to="auth.Permission",
verbose_name="user permissions",
),
),
],
options={
"verbose_name": "user",
"verbose_name_plural": "users",
"abstract": False,
},
managers=[
("objects", django.contrib.auth.models.UserManager()),
],
),
]
|
py | 1a4c2f91771956d8784200844ef461040205d9cf | from __future__ import absolute_import
from __future__ import division
from multiprocessing import cpu_count, Pool
import time, signal
import numpy as np
import random, math
from .decision_tree import DecisionTree
from .util import iterate_with_progress, normalize_values
#################################
# Multi-process funcs & klasses #
#################################
class KeyboardInterruptError(Exception): pass
def prune_tree(args):
try:
tree, data, labels = args
tree.prune(data, labels)
return tree
except KeyboardInterrupt:
raise KeyboardInterruptError()
class AdaBoost:
def __init__(self, impurity, segmentor, **kwargs):
self._impurity = impurity
self._segmentor = segmentor
self._num_trees = kwargs.get('num_trees', 10)
assert self._num_trees > 0
self._max_depth = kwargs.get('max_depth', None)
self._min_samples = kwargs.get('min_samples', 2)
self._trees = []
self._alphas = []
def train(self, data, labels):
assert len(data) == len(labels)
self._klasses = np.unique(labels)
distributions = normalize_values({i:1 for i in xrange(len(data))})
for _ in iterate_with_progress(xrange(self._num_trees)):
sampled_data, sampled_labels = self._sample_data_labels(data, labels, distributions)
tree = DecisionTree(self._impurity,
self._segmentor,
max_depth=self._max_depth,
min_samples=self._min_samples)
tree.train(sampled_data, sampled_labels)
predictions = tree.predict(data)
error = sum([distributions[i] for i in np.nonzero(predictions != labels)[0]])
alpha = float('inf') if error == 0.0 else 0.5 * math.log((1.0 - error)/error)
self._trees.append(tree)
self._alphas.append(alpha)
for i in xrange(len(data)):
weight = alpha if predictions[i] != labels[i] else -alpha
distributions[i] *= math.e ** weight
distributions = normalize_values(distributions)
self._alphas = np.array(self._alphas)
def predict(self, data):
if not self._trees:
raise StandardError("AdaBoost has not been trained.")
def weight(results):
results[np.nonzero(results == 0)[0]] = -1
strong_result = np.dot(self._alphas, results)
return int(strong_result >= 0)
tree_results = np.array([tree.predict(data) for tree in self._trees])
return np.apply_along_axis(weight, 0, tree_results)
def score(self, data, labels):
if not self._trees:
raise StandardError("AdaBoost has not been trained.")
predictions = self.predict(data)
correct_count = np.count_nonzero(predictions == labels)
return correct_count / len(labels)
def prune(self, data, labels):
args_list = []
for tree in self._trees:
args_list.append([tree, data, labels])
num_processes = cpu_count()
print 'Prune in parallel with {0} processes.'.format(num_processes)
pool = Pool(num_processes)
try:
start = time.time()
self._trees = pool.map(prune_tree, args_list)
print 'Pruning takes {0} seconds.'.format(int(time.time() - start))
pool.close()
return self.score(data, labels)
except KeyboardInterrupt:
pool.terminate()
except Exception, e:
pool.terminate()
finally:
pool.join()
def _sample_data_labels(self, data, labels, distributions):
if not sum(distributions.itervalues()) == 1.0:
distributions = normalize_values(distributions)
random.seed()
n, m = data.shape
sampled_data = np.empty((0,data.shape[1]), dtype=data.dtype)
sampled_labels = np.empty((0,), dtype=labels.dtype)
draws = sorted([random.random() for _ in xrange(n)])
sample_i, data_i, cdf = 0, 0, distributions[0]
while sample_i < n:
if draws[sample_i] < cdf:
sample_i += 1
sampled_data = np.append(sampled_data, data[data_i].reshape(1,m), axis=0)
sampled_labels = np.append(sampled_labels, labels[data_i])
else:
data_i += 1
cdf += distributions[data_i]
return sampled_data, sampled_labels
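# Minimal usage sketch (illustrative only): "impurity" and "segmentor" stand in for
# whatever splitting-criterion objects DecisionTree expects in this code base; the
# names below are hypothetical placeholders, not real identifiers from this project.
#
#   booster = AdaBoost(impurity, segmentor, num_trees=25, max_depth=4)
#   booster.train(train_data, train_labels)         # numpy arrays, binary labels
#   print booster.score(test_data, test_labels)     # fraction classified correctly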
|
py | 1a4c3082fc4ba0dd3cc29649df1a7e44a4ff464f | import diceroller
class HydDetailGenerator:
def __init__(self, planet, seed, orbit, star, orbit_zone):
self.planet = planet
self.dice = diceroller(seed)
self.orbit = orbit # just the number
self.orbit_zone = orbit_zone
self.star = star
self.percentage = None
self.composition = None
self.tectonic_plates = None
self.terraforming = None
self.continents = None
self.oceans = None
self.volcanoes = None
self.weather_control = None
def generate_percentage(self):
percent_roll = self.dice.roll2d6() - 7
percent = self.planet.uwp[3] * 10 + percent_roll
self.percentage = percent
return
def generate_compositon(self):
        if self.planet.uwp[2] in (3, 5, 6, 8):
self.composition = "Water"
return
        elif self.planet.uwp[2] in (2, 4, 7, 9):
self.composition = "Tainted Water"
return
elif self.planet.uwp[2] == 10:
if self.dice.roll2d6() > 9:
self.composition = "Tainted Liquid Water"
return
else:
self.composition = "Atmosphere Related Chemical Mix"
return
        elif self.planet.uwp[2] in (11, 12):
self.composition = "Atmosphere Related Chemical Mix"
return
else:
self.composition = "Special Case (needs further study)"
return
def generate_tectonic(self):
base = self.planet.uwp[1] + self.planet.uwp[2]
roll = self.dice.roll2d6()
self.tectonic_plates = base - roll
return
def generate_terraforming(self):
terra_score = 0
hydro_score = 0
        if self.planet.uwp[1] in (1, 2):
            hydro_score += 2
            terra_score += 4
        elif self.planet.uwp[1] in (3, 4):
            hydro_score += 1
            terra_score += 3
        elif self.planet.uwp[1] in (5, 6):
            terra_score += 2
        elif self.planet.uwp[1] in (7, 8):
            hydro_score -= 1
            terra_score += 1
        elif self.planet.uwp[1] >= 9:
hydro_score -= 2
if self.planet.uwp[2] == 0:
hydro_score -= 4
terra_score -= 4
if self.planet.uwp[3] == 0:
terra_score += 1
elif 1 <= self.planet.uwp[3] <= 9:
hydro_score += 1
if self.planet.uwp[4] < 5:
hydro_score -= 2
terra_score -= 2
elif self.planet.uwp[4] > 7:
hydro_score += 2
terra_score += 2
if self.planet.uwp[7] < 5:
hydro_score -= 10
terra_score -= 10
elif 5 <= self.planet.uwp[7] <= 8:
hydro_score += 1
terra_score += 1
elif 9 <= self.planet.uwp[7] <= 11:
hydro_score += 2
terra_score += 3
elif 12 <= self.planet.uwp[7]:
hydro_score += 3
terra_score += 4
if not self.planet.atmo_details.native_life:
hydro_score += 3
terra_score += 3
hydro_roll = self.dice.roll2d6()
if hydro_score > hydro_roll:
hydro_terra = True
else:
hydro_terra = False
terra_roll = self.dice.roll2d6()
if terra_score > terra_roll:
terra_terra = True
else:
terra_terra = False
self.terraforming = [hydro_terra, terra_terra]
if hydro_terra:
modification = self.dice.roll1d3() + self.dice.roll1d3()
            if self.planet.uwp[3] < 5:
self.percentage += modification
else:
self.percentage -= modification
return
def generate_continents(self):
continent_lol = [
            [self.dice.roll2d6() + 1, self.dice.roll1d6() - 1, self.dice.rollnd6(3) - 3, self.dice.roll2d6()],
            [self.dice.roll2d6() + 1, self.dice.roll2d6() - 2, self.dice.rollnd6(3) - 3, self.dice.roll2d6()],
            [self.dice.roll2d6() + 1, self.dice.rollnd6(3) - 3, self.dice.rollnd6(3) - 3, self.dice.roll2d6()],
            [self.dice.roll2d6(), self.dice.roll1d6() - 1, self.dice.rollnd6(3) - 3, self.dice.roll2d6()],
            [self.dice.roll2d6(), self.dice.roll2d6() - 2, self.dice.rollnd6(3) - 3, self.dice.roll2d6()],
            [self.dice.roll2d6(), self.dice.rollnd6(3) - 3, self.dice.rollnd6(3) - 3, self.dice.roll2d6()],
            [self.dice.roll1d6(), self.dice.roll1d6() - 1, self.dice.rollnd6(3) - 3, self.dice.roll2d6()],
            [self.dice.roll1d6(), self.dice.roll2d6() - 2, self.dice.rollnd6(3) - 3, self.dice.roll2d6()],
            [self.dice.roll1d6(), self.dice.rollnd6(3) - 3, self.dice.rollnd6(3) - 3, self.dice.roll2d6()],
            [self.dice.roll1d6() - 1, self.dice.roll1d6(), self.dice.rollnd6(3) - 3, self.dice.roll2d6()],
            [self.dice.roll1d6() - 1, self.dice.roll2d6(), self.dice.rollnd6(3) - 3, self.dice.roll2d6()],
            [self.dice.roll1d6() - 1, self.dice.rollnd6(3), self.dice.rollnd6(3) - 3, self.dice.roll2d6()],
            [self.dice.roll1d6() - 2, self.dice.roll1d6() - 1, self.dice.rollnd6(3) - 3, self.dice.roll2d6()],
            [self.dice.roll1d6() - 3, self.dice.roll1d6() - 2, self.dice.rollnd6(3) - 3, self.dice.roll2d6()],
            [self.dice.roll1d6() - 4, self.dice.roll1d6() - 3, self.dice.roll2d6(), self.dice.roll2d6()],
[0, 0, self.dice.roll1d6() - 3, self.dice.roll2d6()],
[0, 0, 0, self.dice.roll2d6()],
[0, 0, 0, self.dice.roll2d6()],
[0, 0, 0, self.dice.roll1d6()],
[0, 0, 0, 0],
[0, 0, 0, 0]]
if self.percentage >= 50:
land_roll = self.dice.roll1d6()
land_index = land_roll + self.planet.uwp[3] * 3
self.continents = continent_lol[land_index - 16]
            for i in range(len(self.continents)):
                if self.continents[i] < 0:
                    self.continents[i] = 0
return
else:
if self.percentage == 0:
self.continents = [1, 0, 0, 0]
else:
self.continents = [1, 2, 3, 4]
return
def generate_oceans(self):
oceans_lol = [[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, self.dice.roll1d6()],
[0, 0, 0, self.dice.roll2d6()],
[0, 0, 0, self.dice.roll2d6()],
[0, 0, self.dice.roll1d6() - 3, self.dice.roll2d6()],
                      [self.dice.roll1d6() - 4, self.dice.roll1d6() - 3, self.dice.roll2d6() - 3, self.dice.roll2d6()],
                      [self.dice.roll1d6() - 4, self.dice.roll1d6() - 2, self.dice.rollnd6(3) - 3, self.dice.roll2d6()],
                      [self.dice.roll1d6() - 3, self.dice.roll1d6() - 1, self.dice.rollnd6(3) - 3, self.dice.roll2d6()],
                      [self.dice.roll1d6() - 3, self.dice.roll1d6() - 1, self.dice.rollnd6(3) - 3, self.dice.roll2d6()],
                      [self.dice.roll1d6() - 2, self.dice.roll1d6() - 1, self.dice.rollnd6(3) - 3, self.dice.roll2d6()],
                      [self.dice.roll1d6() - 2, self.dice.roll2d6() - 2, self.dice.rollnd6(3) - 3, self.dice.roll2d6()],
                      [self.dice.roll1d6() - 1, self.dice.roll1d6() - 1, self.dice.rollnd6(3) - 3, self.dice.roll2d6()],
                      [self.dice.roll1d6() - 1, self.dice.roll2d6() - 2, self.dice.rollnd6(3) - 3, self.dice.roll2d6()],
                      [self.dice.roll1d6() - 1, self.dice.rollnd6(3) - 3, self.dice.rollnd6(3) - 3, self.dice.roll2d6()],
                      [self.dice.roll1d6(), self.dice.roll1d6() - 1, self.dice.rollnd6(3) - 3, self.dice.roll2d6()],
                      [self.dice.roll1d6(), self.dice.roll2d6() - 2, self.dice.rollnd6(3) - 3, self.dice.roll2d6()],
                      [self.dice.roll1d6(), self.dice.rollnd6(3) - 3, self.dice.rollnd6(3) - 3, self.dice.roll2d6()],
                      [1, self.dice.roll1d6() - 1, self.dice.rollnd6(3) - 3, self.dice.roll2d6()],
                      [1, self.dice.roll2d6() - 2, self.dice.rollnd6(3) - 3, self.dice.roll2d6()],
                      [1, self.dice.rollnd6(3) - 3, self.dice.rollnd6(3) - 3, self.dice.roll2d6()]]
if self.percentage < 50:
ocean_roll = self.dice.roll1d6()
ocean_index = ocean_roll + self.planet.uwp[3] * 3
self.oceans = oceans_lol[ocean_index - 1]
            for i in range(len(self.oceans)):
                if self.oceans[i] < 0:
                    self.oceans[i] = 0
return
else:
if self.percentage == 100:
self.oceans = [1, 0, 0, 0]
else:
self.oceans = [0, 1, 3, 4]
return
def generate_weather_control(self):
weather_roll = self.dice.roll2d6()
if weather_roll < self.planet.uwp[7] and weather_roll < self.planet.uwp[4]:
weather_control = True
else:
weather_control = False
self.weather_control = weather_control
return
def generate_volcanoes(self):
# FIXME I DO NOT UNDERSTAND VOLCANOES
stress_factor = 0
max_possible_volcanoes = self.continents[0] + self.continents[1] + self.oceans[0]
notable_volcanoes = 0
if self.planet.uwp[3] == 10:
max_possible_volcanoes += self.dice.roll1d6()
if self.continents[0] == 1:
            max_possible_volcanoes += self.dice.roll1d6() - 1
|
py | 1a4c30a93accbddab585a7b148b5152dd47ed32d | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import datetime
import re
from bs4 import BeautifulSoup
import scrape_common as sc
url = 'https://www.jura.ch/fr/Autorites/Coronavirus/Chiffres-H-JU/Evolution-des-cas-COVID-19-dans-le-Jura.html'
d = sc.download(url, silent=True)
d = d.replace(' ', ' ')
soup = BeautifulSoup(d, 'html.parser')
is_first = True
data_table = soup.find('caption', string=re.compile(r'Evolution du nombre de cas.*Jura')).find_parent('table')
if data_table:
headers = [" ".join(cell.stripped_strings) for cell in data_table.find('tr').find_all(['td', 'th'])]
assert len(headers) == 6, f"Number of headers changed: {len(headers)} != 6"
rows = []
for row in data_table.find_all('tr')[1:-1]:
data = {}
for col_num, cell in enumerate(row.find_all(['th', 'td'])):
content = " ".join(cell.stripped_strings).strip()
if content:
data[headers[col_num]] = content
rows.append(data)
if rows:
for i, row in enumerate(rows[:-1]):
if not row.get('Date') or row.get('Date') == 'Date':
continue
if not is_first:
print('-' * 10)
is_first = False
dd = sc.DayData(canton='JU', url=url)
current_year = datetime.datetime.now().year
if row.get('Date') and not re.search(f'{current_year}', row.get('Date')):
dd.datetime = f"{row.get('Date', '')} {current_year}"
else:
dd.datetime = row.get('Date', '')
dd.datetime = dd.datetime.replace('1 er', '1')
dd.cases = row.get('Cumul des cas confirmés')
dd.hospitalized = row.get('Nombre de cas actuellement hospitalisés')
dd.icu = row.get('Nombre de cas actuellement en soins intensifs')
dd.deaths = sum(int(str(r.get('Nombre de nouveaux décès', 0)).replace('*', '')) for r in rows[i:] if r.get('Nombre de nouveaux décès'))
print(dd)
|
py | 1a4c30aa7bd22a76afc32023290d46169cb4e4be | from __future__ import absolute_import
from .train import Train
from .predict import Predict
__all__ = ["Train", "Predict"]
|
py | 1a4c3101fca3e5f64b06384d12daa4933d983696 | """
Source: https://pe.usps.com/text/pub28/welcome.htm
"""
STREET_NAME_POST_ABBREVIATIONS = {
"ALLEE": "ALY",
"ALLEY": "ALY",
"ALLY": "ALY",
"ALY": "ALY",
"ANEX": "ANX",
"ANNEX": "ANX",
"ANNX": "ANX",
"ANX": "ANX",
"ARC": "ARC",
"ARC ": "ARC",
"ARCADE": "ARC",
"ARCADE ": "ARC",
"AV": "AVE",
"AVE": "AVE",
"AVEN": "AVE",
"AVENU": "AVE",
"AVENUE": "AVE",
"AVN": "AVE",
"AVNUE": "AVE",
"BAYOO": "BYU",
"BAYOU": "BYU",
"BCH": "BCH",
"BEACH": "BCH",
"BEND": "BND",
"BLF": "BLF",
"BLUF": "BLF",
"BLUFF": "BLF",
"BLUFFS": "BLFS",
"BLUFFS ": "BLFS",
"BLVD": "BLVD",
"BND": "BND",
"BOT": "BTM",
"BOTTM": "BTM",
"BOTTOM": "BTM",
"BOUL": "BLVD",
"BOULEVARD": "BLVD",
"BOULEVARD ": "BLVD",
"BOULV": "BLVD",
"BR": "BR",
"BRANCH": "BR",
"BRDGE": "BRG",
"BRG": "BRG",
"BRIDGE": "BRG",
"BRK": "BRK",
"BRNCH": "BR",
"BROOK": "BRK",
"BROOKS": "BRKS",
"BROOKS ": "BRKS",
"BTM": "BTM",
"BURG": "BG",
"BURGS": "BGS",
"BYP": "BYP",
"BYPA": "BYP",
"BYPAS": "BYP",
"BYPASS": "BYP",
"BYPS": "BYP",
"CAMP": "CP",
"CANYN": "CYN",
"CANYON": "CYN",
"CAPE": "CPE",
"CAUSEWAY": "CSWY",
"CAUSWA": "CSWY",
"CAUSWAY": "CSWY",
"CEN": "CTR",
"CENT": "CTR",
"CENTER": "CTR",
"CENTERS": "CTRS",
"CENTERS ": "CTRS",
"CENTR": "CTR",
"CENTRE": "CTR",
"CIR": "CIR",
"CIRC": "CIR",
"CIRCL": "CIR",
"CIRCLE": "CIR",
"CIRCLES": "CIRS",
"CK": "CRK",
"CLB": "CLB",
"CLF": "CLF",
"CLFS": "CLFS",
"CLIFF": "CLF",
"CLIFFS": "CLFS",
"CLUB": "CLB",
"CMP": "CP",
"CNTER": "CTR",
"CNTR": "CTR",
"CNYN": "CYN",
"COMMON": "CMN",
"COMMONS": "CMNS",
"COR": "COR",
"CORNER": "COR",
"CORNERS": "CORS",
"CORS": "CORS",
"COURSE": "CRSE",
"COURT": "CT",
"COURTS": "CTS",
"COVE": "CV",
"COVES": "CVS",
"CP": "CP",
"CPE": "CPE",
"CR": "CRK",
"CRCL": "CIR",
"CRCLE": "CIR",
"CRECENT": "CRES",
"CREEK": "CRK",
"CRES": "CRES",
"CRESCENT": "CRES",
"CRESENT": "CRES",
"CREST": "CRST",
"CRK": "CRK",
"CROSSING": "XING",
"CROSSING ": "XING",
"CROSSROAD": "XRD",
"CROSSROADS": "XRDS",
"CRSCNT": "CRES",
"CRSE": "CRSE",
"CRSENT": "CRES",
"CRSNT": "CRES",
"CRSSING": "XING",
"CRSSNG": "XING",
"CRSSNG ": "XING",
"CRT": "CT",
"CSWY": "CSWY",
"CT": "CT",
"CTR": "CTR",
"CTS": "CTS",
"CURVE": "CURV",
"CURVE ": "CURV",
"CV": "CV",
"CYN": "CYN",
"DALE": "DL",
"DALE ": "DL",
"DAM": "DM",
"DAM ": "DM",
"DIV": "DV",
"DIVIDE": "DV",
"DL": "DL",
"DL ": "DL",
"DM": "DM",
"DM ": "DM",
"DR": "DR",
"DRIV": "DR",
"DRIVE": "DR",
"DRIVES": "DRS",
"DRV": "DR",
"DV": "DV",
"DVD": "DV",
"EST": "EST",
"ESTATE": "EST",
"ESTATES": "ESTS",
"ESTS": "ESTS",
"EXP": "EXPY",
"EXPR": "EXPY",
"EXPRESS": "EXPY",
"EXPRESSWAY": "EXPY",
"EXPW": "EXPY",
"EXPY": "EXPY",
"EXT": "EXT",
"EXTENSION": "EXT",
"EXTENSIONS": "EXTS",
"EXTN": "EXT",
"EXTNSN": "EXT",
"EXTS": "EXTS",
"FALL": "FALL",
"FALLS": "FLS",
"FERRY": "FRY",
"FIELD": "FLD",
"FIELDS": "FLDS",
"FLAT": "FLT",
"FLATS": "FLTS",
"FLD": "FLD",
"FLDS": "FLDS",
"FLS": "FLS",
"FLT": "FLT",
"FLTS": "FLTS",
"FORD": "FRD",
"FORDS": "FRDS",
"FOREST": "FRST",
"FORESTS": "FRST",
"FORG": "FRG",
"FORGE": "FRG",
"FORGES": "FRGS",
"FORK": "FRK",
"FORKS": "FRKS",
"FORT": "FT",
"FRD": "FRD",
"FREEWAY": "FWY",
"FREEWY": "FWY",
"FRG": "FRG",
"FRK": "FRK",
"FRKS": "FRKS",
"FRRY": "FRY",
"FRST": "FRST",
"FRT": "FT",
"FRWAY": "FWY",
"FRWY": "FWY",
"FRY": "FRY",
"FT": "FT",
"FWY": "FWY",
"GARDEN": "GDN",
"GARDENS": "GDNS",
"GARDN": "GDN",
"GATEWAY": "GTWY",
"GATEWY": "GTWY",
"GATWAY": "GTWY",
"GDN": "GDN",
"GDNS": "GDNS",
"GLEN": "GLN",
"GLENS": "GLNS",
"GLN": "GLN",
"GRDEN": "GDN",
"GRDN": "GDN",
"GRDNS": "GDNS",
"GREEN": "GRN",
"GREENS": "GRNS",
"GRN": "GRN",
"GROV": "GRV",
"GROVE": "GRV",
"GROVES": "GRVS",
"GRV": "GRV",
"GTWAY": "GTWY",
"GTWY": "GTWY",
"HARB": "HBR",
"HARBOR": "HBR",
"HARBORS": "HBRS",
"HARBR": "HBR",
"HAVEN": "HVN",
"HAVN": "HVN",
"HBR": "HBR",
"HEIGHT": "HTS",
"HEIGHTS": "HTS",
"HGTS": "HTS",
"HIGHWAY": "HWY",
"HIGHWY": "HWY",
"HILL": "HL",
"HILLS": "HLS",
"HIWAY": "HWY",
"HIWY": "HWY",
"HL": "HL",
"HLLW": "HOLW",
"HLS": "HLS",
"HOLLOW": "HOLW",
"HOLLOWS": "HOLW",
"HOLW": "HOLW",
"HOLWS": "HOLW",
"HRBOR": "HBR",
"HT": "HTS",
"HTS": "HTS",
"HVN": "HVN",
"HWAY": "HWY",
"HWY": "HWY",
"INLET": "INLT",
"INLT": "INLT",
"IS": "IS",
"ISLAND": "IS",
"ISLANDS": "ISS",
"ISLE": "ISLE",
"ISLES": "ISLE",
"ISLND": "IS",
"ISLNDS": "ISS",
"ISS": "ISS",
"JCT": "JCT",
"JCTION": "JCT",
"JCTN": "JCT",
"JCTNS": "JCTS",
"JCTS": "JCTS",
"JUNCTION": "JCT",
"JUNCTIONS": "JCTS",
"JUNCTN": "JCT",
"JUNCTON": "JCT",
"KEY": "KY",
"KEYS": "KYS",
"KNL": "KNL",
"KNLS": "KNLS",
"KNOL": "KNL",
"KNOLL": "KNL",
"KNOLLS": "KNLS",
"KY": "KY",
"KYS": "KYS",
"LA": "LN",
"LAKE": "LK",
"LAKES": "LKS",
"LAND": "LAND",
"LANDING": "LNDG",
"LANE": "LN",
"LANES": "LN",
"LCK": "LCK",
"LCKS": "LCKS",
"LDG": "LDG",
"LDGE": "LDG",
"LF": "LF",
"LGT": "LGT",
"LIGHT": "LGT",
"LIGHTS": "LGTS",
"LK": "LK",
"LKS": "LKS",
"LN": "LN",
"LNDG": "LNDG",
"LNDNG": "LNDG",
"LOAF": "LF",
"LOCK": "LCK",
"LOCKS": "LCKS",
"LODG": "LDG",
"LODGE": "LDG",
"LOOP": "LOOP",
"LOOPS": "LOOP",
"MALL": "MALL",
"MANOR": "MNR",
"MANORS": "MNRS",
"MDW": "MDW",
"MDWS": "MDWS",
"MEADOW": "MDW",
"MEADOWS": "MDWS",
"MEDOWS": "MDWS",
"MEWS": "MEWS",
"MILL": "ML",
"MILLS": "MLS",
"MISSION": "MSN",
"MISSN": "MSN",
"ML": "ML",
"MLS": "MLS",
"MNR": "MNR",
"MNRS": "MNRS",
"MNT": "MT",
"MNTAIN": "MTN",
"MNTN": "MTN",
"MNTNS": "MTNS",
"MOTORWAY": "MTWY",
"MOUNT": "MT",
"MOUNTAIN": "MTN",
"MOUNTAINS": "MTNS",
"MOUNTIN": "MTN",
"MSN": "MSN",
"MSSN": "MSN",
"MT": "MT",
"MTIN": "MTN",
"MTN": "MTN",
"NCK": "NCK",
"NECK": "NCK",
"ORCH": "ORCH",
"ORCHARD": "ORCH",
"ORCHRD": "ORCH",
"OVAL": "OVAL",
"OVERPASS": "OPAS",
"OVL": "OVAL",
"PARK": "PARK",
"PARKS": "PARK",
"PARKWAY": "PKWY",
"PARKWAYS": "PKWY",
"PARKWY": "PKWY",
"PASS": "PASS",
"PASSAGE": "PSGE",
"PATH": "PATH",
"PATHS": "PATH",
"PIKE": "PIKE",
"PIKES": "PIKE",
"PINE": "PNE",
"PINES": "PNES",
"PK": "PARK",
"PKWAY": "PKWY",
"PKWY": "PKWY",
"PKWYS": "PKWY",
"PKY": "PKWY",
"PL": "PL",
"PLACE": "PL",
"PLAIN": "PLN",
"PLAINES": "PLNS",
"PLAINS": "PLNS",
"PLAZA": "PLZ",
"PLN": "PLN",
"PLNS": "PLNS",
"PLZ": "PLZ",
"PLZA": "PLZ",
"PNES": "PNES",
"POINT": "PT",
"POINTS": "PTS",
"PORT": "PRT",
"PORTS": "PRTS",
"PR": "PR",
"PRAIRIE": "PR",
"PRARIE": "PR",
"PRK": "PARK",
"PRR": "PR",
"PRT": "PRT",
"PRTS": "PRTS",
"PT": "PT",
"PTS": "PTS",
"RAD": "RADL",
"RADIAL": "RADL",
"RADIEL": "RADL",
"RADL": "RADL",
"RAMP": "RAMP",
"RANCH": "RNCH",
"RANCHES": "RNCH",
"RAPID": "RPD",
"RAPIDS": "RPDS",
"RD": "RD",
"RDG": "RDG",
"RDGE": "RDG",
"RDGS": "RDGS",
"RDS": "RDS",
"REST": "RST",
"RIDGE": "RDG",
"RIDGES": "RDGS",
"RIV": "RIV",
"RIVER": "RIV",
"RIVR": "RIV",
"RNCH": "RNCH",
"RNCHS": "RNCH",
"ROAD": "RD",
"ROADS": "RDS",
"ROUTE": "RTE",
"ROW": "ROW",
"RPD": "RPD",
"RPDS": "RPDS",
"RST": "RST",
"RUE": "RUE",
"RUN": "RUN",
"RVR": "RIV",
"SHL": "SHL",
"SHLS": "SHLS",
"SHOAL": "SHL",
"SHOALS": "SHLS",
"SHOAR": "SHR",
"SHOARS": "SHRS",
"SHORE": "SHR",
"SHORES": "SHRS",
"SHR": "SHR",
"SHRS": "SHRS",
"SKYWAY": "SKWY",
"SMT": "SMT",
"SPG": "SPG",
"SPGS": "SPGS",
"SPNG": "SPG",
"SPNGS": "SPGS",
"SPRING": "SPG",
"SPRINGS": "SPGS",
"SPRNG": "SPG",
"SPRNGS": "SPGS",
"SPUR": "SPUR",
"SPURS": "SPUR",
"SQ": "SQ",
"SQR": "SQ",
"SQRE": "SQ",
"SQRS": "SQS",
"SQU": "SQ",
"SQUARE": "SQ",
"SQUARES": "SQS",
"ST": "ST",
"STA": "STA",
"STATION": "STA",
"STATN": "STA",
"STN": "STA",
"STR": "ST",
"STRA": "STRA",
"STRAV": "STRA",
"STRAVE": "STRA",
"STRAVEN": "STRA",
"STRAVENUE": "STRA",
"STRAVN": "STRA",
"STREAM": "STRM",
"STREET": "ST",
"STREETS": "STS",
"STREME": "STRM",
"STRM": "STRM",
"STRT": "ST",
"STRVN": "STRA",
"STRVNUE": "STRA",
"SUMIT": "SMT",
"SUMITT": "SMT",
"SUMMIT": "SMT",
"TER": "TER",
"TERR": "TER",
"TERRACE": "TER",
"THROUGHWAY": "TRWY",
"TPK": "TPKE",
"TPKE": "TPKE",
"TR": "TRL",
"TRACE": "TRCE",
"TRACES": "TRCE",
"TRACK": "TRAK",
"TRACKS": "TRAK",
"TRAFFICWAY": "TRFY",
"TRAIL": "TRL",
"TRAILER": "TRLR",
"TRAILS": "TRL",
"TRAK": "TRAK",
"TRCE": "TRCE",
"TRFY": "TRFY",
"TRK": "TRAK",
"TRKS": "TRAK",
"TRL": "TRL",
"TRLR": "TRLR",
"TRLRS": "TRLR",
"TRLS": "TRL",
"TRNPK": "TPKE",
"TRPK": "TPKE",
"TUNEL": "TUNL",
"TUNL": "TUNL",
"TUNLS": "TUNL",
"TUNNEL": "TUNL",
"TUNNELS": "TUNL",
"TUNNL": "TUNL",
"TURNPIKE": "TPKE",
"TURNPK": "TPKE",
"UN": "UN",
"UNDERPASS": "UPAS",
"UNION": "UN",
"UNIONS": "UNS",
"VALLEY": "VLY",
"VALLEYS": "VLYS",
"VALLY": "VLY",
"VDCT": "VIA",
"VIA": "VIA",
"VIADCT": "VIA",
"VIADUCT": "VIA",
"VIEW": "VW",
"VIEWS": "VWS",
"VILL": "VLG",
"VILLAG": "VLG",
"VILLAGE": "VLG",
"VILLAGES": "VLGS",
"VILLE": "VL",
"VILLG": "VLG",
"VILLIAGE": "VLG",
"VIS": "VIS",
"VIST": "VIS",
"VISTA": "VIS",
"VL": "VL",
"VLG": "VLG",
"VLGS": "VLGS",
"VLLY": "VLY",
"VLY": "VLY",
"VLYS": "VLYS",
"VST": "VIS",
"VSTA": "VIS",
"VW": "VW",
"VWS": "VWS",
"WALK": "WALK",
"WALKS": "WALK",
"WALL": "WALL",
"WAY": "WAY",
"WAYS": "WAYS",
"WELL": "WL",
"WELLS": "WLS",
"WLS": "WLS",
"WY": "WAY",
"XING": "XING",
"XING ": "XING"
}
# Even though we don't care about normalizing the state names themselves,
# state names may appear inside of street names (i.e. Kentucky Highway).
STATE_ABBREVIATIONS = {
'ALABAMA': 'AL',
'ALA': 'AL',
'ALASKA': 'AK',
'ALAS': 'AK',
'ARIZONA': 'AZ',
'ARIZ': 'AZ',
'ARKANSAS': 'AR',
'ARK': 'AR',
'CALIFORNIA': 'CA',
'CALIF': 'CA',
'CAL': 'CA',
'COLORADO': 'CO',
'COLO': 'CO',
'COL': 'CO',
'CONNECTICUT': 'CT',
'CONN': 'CT',
'DELAWARE': 'DE',
'DEL': 'DE',
'DISTRICT OF COLUMBIA': 'DC',
'FLORIDA': 'FL',
'FLA': 'FL',
'FLOR': 'FL',
'GEORGIA': 'GA',
'GA': 'GA',
'HAWAII': 'HI',
'IDAHO': 'ID',
'IDA': 'ID',
'ILLINOIS': 'IL',
'ILL': 'IL',
'INDIANA': 'IN',
'IND': 'IN',
'IOWA': 'IA',
'KANSAS': 'KS',
'KANS': 'KS',
'KAN': 'KS',
'KENTUCKY': 'KY',
'KEN': 'KY',
'KENT': 'KY',
'LOUISIANA': 'LA',
'MAINE': 'ME',
'MARYLAND': 'MD',
'MASSACHUSETTS': 'MA',
'MASS': 'MA',
'MICHIGAN': 'MI',
'MICH': 'MI',
'MINNESOTA': 'MN',
'MINN': 'MN',
'MISSISSIPPI': 'MS',
'MISS': 'MS',
'MISSOURI': 'MO',
'MONTANA': 'MT',
'MONT': 'MT',
'NEBRASKA': 'NE',
'NEBR': 'NE',
'NEB': 'NE',
'NEVADA': 'NV',
'NEV': 'NV',
'NEW HAMPSHIRE': 'NH',
'NEW JERSEY': 'NJ',
'NEW MEXICO': 'NM',
'N MEX': 'NM',
'NEW M': 'NM',
'NEW YORK': 'NY',
'NORTH CAROLINA': 'NC',
'NORTH DAKOTA': 'ND',
'N DAK': 'ND',
'OHIO': 'OH',
'OKLAHOMA': 'OK',
'OKLA': 'OK',
'OREGON': 'OR',
'OREG': 'OR',
'ORE': 'OR',
'PENNSYLVANIA': 'PA',
'PENN': 'PA',
'RHODE ISLAND': 'RI',
'SOUTH CAROLINA': 'SC',
'SOUTH DAKOTA': 'SD',
'S DAK': 'SD',
'TENNESSEE': 'TN',
'TENN': 'TN',
'TEXAS': 'TX',
'TEX': 'TX',
'UTAH': 'UT',
'VERMONT': 'VT',
'VIRGINIA': 'VA',
'WASHINGTON': 'WA',
'WASH': 'WA',
'WEST VIRGINIA': 'WV',
'W VA': 'WV',
'WISCONSIN': 'WI',
'WIS': 'WI',
'WISC': 'WI',
'WYOMING': 'WY',
'WYO': 'WY'
}
STREET_NAME_ABBREVIATIONS = {
'COUNTY HWY': 'COUNTY HIGHWAY',
'CNTY HWY': 'COUNTY HIGHWAY',
'COUNTY RD': 'COUNTY ROAD',
'CR': 'COUNTY ROAD',
'CNTY RD': 'COUNTY ROAD',
'CORD': 'COUNTY ROAD',
'CO. RD': 'COUNTY ROAD',
'CO RD': 'COUNTY ROAD',
'CR-': 'COUNTY ROAD',
'CR #': 'COUNTY ROAD',
'CNTY. RD': 'COUNTY ROAD',
'CR.': 'COUNTY ROAD',
'FARM TO MARKET': 'FM',
'HWY FM': 'FM',
'HIWAY': 'HIGHWAY',
'HWY': 'HIGHWAY',
'FRONTAGE ROAD': 'FRONTAGE RD',
'BYPASS': 'BYP',
'BYP RD': 'BYPASS RD',
'INTERSTATE HWY': 'INTERSTATE',
'IH': 'INTERSTATE',
'I': 'INTERSTATE', #to account for cases like I10 OR I 55
'RD': 'ROAD',
'RT': 'ROUTE',
'RTE': 'ROUTE',
'RANCH ROAD': 'RANCH ROAD',
'ST HWY': 'STATE HIGHWAY',
'STHWY': 'STATE HIGHWAY',
'ST-HWY': 'STATE HIGHWAY',
'ST.HWY.': 'STATE HIGHWAY',
'STATE HIGH WAY': 'STATE HIGHWAY',
'S HWY': 'STATE HIGHWAY',
'ST HIGHWAY': 'STATE HIGHWAY',
'STATE HWY': 'STATE HIGHWAY',
'SR': 'STATE ROAD',
'ST RT': 'STATE ROUTE',
'STATE RTE': 'STATE ROUTE',
'TSR': 'TOWNSHIP ROAD',
'TWP HWY': 'TOWNSHIP HIGHWAY',
'TWN HWY': 'TOWNSHIP HIGHWAY',
'TNHW': 'TOWNSHIP HIGHWAY',
'US': 'US HIGHWAY',
    'US HWY': 'US HIGHWAY',
    'USHWY': 'US HIGHWAY',
'US-HWY': 'US HIGHWAY',
'US.HWY.': 'US HIGHWAY',
    'PR': 'PRIVATE ROAD',
}
# Can be used for pre and post directional info
DIRECTIONAL_ABBREVIATIONS = {
'EAST': 'E',
'WEST': 'W',
'NORTH': 'N',
'SOUTH': 'S',
'NORTHEAST': 'NE',
'NORTHWEST': 'NW',
'SOUTHEAST': 'SE',
'SOUTHWEST': 'SW',
"NORTE": "N",
"NO": "N",
"NORESTE": "NE",
"NOROESTE": "NW",
"SUR": "S",
"SO": "S",
"SURESTE": "SE",
"SUROESTE": "SW",
"ESTE": "E",
"EA": "E",
"OESTE": "W",
"WE": "W"
}
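# Illustrative helper (not part of the original module): one way the lookup tables
# above are typically combined when normalizing a free-form street string; the name
# "normalize_street_tokens" is hypothetical.
def normalize_street_tokens(street):
    """e.g. normalize_street_tokens('NORTH MAIN AVENUE') -> 'N MAIN AVE'"""
    tokens = []
    for token in street.upper().split():
        if token in DIRECTIONAL_ABBREVIATIONS:
            tokens.append(DIRECTIONAL_ABBREVIATIONS[token])
        elif token in STREET_NAME_POST_ABBREVIATIONS:
            tokens.append(STREET_NAME_POST_ABBREVIATIONS[token])
        else:
            tokens.append(token)
    return " ".join(tokens)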
#From USPS "C2 Secondary Unit Designators"
#Subaddress Type/WSDESC1 (?)
OCCUPANCY_TYPE_ABBREVIATIONS = {
'APARTMENT': 'APT',
'BUILDING': 'BLDG',
'BASEMENT': 'BSMT',
'DEPARTMENT': 'DEPT',
'FLOOR': 'FL',
'FRONT': 'FRNT',
'HANGER': 'HNGR',
'KEY': 'KEY',
'LOBBY': 'LBBY',
'LOT': 'LOT',
'LOWER': 'LOWR',
'OFFICE': 'OFC',
'PENTHOUSE': 'PH',
'PIER': 'PIER',
'REAR': 'REAR',
'ROOM': 'RM',
'SIDE': 'SIDE',
'SLIP': 'SLIP',
'SPACE': 'SPC',
'STOP': 'STOP',
'SUITE': 'STE',
'TRAILER': 'TRLR',
'UNIT': 'UNIT',
'UPPER': 'UPPER'
}
DIRECTION_CODES = {
"N": 1,
"S": 2,
"E": 3,
"W": 4,
"NE": 5,
"NW": 6,
"SE": 7,
"SW": 8
}
EXTENSION_CODES = {
"EXTD": 1,
"EXTN": 2,
"LP": 3,
"BYP": 4,
"ALT": 5,
"BUS": 6,
"OLD": 7,
"SPUR": 8
}
STREET_TYPE_CODES = {
"ALY": 11,
"ALT": 12,
"ARC": 15,
"ARRY": 16,
"APTA": 17,
"AVA": 18,
"AVE": 19,
"BLVD": 26,
"BLV": 32,
"BSRT": 33,
"BYP": 34,
"CLLE": 36,
"CJA": 37,
"CJON": 38,
"CAM": 39,
"CARR": 47,
"CSWY": 48,
"CTR": 51,
"CIR": 57,
"CORD": 70,
"CT": 71,
"CV": 73,
"CRES": 76,
"XING": 77,
"CRU": 78,
"DR": 87,
"EXP": 93,
"EXPY": 94,
"FM": 99,
"4WD": 110,
"FWY": 112,
"HWY": 122,
"I-": 133,
"JPTR": 138,
"LN": 146,
"LOOP": 151,
"MARG": 154,
"MTWY": 164,
"MRO": 167,
"OVPS": 178,
"PARK": 179,
"PKY": 180,
"PAS": 182,
"PSO": 183,
"PASS": 185,
"PATH": 187,
"PIKE": 189,
"PSTA": 191,
"PL": 192,
"PLZ": 193,
"PTE": 202,
"RML": 208,
"RMP": 210,
"ROAD": 223,
"RT": 227,
"ROW": 228,
"RUE": 229,
"RUN": 230,
"RUTA": 232,
"SNDR": 239,
"SVRD": 240,
"SKWY": 248,
"SPWY": 253,
"SQ": 256,
"STHY": 259,
"ST": 263,
"TER": 268,
"THFR": 269,
"THWY": 270,
"TWHY": 273,
"TFWY": 274,
"TRL": 275,
"TUN": 278,
"TUNL": 279,
"TPKE": 280,
"UNPS": 281,
"USHY": 283,
"UNRD": 286,
"VRDA": 288,
"VIA": 289,
"WALK": 291,
"WKWY": 292,
"WALL": 293,
"WAY": 296,
"NFD": 302,
"OVAL": 303,
"EST": 304,
"VLLA": 305,
"DRWY": 306,
"RDWY": 307,
"STRA": 308,
"CLUB": 309,
"CTS": 310,
"JCT": 311,
"LNDG": 312,
"LDGE": 313,
"MALL": 314,
"MNR": 315,
"STA": 316,
"VLG": 317,
"CORS": 318,
"COMN": 319,
"PRRD": 320,
"EMS": 321
}
#Frankly, I don't know when we actually use this
Building_Codes = {
"AFB": 1,
"ARPT": 2,
"APTS": 3,
"ARC": 4,
"BAZR": 5,
"BLDG": 6,
"BSPK": 7,
"CTR": 8,
"CLUB": 9,
"CLTN": 10,
"CMMN": 11,
"CPLX": 12,
"COND": 13,
"CNCN": 14,
"CORS": 15,
"CTHS": 16,
"CTS": 17,
"CTYD": 18,
"XING": 19,
"XRDS": 20,
"EDIF": 21,
"ESP": 22,
"EXCH": 23,
"FEST": 24,
"GALR": 25,
"HALL": 26,
"HOME": 27,
"HOSP": 28,
"HOTEL": 29,
"HOUSE": 30,
"INPK": 31,
"INN": 32,
"JCT": 33,
"LNDG": 34,
"LDGE": 35,
"MALL": 36,
"MNR": 37,
"MKT": 38,
"MERC": 39,
"MTL": 40,
"NAS": 41,
"OFPK": 42,
"OTLT": 43,
"PARK": 44,
"PAVL": 45,
"PLNT": 46,
"PLZ": 47,
"PROM": 49,
"QTRS": 50,
"RES": 51,
"7 CO": 52,
"SC": 53,
"SQ": 54,
"STA": 55,
"STES": 56,
"TOWER": 57,
"TWNH": 58,
"TRPK": 59,
"VLLA": 60,
"VLG": 61,
"VIVI": 62,
"ESTS": 63,
"COLL": 64,
"COTT": 65,
"PROJ": 66,
"TORRE": 67
} |
py | 1a4c3174009a08d4958dfa0f0071f1efd41d6b21 | import nltk
class Analyzer():
"""Implements sentiment analysis."""
def __init__(self, positives, negatives):
"""Initialize Analyzer."""
# load positive and negative words
# Set, list or dict?:
# http://stackoverflow.com/questions/3489071/in-python-when-to-use-a-dictionary-list-or-set
# In this case, only having a particular value matters and not the order. Hence use set()
self.positives = set()
self.negatives = set()
# with open - opens the file and automatically closes once finishes
with open ("positive-words.txt", "r") as fpos:
for line in fpos:
                # C's "!" negation is spelled "not" in Python
# http://stackoverflow.com/questions/6117733/negation-in-python
if not line.startswith((";", " ")):
                    # Each line in the word-list file ends with "\n", so strip it before adding
self.positives.add(line.strip("\n"))
with open ("negative-words.txt", "r") as fneg:
for line in fneg:
if not line.startswith((";", " ")):
self.negatives.add(line.strip("\n"))
def analyze(self, text):
"""Analyze text for sentiment, returning its score."""
# http://www.nltk.org/api/nltk.tokenize.html
# This breaks the lines into list of words
# and stores them as tokens
self.tokenizer = nltk.tokenize.TweetTokenizer()
tokens = self.tokenizer.tokenize(text)
ind = 0
# Cross check text with positive and negative list and returns appropriate indicator
for token in tokens:
# indicator for sentiment score
if token.lower() in self.positives:
ind += 1
elif token.lower() in self.negatives:
ind -= 1
return ind
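# Minimal usage sketch (illustrative only): the constructor takes the two word-list
# paths positionally, although as written it always reads the bundled
# "positive-words.txt" / "negative-words.txt" files directly.
#
#   analyzer = Analyzer("positive-words.txt", "negative-words.txt")
#   score = analyzer.analyze("great service but the wait was terrible")
#   # score > 0 leans positive, score < 0 leans negative, 0 is neutral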
|
py | 1a4c31a98391a0932320252f78770e6200884606 | import numpy as np
from .trading_env import TradingEnv, Actions, Positions
class ForexEnv(TradingEnv):
def __init__(self, df, window_size, frame_bound, min_index_start, unit_side='left'):
assert len(frame_bound) == 2
assert unit_side.lower() in ['left', 'right']
self.frame_bound = frame_bound
self.unit_side = unit_side.lower()
self.min_index_start = min_index_start
super().__init__(df, window_size, min_index_start)
self.trade_fee = 0.0003 # unit
def _process_data(self):
prices = self.df.loc[:, 'Close'].to_numpy()
prices[self.frame_bound[0] - self.window_size] # validate index (TODO: Improve validation)
prices = prices[self.frame_bound[0]-self.window_size:self.frame_bound[1]]
diff = np.insert(np.diff(prices), 0, 0)
signal_features = np.column_stack((prices, diff))
return prices, signal_features
def _calculate_reward(self, action):
step_reward = 0 # pip
trade = False
if ((action == Actions.Buy.value and self._position == Positions.Short) or
(action == Actions.Sell.value and self._position == Positions.Long)):
trade = True
if trade:
current_price = self.prices[self._current_tick]
last_trade_price = self.prices[self._last_trade_tick]
price_diff = current_price - last_trade_price
if self._position == Positions.Short:
step_reward += -price_diff * 10000
elif self._position == Positions.Long:
step_reward += price_diff * 10000
return step_reward
def _update_profit(self, action):
trade = False
if ((action == Actions.Buy.value and self._position == Positions.Short) or
(action == Actions.Sell.value and self._position == Positions.Long)):
trade = True
if trade or self._done:
current_price = self.prices[self._current_tick]
last_trade_price = self.prices[self._last_trade_tick]
if self.unit_side == 'left':
if self._position == Positions.Short:
quantity = self._total_profit * (last_trade_price - self.trade_fee)
self._total_profit = quantity / current_price
elif self.unit_side == 'right':
if self._position == Positions.Long:
quantity = self._total_profit / last_trade_price
self._total_profit = quantity * (current_price - self.trade_fee)
def max_possible_profit(self):
current_tick = self._start_tick
last_trade_tick = current_tick - 1
profit = 1.
while current_tick <= self._end_tick:
position = None
if self.prices[current_tick] < self.prices[current_tick - 1]:
while (current_tick <= self._end_tick and
self.prices[current_tick] < self.prices[current_tick - 1]):
current_tick += 1
position = Positions.Short
else:
while (current_tick <= self._end_tick and
self.prices[current_tick] >= self.prices[current_tick - 1]):
current_tick += 1
position = Positions.Long
current_price = self.prices[current_tick - 1]
last_trade_price = self.prices[last_trade_tick]
if self.unit_side == 'left':
if position == Positions.Short:
quantity = profit * (last_trade_price - self.trade_fee)
profit = quantity / current_price
elif self.unit_side == 'right':
if position == Positions.Long:
quantity = profit / last_trade_price
profit = quantity * (current_price - self.trade_fee)
last_trade_tick = current_tick - 1
return profit
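# Minimal usage sketch (illustrative only): assumes `df` is a pandas DataFrame with a
# 'Close' column, as _process_data() above requires, and that TradingEnv exposes the
# usual gym-style reset()/step() interface.  Parameter values are arbitrary.
#
#   env = ForexEnv(df=df, window_size=24, frame_bound=(24, len(df)),
#                  min_index_start=0, unit_side='left')
#   observation = env.reset()
#   observation, reward, done, info = env.step(env.action_space.sample())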
|
py | 1a4c3293fd0d4ff60bafe2ae93bfd629ae8decde | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from lib.actions import OrionBaseAction
from lib.utils import send_user_error
class GetNodeCustomProperty(OrionBaseAction):
def run(self, node, custom_property):
"""
Gets a specific Node Custom Property.
"""
self.connect()
orion_node = self.get_node(node)
if not orion_node.npm:
msg = "Node ({}) does not exist".format(node)
send_user_error(msg)
raise ValueError(msg)
swql = """SELECT {1}
FROM Orion.NodesCustomProperties
WHERE NodeID={0}""".format(orion_node.npm_id, custom_property)
data = self.query(swql)
if 'results' not in data:
msg = "No results from Orion: {}".format(data)
self.logger.info(msg)
raise Exception(msg)
if len(data['results']) == 1:
results = data['results'][0]
return results.get(custom_property)
elif len(data['results']) >= 2:
self.logger.debug(
"Muliple Properties match '{}'".format(node))
raise ValueError("Muliple Properties match '{}'".format(node))
|
py | 1a4c32cc15fae16aed0b273b3d13f4fe79b498c6 | def selecao(a, b, c, d):
if (b > c) and (d > a) and ((c+d) > (a+b)) and (c > 0) and (d > 0) and (a % 2 == 0):
return print('Valores aceitos')
else:
return print('Valores nao aceitos')
def entrada():
valores = input().split(' ')
valor_a = int(valores[0])
valor_b = int(valores[1])
valor_c = int(valores[2])
valor_d = int(valores[3])
return valor_a, valor_b, valor_c, valor_d
n1, n2, n3, n4 = entrada()
selecao(n1, n2, n3, n4)
|
py | 1a4c32dc471223fd2f4e5f8d2ced736b594397ad | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-LOG 蓝鲸日志平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-LOG 蓝鲸日志平台 is licensed under the MIT License.
License for BK-LOG 蓝鲸日志平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
We undertake not to change the open source license (MIT license) applicable to the current version of
the project delivered to anyone in the future.
"""
from apps.feature_toggle.handlers.toggle import FeatureToggleObject
from apps.feature_toggle.plugins.constants import BKLOG_ES_CONFIG
def get_es_config(bk_biz_id):
feature_toggle = FeatureToggleObject.toggle(BKLOG_ES_CONFIG)
bklog_es_config = feature_toggle.feature_config
for biz_es_config in bklog_es_config.get("bk_biz_es_config", []):
if str(bk_biz_id) == str(biz_es_config.get("bk_biz_id", "")):
return biz_es_config["es_config"]
return bklog_es_config["global_es_config"]
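# Illustrative shape of the BKLOG_ES_CONFIG feature config this helper expects; the
# nested values here are made up:
#
#   {
#       "global_es_config": {...},                       # default ES settings
#       "bk_biz_es_config": [
#           {"bk_biz_id": "2", "es_config": {...}},      # per-business overrides
#       ],
#   }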
|
py | 1a4c33d6bf63fefdd9d692c283cc3862b503ba7e | """Initialize synapsegenie"""
import logging
from .__version__ import __version__
logging.basicConfig(level=logging.INFO)
logging.getLogger(__name__)
logging.getLogger("keyring").setLevel(logging.WARNING)
|
py | 1a4c34036112438f970d7d3210764fe763e0cab0 | import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras.layers import *
from tensorflow.keras import regularizers
import numpy as np
#tf.enable_eager_execution() #added. so as to be able to use numpy arrays easily
def limit_mem():
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
tf.compat.v1.InteractiveSession(config=config)
class PeriodicPadding2D(tf.keras.layers.Layer):
def __init__(self, pad_width, **kwargs):
super().__init__(**kwargs)
self.pad_width = pad_width
def call(self, inputs, **kwargs):
if self.pad_width == 0:
return inputs
inputs_padded = tf.concat(
[inputs[:, :, -self.pad_width:, :], inputs, inputs[:, :, :self.pad_width, :]], axis=2)
# Zero padding in the lat direction
inputs_padded = tf.pad(inputs_padded, [[0, 0], [self.pad_width, self.pad_width], [0, 0], [0, 0]])
return inputs_padded
def get_config(self):
config = super().get_config()
config.update({'pad_width': self.pad_width})
return config
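# Quick sanity sketch (illustrative only): with pad_width=1 a (batch, lat, lon, chan)
# tensor of shape (1, 32, 64, 2) becomes (1, 34, 66, 2) -- the longitude axis wraps
# around periodically while the latitude axis is zero padded.
#
#   padded = PeriodicPadding2D(1)(tf.zeros((1, 32, 64, 2)))
#   assert tuple(padded.shape) == (1, 34, 66, 2)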
class ChannelReLU2D(tf.keras.layers.Layer):
def __init__(self, relu_idxs, **kwargs):
super().__init__(**kwargs)
self.relu_idxs = relu_idxs if type(relu_idxs) is list else [relu_idxs]
def call(self, inputs, **kwargs):
if inputs.shape[-1] == len(self.relu_idxs):
return tf.nn.relu(inputs)
else:
channels = [inputs[..., i] for i in range(inputs.shape[-1])]
for i, t in enumerate(channels):
if i in self.relu_idxs:
channels[i] = tf.nn.relu(t)
return tf.stack(channels, -1)
def get_config(self):
config = super().get_config()
config.update({'relu_idxs': self.relu_idxs})
return config
class PeriodicConv2D(tf.keras.layers.Layer):
def __init__(self, filters,
kernel_size,
conv_kwargs={},
**kwargs, ):
super().__init__(**kwargs)
self.filters = filters
self.kernel_size = kernel_size
self.conv_kwargs = conv_kwargs
if type(kernel_size) is not int:
assert kernel_size[0] == kernel_size[1], 'PeriodicConv2D only works for square kernels'
kernel_size = kernel_size[0]
pad_width = (kernel_size - 1) // 2
self.padding = PeriodicPadding2D(pad_width)
self.conv = Conv2D(
filters, kernel_size, padding='valid', **conv_kwargs
)
def call(self, inputs):
return self.conv(self.padding(inputs))
def get_config(self):
config = super().get_config()
config.update({'filters': self.filters, 'kernel_size': self.kernel_size, 'conv_kwargs': self.conv_kwargs})
return config
class ChannelSlice(tf.keras.layers.Layer):
def __init__(self, n_out, **kwargs):
self.n_out = n_out
super().__init__(**kwargs)
def _slice(self, inputs):
# Input: [samples, lat, lon, filters]
return inputs[..., :self.n_out]
def __call__(self, inputs):
out = Lambda(self._slice)(inputs)
return out
def convblock(inputs, filters, kernel=3, stride=1, bn_position=None, l2=0,
use_bias=True, dropout=0, activation='relu'):
x = inputs
if bn_position == 'pre': x = BatchNormalization()(x)
x = PeriodicConv2D(
filters, kernel, conv_kwargs={
'kernel_regularizer': regularizers.l2(l2),
'use_bias': use_bias
}
)(x)
if bn_position == 'mid': x = BatchNormalization()(x)
x = LeakyReLU()(x) if activation == 'leakyrelu' else Activation(activation)(x)
if bn_position == 'post': x = BatchNormalization()(x)
if dropout > 0: x = Dropout(dropout)(x)
return x
def resblock(inputs, filters, kernel, bn_position=None, l2=0, use_bias=True,
dropout=0, skip=True, activation='relu', down=False, up=False):
x = inputs
if down:
x = MaxPooling2D()(x)
for i in range(2):
x = convblock(
x, filters, kernel, bn_position=bn_position, l2=l2, use_bias=use_bias,
dropout=dropout, activation=activation
)
if down or up:
inputs = PeriodicConv2D(
filters, kernel, conv_kwargs={
'kernel_regularizer': regularizers.l2(l2),
'use_bias': use_bias,
'strides': 2 if down else 1
}
)(inputs)
if skip: x = Add()([inputs, x])
return x
def build_uresnet(filters, kernels, unres, input_shape, bn_position=None, use_bias=True, l2=0,
skip=True, dropout=0, activation='relu', **kwargs):
"""
filters
0: init Conv2D
1: first and last resblock
[2:-1]: all down layers
-1: last conv2d
    unres has to have len(filters) - 2 entries (a single entry is repeated)
"""
    if len(unres) == 1: unres = unres * (len(filters) - 2)
x = input = Input(shape=input_shape)
# First conv block to get up to shape
x = convblock(
x, filters[0], kernels[0], bn_position=bn_position, l2=l2, use_bias=use_bias,
dropout=dropout, activation=activation
)
# Resblocks
for _ in range(unres[0]):
x = resblock(x, filters[1], kernels[1], bn_position=bn_position, l2=l2, use_bias=use_bias,
dropout=dropout, skip=skip, activation=activation)
connections = []
for f, k, nr in zip(filters[2:-1], kernels[2:-1], unres[1:]):
connections.append(x)
for i in range(nr):
x = resblock(x, f, k, bn_position=bn_position, l2=l2, use_bias=use_bias,
dropout=dropout, skip=skip, activation=activation, down=i == 0)
for c, f, k, nr in zip(connections[::-1], filters[1:-2][::-1], kernels[1:-2][::-1], unres[:-1][::-1]):
x = UpSampling2D()(x)
x = Concatenate()([c, x])
for i in range(nr):
x = resblock(x, f, k, bn_position=bn_position, l2=l2, use_bias=use_bias,
dropout=dropout, skip=skip, activation=activation, up=i == 0)
# Final convolution
output = PeriodicConv2D(
filters[-1], kernels[-1],
conv_kwargs={'kernel_regularizer': regularizers.l2(l2)},
)(x)
output = Activation('linear', dtype='float32')(output)
return keras.models.Model(input, output)
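# Illustrative call (assumed values, not executed here): following the docstring
# above, `filters`/`kernels` have one entry for the initial conv, one for the top
# resblocks, one per down level and a final output-channel count, while `unres`
# gives the residual-block count per level (len(filters) - 2 entries).
#
#   model = build_uresnet(filters=[64, 64, 128, 256, 2], kernels=[7, 3, 3, 3, 3],
#                         unres=[2, 2, 2], input_shape=(32, 64, 10))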
def build_resnet(filters, kernels, input_shape, bn_position=None, use_bias=True, l2=0,
skip=True, dropout=0, activation='relu', long_skip=False, relu_idxs=None,
categorical=False, nvars=None,
**kwargs):
x = input = Input(shape=input_shape)
# First conv block to get up to shape
x = ls = convblock(
x, filters[0], kernels[0], bn_position=bn_position, l2=l2, use_bias=use_bias,
dropout=dropout, activation=activation
)
# Resblocks
for f, k in zip(filters[1:-1], kernels[1:-1]):
x = resblock(x, f, k, bn_position=bn_position, l2=l2, use_bias=use_bias,
dropout=dropout, skip=skip, activation=activation)
if long_skip:
x = Add()([x, ls])
# Final convolution
output = PeriodicConv2D(
filters[-1], kernels[-1],
conv_kwargs={'kernel_regularizer': regularizers.l2(l2)},
)(x)
    if relu_idxs is not None:
output = ChannelReLU2D(relu_idxs)(output)
if categorical:
bins = int(filters[-1] / nvars)
outputs = []
for i in range(nvars):
o = Softmax()(output[..., i*bins:(i+1)*bins])
outputs.append(o)
output = tf.stack(outputs, axis=3)
output = Activation('linear', dtype='float32')(output)
return keras.models.Model(input, output)
def build_unet(input_shape, n_layers, filters_start, channels_out, kernel=3, u_skip=True,
res_skip=True, l2=0, bn_position=None, dropout=0):
"https://github.com/Nishanksingla/UNet-with-ResBlock/blob/master/resnet34_unet_model.py"
x = input = Input(shape=input_shape)
filters = filters_start
# Down
down_layers = []
for i in range(n_layers):
# Resblock
x_res = PeriodicConv2D(
filters, 1, conv_kwargs={
'use_bias': False, 'kernel_regularizer': regularizers.l2(l2)})(x)
x = convblock(x, filters, kernel, bn_position=bn_position, l2=l2, dropout=dropout)
x = convblock(x, filters, kernel, bn_position=bn_position, l2=l2, activation='linear',
dropout=dropout)
if res_skip: x = Add()([x, x_res])
x = ReLU()(x)
if not i == n_layers - 1:
down_layers.append(x)
# Downsampling
x = MaxPooling2D()(x)
filters *= 2
# Up
for dl in reversed(down_layers):
filters //= 2
# Upsample
x = UpSampling2D()(x)
x = PeriodicConv2D(filters, 3, conv_kwargs={'kernel_regularizer': regularizers.l2(l2)})(x)
x = ReLU()(x)
# Concatenate
if u_skip:
x = Concatenate()([x, dl])
# Resblock
x_res = PeriodicConv2D(filters, 1, conv_kwargs={'use_bias': False})(x)
x = convblock(x, filters, kernel, bn_position=bn_position, l2=l2, dropout=dropout)
x = convblock(x, filters, kernel, bn_position=bn_position, l2=l2, activation='linear',
dropout=dropout)
if res_skip: x = Add()([x, x_res])
x = ReLU()(x)
x = PeriodicConv2D(channels_out, 1, conv_kwargs={'kernel_regularizer': regularizers.l2(l2)})(x)
return keras.models.Model(input, x)
def create_lat_mse(lat):
weights_lat = np.cos(np.deg2rad(lat)).values
weights_lat /= weights_lat.mean()
def lat_mse(y_true, y_pred):
error = y_true - y_pred
mse = error**2 * weights_lat[None, : , None, None]
return mse
return lat_mse
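# --- Illustrative usage note (added for clarity; not part of the original module) ---
# Each create_lat_* factory in this module closes over per-row latitude weights
# (cos(lat), normalized to mean 1), so equatorial rows count more than polar rows.
# The `.values` call assumes `lat` is an xarray/pandas coordinate. A hypothetical compile step:
#
#     lat = dg_train.data.lat     # hypothetical xarray latitude coordinate
#     model.compile(optimizer='adam', loss=create_lat_mse(lat), metrics=[create_lat_rmse(lat)])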
def create_lat_mae(lat):
weights_lat = np.cos(np.deg2rad(lat)).values
weights_lat /= weights_lat.mean()
def lat_mae(y_true, y_pred):
error = y_true - y_pred
mae = tf.abs(error) * weights_lat[None, : , None, None]
return mae
return lat_mae
def create_lat_rmse(lat):
weights_lat = np.cos(np.deg2rad(lat)).values
weights_lat /= weights_lat.mean()
def lat_rmse(y_true, y_pred):
error = y_true - y_pred
mse = error**2 * weights_lat[None, : , None, None]
return tf.math.sqrt(tf.math.reduce_mean(mse, axis=(1, 2, 3)))
return lat_rmse
def create_lat_crps(lat, n_vars, relu=False):
weights_lat = np.cos(np.deg2rad(lat)).values
weights_lat /= weights_lat.mean()
def crps_loss(y_true, y_pred):
# Split input
mu = y_pred[:, :, :, :n_vars]
sigma = y_pred[:, :, :, n_vars:]
# To stop sigma from becoming negative we first have to
        # convert it to the variance and then take the square
# root again.
if relu:
sigma = tf.nn.relu(sigma)
else:
sigma = tf.math.sqrt(tf.math.square(sigma))
# The following three variables are just for convenience
loc = (y_true - mu) / tf.maximum(1e-7, sigma)
phi = 1.0 / np.sqrt(2.0 * np.pi) * tf.math.exp(-tf.math.square(loc) / 2.0)
Phi = 0.5 * (1.0 + tf.math.erf(loc / np.sqrt(2.0)))
# First we will compute the crps for each input/target pair
crps = sigma * (loc * (2. * Phi - 1.) + 2 * phi - 1. / np.sqrt(np.pi))
crps = crps * weights_lat[None, : , None, None]
# Then we take the mean. The cost is now a scalar
return tf.reduce_mean(crps)
return crps_loss
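# --- Reference sketch (added for clarity; not part of the original module) ---
# Scalar version of the closed-form CRPS for a Gaussian forecast used in crps_loss above:
#   CRPS(N(mu, sigma), y) = sigma * [ z*(2*Phi(z) - 1) + 2*phi(z) - 1/sqrt(pi) ],  z = (y - mu)/sigma
# Handy as a sanity check against the vectorized TensorFlow implementation.
def _crps_gaussian_scalar(y, mu, sigma):
    import math
    z = (y - mu) / max(1e-7, sigma)
    pdf = math.exp(-0.5 * z * z) / math.sqrt(2.0 * math.pi)   # standard normal pdf at z
    cdf = 0.5 * (1.0 + math.erf(z / math.sqrt(2.0)))          # standard normal cdf at z
    return sigma * (z * (2.0 * cdf - 1.0) + 2.0 * pdf - 1.0 / math.sqrt(math.pi))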
def gev_cdf_tf(y, mu, sigma, xi):
y = (y-mu)/sigma
x = 1 + xi * y
# x[x < 0] = 0
x = tf.where(x<0, 0., x)
x = x**(-1/xi)
return tf.where(tf.math.is_inf(tf.exp(-x)), 0., tf.exp(-x))
def crps_lcgev_tf(y, mu, sigma, xi, dtype='float32'):
SCdSH = sigma/xi
Gam1mSH = tf.exp(tf.math.lgamma(1-xi))
# print(Gam1mSH)
probY = gev_cdf_tf(y, mu, sigma, xi)
prob0 = gev_cdf_tf(tf.constant(0., dtype), mu, sigma, xi)
igammaY = tf.cast(tf.math.igamma(1-tf.cast(xi, 'float64'), -tf.math.log(tf.cast(probY, 'float64'))), dtype)
igamma0 = tf.cast(tf.math.igamma(1-tf.cast(xi, 'float64'), -2 * tf.math.log(tf.cast(prob0, 'float64'))), dtype)
T1 = (y-mu) * (2*probY-1) + mu * prob0**2
T2 = SCdSH * ( 1-prob0**2 - 2**xi*Gam1mSH*igamma0)
T3 = -2*SCdSH * ( 1-probY - Gam1mSH*igammaY)
# print(T1, T2, T3)
return T1 + T2 + T3
def create_lat_crps_lcgev(lat, n_vars):
weights_lat = np.cos(np.deg2rad(lat)).values
weights_lat /= weights_lat.mean()
def crps_lcgev_loss(y_true, y_pred):
mu, sigma, xi = y_pred[..., 0], y_pred[..., 1], y_pred[..., 2]
sigma = tf.nn.relu(sigma)
# Make sure xi isn't 0
# eps = 1e-7
# xi = tf.where(tf.abs(xi)<eps, eps, xi)
# # Keep xi in bounds
xi = tf.clip_by_value(xi, -0.278, 0.999)
# import pdb
# pdb.set_trace()
return crps_lcgev_tf(y_true[..., 0], mu, sigma, xi) * weights_lat[None, : , None]
return crps_lcgev_loss
def create_lat_crps_mae(lat, n_vars, beta=1.):
weights_lat = np.cos(np.deg2rad(lat)).values
weights_lat /= weights_lat.mean()
def crps_mae(y_true, y_pred):
### CRPS
# Split input
mu = y_pred[:, :, :, :n_vars]
sigma = y_pred[:, :, :, n_vars:]
# To stop sigma from becoming negative we first have to
        # convert it to the variance and then take the square
# root again.
sigma = tf.nn.relu(sigma)
# The following three variables are just for convenience
loc = (y_true - mu) / tf.maximum(1e-7, sigma)
phi = 1.0 / np.sqrt(2.0 * np.pi) * tf.math.exp(-tf.math.square(loc) / 2.0)
Phi = 0.5 * (1.0 + tf.math.erf(loc / np.sqrt(2.0)))
# First we will compute the crps for each input/target pair
crps = sigma * (loc * (2. * Phi - 1.) + 2 * phi - 1. / np.sqrt(np.pi))
crps = crps * weights_lat[None, : , None, None]
# Then we take the mean. The cost is now a scalar
crps = tf.reduce_mean(crps)
### MAE
error = y_true - mu
mae = tf.abs(error) * weights_lat[None, :, None, None]
mae = tf.reduce_mean(mae)
return crps + beta * mae
return crps_mae
def create_lat_log_loss(lat, n_vars):
weights_lat = np.cos(np.deg2rad(lat)).values
weights_lat /= weights_lat.mean()
def log_loss(y_true, y_pred):
# Split input
mu = y_pred[:, :, :, :n_vars]
sigma = y_pred[:, :, :, n_vars:]
sigma = tf.nn.relu(sigma)
# Compute PDF
eps = 1e-7
sigma = tf.maximum(eps, sigma)
prob = 1 / sigma / np.sqrt(2 * np.pi) * tf.math.exp(
-0.5 * ((y_true - mu) / sigma) ** 2
)
# Compute log loss
ll = - tf.math.log(tf.maximum(prob, eps))
ll = ll * weights_lat[None, :, None, None]
return tf.reduce_mean(ll)
return log_loss
def create_lat_categorical_loss(lat, n_vars):
weights_lat = np.cos(np.deg2rad(lat)).values
weights_lat /= weights_lat.mean()
def categorical_loss(y_true, y_pred):
cce = tf.keras.losses.categorical_crossentropy
loss = 0
for i in range(n_vars):
loss += cce(y_true[:,:,:,i,:], y_pred[:,:,:,i,:])*weights_lat[None, :, None]
return loss
return categorical_loss
# Agrawal et al version
def basic_block(x, filters, dropout):
shortcut = x
x = PeriodicConv2D(filters, kernel_size=3)(x)
x = BatchNormalization()(x)
x = LeakyReLU()(x)
x = PeriodicConv2D(filters, kernel_size=3)(x)
if dropout > 0: x = Dropout(dropout)(x)
shortcut = PeriodicConv2D(filters, kernel_size=3)(shortcut)
return Add()([x, shortcut])
def downsample_block(x, filters, dropout):
shortcut = x
x = BatchNormalization()(x)
x = LeakyReLU()(x)
x = MaxPooling2D()(x)
x = BatchNormalization()(x)
x = LeakyReLU()(x)
x = PeriodicConv2D(filters, kernel_size=3)(x)
if dropout > 0: x = Dropout(dropout)(x)
shortcut = PeriodicConv2D(filters, kernel_size=3, conv_kwargs={'strides': 2})(shortcut)
return Add()([x, shortcut])
def upsample_block(x, from_down, filters, dropout):
x = Concatenate()([x, from_down])
x = UpSampling2D()(x)
shortcut = x
x = BatchNormalization()(x)
x = LeakyReLU()(x)
x = PeriodicConv2D(filters, kernel_size=3)(x)
x = BatchNormalization()(x)
x = LeakyReLU()(x)
x = PeriodicConv2D(filters, kernel_size=3)(x)
if dropout > 0: x = Dropout(dropout)(x)
shortcut = PeriodicConv2D(filters, kernel_size=3)(shortcut)
return Add()([x, shortcut])
def build_unet_google(filters, input_shape, output_channels, dropout=0):
inputs = x = Input(input_shape)
x = basic_block(x, filters[0], dropout=dropout)
# Encoder
from_down = []
for f in filters[:-1]:
x = downsample_block(x, f, dropout=dropout)
from_down.append(x)
# Bottleneck
x = basic_block(x, filters[-1], dropout=dropout)
# Decoder
for f, d in zip(filters[:-1][::-1], from_down[::-1]):
x = upsample_block(x, d, f, dropout=dropout)
# Final
outputs = PeriodicConv2D(output_channels, kernel_size=1)(x)
return keras.models.Model(inputs, outputs)
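# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Hypothetical configuration: every entry of `filters` except the last adds one downsampling
# stage (the last is the bottleneck), so lat/lon must be divisible by 2 ** (len(filters) - 1).
def _demo_build_unet_google():
    model = build_unet_google(
        filters=[32, 64, 128],     # two encoder stages + bottleneck (hypothetical)
        input_shape=(32, 64, 4),   # lat x lon x input channels (hypothetical)
        output_channels=1,
    )
    model.summary()
    return model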
###
def create_multi_dt_model(model, multi_dt, dg_train):
const_inp = Input((len(dg_train.data.lat), len(dg_train.data.lon), len(dg_train.const_idxs)))
x = inp = Input((len(dg_train.data.lat), len(dg_train.data.lon), len(dg_train.not_const_idxs)))
outputs = []
for _ in range(multi_dt):
x = model(Concatenate()([x, const_inp]))
outputs.append(x)
model2 = keras.models.Model([inp, const_inp], outputs)
return model2
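# --- Illustrative note (added for clarity; not part of the original module) ---
# create_multi_dt_model wraps a single-step `model` so that its prediction is fed back in
# `multi_dt` times, re-concatenating the constant fields at every step, e.g. (hypothetical):
#
#     model_3dt = create_multi_dt_model(model, multi_dt=3, dg_train=dg_train)
#     # model_3dt([dynamic_fields, constant_fields]) -> [step1, step2, step3]
#
# For the recursion to type-check, `model` must map (lat, lon, n_dynamic + n_const) inputs
# back to (lat, lon, n_dynamic) outputs so each prediction can serve as the next step's input.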
|
py | 1a4c345547f9ca5cb260510190db19bb6729e46f | import matplotlib.pyplot as plt
import numpy as np
import torch
from torch import nn
from torch.autograd import Variable
x_train = np.array([[3.3], [4.4], [5.5], [6.71], [6.93], [4.168],
[9.779], [6.182], [7.59], [2.167], [7.042],
[10.791], [5.313], [7.997], [3.1]], dtype=np.float32)
y_train = np.array([[1.7], [2.76], [2.09], [3.19], [1.694], [1.573],
[3.366], [2.596], [2.53], [1.221], [2.827],
[3.465], [1.65], [2.904], [1.3]], dtype=np.float32)
# from sklearn.utils import check_random_state
# n = 100
# x = np.arange(n)
# rs = check_random_state(0)
# y = rs.randint(-50, 50, size=(n,)) + 50. * np.log1p(np.arange(n))
# x_train = np.array(x.reshape(-1, 1), dtype=np.float32)
# y_train = np.array(y.reshape(-1, 1), dtype=np.float32)
# numpy array to tensor
x_train = torch.from_numpy(x_train)
y_train = torch.from_numpy(y_train)
# define a linear regression model
class linear_regression(nn.Module):
def __init__(self):
super(linear_regression, self).__init__()
self.linear = nn.Linear(1, 1) # input and output is 1 dimension
def forward(self, x):
out = self.linear(x)
return out
model = linear_regression()
# define loss and optimization function
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-4)
# start training
num_epochs = 1000
epoch = 0
loss = float('inf')
for epoch in range(1, 21):
inputs = x_train
target = y_train
# forward
out = model(inputs)
loss = criterion(out, target)
# backward
optimizer.zero_grad()
loss.backward()
optimizer.step()
if epoch % 20 == 0:
print(f'Epoch[{epoch}/{num_epochs}], loss: {loss.item(): .6f}')
# eval mode - prevent batchnorm and dropout operations
model.eval()
with torch.no_grad():
predictions = model(x_train)
predictions = predictions.data.numpy()
fig = plt.figure(figsize=(10, 5))
plt.plot(x_train.numpy(), y_train.numpy(), 'ro', label='Original data') # ro for red circles
plt.plot(x_train.numpy(), predictions, 'o-', color='#1f77b4', label='Fitting Line')
# show diagram
plt.legend()
plt.show()
# # save the model to file
PATH = './linear.pth'
# model parameters only
# torch.save(model.state_dict(), PATH)
# parameters needed for resuming training
torch.save({'epoch': epoch, 'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'loss': loss}, PATH)
# load the model and continuously train for another 999 epochs
model = linear_regression()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-4)
checkpoint = torch.load(PATH)
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
epoch = checkpoint['epoch']
loss = checkpoint['loss']
print(f'last epoch is {epoch}, loss is {loss}')
criterion = nn.MSELoss()
# training mode
model.train()
# eval mode - prevent batchnorm and dropout operations
# model.eval()
for epoch in range(epoch+1, num_epochs+1):
inputs = x_train
target = y_train
# forward
out = model(inputs)
loss = criterion(out, target)
# backward
optimizer.zero_grad()
loss.backward()
optimizer.step()
if epoch % 20 == 0:
print(f'Epoch[{epoch}/{num_epochs}], loss: {loss.item(): .6f}')
# eval mode - prevent batchnorm and dropout operations
model.eval()
with torch.no_grad():
predictions = model(x_train)
predictions = predictions.data.numpy()
fig = plt.figure(figsize=(10, 5))
plt.plot(x_train.numpy(), y_train.numpy(), 'ro', label='Original data') # ro for red circles
plt.plot(x_train.numpy(), predictions, 'o-', color='#1f77b4', label='Fitting Line')
plt.legend()
plt.show()
# -------------------------------------------------
# model save/load another way (entire model) -- not recommended
# torch.save(model, './entire_model.pth')
# model = torch.load('./entire_model.pth')
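# If only the state_dict had been saved (the commented-out variant above), reloading would be
# (sketch, assuming the same linear_regression class definition is available):
# model = linear_regression()
# model.load_state_dict(torch.load(PATH))
# model.eval()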
|
py | 1a4c346cb48d400b8f64605874175ee2fd878648 | # SPDX-License-Identifier: Apache-2.0
# Copyright Contributors to the Rez Project
"""
Sends a post-release email
"""
from __future__ import print_function
from rez.release_hook import ReleaseHook
from rez.system import system
from email.mime.text import MIMEText
from rez.utils.logging_ import print_warning, print_error
from rez.utils.yaml import load_yaml
from rez.utils.scope import scoped_formatter
from rez.vendor.schema.schema import Or
from rez.vendor.six import six
import os.path
import smtplib
basestring = six.string_types[0]
class EmailReleaseHook(ReleaseHook):
schema_dict = {
"subject": basestring,
"body": basestring,
"smtp_host": basestring,
"smtp_port": int,
"sender": basestring,
"recipients": Or(basestring, [basestring])
}
@classmethod
def name(cls):
return "emailer"
def __init__(self, source_path):
super(EmailReleaseHook, self).__init__(source_path)
def post_release(self, user, install_path, variants, release_message=None,
changelog=None, previous_version=None, **kwargs):
if not variants:
return # nothing was released
# construct email body
release_dict = dict(path=install_path,
previous_version=previous_version or "None.",
message=release_message or "No release message.",
changelog=changelog or "No changelog.")
paths_str = '\n'.join(x.root for x in variants)
variants_dict = dict(count=len(variants),
paths=paths_str)
formatter = scoped_formatter(release=release_dict,
variants=variants_dict,
system=system,
package=self.package)
body = formatter.format(self.settings.body)
body = body.strip()
body = body.replace("\n\n\n", "\n\n")
# construct subject line, send email
subject = formatter.format(self.settings.subject)
self.send_email(subject, body)
def send_email(self, subject, body):
if not self.settings.recipients:
return # nothing to do, sending email to nobody
if not self.settings.smtp_host:
print_warning("did not send release email: "
"SMTP host is not specified")
return
recipients = self.get_recipients()
if not recipients:
return
print("Sending release email to:")
print('\n'.join("- %s" % x for x in recipients))
msg = MIMEText(body)
msg["Subject"] = subject
msg["From"] = self.settings.sender
msg["To"] = str(',').join(recipients)
try:
s = smtplib.SMTP(self.settings.smtp_host, self.settings.smtp_port)
s.sendmail(from_addr=self.settings.sender,
to_addrs=recipients,
msg=msg.as_string())
print('Email(s) sent.')
except Exception as e:
print_error("release email delivery failed: %s" % str(e))
def get_recipients(self):
value = self.settings.recipients
if isinstance(value, list):
return value
if os.path.exists(value):
filepath = value
try:
return self.load_recipients(filepath)
except Exception as e:
print_error("failed to load recipients config: %s. Emails "
"not sent" % str(e))
elif '@' in value:
return [value] # assume it's an email address
else:
print_error("email recipient file does not exist: %s. Emails not "
"sent" % value)
return []
def load_recipients(self, filepath):
def test(value, type_):
if not isinstance(value, type_):
                raise TypeError("Expected %s, not %s" % (type_, value))
return value
conf = load_yaml(filepath)
recipients = set()
for rule in test(conf.get("rules", []), list):
filters = rule.get("filters")
match = True
if filters:
for attr, test_value in test(filters, dict).items():
missing = object()
value = getattr(self.package, attr, missing)
if value is missing:
match = False
elif test_value is None:
match = True
elif isinstance(test_value, list):
match = (value in test_value)
else:
match = (value == test_value)
if not match:
break
if match:
rule_recipients = rule.get("recipients")
recipients.update(test(rule_recipients, list))
return sorted(recipients)
def register_plugin():
return EmailReleaseHook
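# --- Illustrative recipients file (added for clarity; not part of the original module) ---
# load_recipients() above expects a YAML file with a top-level "rules" list; each rule may
# carry a "filters" mapping of package attributes to a value, a list of values, or null
# (attribute must exist, any value matches), plus a "recipients" list. A hypothetical example:
#
#   rules:
#     - filters:
#         category: null              # any value of `category` matches
#         name: [maya_tools, nuke_tools]
#       recipients:
#         - tools-team@example.com
#     - recipients:                   # no filters: always matches
#         - releases@example.com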
|
py | 1a4c350a0d388a50f18409b76e3bbf7e8de2c10c | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Sparse related transformation.
"""
from .sparse import SparseToDense
__all__ = [
"SparseToDense",
]
|
py | 1a4c35de3a2bb5f8030686b6d9813e2ab1be29df | #coding:utf-8
'''
filename:relationship_of_point_circle.py
chap:6
subject:8
conditions:Point(),Circle()
solution:relationship between circle and point
'''
from circle import Circle
from point import Point
import math
class Relationship:
def __init__(self,circle:Circle,point:Point):
self.c=circle
self.p=point
def get_relation(self):
distance = abs(self.c.center - self.p)
rst = ''
if distance < self.c.r:
rst = 'in'
elif math.isclose(distance,self.c.r):
rst = 'on'
elif distance > self.c.r:
rst = 'outside'
return 'The {!r} is {} the {!r}.'.format(self.p,rst,self.c)
def __str__(self):
return self.get_relation()
if __name__ == '__main__':
p1 = Point(0,0)
p2 = Point(1,0)
p3 = Point(1,1)
c = Circle(1,p2)
print(Relationship(c,p1))
print(Relationship(c,p2))
print(Relationship(c,p3))
|
py | 1a4c36a9e51183950a38b1e172bb3a5872a1eef5 | #!/usr/bin/python
"""
(C) Copyright 2018-2022 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
"""
import re
import traceback
from daos_utils_base import DaosCommandBase
class DaosCommand(DaosCommandBase):
# pylint: disable=too-many-ancestors,too-many-public-methods
"""Defines a object representing a daos command."""
METHOD_REGEX = {
"run": r"(.*)",
"container_query":
r"Pool UUID:\s+([0-9a-f-]+)\n" +
r"Container UUID:\s+([0-9a-f-]+)\n" +
r"Number of snapshots:\s+(\d+)\n" +
r"Latest Persistent Snapshot:\s+(\d+)\n" +
r"Highest Aggregated Epoch:\s+(\d+)",
}
def pool_query(self, pool, sys_name=None, sys=None):
"""Query a pool.
Args:
pool (str): pool UUID
sys_name (str, optional): DAOS system name context for servers.
Defaults to None.
sys (str, optional): [description]. Defaults to None.
Returns:
dict: JSON output
Raises:
CommandFailure: if the daos pool query command fails.
"""
return self._get_json_result(
("pool", "query"), pool=pool, sys_name=sys_name, sys=sys)
def pool_autotest(self, pool):
"""Runs autotest for pool
Args:
pool (str): pool UUID
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the daos pool autotest command fails.
"""
return self._get_result(
("pool", "autotest"), pool=pool)
def container_create(self, pool, sys_name=None, cont=None,
path=None, cont_type=None, oclass=None,
chunk_size=None, properties=None, acl_file=None):
# pylint: disable=too-many-arguments
"""Create a container.
Args:
pool (str): UUID of the pool in which to create the container
sys_name (str, optional): DAOS system name context for servers.
Defaults to None.
cont (str, optional): container UUID. Defaults to None.
path (str, optional): container namespace path. Defaults to None.
cont_type (str, optional): the type of container to create. Defaults
to None.
oclass (str, optional): object class. Defaults to None.
chunk_size (str, optional): chunk size of files created. Supports
suffixes: K (KB), M (MB), G (GB), T (TB), P (PB), E (EB).
Defaults to None.
properties (str, optional): String of comma-separated <name>:<value>
pairs defining the container properties. Defaults to None
acl_file (str, optional): ACL file. Defaults to None.
Returns:
dict: the daos json command output converted to a python dictionary
Raises:
CommandFailure: if the daos container create command fails.
"""
return self._get_json_result(
("container", "create"), pool=pool, sys_name=sys_name,
cont=cont, path=path, type=cont_type, oclass=oclass,
chunk_size=chunk_size, properties=properties, acl_file=acl_file)
def container_clone(self, src, dst):
"""Clone a container to a new container.
Args:
src (str): the source, formatted as daos://<pool>/<cont>
dst (str): the destination, formatted as daos://<pool>/<cont>
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the daos container clone command fails.
"""
return self._get_result(
("container", "clone"), src=src, dst=dst)
def container_destroy(self, pool, cont, force=None, sys_name=None):
"""Destroy a container.
Args:
pool (str): UUID of the pool in which to create the container
cont (str): container UUID.
force (bool, optional): Force the container destroy. Defaults to
None.
sys_name (str, optional): DAOS system name context for servers.
Defaults to None.
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the daos container destroy command fails.
"""
return self._get_result(
("container", "destroy"), pool=pool, sys_name=sys_name,
cont=cont, force=force)
def container_check(self, pool, cont, sys_name=None, path=None):
"""Check the integrity of container objects.
Args:
pool (str): UUID of the pool in which to create the container
cont (str): container UUID.
sys_name (str, optional): DAOS system name context for servers.
Defaults to None.
path (str): Container namespace path. Defaults to None
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the daos container check command fails.
"""
return self._get_result(
("container", "check"), pool=pool, cont=cont,
sys_name=sys_name, path=path)
def container_get_acl(self, pool, cont,
verbose=False, outfile=None):
"""Get the ACL for a given container.
Args:
pool (str): Pool UUID
cont (str): Container for which to get the ACL.
verbose (bool, optional): Verbose mode.
outfile (str, optional): Write ACL to file.
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the daos container get-acl command fails.
"""
return self._get_result(
("container", "get-acl"), pool=pool, cont=cont,
verbose=verbose, outfile=outfile)
def container_delete_acl(self, pool, cont, principal):
"""Delete an entry for a given principal in an existing container ACL.
Args:
pool (str): Pool UUID
cont (str): Container for which to get the ACL.
principal (str): principal portion of the ACL.
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the daos container delete-acl command fails.
"""
return self._get_result(
("container", "delete-acl"), pool=pool, cont=cont,
principal=principal)
def container_overwrite_acl(self, pool, cont, acl_file):
"""Overwrite the ACL for a given container.
Args:
pool (str): Pool UUID
cont (str): Container for which to get the ACL.
acl_file (str): input file containing ACL
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the daos container overwrite-acl command fails.
"""
return self._get_result(
("container", "overwrite-acl"), pool=pool, cont=cont,
acl_file=acl_file)
def container_update_acl(self, pool, cont, entry=None, acl_file=None):
"""Add or update the ACL entries for a given container.
Args:
pool (str): Pool UUID
cont (str): Container for which to get the ACL.
entry (bool, optional): Add or modify a single ACL entry
acl_file (str, optional): Input file containing ACL
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the daos container get-acl command fails.
"""
return self._get_result(
("container", "update-acl"), pool=pool, cont=cont,
entry=entry, acl_file=acl_file)
def container_list(self, pool, sys_name=None):
"""List containers in the given pool.
Args:
pool (str): Pool label or UUID
sys_name (str, optional): System name. Defaults to None.
Returns:
dict: JSON output
Raises:
CommandFailure: if the daos container list command fails.
"""
# Sample output.
# {
# "response": [
# {
# "UUID": "bad80a98-aabd-498c-b001-6547cd061c8c",
# "Label": "container_label_not_set"
# },
# {
# "UUID": "dd9fc365-5729-4736-9d34-e46504a4a92d",
# "Label": "mkc1"
# }
# ],
# "error": null,
# "status": 0
# }
return self._get_json_result(
("container", "list"), pool=pool, sys_name=sys_name)
def pool_set_attr(self, pool, attr, value, sys_name=None):
"""Set pool attribute.
Args:
pool (str): Pool UUID.
attr (str): Attribute name.
value (str): Attribute value.
sys_name (str): DAOS system name. Defaults to None.
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the daos pool set-attr command fails.
"""
return self._get_result(
("pool", "set-attr"), pool=pool, attr=attr, value=value,
sys_name=sys_name)
def pool_get_attr(self, pool, attr, sys_name=None):
"""Set pool attribute.
Args:
pool (str): Pool UUID.
attr (str): Pool UUID.
sys_name (str): DAOS system name. Defaults to None.
Returns:
dict: JSON output
Raises:
CommandFailure: if the daos pool query command fails.
"""
return self._get_json_result(
("pool", "get-attr"), pool=pool, attr=attr, sys_name=sys_name)
def pool_list_attrs(self, pool, sys_name=None, verbose=False):
"""List pool attributes.
Args:
pool (str): Pool UUID.
sys_name (str): DAOS system name. Defaults to None.
verbose (bool): False - name only. True - name and value. Defaults
to False.
Returns:
dict: JSON output
Raises:
CommandFailure: if the daos pool list-attrs command fails.
"""
return self._get_json_result(
("pool", "list-attrs"), pool=pool, sys_name=sys_name,
verbose=verbose)
def container_query(self, pool, cont, sys_name=None):
"""Query a container.
Args:
pool (str): Pool UUID.
cont (str): Container UUID.
sys_name (str, optional): DAOS system name context for servers.
Defaults to None.
Returns:
dict: JSON output
Raises:
CommandFailure: if the daos container query command fails.
"""
return self._get_json_result(
("container", "query"), pool=pool, cont=cont, sys_name=sys_name)
def container_set_prop(self, pool, cont, prop, value):
"""Call daos container set-prop.
Args:
pool (str): Pool UUID.
cont (str): Container UUID.
prop (str): Container property-name.
value (str): Container property-name value to set.
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the daos container set-prop command fails.
"""
prop_value = ":".join([prop, value])
return self._get_result(
("container", "set-prop"),
pool=pool, cont=cont, prop=prop_value)
def container_get_prop(self, pool, cont):
"""Call daos container get-prop.
Args:
pool (str): Pool UUID.
cont (str): Container UUID.
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the daos container get-prop command fails.
"""
return self._get_result(
("container", "get-prop"), pool=pool, cont=cont)
def container_set_owner(self, pool, cont, user, group):
"""Call daos container set-owner.
Args:
pool (str): Pool UUID.
cont (str): Container UUID.
user (str): New-user who will own the container.
group (str): New-group who will own the container.
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the daos container set-owner command fails.
"""
return self._get_result(
("container", "set-owner"),
pool=pool, cont=cont, user=user, group=group)
def container_set_attr(
self, pool, cont, attr, val, sys_name=None):
"""Call daos container set-attr.
Args:
pool (str): Pool UUID.
cont (str): Container UUID.
attr (str): Attribute name.
val (str): Attribute value.
sys_name (str, optional): DAOS system name context for servers.
Defaults to None.
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the daos container set-attr command fails.
"""
return self._get_result(
("container", "set-attr"), pool=pool, cont=cont,
sys_name=sys_name, attr=attr, value=val)
def container_get_attr(self, pool, cont, attr, sys_name=None):
"""Call daos container get-attr.
Args:
pool (str): Pool UUID.
cont (str): Container UUID.
attr (str): Attribute name.
sys_name (str, optional): DAOS system name context for servers.
Defaults to None.
Returns:
dict: the daos json command output converted to a python dictionary
Raises:
CommandFailure: if the daos get-attr command fails.
"""
return self._get_json_result(
("container", "get-attr"), pool=pool, cont=cont, attr=attr, sys_name=sys_name)
def container_list_attrs(self, pool, cont, sys_name=None, verbose=False):
"""Call daos container list-attrs.
Args:
pool (str): Pool UUID.
cont (str): Container UUID.
sys_name (str, optional): DAOS system name context for servers.
Defaults to None.
verbose (bool, optional): True - fetch values of all attributes.
Returns:
dict: the daos json command output converted to a python dictionary
Raises:
CommandFailure: if the daos container list-attrs command fails.
"""
return self._get_json_result(
("container", "list-attrs"), pool=pool, cont=cont, sys_name=sys_name,
verbose=verbose)
def container_create_snap(self, pool, cont, snap_name=None, epoch=None,
sys_name=None):
"""Call daos container create-snap.
Args:
pool (str): Pool UUID.
cont (str): Container UUID.
snap_name (str, optional): Snapshot name. Defaults to None.
epoch (str, optional): Epoch number. Defaults to None.
sys_name (str, optional): DAOS system name context for servers.
Defaults to None.
Returns:
dict: Dictionary that stores the created epoch in the key "epoch".
Raises:
CommandFailure: if the daos container create-snap command fails.
"""
self._get_result(
("container", "create-snap"), pool=pool, cont=cont,
sys_name=sys_name, snap=snap_name, epc=epoch)
# Sample create-snap output.
# snapshot/epoch 0x51e719907180000 has been created
data = {}
match = re.findall(r"[A-Za-z\/]+\s(0x[0-9a-fA-F]+)\s[a-z\s]+", self.result.stdout_text)
if match:
data["epoch"] = match[0]
return data
def container_destroy_snap(self, pool, cont, snap_name=None, epc=None,
sys_name=None, epcrange=None):
"""Call daos container destroy-snap.
Args:
pool (str): Pool UUID.
cont (str): Container UUID.
snap_name (str, optional): Snapshot name. Defaults to None.
epc (str, optional): Epoch value of the snapshot to be destroyed.
Defaults to None.
sys_name (str, optional): DAOS system name context for servers.
Defaults to None.
epcrange (str, optional): Epoch range in the format "<start>-<end>".
Defaults to None.
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the daos container destroy-snap command fails.
"""
kwargs = {
"pool": pool,
"cont": cont,
"sys_name": sys_name,
"snap": snap_name,
"epc": epc,
"epcrange": epcrange
}
return self._get_result(("container", "destroy-snap"), **kwargs)
def container_list_snaps(self, pool, cont):
"""List snapshot in a container.
Args:
pool (str): Pool UUID.
cont (str): Container UUID.
Returns:
dict: Dictionary that contains epoch values in key "epochs". Value
is a list of string.
"""
self._get_result(
("container", "list-snaps"), pool=pool, cont=cont)
# Sample container list-snaps output.
# Container's snapshots :
# 0x51ebe2f21500000
# 0x51ebe4f5b6c0000
# 0x51ebe5233780000
data = {}
match = re.findall(r"(0x[0-9a-fA-F]+)", self.result.stdout_text)
if match:
data["epochs"] = match
return data
def object_query(self, pool, cont, oid, sys_name=None):
"""Call daos object query and return its output with a dictionary.
Args:
pool (str): Pool UUID
cont (str): Container UUID
oid (str): oid hi lo value in the format <hi>.<lo>
sys_name (str, optional): System name. Defaults to None.
Returns:
dict: cmd output
oid: (oid.hi, oid.lo)
ver: num
grp_nr: num
layout: [{grp: num, replica: [(n0, n1), (n2, n3)...]}, ...]
Each row of replica nums is a tuple and stored top->bottom.
Raises:
CommandFailure: if the daos object query command fails.
"""
self._get_result(
("object", "query"), pool=pool, cont=cont,
oid=oid, sys_name=sys_name)
# Sample daos object query output.
# oid: 1152922453794619396.1 ver 0 grp_nr: 2
# grp: 0
# replica 0 1
# replica 1 0
# grp: 1
# replica 0 0
# replica 1 1
data = {}
vals = re.findall(
r"oid:\s+([\d.]+)\s+ver\s+(\d+)\s+grp_nr:\s+(\d+)|"\
r"grp:\s+(\d+)\s+|"\
r"replica\s+(\d+)\s+(\d+)\s*", self.result.stdout_text)
try:
oid_vals = vals[0][0]
oid_list = oid_vals.split(".")
oid_hi = oid_list[0]
oid_lo = oid_list[1]
data["oid"] = (oid_hi, oid_lo)
data["ver"] = vals[0][1]
data["grp_nr"] = vals[0][2]
data["layout"] = []
for i in range(1, len(vals)):
if vals[i][3] == "":
if "replica" in data["layout"][-1]:
data["layout"][-1]["replica"].append(
(vals[i][4], vals[i][5]))
else:
data["layout"][-1]["replica"] = [(
vals[i][4], vals[i][5])]
else:
data["layout"].append({"grp": vals[i][3]})
except IndexError:
traceback.print_exc()
self.log.error("--- re.findall output ---")
self.log.error(vals)
return data
def filesystem_copy(self, src, dst, preserve_props=None):
"""Copy a POSIX container or path to another POSIX container or path.
Args:
src (str): The source, formatted as
daos:<pool>/<cont>/<path> or posix:<path>
dst (str): The destination, formatted as
daos:<pool>/<cont>/<path> or posix:<path>
preserve_props (str): The filename to read or write container properties
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the daos filesystem copy command fails.
"""
return self._get_result(
("filesystem", "copy"), src=src, dst=dst, preserve_props=preserve_props)
def version(self):
"""Call daos version.
Returns:
CmdResult: an avocado CmdResult object containing the dmg command
information, e.g. exit status, stdout, stderr, etc.
Raises:
CommandFailure: if the dmg storage query command fails.
"""
return self._get_result(["version"])
|
py | 1a4c37351de622536d261889f482c02f6013fa77 | # -*- coding: utf-8 -*-
from app.constants import S_OK, S_ERR
import random
import math
import base64
import time
import logging
import ujson as json
from wand.image import Image
from StringIO import StringIO
from app.constants import *
from app import cfg
from app import util
_CONTENT_TYPE_POSTFIX_MAP = {
'image/jpeg': 'jpg',
'image/jpg': 'jpg',
'image/gif': 'gif',
'image/png': 'png',
'application/pdf': 'pdf',
'application/x-pdf': 'pdf',
}
_IMG_TYPES = ['png', 'jpg', 'gif']
_IMG_TYPE_MAP = {
'jpg': 'jpeg',
}
def p_img_handler(data, content_type, idx):
idx = util._int(idx)
postfix = _parse_postfix(content_type)
result = _save_img(data, postfix, content_type)
result['the_idx'] = idx
return result
def _save_img(data, postfix, content_type):
the_timestamp = util.get_timestamp()
the_datetime = util.timestamp_to_datetime(the_timestamp)
the_id = str(the_timestamp) + "_" + util.uuid()
filename = the_id + '.' + postfix
the_dir = '/data/img/bee/' + the_datetime.strftime('%Y-%m-%d')
util.makedirs(the_dir)
with open(the_dir + '/' + filename, 'w') as f:
f.write(data)
(the_thumbnail, thumbnail_postfix) = _make_thumbnail(data, postfix)
the_dir = '/data/thumbnail/bee/' + the_datetime.strftime('%Y-%m-%d')
util.makedirs(the_dir)
thumbnail_filename = the_id + '.' + thumbnail_postfix
with open(the_dir + '/' + thumbnail_filename, 'w') as f:
f.write(the_thumbnail)
db_data = {"filename": the_datetime.strftime('%Y-%m-%d/') + filename, "thumbnail_filename": the_datetime.strftime("%Y-%m-%d/") + thumbnail_filename, "the_id": the_id, 'content_type': content_type, 'save_time': the_timestamp}
util.db_insert('bee_img', [db_data])
if '_id' in db_data:
del db_data['_id']
return db_data
def _parse_postfix(content_type):
return _CONTENT_TYPE_POSTFIX_MAP.get(content_type.lower(), 'unknown')
def _make_thumbnail(data, postfix):
postfix = 'png'
converted_data = ''
try:
with Image(blob=data) as img:
(width, height) = img.size
(resized_width, resized_height) = _parse_resize(width, height)
img.resize(resized_width, resized_height)
converted_data = img.make_blob(_IMG_TYPE_MAP.get(postfix, postfix))
except Exception as e:
logging.exception('unable to _make_thumbnail: postfix: %s e: %s', postfix, e)
converted_data = data
return (converted_data, postfix)
def _parse_resize(width, height):
max_size = max(width, height)
the_ratio = float(RESIZE_SIZE) / max_size
resize_width = int(width * the_ratio)
resize_height = int(height * the_ratio)
cfg.logger.debug('width: %s height: %s resize_width: %s resize_height: %s', width, height, resize_width, resize_height)
return (resize_width, resize_height)
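# --- Worked example (added for clarity; not part of the original module) ---
# _parse_resize scales the longest side down to RESIZE_SIZE (imported from app.constants)
# while keeping the aspect ratio. Assuming, hypothetically, RESIZE_SIZE == 256:
# a 1024x768 image gives the_ratio = 256/1024 = 0.25 -> thumbnail size 256x192.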
|
py | 1a4c37fdf2c284632939733647d82c9aa27b18b5 | """Hive API: Post list methods"""
import logging
from hive.server.hive_api.common import (
get_account_id, valid_sort, url_to_id, valid_limit)
from hive.server.hive_api.objects import posts_by_id
log = logging.getLogger(__name__)
# pylint: disable=too-many-arguments
async def list_community_posts(context, community, sort='trending',
start='', limit=10, observer=None):
"""Paginated list of posts in a community. Includes pinned posts at the beginning.
Observer: includes vote/reblog status on each post.
Community:
- `all`: renders site default
- `my`: render's observer's subs
- (blank): show global trending
- (string): show community trending
"""
db = context['db']
pinned_ids = []
if not community:
# global trending: prefix home posts
communities = []
#if not start: pinned_ids = _pinned(db, DEFAULT_COMMUNITY)
elif community[0] == '#':
# feed for specific tag
communities = [community[1:]]
elif community[0] == '@':
# user's subscribed communities feed
communities = await _subscribed(db, community[1:])
#if not start: pinned_ids = _pinned(db, DEFAULT_COMMUNITY)
else:
# specific community feed
communities = [community]
if not start: pinned_ids = await _pinned(db, community)
post_ids = await ranked_pids(db,
sort=valid_sort(sort),
start_id=await url_to_id(db, start) if start else None,
limit=valid_limit(limit, 50),
communities=communities)
# TODO: fetch account role/title, include in response
    # NOTE: consider including & interspersing promoted posts here
posts = await posts_by_id(db, pinned_ids + post_ids, observer=observer)
# Add `pinned` flag to all pinned
for pinned_id in pinned_ids:
posts[pinned_id]['is_pinned'] = True
return posts
async def _subscribed(db, account):
sql = """SELECT c.name FROM hive_communities c
JOIN hive_subscriptions s
ON c.id = s.community_id
WHERE s.account_id = :account_id"""
return await db.query_col(sql, account_id=await get_account_id(db, account))
async def _pinned(db, community):
"""Get a list of pinned post `id`s in `community`."""
sql = """SELECT id FROM hive_posts
WHERE is_pinned = '1'
AND is_deleted = '0'
AND community = :community
ORDER BY id DESC"""
return await db.query_col(sql, community=community)
async def ranked_pids(db, sort, start_id, limit, communities):
"""Get a list of post_ids for a given posts query.
`sort` can be trending, hot, created, promoted, or payout.
"""
assert sort in ['trending', 'hot', 'created', 'promoted', 'payout']
table = 'hive_posts_cache'
field = ''
where = []
if not sort == 'created':
where.append("is_paidout = '0'")
if sort == 'trending':
field = 'sc_trend'
elif sort == 'hot':
field = 'sc_hot'
elif sort == 'created':
field = 'post_id'
where.append('depth = 0')
elif sort == 'promoted':
field = 'promoted'
where.append('promoted > 0')
elif sort == 'payout':
field = 'payout'
elif sort == 'muted':
field = 'payout'
# TODO: index hive_posts (is_muted, category, id)
# TODO: copy is_muted and category from hive_posts to hive_posts_cache?
_filt = "is_muted = '%d'" % (1 if sort == 'muted' else 0)
if communities: _filt += " AND category IN :communities"
where.append("post_id IN (SELECT id FROM hive_posts WHERE %s)" % _filt)
if start_id:
sql = "%s <= (SELECT %s FROM %s WHERE post_id = :start_id)"
where.append(sql % (field, field, table))
sql = ("SELECT post_id FROM %s WHERE %s ORDER BY %s DESC LIMIT :limit"
% (table, ' AND '.join(where), field))
return await db.query_col(sql, communities=tuple(communities),
start_id=start_id, limit=limit)
|
py | 1a4c39f4fed6e8112cb6833243ee704d7aa41cef | from collections import (
defaultdict,
)
from operator import (
attrgetter,
)
from typing import (
Any,
Iterable,
Optional,
Union,
get_args,
get_origin,
)
from uuid import (
UUID,
)
from minos.common import (
TypeHintBuilder,
is_model_type,
)
from .models import (
ModelRef,
)
class ModelRefExtractor:
"""Model Reference Extractor class."""
def __init__(self, value: Any, type_: Optional[type] = None, as_uuids: bool = True):
if type_ is None:
type_ = TypeHintBuilder(value).build()
self.value = value
self.type_ = type_
self.as_uuids = as_uuids
def build(self) -> dict[str, set[UUID]]:
"""Run the model reference extractor.
:return: A dictionary in which the keys are the class names and the values are the identifiers.
"""
ans = defaultdict(set)
self._build(self.value, self.type_, ans)
if self.as_uuids:
ans = {k: set(map(attrgetter("uuid"), v)) for k, v in ans.items()}
return ans
def _build(self, value: Any, type_: type, ans: dict[str, set[ModelRef]]) -> None:
if get_origin(type_) is Union:
type_ = next((t for t in get_args(type_) if get_origin(t) is ModelRef), type_)
if isinstance(value, (tuple, list, set)):
self._build_iterable(value, get_args(type_)[0], ans)
elif isinstance(value, dict):
self._build_iterable(value.keys(), get_args(type_)[0], ans)
self._build_iterable(value.values(), get_args(type_)[1], ans)
elif isinstance(value, ModelRef):
cls = value.data_cls or get_args(type_)[0]
name = cls.__name__
ans[name].add(value)
elif is_model_type(value):
# noinspection PyUnresolvedReferences
for field in value.fields.values():
self._build(field.value, field.type, ans)
def _build_iterable(self, value: Iterable, value_: type, ans: dict[str, set[ModelRef]]) -> None:
for sub_value in value:
self._build(sub_value, value_, ans)
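# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Given a minos model instance whose fields contain ModelRef values (possibly nested in
# lists, sets, tuples or dicts), the extractor groups the referenced identifiers by the
# referenced class name, e.g. (hypothetical model and values):
#
#     refs = ModelRefExtractor(order_instance).build()
#     # -> {"Customer": {UUID("...")}, "Product": {UUID("..."), UUID("...")}}
#
# Passing as_uuids=False keeps the ModelRef objects themselves instead of their uuids.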
|
py | 1a4c3a2c186efffa8a5a66b01ef048df8ac3de2b | from django.db import models
from django.core.validators import MaxValueValidator, MinValueValidator
from django.utils import timezone
from .custom_functions import isOnlyOneTrue
from users.models import OpticUser, Account
import decimal
from termcolor import colored
# Create your models here.
class Patient(models.Model):
"""
    Table with the patient's fields
"""
class Gender(models.TextChoices):
MALE = 'MALE', 'Masculino'
FEMALE = 'FEMALE', 'Femenino'
OTHER = 'OTHER', 'Otro'
patient_optic_id = models.PositiveIntegerField(blank=True)
optic = models.ForeignKey(
OpticUser, verbose_name="Optica", on_delete=models.CASCADE, null=False)
full_name = models.CharField("Nombre completo", max_length=100)
dni = models.CharField(
"Dni o Pasaporte", max_length=20, blank=True, null=True)
age = models.PositiveSmallIntegerField("Edad", blank=True, null=True)
gender = models.CharField("Genero", max_length=20,
blank=True, choices=Gender.choices)
phone = models.CharField("Celular", max_length=30, blank=True)
job = models.CharField('Ocupacion', max_length=70, blank=True)
class Meta:
verbose_name = "Paciente"
verbose_name_plural = "Pacientes"
unique_together = (('optic', 'dni'), ('optic', 'patient_optic_id'))
def __str__(self):
return f"{self.full_name}"
def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
if self._state.adding is True:
optic = OpticUser.objects.get(pk=self.optic.id)
last_patient = optic.patient_set.last()
if last_patient:
patient_optic_id = last_patient.patient_optic_id + 1
else:
patient_optic_id = 1
self.patient_optic_id = patient_optic_id
return super().save(force_insert=force_insert, force_update=force_update, using=using, update_fields=update_fields)
class DiagnosisChoices(models.TextChoices):
"""
    Diagnoses to suggest
"""
MYOPIA = 'MYOPIA', 'Miopía'
ASTIGMATISM = 'ASTIGMATISM', 'Astigmatismo'
FARSIGHTEDNESS = 'FARSIGHTEDNESS', 'Hipermetropía'
PRESBYOPIA = 'PRESBYOPIA', 'Presbicia'
SQUINT = 'SQUINT', 'Estrabismo'
AMBLYOPIA = 'AMBLYOPIA', 'Ambliopía'
DIOPIA = 'DIOPIA', 'Diopía'
GLAUCOMA = 'GLAUCOMA', 'Glaucoma'
DETACHED_RETINA = 'DETACHED_RETINA', 'Desprendimiento de la retina'
CATARACT = 'CATARACT', 'Catarata'
DALTONISM = 'DALTONISM', 'Daltonismo'
CONJUNCTIVITIS = 'CONJUNCTIVITIS', 'Conjuntivitis'
DIABETIC_RETINOPATHY = 'DIABETIC_RETINOPATHY', 'Retinopatía diabética'
DRY_EYE = 'DRY_EYE', 'Ojo seco'
MACULAR_DEGENERATION = 'MACULAR_DEGENERATION', 'Degeneración macular'
class Subsidiary(models.Model):
"""
    Branches (subsidiaries) table
"""
subsidiary_name = models.CharField(
"Nombre Sucursal", max_length=30, blank=True)
direction = models.CharField("Dirección", max_length=50, blank=True)
phone = models.CharField("Telefono", max_length=23, blank=True)
optic = models.ForeignKey(
OpticUser, verbose_name="Optica", on_delete=models.CASCADE, null=False)
class Meta:
verbose_name = "Sucursal"
verbose_name_plural = "Sucursales"
def __str__(self):
return self.subsidiary_name
class Laboratory(models.Model):
"""
    Laboratory table
"""
laboratory_name = models.CharField(
"Laboratorio", max_length=40, null=False)
direction = models.CharField("Dirección", max_length=50, blank=True)
phone = models.CharField("Telefono", max_length=23, blank=True)
optic = models.ForeignKey(
OpticUser, verbose_name="Optica", on_delete=models.CASCADE, null=False)
class Meta:
verbose_name = "Laboratorio"
verbose_name_plural = "Laboratorios"
def __str__(self):
return self.laboratory_name
class CrystalTreatments(models.Model):
"""
    Treatment table
"""
treatment_name = models.CharField("Nombre del tratamiento", max_length=50)
description = models.TextField("Descripcion", blank=True)
optic = models.ForeignKey(
OpticUser, verbose_name="Optica", on_delete=models.CASCADE, null=False)
class Meta:
verbose_name = "Tratamiento"
verbose_name_plural = "Tratamientos"
def __str__(self):
return self.treatment_name
class CrystalMaterial(models.Model):
"""
    Material table
"""
material_name = models.CharField("Nombre del Material", max_length=50)
refractive_index = models.DecimalField(
"Indice de refracción", max_digits=4, decimal_places=3, blank=True, null=True)
abbe = models.DecimalField(
"Valor abbe", max_digits=3, decimal_places=1, blank=True, null=True)
description = models.TextField("Descripcion", blank=True)
optic = models.ForeignKey(
OpticUser, verbose_name="Optica", on_delete=models.CASCADE, null=False)
class Meta:
verbose_name = "Material de la luna"
verbose_name_plural = "Materiales de las lunas"
def __str__(self):
return self.material_name
class Crystal(models.Model):
"""
    Lens (crystal) table
"""
crystal_name = models.CharField("Nombre Luna", max_length=70)
material = models.ForeignKey(
CrystalMaterial, verbose_name="Material", on_delete=models.SET_NULL, null=True, blank=True)
treatments = models.ManyToManyField(
CrystalTreatments, verbose_name="Tratamientos", blank=True)
default_price = models.DecimalField('Precio de los lentes', max_digits=10, decimal_places=2, validators=[
MinValueValidator(0, 'No se permite el valor ingresado')], blank=True, null=True)
optic = models.ForeignKey(
OpticUser, verbose_name="Optica", on_delete=models.CASCADE, null=False)
class Meta:
verbose_name = "Luna"
verbose_name_plural = "Lunas"
def __str__(self):
return self.crystal_name
def get_treatments(self):
treatments = list(self.treatments.all())
treatments = [treatment.treatment_name for treatment in treatments]
if len(treatments) == 0:
return "--"
return ", ".join(treatments)
class Prescription(models.Model):
"""
    Prescription table
"""
class PrescriptionType(models.TextChoices):
MONOFOCAL = 'MONOFOCAL', 'Monofocal'
BIFOCAL = 'BIFOCAL', 'Bifocal'
OCCUPATIONAL = 'OCCUPATIONAL', 'Ocupacional'
PROGRESSIVE = 'PROGRESSIVE', 'Progressivo'
@staticmethod
def generateChoices(start, end):
choices = [(decimal.Decimal(f'{i*0.25}0') if i % 2 == 0 else decimal.Decimal(f'{i*0.25}'), (f'{i*0.25}0' if i <=
0 else f'+{i*0.25}0') if i % 2 == 0 else (f'{i*0.25}' if i <= 0 else f'+{i*0.25}')) for i in range(end-1, start-1, -1)]
for i, (value, name) in enumerate(choices):
if value == decimal.Decimal(0):
choices.insert(i, ('', '--'))
break
return choices
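    # Worked example (added for clarity): generateChoices(-2, 3) produces quarter-diopter
    # steps from +0.50 down to -0.50, with an empty "--" placeholder inserted just before 0:
    #   [(Decimal('0.50'), '+0.50'), (Decimal('0.25'), '+0.25'), ('', '--'),
    #    (Decimal('0.00'), '0.00'), (Decimal('-0.25'), '-0.25'), (Decimal('-0.50'), '-0.50')]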
spherical_choices = generateChoices.__func__(-100, 101)
cylinder_choices = generateChoices.__func__(-40, 1)
axis_choices = [(i, f'{i}°') for i in range(180, -1, -1)]
axis_choices.append(('', '--'))
dip_choices = [(i, f'{i}mm') for i in range(81, 40, -1)]
dip_choices.append(('', '--'))
dnp_choices = [(decimal.Decimal(f'{i/2}') if i % 2 == 0 else decimal.Decimal(
f'{i/2}'), f'{i/2}mm' if i % 2 == 1 else f'{int(i/2)}mm') for i in range(81, 40, -1)]
dnp_choices.append(('', '--'))
add_choices = generateChoices.__func__(1, 25)
add_choices.append(('', '--'))
# print(colored(spherical_choices,'green'))
# print(colored(cylinder_choices,'red'))
# print(colored(axis_choices,'green'))
# print(colored(dip_choices,'red'))
# print(colored(add_choices,'green'))
optic = models.ForeignKey(
OpticUser, verbose_name="Optica", on_delete=models.CASCADE, null=False)
is_dip = models.BooleanField('Dip o Dnp')
patient = models.ForeignKey(
Patient, on_delete=models.PROTECT, verbose_name="Paciente")
subsidiary = models.ForeignKey(
Subsidiary, on_delete=models.SET_NULL, verbose_name="Sucursal", blank=True, null=True)
laboratory = models.ForeignKey(
Laboratory, verbose_name="Laboratorio", on_delete=models.SET_NULL, null=True, blank=True)
doctor = models.ForeignKey(
Account, verbose_name="Doctor", on_delete=models.SET_NULL, blank=True, null=True)
prescription_optic_id = models.PositiveIntegerField(blank=True)
prescription_type = models.CharField(
"Tipo", max_length=50, choices=PrescriptionType.choices, null=True, blank=True)
date = models.DateField(verbose_name='Fecha', default=timezone.now)
time = models.TimeField(verbose_name='Hora', default=timezone.now)
far_spherical_right = models.DecimalField(
"Esf. derecho Lejos", max_digits=4, decimal_places=2, blank=True, null=True, choices=spherical_choices)
far_cylinder_right = models.DecimalField(
"Cil. derecho Lejos", max_digits=4, decimal_places=2, blank=True, null=True, choices=cylinder_choices)
far_axis_right = models.PositiveSmallIntegerField("Eje derecho Lejos", validators=[MaxValueValidator(
180, 'El eje solo permite valores entre 0° y 180°')], blank=True, null=True, choices=axis_choices)
far_av_right = models.CharField(
"Av. derecho lejos", max_length=50, blank=True, null=True)
far_dnp_right = models.DecimalField(
"Dnp. derecho lejos", max_digits=3, decimal_places=1, blank=True, null=True, choices=dnp_choices)
far_spherical_left = models.DecimalField(
"Esf. izquierdo Lejos", max_digits=4, decimal_places=2, blank=True, null=True, choices=spherical_choices)
far_cylinder_left = models.DecimalField(
"Cil. izquierdo Lejos", max_digits=4, decimal_places=2, blank=True, null=True, choices=cylinder_choices)
far_axis_left = models.PositiveSmallIntegerField("Eje izquierdo Lejos", validators=[MaxValueValidator(
180, 'El eje solo permite valores entre 0° y 180°')], blank=True, null=True, choices=axis_choices)
far_av_left = models.CharField(
"Av. izquierdo lejos", max_length=50, blank=True, null=True)
far_dnp_left = models.DecimalField(
"Dnp. izquierdo lejos", max_digits=3, decimal_places=1, blank=True, null=True, choices=dnp_choices)
intermediate_spherical_right = models.DecimalField(
"Esf. derecho intermedio", max_digits=4, decimal_places=2, blank=True, null=True, choices=spherical_choices)
intermediate_cylinder_right = models.DecimalField(
"Cil. derecho intermedio", max_digits=4, decimal_places=2, blank=True, null=True, choices=cylinder_choices)
intermediate_axis_right = models.PositiveSmallIntegerField("Eje derecho Lintermedio", validators=[MaxValueValidator(
180, 'El eje solo permite valores entre 0° y 180°')], blank=True, null=True, choices=axis_choices)
intermediate_av_right = models.CharField(
"Av. derecho intermedio", max_length=50, blank=True, null=True)
intermediate_dnp_right = models.DecimalField(
"Dnp. derecho intermedio", max_digits=3, decimal_places=1, blank=True, null=True, choices=dnp_choices)
intermediate_spherical_left = models.DecimalField(
"Esf. izquierdo intermedio", max_digits=4, decimal_places=2, blank=True, null=True, choices=spherical_choices)
intermediate_cylinder_left = models.DecimalField(
"Cil. izquierdo intermedio", max_digits=4, decimal_places=2, blank=True, null=True, choices=cylinder_choices)
intermediate_axis_left = models.PositiveSmallIntegerField("Eje izquierdo intermedio", validators=[MaxValueValidator(
180, 'El eje solo permite valores entre 0° y 180°')], blank=True, null=True, choices=axis_choices)
intermediate_av_left = models.CharField(
"Av. izquierdo intermedio", max_length=50, blank=True, null=True)
intermediate_dnp_left = models.DecimalField(
"Dnp. izquierdo intermedio", max_digits=3, decimal_places=1, blank=True, null=True, choices=dnp_choices)
near_spherical_right = models.DecimalField(
"Esf. derecho Cerca", max_digits=4, decimal_places=2, blank=True, null=True, choices=spherical_choices)
near_cylinder_right = models.DecimalField(
"Cil. derecho Cerca", max_digits=4, decimal_places=2, blank=True, null=True, choices=cylinder_choices)
near_axis_right = models.PositiveSmallIntegerField("Eje derecho Cerca", validators=[MaxValueValidator(
180, 'El eje solo permite valores entre 0° y 180°')], blank=True, null=True, choices=axis_choices)
near_av_right = models.CharField(
"Av. derecho Cerca", max_length=50, blank=True, null=True)
near_dnp_right = models.DecimalField(
"Dnp. derecho Cerca", max_digits=3, decimal_places=1, blank=True, null=True, choices=dnp_choices)
near_spherical_left = models.DecimalField(
"Esf. izquierdo Cerca", max_digits=4, decimal_places=2, blank=True, null=True, choices=spherical_choices)
near_cylinder_left = models.DecimalField(
"Cil. izquierdo Cerca", max_digits=4, decimal_places=2, blank=True, null=True, choices=cylinder_choices)
near_axis_left = models.PositiveSmallIntegerField("Eje izquierdo Cerca", validators=[MaxValueValidator(
180, 'El eje solo permite valores entre 0 y 180')], blank=True, null=True, choices=axis_choices)
near_av_left = models.CharField(
"Av. izquierdo Cerca", max_length=50, blank=True, null=True)
near_dnp_left = models.DecimalField(
"Dnp. izquierdo Cerca", max_digits=3, decimal_places=1, blank=True, null=True, choices=dnp_choices)
patient_notes = models.TextField("Notas para el paciente", blank=True)
laboratory_notes = models.TextField(
"Notas para el laboratorio", blank=True)
optic_notes = models.TextField("Notas para tu optica", blank=True)
intermediate_add = models.DecimalField(
"Add. intermedio", max_digits=4, decimal_places=2, blank=True, null=True, choices=add_choices)
near_add = models.DecimalField(
"Add. cerca", max_digits=4, decimal_places=2, blank=True, null=True, choices=add_choices)
diagnosis = models.CharField("Diagnostico", max_length=84, blank=True,
help_text="Diagnostico del paciente según las medidas")
measure_price = models.DecimalField('Precio de la medida', max_digits=10, decimal_places=2, default=0, validators=[
MinValueValidator(0, 'No se permite el valor ingresado')], blank=True, null=True)
crystals = models.ForeignKey(
Crystal, on_delete=models.SET_NULL, verbose_name="Lunas", blank=True, null=True)
crystals_cost = models.DecimalField('Costo de las lunas', max_digits=10, decimal_places=2, validators=[
MinValueValidator(0, 'No se permite el valor ingresado')], blank=True, null=True)
crystals_price = models.DecimalField('Precio de venta de las lunas', max_digits=10, decimal_places=2, validators=[
MinValueValidator(0, 'No se permite el valor ingresado')], blank=True, null=True)
frame = models.CharField("Descripcion de la montura",
max_length=120, null=True, blank=True)
frame_price = models.DecimalField('Precio de venta de la montura', max_digits=10, decimal_places=2, validators=[
MinValueValidator(0, 'No se permite el valor ingresado')], blank=True, null=True)
class Meta:
verbose_name = "Receta"
verbose_name_plural = "Recetas"
unique_together = ('optic', 'prescription_optic_id')
def __str__(self):
return f"""{self.patient}"""
# ODL:{self.far_spherical_right if self.far_spherical_right is not None else '?'}({self.far_cylinder_right if self.far_cylinder_right is not None else '?'}){self.far_axis_right if self.far_axis_right is not None else '?'}°
# OIL:{self.far_spherical_left if self.far_spherical_left is not None else '?'}({self.far_cylinder_left if self.far_cylinder_left is not None else '?'}){self.far_axis_left if self.far_axis_left is not None else '?'}°
# ODC:{self.near_spherical_right if self.near_spherical_right is not None else '?'}({self.near_cylinder_right if self.near_cylinder_right is not None else '?'}){self.near_axis_right if self.near_axis_right is not None else '?'}°
# OIC:{self.near_spherical_left if self.near_spherical_left is not None else '?'}({self.near_cylinder_left if self.near_cylinder_left is not None else '?'}){self.near_axis_left if self.near_axis_left is not None else '?'}°
def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
if self._state.adding is True:
optic = OpticUser.objects.get(pk=self.optic.id)
last_prescription = optic.prescription_set.last()
if last_prescription:
prescription_optic_id = last_prescription.prescription_optic_id + 1
else:
prescription_optic_id = 1
self.prescription_optic_id = prescription_optic_id
near = self.has_near_table() or self.near_add is not None
intermediate = self.has_intermediate_table() or self.intermediate_add is not None
far = self.has_far_table()
if isOnlyOneTrue(near, intermediate, far):
self.prescription_type = Prescription.PrescriptionType.MONOFOCAL
elif near and intermediate and far:
self.prescription_type = Prescription.PrescriptionType.PROGRESSIVE
elif near and intermediate:
self.prescription_type = Prescription.PrescriptionType.OCCUPATIONAL
elif (near and far) or (intermediate and far):
self.prescription_type = Prescription.PrescriptionType.BIFOCAL
else:
self.prescription_type = None
super().save(force_insert=force_insert, force_update=force_update,
using=using, update_fields=update_fields)
def get_total(self):
if self.frame_price is None and self.crystals_price is None and self.measure_price is None:
return None
if self.frame_price is None:
frame_price = 0
else:
frame_price = self.frame_price
if self.crystals_price is None:
crystals_price = 0
else:
crystals_price = self.crystals_price
if self.measure_price is None:
measure_price = 0
else:
measure_price = self.measure_price
total = frame_price + crystals_price + measure_price
return total
def has_far_table(self):
"""
        Checks whether the prescription has a distance-vision (far) table
"""
if (self.far_spherical_right is not None or self.far_cylinder_right is not None
or self.far_axis_right is not None or self.far_av_right is not None or
self.far_dnp_right is not None
or self.far_spherical_left is not None or self.far_cylinder_left is not None
or self.far_axis_left is not None or self.far_av_left is not None or
self.far_dnp_left is not None):
return True
return False
def has_intermediate_table(self):
"""
        Checks whether the prescription has an intermediate-vision table
"""
if (self.intermediate_spherical_right is not None or self.intermediate_cylinder_right is not None
or self.intermediate_axis_right is not None or self.intermediate_av_right is not None or
self.intermediate_dnp_right is not None
or self.intermediate_spherical_left is not None or self.intermediate_cylinder_left is not None
or self.intermediate_axis_left is not None or self.intermediate_av_left is not None or
self.intermediate_dnp_left is not None):
return True
return False
def has_near_table(self):
"""
        Checks whether the prescription has a near-vision table
"""
if (self.near_spherical_right is not None or self.near_cylinder_right is not None
or self.near_axis_right is not None or self.near_av_right is not None or
self.near_dnp_right is not None
or self.near_spherical_left is not None or self.near_cylinder_left is not None
or self.near_axis_left is not None or self.near_av_left is not None or
self.near_dnp_left is not None):
return True
return False
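# Illustrative note (not part of the original model): the save() method above
# derives prescription_type from which vision tables are filled in (the near
# and intermediate "Add" values count toward their tables). Under that logic,
# only the far table gives MONOFOCAL, far + near gives BIFOCAL, near +
# intermediate gives OCCUPATIONAL, and all three tables together give PROGRESSIVE.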
|
py | 1a4c3b42c78fb8f0a77a58bd57bcba66cd039ece | #!/usr/bin/env python
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import glob
import os
import sys
import ah_bootstrap
from setuptools import setup
# A dirty hack to get around some early import/configurations ambiguities
import builtins
builtins._ASTROPY_SETUP_ = True
from astropy_helpers.setup_helpers import (register_commands, get_debug_option,
get_package_info)
from astropy_helpers.git_helpers import get_git_devstr
from astropy_helpers.version_helpers import generate_version_py
# Get some values from the setup.cfg
from configparser import ConfigParser
conf = ConfigParser()
conf.read(['setup.cfg'])
metadata = dict(conf.items('metadata'))
PACKAGENAME = metadata.get('package_name', 'pyvo')
DESCRIPTION = metadata.get('description', 'Astropy affiliated package')
AUTHOR = metadata.get('author', '')
AUTHOR_EMAIL = metadata.get('author_email', '')
LICENSE = metadata.get('license', 'unknown')
URL = metadata.get('url', '')
# order of priority for long_description:
# (1) set in setup.cfg,
# (2) load LONG_DESCRIPTION.rst,
# (3) load README.rst,
# (4) package docstring
readme_glob = 'README*'
_cfg_long_description = metadata.get('long_description', '')
if _cfg_long_description:
LONG_DESCRIPTION = _cfg_long_description
elif os.path.exists('LONG_DESCRIPTION.rst'):
with open('LONG_DESCRIPTION.rst') as f:
LONG_DESCRIPTION = f.read()
elif len(glob.glob(readme_glob)) > 0:
with open(glob.glob(readme_glob)[0]) as f:
LONG_DESCRIPTION = f.read()
else:
# Get the long description from the package's docstring
__import__(PACKAGENAME)
package = sys.modules[PACKAGENAME]
LONG_DESCRIPTION = package.__doc__
# Store the package name in a built-in variable so it's easy
# to get from other parts of the setup infrastructure
builtins._ASTROPY_PACKAGE_NAME_ = PACKAGENAME
# VERSION should be PEP440 compatible (http://www.python.org/dev/peps/pep-0440)
VERSION = metadata.get('version', '0.0.dev')
# Indicates if this version is a release version
RELEASE = 'dev' not in VERSION
if not RELEASE:
VERSION += get_git_devstr(False)
# Populate the dict of setup command overrides; this should be done before
# invoking any other functionality from distutils since it can potentially
# modify distutils' behavior.
cmdclassd = register_commands(PACKAGENAME, VERSION, RELEASE)
# Freeze build information in version.py
generate_version_py(PACKAGENAME, VERSION, RELEASE,
get_debug_option(PACKAGENAME))
# Get configuration information from all of the various subpackages.
# See the docstring for setup_helpers.update_package_files for more
# details.
package_info = get_package_info()
# Add the project-global data
package_info['package_data'].setdefault(PACKAGENAME, [])
package_info['package_data'][PACKAGENAME].append('data/*')
# Define entry points for command-line scripts
entry_points = {'console_scripts': []}
entry_point_list = conf.items('entry_points')
for entry_point in entry_point_list:
entry_points['console_scripts'].append('{} = {}'.format(entry_point[0],
entry_point[1]))
# Include all .c files, recursively, including those generated by
# Cython, since we can not do this in MANIFEST.in with a "dynamic"
# directory name.
c_files = []
for root, dirs, files in os.walk(PACKAGENAME):
for filename in files:
if filename.endswith('.c'):
c_files.append(
os.path.join(
os.path.relpath(root, PACKAGENAME), filename))
package_info['package_data'][PACKAGENAME].extend(c_files)
# Note that requires and provides should not be included in the call to
# ``setup``, since these are now deprecated. See this link for more details:
# https://groups.google.com/forum/#!topic/astropy-dev/urYO8ckB2uM
setup(name=PACKAGENAME,
version=VERSION,
description=DESCRIPTION,
python_requires='>=3.7',
install_requires=metadata.get('install_requires', 'astropy').strip().split(),
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license=LICENSE,
url=URL,
long_description=LONG_DESCRIPTION,
cmdclass=cmdclassd,
zip_safe=False,
use_2to3=False,
entry_points=entry_points,
**package_info
)
|
py | 1a4c3b548650bc9e4fc69387af7feb7cfc162a6a | from unittest.mock import patch
from django.core.management import call_command
from django.db.utils import OperationalError
from django.test import TestCase
class CommandsTestCase(TestCase):
def test_wait_for_db_ready(self):
"""Test waiting for db when db is available"""
with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
gi.return_value = True
call_command('wait_for_db')
self.assertEqual(gi.call_count, 1)
@patch('time.sleep', return_value=True)
def test_wait_for_db(self, ts):
"""Test waiting for db"""
with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
gi.side_effect = [OperationalError] * 5 + [True]
call_command('wait_for_db')
self.assertEqual(gi.call_count, 6)
|
py | 1a4c3b5c908b8617493dc61309d9d9a08c2074eb | # Acumuladores Somatorio
soma = 0
cont = 1
while cont <= 5:
x = int(input('Digite o {}º numero: ' .format(cont)))
soma = soma + x
cont = cont + 1
print('Somatório: {}' .format(soma))
|
py | 1a4c3ce656f519dc60d1a5fb55e962f41f675c70 | from PyTsetlinMachineCUDA.tm import MultiClassConvolutionalTsetlinMachine2D
import numpy as np
from time import time
from keras.datasets import mnist
(X_train, Y_train), (X_test, Y_test) = mnist.load_data()
X_train = np.where(X_train >= 75, 1, 0)
X_test = np.where(X_test >= 75, 1, 0)
tm = MultiClassConvolutionalTsetlinMachine2D(2000, 50*15, 5.0, (10, 10), max_weight=16)
print("\nAccuracy over 50 epochs:\n")
for i in range(50):
start_training = time()
tm.fit(X_train, Y_train, epochs=1, incremental=True)
stop_training = time()
start_testing = time()
result = 100*(tm.predict(X_test) == Y_test).mean()
stop_testing = time()
print("#%d Accuracy: %.2f%% Training: %.2fs Testing: %.2fs" % (i+1, result, stop_training-start_training, stop_testing-start_testing))
|
py | 1a4c3ce90f9e988c7c1060928b27b367a329d98f | # _ __ ____ _ _
# | |/ / | _ \ | \ | |
# | ' / | |_) | | \| |
# | . \ | __/ | |\ |
# |_|\_\ |_| |_| \_|
#
# (c) 2018 KPN
# License: MIT license.
# Author: Jan Bogaerts
#
# actuator example
# boot.py -- run on boot-up
import os
import machine
uart = machine.UART(0, 115200)
os.dupterm(uart)
|
py | 1a4c3d7f21535ab6992bbf876c38845f20a51805 | # Copyright 2021 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase, mock
from delfin import context
from delfin.drivers.dell_emc.vplex.rest_handler import RestHandler
from delfin.drivers.dell_emc.vplex.vplex_stor import VplexStorageDriver
ACCESS_INFO = {
"storage_id": "12345",
"vendor": "dell_emc",
"model": "vplex",
"rest": {
"host": "8.44.162.250",
"port": 443,
"username": "service",
"password": "Abcdef@123"
}
}
TRAP_INFO = {
"1.3.6.1.2.1.1.3.0": "0",
'1.3.6.1.6.3.1.1.4.1.0': '1.3.6.1.4.1.1139.21.0',
'1.3.6.1.4.1.1139.21.1.5.0': 'this is test',
'1.3.6.1.4.1.1139.21.1.3.0': '123321'
}
trap_result = {
'alert_id': '123321',
'alert_name': 'this is test',
'severity': 'Informational',
'category': 'Fault',
'type': 'EquipmentAlarm',
'occur_time': 1614067724000,
'description': 'this is test',
'resource_type': 'Storage',
'location': '',
'match_key': '8c6d115258631625b625486f81b09532'
}
GET_ALL_CLUSTER = {
"context": [{
"children": [{
"name": "cluster-1",
"type": "cluster"
}
]
}
]
}
GET_ALL_LUNS = {
"context": [
{
"children": [
{
"name": "device_VPLEX_LUN0_1_vol",
"type": "virtual-volume"
}
]
}
]
}
GET_LUN = {
"context": [
{
"attributes": [
{
"name": "capacity",
"value": "644245094400B"
},
{
"name": "health-state",
"value": "ok"
},
{
"name": "operational-status",
"value": "ok"
},
{
"name": "supporting-device",
"value": "device__VPLEX_LUN0_1"
},
{
"name": "thin-enabled",
"value": "unavailable"
},
{
"name": "vpd-id",
"value": "VPD83T3:60000000000000000000000000000000"
}
]
}
]
}
volume_result = [{
'name': 'device_VPLEX_LUN0_1_vol',
'storage_id': '12345',
'description': 'EMC VPlex volume',
'status': 'normal',
'native_volume_id': 'VPD83T3:60000000000000000000000000000000',
'native_storage_pool_id': 'device__VPLEX_LUN0_1',
'type': 'thick',
'total_capacity': 644245094400,
'used_capacity': 644245094400,
'free_capacity': 0,
'wwn': '60000000000000000000000000000000'
}
]
GET_ALL_POOLS = {
"context": [
{
"children": [
{
"name": "Device_KLM_test01",
"type": "local-device"
}
]
}
]
}
GET_POOL = {
"context": [
{
"attributes": [
{
"name": "capacity",
"value": "732212254720B"
},
{
"name": "health-state",
"value": "ok"
},
{
"name": "operational-status",
"value": "ok"
},
{
"name": "system-id",
"value": "Device_KLM_test01"
},
{
"name": "virtual-volume",
"value": "Volume_CLARiiON0041_KLM_test01"
}
]
}
]
}
pool_result = [
{
'name': 'Device_KLM_test01',
'storage_id': '12345',
'native_storage_pool_id': 'Device_KLM_test01',
'description': 'EMC VPlex Pool',
'status': 'normal',
'storage_type': 'block',
'total_capacity': 732212254720,
'used_capacity': 732212254720,
'free_capacity': 0
}
]
GET_HEALH_CHECK = {
"context": None,
"message": "health-check -l",
"exception": None,
"custom-data": "Product Version: 6.1.0.01.00.13\n"
"Product Type: Local\n"
}
GET_CLUSTER = {
"context": [
{
"type": "cluster",
"parent": "/clusters",
"attributes": [
{
"name": "health-state",
"value": "major-failure"
},
{
"name": "operational-status",
"value": "degraded"
},
{
"name": "top-level-assembly",
"value": "FNM00000000000"
}
],
}
]
}
storage_result = {
'name': 'cluster-1',
'vendor': 'DELL EMC',
'description': 'EMC VPlex Storage',
'status': 'abnormal',
'serial_number': 'FNM00000000000',
'firmware_version': ' 6.1.0.01.00.13',
'model': 'EMC VPLEX Local',
'location': '',
'raw_capacity': 12754334882201,
'total_capacity': 11654823254425,
'used_capacity': 8983009998929,
'free_capacity': 2671813255496,
'subscribed_capacity': 0
}
GET_ALL_STORAGE_VOLUME_SUMMARY = {
"custom-data": "Capacity total 11.6T\n\n"
}
GET_ALL_POOLS_SUMMARY = {
"custom-data": "total capacity 1.88T total capacity "
"8.68T total capacity 10.6T\n\n"
}
GET_ALL_LUNS_SUMMARY = {
"custom-data": "Total virtual-volume capacity is 8.17T."
}
class TestVplexStorDriver(TestCase):
RestHandler.login = mock.Mock(return_value=None)
def test_parse_alert(self):
trap = VplexStorageDriver(**ACCESS_INFO).parse_alert(context,
TRAP_INFO)
trap_result['occur_time'] = trap['occur_time']
self.assertDictEqual(trap, trap_result)
@mock.patch.object(RestHandler, 'get_cluster_resp')
@mock.patch.object(RestHandler, 'get_virtual_volume_resp')
@mock.patch.object(RestHandler, 'get_virtual_volume_by_name_resp')
def test_list_volumes(self, mock_name, mock_volume, mock_cluster):
mock_cluster.return_value = GET_ALL_CLUSTER
mock_volume.return_value = GET_ALL_LUNS
mock_name.return_value = GET_LUN
volume = VplexStorageDriver(**ACCESS_INFO).list_volumes(context)
self.assertDictEqual(volume[0], volume_result[0])
@mock.patch.object(RestHandler, 'get_cluster_resp')
@mock.patch.object(RestHandler, 'get_devcie_resp')
@mock.patch.object(RestHandler, 'get_device_by_name_resp')
def test_list_storage_pools(self, mock_name, mock_device, mock_cluster):
mock_cluster.return_value = GET_ALL_CLUSTER
mock_device.return_value = GET_ALL_POOLS
mock_name.return_value = GET_POOL
pool = VplexStorageDriver(**ACCESS_INFO).list_storage_pools(context)
self.assertDictEqual(pool[0], pool_result[0])
def test_get_storage(self):
RestHandler.get_rest_info = mock.Mock(
side_effect=[GET_HEALH_CHECK, GET_ALL_CLUSTER, GET_CLUSTER,
GET_ALL_STORAGE_VOLUME_SUMMARY, GET_ALL_POOLS_SUMMARY,
GET_ALL_LUNS_SUMMARY])
storage = VplexStorageDriver(**ACCESS_INFO).get_storage(context)
self.assertDictEqual(storage, storage_result)
def test_list_alerts(self):
with self.assertRaises(Exception) as exc:
VplexStorageDriver(**ACCESS_INFO).list_alerts(context)
self.assertEqual('list_alerts is not supported in model VPLEX',
str(exc.exception))
|
py | 1a4c3e6227304633f53e83bede2f256c46577006 | from django.contrib import admin
# Register your models here.
from .models import Choice, Question
admin.site.register(Choice)
admin.site.register(Question)
|
py | 1a4c3ee0da02d28ed3985207664b47286aae1d06 | import ConfigParser
import logging
import os
import re
from galaxy import util
from galaxy import web
from galaxy.web.form_builder import build_select_field
from galaxy.webapps.tool_shed.model import directory_hash_id
from tool_shed.dependencies.repository import relation_builder
from tool_shed.util import common_util
from tool_shed.util import hg_util
from tool_shed.util import shed_util_common as suc
log = logging.getLogger( __name__ )
VALID_REPOSITORYNAME_RE = re.compile( "^[a-z0-9\_]+$" )
def build_allow_push_select_field( trans, current_push_list, selected_value='none' ):
options = []
for user in trans.sa_session.query( trans.model.User ):
if user.username not in current_push_list:
options.append( user )
return build_select_field( trans,
objs=options,
label_attr='username',
select_field_name='allow_push',
selected_value=selected_value,
refresh_on_change=False,
multiple=True )
def change_repository_name_in_hgrc_file( hgrc_file, new_name ):
config = ConfigParser.ConfigParser()
    config.read( hgrc_file )
config.set( 'web', 'name', new_name )
new_file = open( hgrc_file, 'wb' )
config.write( new_file )
new_file.close()
def check_or_update_tool_shed_status_for_installed_repository( app, repository ):
updated = False
tool_shed_status_dict = suc.get_tool_shed_status_for_installed_repository( app, repository )
if tool_shed_status_dict:
ok = True
if tool_shed_status_dict != repository.tool_shed_status:
repository.tool_shed_status = tool_shed_status_dict
app.install_model.context.add( repository )
app.install_model.context.flush()
updated = True
else:
ok = False
return ok, updated
def create_repo_info_dict( app, repository_clone_url, changeset_revision, ctx_rev, repository_owner, repository_name=None,
repository=None, repository_metadata=None, tool_dependencies=None, repository_dependencies=None ):
"""
Return a dictionary that includes all of the information needed to install a repository into a local
Galaxy instance. The dictionary will also contain the recursive list of repository dependencies defined
for the repository, as well as the defined tool dependencies.
This method is called from Galaxy under four scenarios:
1. During the tool shed repository installation process via the tool shed's get_repository_information()
method. In this case both the received repository and repository_metadata will be objects, but
tool_dependencies and repository_dependencies will be None.
2. When getting updates for an installed repository where the updates include newly defined repository
dependency definitions. This scenario is similar to 1. above. The tool shed's get_repository_information()
method is the caller, and both the received repository and repository_metadata will be objects, but
tool_dependencies and repository_dependencies will be None.
3. When a tool shed repository that was uninstalled from a Galaxy instance is being reinstalled with no
updates available. In this case, both repository and repository_metadata will be None, but tool_dependencies
and repository_dependencies will be objects previously retrieved from the tool shed if the repository includes
definitions for them.
4. When a tool shed repository that was uninstalled from a Galaxy instance is being reinstalled with updates
available. In this case, this method is reached via the tool shed's get_updated_repository_information()
method, and both repository and repository_metadata will be objects but tool_dependencies and
repository_dependencies will be None.
"""
repo_info_dict = {}
repository = suc.get_repository_by_name_and_owner( app, repository_name, repository_owner )
if app.name == 'tool_shed':
# We're in the tool shed.
repository_metadata = suc.get_repository_metadata_by_changeset_revision( app,
app.security.encode_id( repository.id ),
changeset_revision )
if repository_metadata:
metadata = repository_metadata.metadata
if metadata:
tool_shed_url = str( web.url_for( '/', qualified=True ) ).rstrip( '/' )
rb = relation_builder.RelationBuilder( app, repository, repository_metadata, tool_shed_url )
# Get a dictionary of all repositories upon which the contents of the received repository depends.
repository_dependencies = rb.get_repository_dependencies_for_changeset_revision()
tool_dependencies = metadata.get( 'tool_dependencies', {} )
if tool_dependencies:
new_tool_dependencies = {}
for dependency_key, requirements_dict in tool_dependencies.items():
if dependency_key in [ 'set_environment' ]:
new_set_environment_dict_list = []
for set_environment_dict in requirements_dict:
set_environment_dict[ 'repository_name' ] = repository_name
set_environment_dict[ 'repository_owner' ] = repository_owner
set_environment_dict[ 'changeset_revision' ] = changeset_revision
new_set_environment_dict_list.append( set_environment_dict )
new_tool_dependencies[ dependency_key ] = new_set_environment_dict_list
else:
requirements_dict[ 'repository_name' ] = repository_name
requirements_dict[ 'repository_owner' ] = repository_owner
requirements_dict[ 'changeset_revision' ] = changeset_revision
new_tool_dependencies[ dependency_key ] = requirements_dict
tool_dependencies = new_tool_dependencies
# Cast unicode to string, with the exception of description, since it is free text and can contain special characters.
repo_info_dict[ str( repository.name ) ] = ( repository.description,
str( repository_clone_url ),
str( changeset_revision ),
str( ctx_rev ),
str( repository_owner ),
repository_dependencies,
tool_dependencies )
return repo_info_dict
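# A minimal sketch (not part of the original module) of the mapping shape that
# create_repo_info_dict() returns, assuming a hypothetical repository named
# "my_tools" owned by "alice"; the 7-tuple ordering matches the assignment to
# repo_info_dict above:
#
#   {'my_tools': (description,               # free-text repository description
#                 clone_url,                 # str
#                 changeset_revision,        # str
#                 ctx_rev,                   # str
#                 'alice',                   # repository owner
#                 repository_dependencies,   # dict (or None)
#                 tool_dependencies)}        # dict (or None)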
def create_repository( app, name, type, description, long_description, user_id, category_ids=[] ):
sa_session = app.model.context.current
# Add the repository record to the database.
repository = app.model.Repository( name=name,
type=type,
description=description,
long_description=long_description,
user_id=user_id )
# Flush to get the id.
sa_session.add( repository )
sa_session.flush()
# Create an admin role for the repository.
repository_admin_role = create_repository_admin_role( app, repository )
# Determine the repository's repo_path on disk.
dir = os.path.join( app.config.file_path, *directory_hash_id( repository.id ) )
# Create directory if it does not exist.
if not os.path.exists( dir ):
os.makedirs( dir )
# Define repo name inside hashed directory.
repository_path = os.path.join( dir, "repo_%d" % repository.id )
# Create local repository directory.
if not os.path.exists( repository_path ):
os.makedirs( repository_path )
# Create the local repository.
repo = hg_util.get_repo_for_repository( app, repository=None, repo_path=repository_path, create=True )
# Add an entry in the hgweb.config file for the local repository.
lhs = "repos/%s/%s" % ( repository.user.username, repository.name )
app.hgweb_config_manager.add_entry( lhs, repository_path )
# Create a .hg/hgrc file for the local repository.
hg_util.create_hgrc_file( app, repository )
flush_needed = False
if category_ids:
# Create category associations
for category_id in category_ids:
category = sa_session.query( app.model.Category ) \
.get( app.security.decode_id( category_id ) )
rca = app.model.RepositoryCategoryAssociation( repository, category )
sa_session.add( rca )
flush_needed = True
if flush_needed:
sa_session.flush()
# Update the repository registry.
app.repository_registry.add_entry( repository )
message = "Repository <b>%s</b> has been created." % str( repository.name )
return repository, message
def create_repository_admin_role( app, repository ):
"""
Create a new role with name-spaced name based on the repository name and its owner's public user
    name. This will ensure that the role name is unique.
"""
sa_session = app.model.context.current
name = get_repository_admin_role_name( str( repository.name ), str( repository.user.username ) )
description = 'A user or group member with this role can administer this repository.'
role = app.model.Role( name=name, description=description, type=app.model.Role.types.SYSTEM )
sa_session.add( role )
sa_session.flush()
# Associate the role with the repository owner.
ura = app.model.UserRoleAssociation( repository.user, role )
# Associate the role with the repository.
rra = app.model.RepositoryRoleAssociation( repository, role )
sa_session.add( rra )
sa_session.flush()
return role
def get_installed_tool_shed_repository( app, id ):
"""Get a tool shed repository record from the Galaxy database defined by the id."""
return app.install_model.context.query( app.install_model.ToolShedRepository ) \
.get( app.security.decode_id( id ) )
def get_repo_info_dict( app, user, repository_id, changeset_revision ):
repository = suc.get_repository_in_tool_shed( app, repository_id )
repo = hg_util.get_repo_for_repository( app, repository=repository, repo_path=None, create=False )
repository_clone_url = common_util.generate_clone_url_for_repository_in_tool_shed( user, repository )
repository_metadata = suc.get_repository_metadata_by_changeset_revision( app,
repository_id,
changeset_revision )
if not repository_metadata:
# The received changeset_revision is no longer installable, so get the next changeset_revision
# in the repository's changelog. This generally occurs only with repositories of type
# repository_suite_definition or tool_dependency_definition.
next_downloadable_changeset_revision = \
suc.get_next_downloadable_changeset_revision( repository, repo, changeset_revision )
if next_downloadable_changeset_revision:
repository_metadata = suc.get_repository_metadata_by_changeset_revision( app,
repository_id,
next_downloadable_changeset_revision )
if repository_metadata:
# For now, we'll always assume that we'll get repository_metadata, but if we discover our assumption
# is not valid we'll have to enhance the callers to handle repository_metadata values of None in the
# returned repo_info_dict.
metadata = repository_metadata.metadata
if 'tools' in metadata:
includes_tools = True
else:
includes_tools = False
includes_tools_for_display_in_tool_panel = repository_metadata.includes_tools_for_display_in_tool_panel
repository_dependencies_dict = metadata.get( 'repository_dependencies', {} )
repository_dependencies = repository_dependencies_dict.get( 'repository_dependencies', [] )
has_repository_dependencies, has_repository_dependencies_only_if_compiling_contained_td = \
suc.get_repository_dependency_types( repository_dependencies )
if 'tool_dependencies' in metadata:
includes_tool_dependencies = True
else:
includes_tool_dependencies = False
else:
# Here's where we may have to handle enhancements to the callers. See above comment.
includes_tools = False
has_repository_dependencies = False
has_repository_dependencies_only_if_compiling_contained_td = False
includes_tool_dependencies = False
includes_tools_for_display_in_tool_panel = False
ctx = hg_util.get_changectx_for_changeset( repo, changeset_revision )
repo_info_dict = create_repo_info_dict( app=app,
repository_clone_url=repository_clone_url,
changeset_revision=changeset_revision,
ctx_rev=str( ctx.rev() ),
repository_owner=repository.user.username,
repository_name=repository.name,
repository=repository,
repository_metadata=repository_metadata,
tool_dependencies=None,
repository_dependencies=None )
return repo_info_dict, includes_tools, includes_tool_dependencies, includes_tools_for_display_in_tool_panel, \
has_repository_dependencies, has_repository_dependencies_only_if_compiling_contained_td
def get_repository_admin_role_name( repository_name, repository_owner ):
return '%s_%s_admin' % ( str( repository_name ), str( repository_owner ) )
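# For example (illustrative, not part of the original module):
#   get_repository_admin_role_name( 'my_tools', 'alice' ) == 'my_tools_alice_admin'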
def get_role_by_id( app, role_id ):
"""Get a Role from the database by id."""
sa_session = app.model.context.current
return sa_session.query( app.model.Role ).get( app.security.decode_id( role_id ) )
def handle_role_associations( app, role, repository, **kwd ):
sa_session = app.model.context.current
message = kwd.get( 'message', '' )
status = kwd.get( 'status', 'done' )
repository_owner = repository.user
if kwd.get( 'manage_role_associations_button', False ):
in_users_list = util.listify( kwd.get( 'in_users', [] ) )
in_users = [ sa_session.query( app.model.User ).get( x ) for x in in_users_list ]
        # Make sure the repository owner is always associated with the repository's admin role.
owner_associated = False
for user in in_users:
if user.id == repository_owner.id:
owner_associated = True
break
if not owner_associated:
in_users.append( repository_owner )
message += "The repository owner must always be associated with the repository's administrator role. "
status = 'error'
in_groups_list = util.listify( kwd.get( 'in_groups', [] ) )
in_groups = [ sa_session.query( app.model.Group ).get( x ) for x in in_groups_list ]
in_repositories = [ repository ]
app.security_agent.set_entity_role_associations( roles=[ role ],
users=in_users,
groups=in_groups,
repositories=in_repositories )
sa_session.refresh( role )
message += "Role <b>%s</b> has been associated with %d users, %d groups and %d repositories. " % \
( str( role.name ), len( in_users ), len( in_groups ), len( in_repositories ) )
in_users = []
out_users = []
in_groups = []
out_groups = []
for user in sa_session.query( app.model.User ) \
.filter( app.model.User.table.c.deleted==False ) \
.order_by( app.model.User.table.c.email ):
if user in [ x.user for x in role.users ]:
in_users.append( ( user.id, user.email ) )
else:
out_users.append( ( user.id, user.email ) )
for group in sa_session.query( app.model.Group ) \
.filter( app.model.Group.table.c.deleted==False ) \
.order_by( app.model.Group.table.c.name ):
if group in [ x.group for x in role.groups ]:
in_groups.append( ( group.id, group.name ) )
else:
out_groups.append( ( group.id, group.name ) )
associations_dict = dict( in_users=in_users,
out_users=out_users,
in_groups=in_groups,
out_groups=out_groups,
message=message,
status=status )
return associations_dict
def validate_repository_name( app, name, user ):
# Repository names must be unique for each user, must be at least four characters
# in length and must contain only lower-case letters, numbers, and the '_' character.
if name in [ 'None', None, '' ]:
return 'Enter the required repository name.'
if name in [ 'repos' ]:
return "The term <b>%s</b> is a reserved word in the tool shed, so it cannot be used as a repository name." % name
check_existing = suc.get_repository_by_name_and_owner( app, name, user.username )
if check_existing is not None:
if check_existing.deleted:
return 'You have a deleted repository named <b>%s</b>, so choose a different name.' % name
else:
return "You already have a repository named <b>%s</b>, so choose a different name." % name
if len( name ) < 4:
return "Repository names must be at least 4 characters in length."
if len( name ) > 80:
return "Repository names cannot be more than 80 characters in length."
if not( VALID_REPOSITORYNAME_RE.match( name ) ):
return "Repository names must contain only lower-case letters, numbers and underscore <b>_</b>."
return ''
|
py | 1a4c40aab61b98fe1ffa4fee94239582dab1da1c | import unittest
import os
import sys
import os.path as path
import numpy as np
import scipy
# Path to where the bindings live
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "src")))
if os.name == 'nt': # if Windows
# handle default location where VS puts binary
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "build", "Debug")))
else:
# normal / unix case
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "build")))
import potpourri3d as pp3d
asset_path = os.path.abspath(os.path.dirname(__file__))
def generate_verts(n_pts=999):
np.random.seed(777)
return np.random.rand(n_pts, 3)
def generate_faces(n_pts=999):
# n_pts should be a multiple of 3 for indexing to work out
np.random.seed(777)
rand_faces = np.random.randint(0, n_pts, size=(2*n_pts,3))
coverage_faces = np.arange(n_pts).reshape(-1, 3)
faces = np.vstack((rand_faces, coverage_faces))
return faces
def is_symmetric(A, eps=1e-6):
resid = A - A.T
return np.all(np.abs(resid.data) < eps)
def is_nonnegative(A, eps=1e-6):
return np.all(A.data > -eps)
class TestCore(unittest.TestCase):
def test_write_read_mesh(self):
for ext in ['obj']:
V = generate_verts()
F = generate_faces()
fname = "test." + ext
# write
pp3d.write_mesh(V,F,fname)
Vnew, Fnew = pp3d.read_mesh(fname)
self.assertLess(np.amax(np.abs(V-Vnew)), 1e-6)
self.assertTrue((F==Fnew).all())
def test_write_read_point_cloud(self):
for ext in ['obj', 'ply']:
V = generate_verts()
fname = "test_cloud." + ext
# write
pp3d.write_point_cloud(V, fname)
Vnew = pp3d.read_point_cloud(fname)
self.assertLess(np.amax(np.abs(V-Vnew)), 1e-6)
# self.assertTrue(is_nonnegative(off_L)) # positive edge weights
# self.assertGreater(L.sum(), -1e-5)
# self.assertEqual(M.sum(), M.diagonal().sum())
def test_mesh_heat_distance(self):
V = generate_verts()
F = generate_faces()
# Test stateful version
solver = pp3d.MeshHeatMethodDistanceSolver(V,F)
dist = solver.compute_distance(7)
self.assertEqual(dist.shape[0], V.shape[0])
dist = solver.compute_distance_multisource([1,2,3])
self.assertEqual(dist.shape[0], V.shape[0])
# = Test one-off versions
dist = pp3d.compute_distance(V,F,7)
self.assertEqual(dist.shape[0], V.shape[0])
dist = pp3d.compute_distance_multisource(V,F,[1,3,4])
self.assertEqual(dist.shape[0], V.shape[0])
def test_mesh_vector_heat(self):
V, F = pp3d.read_mesh(os.path.join(asset_path, "bunny_small.ply"))
solver = pp3d.MeshVectorHeatSolver(V,F)
# Scalar extension
ext = solver.extend_scalar([1, 22], [0., 6.])
self.assertEqual(ext.shape[0], V.shape[0])
self.assertGreaterEqual(np.amin(ext), 0.)
# Get frames
basisX, basisY, basisN = solver.get_tangent_frames()
self.assertEqual(basisX.shape[0], V.shape[0])
self.assertEqual(basisY.shape[0], V.shape[0])
self.assertEqual(basisN.shape[0], V.shape[0])
# TODO could check orthogonal
# Vector heat (transport vector)
ext = solver.transport_tangent_vector(1, [6., 6.])
self.assertEqual(ext.shape[0], V.shape[0])
self.assertEqual(ext.shape[1], 2)
ext = solver.transport_tangent_vectors([1, 22], [[6., 6.], [3., 4.]])
self.assertEqual(ext.shape[0], V.shape[0])
self.assertEqual(ext.shape[1], 2)
# Vector heat (log map)
logmap = solver.compute_log_map(1)
self.assertEqual(logmap.shape[0], V.shape[0])
self.assertEqual(logmap.shape[1], 2)
def test_mesh_cotan_laplace(self):
V, F = pp3d.read_mesh(os.path.join(asset_path, "bunny_small.ply"))
L = pp3d.cotan_laplacian(V,F)
self.assertEqual(L.shape[0],V.shape[0])
self.assertEqual(L.shape[1],V.shape[0])
self.assertLess(np.abs(np.sum(L)), 1e-6)
def test_mesh_areas(self):
V, F = pp3d.read_mesh(os.path.join(asset_path, "bunny_small.ply"))
face_area = pp3d.face_areas(V,F)
self.assertEqual(face_area.shape[0],F.shape[0])
self.assertTrue(np.all(face_area >= 0))
vert_area = pp3d.vertex_areas(V,F)
self.assertLess(np.abs(np.sum(face_area) - np.sum(vert_area)), 1e-6)
def test_mesh_flip_geodesic(self):
V, F = pp3d.read_mesh(os.path.join(asset_path, "bunny_small.ply"))
# Test stateful version
path_solver = pp3d.EdgeFlipGeodesicSolver(V,F)
# Do a first path
path_pts = path_solver.find_geodesic_path(v_start=14, v_end=22)
self.assertEqual(len(path_pts.shape), 2)
self.assertEqual(path_pts.shape[1], 3)
# Do some more
for i in range(5):
path_pts = path_solver.find_geodesic_path(v_start=14, v_end=22+i)
self.assertEqual(len(path_pts.shape), 2)
self.assertEqual(path_pts.shape[1], 3)
# Initialize with a compound path
path_pts = path_solver.find_geodesic_path_poly([1173, 148, 870, 898])
self.assertEqual(len(path_pts.shape), 2)
self.assertEqual(path_pts.shape[1], 3)
# Do a loop
loop_pts = path_solver.find_geodesic_loop([1173, 148, 870, 898])
self.assertEqual(len(loop_pts.shape), 2)
self.assertEqual(loop_pts.shape[1], 3)
# Do another loop
# this one contracts to a point
loop_pts = path_solver.find_geodesic_loop([307, 757, 190])
self.assertEqual(len(loop_pts.shape), 2)
self.assertEqual(loop_pts.shape[1], 3)
def test_point_cloud_distance(self):
P = generate_verts()
solver = pp3d.PointCloudHeatSolver(P)
dist = solver.compute_distance(7)
self.assertEqual(dist.shape[0], P.shape[0])
dist = solver.compute_distance_multisource([1,2,3])
self.assertEqual(dist.shape[0], P.shape[0])
def test_point_cloud_vector_heat(self):
P = generate_verts()
solver = pp3d.PointCloudHeatSolver(P)
# Scalar extension
ext = solver.extend_scalar([1, 22], [0., 6.])
self.assertEqual(ext.shape[0], P.shape[0])
self.assertGreaterEqual(np.amin(ext), 0.)
# Get frames
basisX, basisY, basisN = solver.get_tangent_frames()
self.assertEqual(basisX.shape[0], P.shape[0])
self.assertEqual(basisY.shape[0], P.shape[0])
self.assertEqual(basisN.shape[0], P.shape[0])
# TODO could check orthogonal
# Vector heat (transport vector)
ext = solver.transport_tangent_vector(1, [6., 6.])
self.assertEqual(ext.shape[0], P.shape[0])
self.assertEqual(ext.shape[1], 2)
ext = solver.transport_tangent_vectors([1, 22], [[6., 6.], [3., 4.]])
self.assertEqual(ext.shape[0], P.shape[0])
self.assertEqual(ext.shape[1], 2)
# Vector heat (log map)
logmap = solver.compute_log_map(1)
self.assertEqual(logmap.shape[0], P.shape[0])
self.assertEqual(logmap.shape[1], 2)
if __name__ == '__main__':
unittest.main()
|
py | 1a4c40bafacbecd8c001699bf56a43ee9cf726c1 | #!/usr/bin/env python3
import io
import os
import requests
# Imports the Google Cloud client library
from google.cloud import speech
from google.cloud.speech import enums
from google.cloud.speech import types
# Gazebo
# prefix = "http://10.16.103.133:8080/"
prefix = "http://10.16.104.100:8080/"
# prefix = "http://turtle1.athenian.org:8080/"
def move_robot(direction):
try:
return requests.get(prefix + direction)
except BaseException as e:
print(e)
def main():
# Instantiates a client
client = speech.SpeechClient()
while (True):
input("Hit return to give command")
# os.system("say 'speak'")
os.system("rec --rate 16k --channels=1 test.flac trim 0 1.5")
# The name of the audio file to transcribe
file_name = os.path.join(os.path.dirname(__file__) + '/test.flac')
# Loads the audio into memory
with io.open(file_name, 'rb') as audio_file:
content = audio_file.read()
audio = types.RecognitionAudio(content=content)
config = types.RecognitionConfig(
encoding=enums.RecognitionConfig.AudioEncoding.FLAC,
sample_rate_hertz=16000,
language_code='en-US')
# Detects speech in the audio file
response = client.recognize(config, audio)
for result in response.results:
translation = result.alternatives[0].transcript
print('Transcript: {}'.format(translation))
print('Confidence: {}'.format(result.alternatives[0].confidence))
if ("left" in translation):
print("Send left command")
resp = move_robot('left')
elif ("right" in translation):
print("Send right command")
resp = move_robot('right')
elif ("forward" in translation):
print("Send forward command")
resp = move_robot('forward')
elif ("back" in translation):
print("Send backward command")
resp = move_robot('backward')
elif ("stop" in translation):
print("Send stop command")
resp = move_robot('stop')
if __name__ == "__main__":
main()
|
py | 1a4c40bf62088745cdbe8b23cfe8e2ef3e7dc8ef | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 21 21:38:29 2020
@author: oxenb
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from mne_features.feature_extraction import FeatureExtractor
from sklearn.pipeline import Pipeline
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import ShuffleSplit, cross_val_score
from sklearn.multiclass import OneVsRestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import (GridSearchCV, cross_val_score,
StratifiedKFold)
from mne import Epochs, pick_types, events_from_annotations
import mne
from mne.decoding import (SlidingEstimator, GeneralizingEstimator, Scaler,
cross_val_multiscore, LinearModel, get_coef,
Vectorizer, CSP)
DATA_PATH = "data/"
EXP_NAME = DATA_PATH+"Or_3_raw.fif" ## file name to run the analysis on
features = ['app_entropy', 'decorr_time', 'higuchi_fd',
'hjorth_complexity', 'hjorth_complexity_spect', 'hjorth_mobility',
'hjorth_mobility_spect', 'hurst_exp', 'katz_fd', 'kurtosis',
'line_length', 'mean', 'ptp_amp', 'samp_entropy',
'skewness', 'spect_edge_freq', 'spect_entropy', 'spect_slope',
'std', 'svd_entropy', 'svd_fisher_info', 'teager_kaiser_energy',
'variance', 'wavelet_coef_energy', 'zero_crossings', 'max_cross_corr',
'nonlin_interdep', 'phase_lock_val', 'spect_corr', 'time_corr']
selected_features = ["std","mean","kurtosis","skewness"] # can be changed to any feature
def preprocess():
    tmin, tmax = -1., 0.8 #: TODO: check for the best time window
raw = mne.io.read_raw_fif(EXP_NAME, preload=True)
raw.filter(5., 40., fir_design='firwin', skip_by_annotation='edge')
events = mne.find_events(raw, 'STI')
picks = pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False,
exclude='bads')
event_id = {'Left': 1, 'right': 2,'none': 3}
epochs = Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks,
baseline=None, preload=True)
epochs.pick_types(eeg=True, exclude='bads') # remove stim and EOG
return epochs,raw
def train_mne_feature(data,labels,raw):
pipe = Pipeline([('fe', FeatureExtractor(sfreq = raw.info['sfreq'],
selected_funcs = selected_features)),
('scaler', StandardScaler()),
('clf', GradientBoostingClassifier())])
y = labels
    # params_grid = {'fe__app_entropy__emb': np.arange(2, 5)} #: can add gradient boosting hyperparameters
    params_grid = {} #: can add gradient boosting hyperparameters
gs = GridSearchCV(estimator=pipe, param_grid=params_grid,
cv=StratifiedKFold(n_splits=5, random_state=42), n_jobs=1,
return_train_score=True)
gs.fit(data, y)
scores = pd.DataFrame(gs.cv_results_)
print(scores[['params', 'mean_test_score', 'mean_train_score']])
# Best parameters obtained with GridSearchCV:
print(gs.best_params_)
    #: run the best model; may need to create a separate test dataset
# gs_best = gs.best_estimator_
# new_scores = cross_val_score(gs_best, data, y, cv=skf)
# print('Cross-validation accuracy score (with optimized parameters) = %1.3f '
# '(+/- %1.5f)' % (np.mean(new_scores), np.std(new_scores)))
return pipe
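# Hedged example (not in the original script) of hyperparameters that could be
# added to params_grid above; the 'clf__' prefix targets the
# GradientBoostingClassifier step of the pipeline, e.g.:
#
#   params_grid = {'clf__n_estimators': [100, 200],
#                  'clf__learning_rate': [0.05, 0.1]}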
def main():
epochs,raw = preprocess()
labels = epochs.events[:, -1]
    # get the EEG data
epochs_data_train = epochs.get_data()
pipe = train_mne_feature(epochs_data_train,labels,raw)
    transformed_data = pipe["fe"].fit_transform(epochs_data_train) #: transformed_data is a matrix indexed by events x features
return pipe,epochs_data_train
if __name__ == '__main__':
pipe,epochs_data_train = main()
'''
['app_entropy', 'decorr_time', 'energy_freq_bands', 'higuchi_fd',
'hjorth_complexity', 'hjorth_complexity_spect', 'hjorth_mobility'
'hjorth_mobility_spect', 'hurst_exp', 'katz_fd', 'kurtosis', 'line_length',
'mean', 'pow_freq_bands', 'ptp_amp', 'samp_entropy', 'skewness',
'spect_edge_freq', 'spect_entropy', 'spect_slope', 'std', 'svd_entropy',
'svd_fisher_info', 'teager_kaiser_energy', 'variance', 'wavelet_coef_energy',
'zero_crossings', 'max_cross_corr', 'nonlin_interdep', 'phase_lock_val',
'spect_corr', 'time_corr']
'''
|
py | 1a4c41fed7ddb166b1c0a547c08cbb464de275e4 | from Observer import *
class Observable(object):
"""docstring for Observable"""
def __init__(self):
super(Observable, self).__init__()
self.observers = []
def addObserver(self, observer):
if isinstance(observer, Observer):
self.observers.append(observer)
else:
print("addObserver :", observer, "is not an observer")
def emitSignal(self, signal):
for observer in self.observers:
observer.onReceive(signal, self)
|
py | 1a4c42f657c039073a7aa1542815d3f477a646a3 | # todo test action history
# todo test augmented vectorized observation |
py | 1a4c43a0e857a0f93cc29cf71063b3c19baa2b89 | '''
Uses [[https://github.com/fabianonline/telegram_backup#readme][telegram_backup]] database for messages data
'''
from pathlib import Path
from typing import Optional, Union, TypeVar
from urllib.parse import unquote # TODO mm, make it easier to remember to use...
from ..common import PathIsh, Visit, get_logger, Loc, extract_urls, from_epoch, Results, echain
# TODO potentially, belongs to my. package
# TODO kython?
T = TypeVar('T')
def unwrap(res: Union[T, Exception]) -> T:
if isinstance(res, Exception):
raise res
else:
return res
# TODO move to common?
def dataset_readonly(db: Path):
import dataset # type: ignore
# see https://github.com/pudo/dataset/issues/136#issuecomment-128693122
import sqlite3
creator = lambda: sqlite3.connect(f'file:{db}?immutable=1', uri=True)
return dataset.connect('sqlite:///' , engine_kwargs={'creator': creator})
def index(database: PathIsh) -> Results:
logger = get_logger()
path = Path(database)
assert path.is_file(), path
# TODO context manager?
db = dataset_readonly(path) # TODO could check is_file inside
def make_query(text_query: str):
return f"""
WITH entities AS (
SELECT 'dialog' as type, id, coalesce(username, id) as handle, coalesce(first_name || " " || last_name, username, id) as display_name FROM users
UNION
SELECT 'group' as type, id, id as handle , coalesce(name, id) as display_name FROM chats
)
SELECT src.display_name AS chatname
, src.handle AS chat
, snd.display_name AS sender
, M.time AS time
, {text_query} AS text
, M.id AS mid
FROM messages AS M
/* chat types are 'dialog' (1-1), 'group' and 'supergroup' */
        /* this is a bit of a hacky way to handle all groups in one go */
LEFT JOIN entities AS src ON M.source_id = src.id AND src.type = (CASE M.source_type WHEN 'supergroup' THEN 'group' ELSE M.source_type END)
LEFT JOIN entities AS snd ON M.sender_id = snd.id AND snd.type = 'dialog'
WHERE
M.message_type NOT IN ('service_message', 'empty_message')
/* used to do this, but doesn't really give much of a speedup */
/* AND (M.has_media == 1 OR (text LIKE '%http%')) */
ORDER BY time;
""".strip()
# TODO yield error if chatname or chat or smth else is null?
for row in db.query(make_query('M.text')):
try:
yield from _handle_row(row)
except Exception as ex:
yield echain(RuntimeError(f'While handling {row}'), ex)
# , None, sys.exc_info()[2]
# TODO hmm. traceback isn't preserved; wonder if that's because it's too heavy to attach to every single exception object..
# old (also 'stable') version doesn't have 'json' column yet...
if 'json' in db['messages'].columns:
for row in db.query(make_query("json_extract(json, '$.media.webpage.description')")):
try:
yield from _handle_row(row)
except Exception as ex:
yield echain(RuntimeError(f'While handling {row}'), ex)
def _handle_row(row) -> Results:
text = row['text']
if text is None:
return
urls = extract_urls(text)
if len(urls) == 0:
return
dt = from_epoch(row['time'])
mid: str = unwrap(row['mid'])
# TODO perhaps we could be defensive with null sender/chat etc and still emit the Visit
sender: str = unwrap(row['sender'])
chatname: str = unwrap(row['chatname'])
chat: str = unwrap(row['chat'])
in_context = f'https://t.me/{chat}/{mid}'
for u in urls:
# https://www.reddit.com/r/Telegram/comments/6ufwi3/link_to_a_specific_message_in_a_channel_possible/
# hmm, only seems to work on mobile app, but better than nothing...
yield Visit(
url=unquote(u),
dt=dt,
context=f"{sender}: {text}",
locator=Loc.make(
title=f"chat with {chatname}",
href=in_context,
),
)
|
py | 1a4c43fcabc75d5b67b0685793bce5c64f05423f | """
Policy rules class
"""
from typing import Union, List, Dict
from marshmallow import Schema, fields, post_load
from .conditions.attribute.base import validate_path
from .conditions.schema import ConditionSchema
from ..context import EvaluationContext
class Rules(object):
"""
Policy rules
"""
def __init__(
self,
subject: Union[List, Dict],
resource: Union[List, Dict],
action: Union[List, Dict],
context: Union[List, Dict]
):
self.subject = subject
self.resource = resource
self.action = action
self.context = context
def is_satisfied(self, ctx: EvaluationContext):
"""
Check if request satisfies all conditions
:param ctx: policy evaluation context
:return: True if satisfied else False
"""
return self._is_satisfied("subject", self.subject, ctx) and \
self._is_satisfied("resource", self.resource, ctx) and \
self._is_satisfied("action", self.action, ctx) and \
self._is_satisfied("context", self.context, ctx)
def _is_satisfied(self, ace_name: str, ace_conditions, ctx: EvaluationContext):
"""
Check if the access control element satisfies request
:param ace_name: access control element name
:param ace_conditions: access control element conditions
:param ctx: policy evaluation context
:return: True if satisfied else False
"""
if isinstance(ace_conditions, list):
return self._implicit_or(ace_name, ace_conditions, ctx)
if isinstance(ace_conditions, dict):
return self._implicit_and(ace_name, ace_conditions, ctx)
# If ace is not in correct format, return False. This condition is just for best
# practice and will never happen
return False # pragma: no cover
def _implicit_or(self, ace_name: str, ace_conditions: list, ctx: EvaluationContext):
for _ace_conditions in ace_conditions:
# If even one of the conditions is satisfied, return True
if self._implicit_and(ace_name, _ace_conditions, ctx):
return True
# If no conditions are satisfied, return False
return False
@staticmethod
def _implicit_and(ace_name: str, ace_conditions: dict, ctx: EvaluationContext):
for attribute_path, condition in ace_conditions.items():
ctx.ace = ace_name
ctx.attribute_path = attribute_path
# If even one of the conditions is not satisfied, return False
if not condition.is_satisfied(ctx):
return False
# If all conditions are satisfied, return True
return True
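# Illustrative note (not part of the original module): the rule value for each
# access control element is either a dict of {attribute_path: Condition} pairs,
# in which case every condition must hold (implicit AND), or a list of such
# dicts, in which case any one dict matching is enough (implicit OR). A
# hypothetical sketch, with cond_a / cond_b standing for deserialized Condition
# objects:
#
#   implicit AND:  {attribute_path_1: cond_a, attribute_path_2: cond_b}
#   implicit OR:   [{attribute_path_1: cond_a}, {attribute_path_2: cond_b}]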
class RuleField(fields.Field):
"""
Marshmallow field class for rules
"""
_implicit_and_field = fields.Dict(
keys=fields.String(validate=validate_path),
values=fields.Nested(ConditionSchema)
)
_implicit_or_field = fields.List(
fields.Dict(
keys=fields.String(validate=validate_path),
values=fields.Nested(ConditionSchema)
)
)
def _serialize(self, value, attr, obj, **kwargs):
if isinstance(value, list):
return self._implicit_or_field._serialize(value, attr, obj, **kwargs) # pylint: disable=protected-access
return self._implicit_and_field._serialize(value, attr, obj, **kwargs) # pylint: disable=protected-access
def _deserialize(self, value, attr, data, **kwargs):
if isinstance(value, list):
return self._implicit_or_field.deserialize(value, attr, data, **kwargs) # pylint: disable=protected-access
return self._implicit_and_field.deserialize(value, attr, data, **kwargs) # pylint: disable=protected-access
class RulesSchema(Schema):
"""
JSON schema for rules
"""
subject = RuleField(default={}, missing={})
resource = RuleField(default={}, missing={})
action = RuleField(default={}, missing={})
context = RuleField(default={}, missing={})
@post_load
def post_load(self, data, **_): # pylint: disable=missing-docstring,no-self-use
return Rules(**data)
|
py | 1a4c44a470e0b966602e25fb3891a00cad8fe137 | """coBib parser test class."""
import pytest
from cobib.config import config
from .. import get_resource
class ParserTest:
"""The base class for coBib's parser test classes."""
EXAMPLE_BIBTEX_FILE = get_resource("example_entry.bib")
"""Path to the example BibTeX file."""
EXAMPLE_YAML_FILE = get_resource("example_entry.yaml")
"""Path to the example YAML file (matching the BibTeX file)."""
EXAMPLE_ENTRY_DICT = {
"ENTRYTYPE": "article",
"author": "Yudong Cao and Jonathan Romero and Jonathan P. Olson and Matthias Degroote and "
+ "Peter D. Johnson and M{\\'a}ria Kieferov{\\'a} and Ian D. Kivlichan and Tim Menke and "
+ "Borja Peropadre and Nicolas P. D. Sawaya and Sukin Sim and Libor Veis and Al{\\'a}n "
+ "Aspuru-Guzik",
"doi": "10.1021/acs.chemrev.8b00803",
"journal": "Chemical Reviews",
"month": "aug",
"number": 19,
"pages": "10856--10915",
"publisher": "American Chemical Society ({ACS})",
"title": "Quantum Chemistry in the Age of Quantum Computing",
"url": ["https://doi.org/10.1021%2Facs.chemrev.8b00803"],
"volume": 119,
"year": 2019,
}
"""The matching dictionary to the example files also included here."""
@pytest.fixture(autouse=True)
def setup(self) -> None:
# pylint: disable=no-self-use
"""Setup."""
config.defaults()
|
py | 1a4c45628a2566daafb990f0e70c17d0f46afed9 | """
Module contains tools for collecting data from various remote sources
"""
import warnings
import tempfile
import datetime as dt
import time
from collections import defaultdict
import numpy as np
from pandas.compat import(
StringIO, bytes_to_str, range, lmap, zip
)
import pandas.compat as compat
from pandas import Panel, DataFrame, Series, read_csv, concat, to_datetime, DatetimeIndex, DateOffset
from pandas.core.common import is_list_like, PandasError
from pandas.io.common import urlopen, ZipFile, urlencode
from pandas.tseries.offsets import MonthEnd
from pandas.util.testing import _network_error_classes
from pandas.io.html import read_html
warnings.warn("\n"
"The pandas.io.data module is moved to a separate package "
"(pandas-datareader) and will be removed from pandas in a "
"future version.\nAfter installing the pandas-datareader package "
"(https://github.com/pydata/pandas-datareader), you can change "
"the import ``from pandas.io import data, wb`` to "
"``from pandas_datareader import data, wb``.",
FutureWarning)
class SymbolWarning(UserWarning):
pass
class RemoteDataError(PandasError, IOError):
pass
def DataReader(name, data_source=None, start=None, end=None,
retry_count=3, pause=0.001):
"""
Imports data from a number of online sources.
Currently supports Yahoo! Finance, Google Finance, St. Louis FED (FRED)
and Kenneth French's data library.
Parameters
----------
name : str or list of strs
the name of the dataset. Some data sources (yahoo, google, fred) will
accept a list of names.
data_source: str
the data source ("yahoo", "google", "fred", or "ff")
start : {datetime, None}
left boundary for range (defaults to 1/1/2010)
end : {datetime, None}
right boundary for range (defaults to today)
Examples
----------
# Data from Yahoo! Finance
gs = DataReader("GS", "yahoo")
# Data from Google Finance
aapl = DataReader("AAPL", "google")
# Data from FRED
vix = DataReader("VIXCLS", "fred")
# Data from Fama/French
ff = DataReader("F-F_Research_Data_Factors", "famafrench")
ff = DataReader("F-F_Research_Data_Factors_weekly", "famafrench")
ff = DataReader("6_Portfolios_2x3", "famafrench")
ff = DataReader("F-F_ST_Reversal_Factor", "famafrench")
"""
start, end = _sanitize_dates(start, end)
if data_source == "yahoo":
return get_data_yahoo(symbols=name, start=start, end=end,
adjust_price=False, chunksize=25,
retry_count=retry_count, pause=pause)
elif data_source == "google":
return get_data_google(symbols=name, start=start, end=end,
adjust_price=False, chunksize=25,
retry_count=retry_count, pause=pause)
elif data_source == "fred":
return get_data_fred(name, start, end)
elif data_source == "famafrench":
return get_data_famafrench(name)
def _sanitize_dates(start, end):
from pandas.core.datetools import to_datetime
start = to_datetime(start)
end = to_datetime(end)
if start is None:
start = dt.datetime(2010, 1, 1)
if end is None:
end = dt.datetime.today()
return start, end
def _in_chunks(seq, size):
"""
Return sequence in 'chunks' of size defined by size
"""
return (seq[pos:pos + size] for pos in range(0, len(seq), size))
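# For example (illustrative, not part of the original module):
#   list(_in_chunks(['AAPL', 'GOOG', 'MSFT'], 2)) -> [['AAPL', 'GOOG'], ['MSFT']]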
_yahoo_codes = {'symbol': 's', 'last': 'l1', 'change_pct': 'p2', 'PE': 'r',
'time': 't1', 'short_ratio': 's7'}
_YAHOO_QUOTE_URL = 'http://finance.yahoo.com/d/quotes.csv?'
def get_quote_yahoo(symbols):
"""
Get current yahoo quote
Returns a DataFrame
"""
if isinstance(symbols, compat.string_types):
sym_list = symbols
else:
sym_list = '+'.join(symbols)
# for codes see: http://www.gummy-stuff.org/Yahoo-data.htm
request = ''.join(compat.itervalues(_yahoo_codes)) # code request string
header = list(_yahoo_codes.keys())
data = defaultdict(list)
url_str = _YAHOO_QUOTE_URL + 's=%s&f=%s' % (sym_list, request)
with urlopen(url_str) as url:
lines = url.readlines()
for line in lines:
fields = line.decode('utf-8').strip().split(',')
for i, field in enumerate(fields):
if field[-2:] == '%"':
v = float(field.strip('"%'))
elif field[0] == '"':
v = field.strip('"')
else:
try:
v = float(field)
except ValueError:
v = field
data[header[i]].append(v)
idx = data.pop('symbol')
return DataFrame(data, index=idx)
def get_quote_google(symbols):
raise NotImplementedError("Google Finance doesn't have this functionality")
def _retry_read_url(url, retry_count, pause, name):
for _ in range(retry_count):
time.sleep(pause)
# kludge to close the socket ASAP
try:
with urlopen(url) as resp:
lines = resp.read()
except _network_error_classes:
pass
else:
rs = read_csv(StringIO(bytes_to_str(lines)), index_col=0,
parse_dates=True, na_values='-')[::-1]
# Yahoo! Finance sometimes does this awesome thing where they
# return 2 rows for the most recent business day
if len(rs) > 2 and rs.index[-1] == rs.index[-2]: # pragma: no cover
rs = rs[:-1]
#Get rid of unicode characters in index name.
try:
rs.index.name = rs.index.name.decode('unicode_escape').encode('ascii', 'ignore')
except AttributeError:
#Python 3 string has no decode method.
rs.index.name = rs.index.name.encode('ascii', 'ignore').decode()
return rs
raise IOError("after %d tries, %s did not "
"return a 200 for url %r" % (retry_count, name, url))
_HISTORICAL_YAHOO_URL = 'http://ichart.finance.yahoo.com/table.csv?'
def _get_hist_yahoo(sym, start, end, interval, retry_count, pause):
"""
Get historical data for the given name from yahoo.
Date format is datetime
Returns a DataFrame.
"""
start, end = _sanitize_dates(start, end)
url = (_HISTORICAL_YAHOO_URL + 's=%s' % sym +
'&a=%s' % (start.month - 1) +
'&b=%s' % start.day +
'&c=%s' % start.year +
'&d=%s' % (end.month - 1) +
'&e=%s' % end.day +
'&f=%s' % end.year +
'&g=%s' % interval +
'&ignore=.csv')
return _retry_read_url(url, retry_count, pause, 'Yahoo!')
_HISTORICAL_GOOGLE_URL = 'http://www.google.com/finance/historical?'
def _get_hist_google(sym, start, end, interval, retry_count, pause):
"""
Get historical data for the given name from google.
Date format is datetime
Returns a DataFrame.
"""
start, end = _sanitize_dates(start, end)
# www.google.com/finance/historical?q=GOOG&startdate=Jun+9%2C+2011&enddate=Jun+8%2C+2013&output=csv
url = "%s%s" % (_HISTORICAL_GOOGLE_URL,
urlencode({"q": sym,
"startdate": start.strftime('%b %d, ' '%Y'),
"enddate": end.strftime('%b %d, %Y'),
"output": "csv"}))
return _retry_read_url(url, retry_count, pause, 'Google')
def _adjust_prices(hist_data, price_list=None):
"""
    Return modified DataFrame or Panel with adjusted prices based on
'Adj Close' price. Adds 'Adj_Ratio' column.
"""
if price_list is None:
price_list = 'Open', 'High', 'Low', 'Close'
adj_ratio = hist_data['Adj Close'] / hist_data['Close']
data = hist_data.copy()
for item in price_list:
data[item] = hist_data[item] * adj_ratio
data['Adj_Ratio'] = adj_ratio
del data['Adj Close']
return data
def _calc_return_index(price_df):
"""
    Return a returns index from an input price df or series. Initial value
(typically NaN) is set to 1.
"""
df = price_df.pct_change().add(1).cumprod()
mask = df.ix[1].notnull() & df.ix[0].isnull()
df.ix[0][mask] = 1
# Check for first stock listings after starting date of index in ret_index
# If True, find first_valid_index and set previous entry to 1.
if (~mask).any():
for sym in mask.index[~mask]:
tstamp = df[sym].first_valid_index()
t_idx = df.index.get_loc(tstamp) - 1
df[sym].ix[t_idx] = 1
return df
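# Illustrative behaviour on a toy price frame (the column name is a made-up
# example); the leading NaN produced by pct_change is replaced by 1 so the
# return index starts there. Values are exact only up to floating-point
# rounding:
#
#   >>> prices = DataFrame({'XYZ': [100.0, 110.0, 99.0]})
#   >>> _calc_return_index(prices)['XYZ'].tolist()   # doctest: +SKIP
#   [1.0, 1.1, 0.99]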
_YAHOO_COMPONENTS_URL = 'http://download.finance.yahoo.com/d/quotes.csv?'
def get_components_yahoo(idx_sym):
"""
Returns DataFrame containing list of component information for
index represented in idx_sym from yahoo. Includes component symbol
(ticker), exchange, and name.
Parameters
----------
idx_sym : str
Stock index symbol
Examples:
'^DJI' (Dow Jones Industrial Average)
'^NYA' (NYSE Composite)
'^IXIC' (NASDAQ Composite)
See: http://finance.yahoo.com/indices for other index symbols
Returns
-------
idx_df : DataFrame
"""
stats = 'snx'
# URL of form:
# http://download.finance.yahoo.com/d/quotes.csv?s=@%5EIXIC&f=snxl1d1t1c1ohgv
url = _YAHOO_COMPONENTS_URL + 's={0}&f={1}&e=.csv&h={2}'
idx_mod = idx_sym.replace('^', '@%5E')
url_str = url.format(idx_mod, stats, 1)
idx_df = DataFrame()
mask = [True]
comp_idx = 1
# LOOP across component index structure,
# break when no new components are found
while True in mask:
url_str = url.format(idx_mod, stats, comp_idx)
with urlopen(url_str) as resp:
raw = resp.read()
lines = raw.decode('utf-8').strip().strip('"').split('"\r\n"')
lines = [line.strip().split('","') for line in lines]
temp_df = DataFrame(lines, columns=['ticker', 'name', 'exchange'])
temp_df = temp_df.drop_duplicates()
temp_df = temp_df.set_index('ticker')
mask = ~temp_df.index.isin(idx_df.index)
comp_idx = comp_idx + 50
idx_df = idx_df.append(temp_df[mask])
return idx_df
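# Hedged usage sketch (depends on a Yahoo! download endpoint that may no
# longer be available):
#
#   >>> dji = get_components_yahoo('^DJI')   # doctest: +SKIP
#   # -> DataFrame indexed by ticker with 'name' and 'exchange' columns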
def _dl_mult_symbols(symbols, start, end, interval, chunksize, retry_count, pause,
method):
stocks = {}
failed = []
passed = []
for sym_group in _in_chunks(symbols, chunksize):
for sym in sym_group:
try:
stocks[sym] = method(sym, start, end, interval, retry_count, pause)
passed.append(sym)
except IOError:
warnings.warn('Failed to read symbol: {0!r}, replacing with '
'NaN.'.format(sym), SymbolWarning)
failed.append(sym)
if len(passed) == 0:
raise RemoteDataError("No data fetched using "
"{0!r}".format(method.__name__))
try:
if len(stocks) > 0 and len(failed) > 0 and len(passed) > 0:
df_na = stocks[passed[0]].copy()
df_na[:] = np.nan
for sym in failed:
stocks[sym] = df_na
return Panel(stocks).swapaxes('items', 'minor')
except AttributeError:
# cannot construct a panel with just 1D nans indicating no data
raise RemoteDataError("No data fetched using "
"{0!r}".format(method.__name__))
_source_functions = {'google': _get_hist_google, 'yahoo': _get_hist_yahoo}
def _get_data_from(symbols, start, end, interval, retry_count, pause, adjust_price,
ret_index, chunksize, source):
src_fn = _source_functions[source]
# If a single symbol, (e.g., 'GOOG')
if isinstance(symbols, (compat.string_types, int)):
hist_data = src_fn(symbols, start, end, interval, retry_count, pause)
# Or multiple symbols, (e.g., ['GOOG', 'AAPL', 'MSFT'])
elif isinstance(symbols, DataFrame):
hist_data = _dl_mult_symbols(symbols.index, start, end, interval, chunksize,
retry_count, pause, src_fn)
else:
hist_data = _dl_mult_symbols(symbols, start, end, interval, chunksize,
retry_count, pause, src_fn)
if source.lower() == 'yahoo':
if ret_index:
hist_data['Ret_Index'] = _calc_return_index(hist_data['Adj Close'])
if adjust_price:
hist_data = _adjust_prices(hist_data)
return hist_data
def get_data_yahoo(symbols=None, start=None, end=None, retry_count=3,
pause=0.001, adjust_price=False, ret_index=False,
chunksize=25, interval='d'):
"""
Returns DataFrame/Panel of historical stock prices from symbols, over date
range, start to end. To avoid being penalized by Yahoo! Finance servers,
pauses between downloading 'chunks' of symbols can be specified.
Parameters
----------
symbols : string, array-like object (list, tuple, Series), or DataFrame
Single stock symbol (ticker), array-like object of symbols or
DataFrame with index containing stock symbols.
start : string, (defaults to '1/1/2010')
        Starting date, timestamp. Parses many different kinds of date
representations (e.g., 'JAN-01-2010', '1/1/10', 'Jan, 1, 1980')
end : string, (defaults to today)
Ending date, timestamp. Same format as starting date.
retry_count : int, default 3
Number of times to retry query request.
pause : int, default 0
Time, in seconds, to pause between consecutive queries of chunks. If
single value given for symbol, represents the pause between retries.
adjust_price : bool, default False
If True, adjusts all prices in hist_data ('Open', 'High', 'Low',
'Close') based on 'Adj Close' price. Adds 'Adj_Ratio' column and drops
'Adj Close'.
ret_index : bool, default False
If True, includes a simple return index 'Ret_Index' in hist_data.
chunksize : int, default 25
        Number of symbols to download consecutively before initiating pause.
interval : string, default 'd'
Time interval code, valid values are 'd' for daily, 'w' for weekly,
'm' for monthly and 'v' for dividend.
Returns
-------
hist_data : DataFrame (str) or Panel (array-like object, DataFrame)
"""
if interval not in ['d', 'w', 'm', 'v']:
raise ValueError("Invalid interval: valid values are 'd', 'w', 'm' and 'v'")
return _get_data_from(symbols, start, end, interval, retry_count, pause,
adjust_price, ret_index, chunksize, 'yahoo')
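# Hedged usage sketch (network access assumed; tickers and dates are only
# examples):
#
#   >>> gs = get_data_yahoo('GS', start='1/1/2014', end='6/1/2014')   # doctest: +SKIP
#   >>> panel = get_data_yahoo(['GS', 'AAPL'], ret_index=True)        # doctest: +SKIP
#   # A single symbol returns a DataFrame of OHLCV columns; a list of
#   # symbols returns a Panel (items keyed by column, e.g. 'Open', 'Close').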
def get_data_google(symbols=None, start=None, end=None, retry_count=3,
pause=0.001, adjust_price=False, ret_index=False,
chunksize=25):
"""
Returns DataFrame/Panel of historical stock prices from symbols, over date
range, start to end. To avoid being penalized by Google Finance servers,
pauses between downloading 'chunks' of symbols can be specified.
Parameters
----------
symbols : string, array-like object (list, tuple, Series), or DataFrame
Single stock symbol (ticker), array-like object of symbols or
DataFrame with index containing stock symbols.
start : string, (defaults to '1/1/2010')
        Starting date, timestamp. Parses many different kinds of date
representations (e.g., 'JAN-01-2010', '1/1/10', 'Jan, 1, 1980')
end : string, (defaults to today)
Ending date, timestamp. Same format as starting date.
retry_count : int, default 3
Number of times to retry query request.
pause : int, default 0
Time, in seconds, to pause between consecutive queries of chunks. If
single value given for symbol, represents the pause between retries.
chunksize : int, default 25
        Number of symbols to download consecutively before initiating pause.
Returns
-------
hist_data : DataFrame (str) or Panel (array-like object, DataFrame)
"""
return _get_data_from(symbols, start, end, None, retry_count, pause,
adjust_price, ret_index, chunksize, 'google')
_FRED_URL = "http://research.stlouisfed.org/fred2/series/"
def get_data_fred(name, start=dt.datetime(2010, 1, 1),
end=dt.datetime.today()):
"""
Get data for the given name from the St. Louis FED (FRED).
Date format is datetime
Returns a DataFrame.
If multiple names are passed for "series" then the index of the
    DataFrame is the outer join of the indices of each series.
"""
start, end = _sanitize_dates(start, end)
if not is_list_like(name):
names = [name]
else:
names = name
urls = [_FRED_URL + '%s' % n + '/downloaddata/%s' % n + '.csv' for
n in names]
def fetch_data(url, name):
with urlopen(url) as resp:
data = read_csv(resp, index_col=0, parse_dates=True,
header=None, skiprows=1, names=["DATE", name],
na_values='.')
try:
return data.truncate(start, end)
except KeyError:
if data.ix[3].name[7:12] == 'Error':
raise IOError("Failed to get the data. Check that {0!r} is "
"a valid FRED series.".format(name))
raise
df = concat([fetch_data(url, n) for url, n in zip(urls, names)],
axis=1, join='outer')
return df
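# Hedged usage sketch (network access assumed; series names are examples):
#
#   >>> vix = get_data_fred('VIXCLS')             # doctest: +SKIP
#   >>> both = get_data_fred(['VIXCLS', 'GDP'])   # doctest: +SKIP
#   # Multiple names are outer-joined on their date indices.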
_FAMAFRENCH_URL = 'http://mba.tuck.dartmouth.edu/pages/faculty/ken.french/ftp'
def get_data_famafrench(name):
# path of zip files
zip_file_path = '{0}/{1}_TXT.zip'.format(_FAMAFRENCH_URL, name)
with urlopen(zip_file_path) as url:
raw = url.read()
with tempfile.TemporaryFile() as tmpf:
tmpf.write(raw)
with ZipFile(tmpf, 'r') as zf:
data = zf.open(zf.namelist()[0]).readlines()
line_lengths = np.array(lmap(len, data))
file_edges = np.where(line_lengths == 2)[0]
datasets = {}
edges = zip(file_edges + 1, file_edges[1:])
for i, (left_edge, right_edge) in enumerate(edges):
dataset = [d.split() for d in data[left_edge:right_edge]]
if len(dataset) > 10:
ncol_raw = np.array(lmap(len, dataset))
ncol = np.median(ncol_raw)
header_index = np.where(ncol_raw == ncol - 1)[0][-1]
header = dataset[header_index]
ds_header = dataset[header_index + 1:]
# to ensure the header is unique
header = ['{0} {1}'.format(j, hj) for j, hj in enumerate(header,
start=1)]
index = np.array([d[0] for d in ds_header], dtype=int)
dataset = np.array([d[1:] for d in ds_header], dtype=float)
datasets[i] = DataFrame(dataset, index, columns=header)
return datasets
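# Hedged usage sketch (network access assumed): the result is a dict of
# DataFrames keyed by the position of each table in the downloaded text file.
#
#   >>> ff = get_data_famafrench('F-F_Research_Data_Factors')   # doctest: +SKIP
#   >>> sorted(ff.keys())                                       # doctest: +SKIP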
# Items needed for options class
CUR_MONTH = dt.datetime.now().month
CUR_YEAR = dt.datetime.now().year
CUR_DAY = dt.datetime.now().day
def _two_char(s):
return '{0:0>2}'.format(s)
class Options(object):
"""
***Experimental***
This class fetches call/put data for a given stock/expiry month.
It is instantiated with a string representing the ticker symbol.
The class has the following methods:
get_options_data:(month, year, expiry)
get_call_data:(month, year, expiry)
get_put_data: (month, year, expiry)
get_near_stock_price(opt_frame, above_below)
get_all_data(call, put)
get_forward_data(months, call, put) (deprecated)
Examples
--------
# Instantiate object with ticker
>>> aapl = Options('aapl', 'yahoo')
# Fetch next expiry call data
>>> calls = aapl.get_call_data()
# Can now access aapl.calls instance variable
>>> aapl.calls
# Fetch next expiry put data
>>> puts = aapl.get_put_data()
# Can now access aapl.puts instance variable
>>> aapl.puts
# cut down the call data to be 3 below and 3 above the stock price.
>>> cut_calls = aapl.get_near_stock_price(call=True, above_below=3)
# Fetch call and put data with expiry from now to 8 months out
>>> forward_data = aapl.get_forward_data(8, call=True, put=True)
# Fetch all call and put data
>>> all_data = aapl.get_all_data()
"""
_TABLE_LOC = {'calls': 1, 'puts': 2}
_OPTIONS_BASE_URL = 'http://finance.yahoo.com/q/op?s={sym}'
_FINANCE_BASE_URL = 'http://finance.yahoo.com'
def __init__(self, symbol, data_source=None):
""" Instantiates options_data with a ticker saved as symbol """
self.symbol = symbol.upper()
if data_source is None:
warnings.warn("Options(symbol) is deprecated, use Options(symbol,"
" data_source) instead", FutureWarning, stacklevel=2)
data_source = "yahoo"
if data_source != "yahoo":
raise NotImplementedError("currently only yahoo supported")
def get_options_data(self, month=None, year=None, expiry=None):
"""
***Experimental***
Gets call/put data for the stock with the expiration data in the
given month and year
Parameters
----------
month : number, int, optional(default=None)
The month the options expire. This should be either 1 or 2
digits.
year : number, int, optional(default=None)
The year the options expire. This should be a 4 digit int.
expiry : date-like or convertible or list-like object, optional (default=None)
The date (or dates) when options expire (defaults to current month)
Returns
-------
pandas.DataFrame
A DataFrame with requested options data.
Index:
Strike: Option strike, int
Expiry: Option expiry, Timestamp
Type: Call or Put, string
Symbol: Option symbol as reported on Yahoo, string
Columns:
Last: Last option price, float
Chg: Change from prior day, float
Bid: Bid price, float
Ask: Ask price, float
Vol: Volume traded, int64
Open_Int: Open interest, int64
            IsNonstandard: True if the deliverable is not 100 shares, otherwise false
Underlying: Ticker of the underlying security, string
Underlying_Price: Price of the underlying security, float64
Quote_Time: Time of the quote, Timestamp
Notes
-----
Note: Format of returned data frame is dependent on Yahoo and may change.
When called, this function will add instance variables named
calls and puts. See the following example:
>>> aapl = Options('aapl', 'yahoo') # Create object
>>> aapl.calls # will give an AttributeError
>>> aapl.get_options() # Get data and set ivars
>>> aapl.calls # Doesn't throw AttributeError
        Also note that aapl.calls and aapl.puts will always be the calls
and puts for the next expiry. If the user calls this method with
a different expiry, the ivar will be named callsYYMMDD or putsYYMMDD,
where YY, MM and DD are, respectively, two digit representations of
the year, month and day for the expiry of the options.
"""
return concat([f(month, year, expiry)
for f in (self.get_put_data,
self.get_call_data)]).sortlevel()
def _get_option_frames_from_yahoo(self, expiry):
url = self._yahoo_url_from_expiry(expiry)
option_frames = self._option_frames_from_url(url)
frame_name = '_frames' + self._expiry_to_string(expiry)
setattr(self, frame_name, option_frames)
return option_frames
@staticmethod
def _expiry_to_string(expiry):
m1 = _two_char(expiry.month)
d1 = _two_char(expiry.day)
return str(expiry.year)[-2:] + m1 + d1
def _yahoo_url_from_expiry(self, expiry):
try:
expiry_links = self._expiry_links
except AttributeError:
_, expiry_links = self._get_expiry_dates_and_links()
return self._FINANCE_BASE_URL + expiry_links[expiry]
def _option_frames_from_url(self, url):
frames = read_html(url)
nframes = len(frames)
frames_req = max(self._TABLE_LOC.values())
if nframes < frames_req:
raise RemoteDataError("%s options tables found (%s expected)" % (nframes, frames_req))
if not hasattr(self, 'underlying_price'):
try:
self.underlying_price, self.quote_time = self._underlying_price_and_time_from_url(url)
except IndexError:
self.underlying_price, self.quote_time = np.nan, np.nan
calls = frames[self._TABLE_LOC['calls']]
puts = frames[self._TABLE_LOC['puts']]
calls = self._process_data(calls, 'call')
puts = self._process_data(puts, 'put')
return {'calls': calls, 'puts': puts}
def _underlying_price_and_time_from_url(self, url):
root = self._parse_url(url)
underlying_price = self._underlying_price_from_root(root)
quote_time = self._quote_time_from_root(root)
return underlying_price, quote_time
@staticmethod
def _underlying_price_from_root(root):
underlying_price = root.xpath('.//*[@class="time_rtq_ticker Fz-30 Fw-b"]')[0]\
.getchildren()[0].text
underlying_price = underlying_price.replace(',', '') #GH11
try:
underlying_price = float(underlying_price)
except ValueError:
underlying_price = np.nan
return underlying_price
@staticmethod
def _quote_time_from_root(root):
#Gets the time of the quote, note this is actually the time of the underlying price.
try:
quote_time_text = root.xpath('.//*[@class="time_rtq Fz-m"]')[0].getchildren()[1].getchildren()[0].text
##TODO: Enable timezone matching when strptime can match EST with %Z
quote_time_text = quote_time_text.split(' ')[0]
quote_time = dt.datetime.strptime(quote_time_text, "%I:%M%p")
quote_time = quote_time.replace(year=CUR_YEAR, month=CUR_MONTH, day=CUR_DAY)
except ValueError:
quote_time = np.nan
return quote_time
def _get_option_data(self, expiry, name):
frame_name = '_frames' + self._expiry_to_string(expiry)
try:
frames = getattr(self, frame_name)
except AttributeError:
frames = self._get_option_frames_from_yahoo(expiry)
option_data = frames[name]
if expiry != self.expiry_dates[0]:
name += self._expiry_to_string(expiry)
setattr(self, name, option_data)
return option_data
def get_call_data(self, month=None, year=None, expiry=None):
"""
***Experimental***
Gets call/put data for the stock with the expiration data in the
given month and year
Parameters
----------
month : number, int, optional(default=None)
The month the options expire. This should be either 1 or 2
digits.
year : number, int, optional(default=None)
The year the options expire. This should be a 4 digit int.
expiry : date-like or convertible or list-like object, optional (default=None)
The date (or dates) when options expire (defaults to current month)
Returns
-------
call_data: pandas.DataFrame
A DataFrame with requested options data.
Index:
Strike: Option strike, int
Expiry: Option expiry, Timestamp
Type: Call or Put, string
Symbol: Option symbol as reported on Yahoo, string
Columns:
Last: Last option price, float
Chg: Change from prior day, float
Bid: Bid price, float
Ask: Ask price, float
Vol: Volume traded, int64
Open_Int: Open interest, int64
            IsNonstandard: True if the deliverable is not 100 shares, otherwise false
Underlying: Ticker of the underlying security, string
Underlying_Price: Price of the underlying security, float64
Quote_Time: Time of the quote, Timestamp
Notes
-----
Note: Format of returned data frame is dependent on Yahoo and may change.
When called, this function will add instance variables named
calls and puts. See the following example:
>>> aapl = Options('aapl', 'yahoo') # Create object
>>> aapl.calls # will give an AttributeError
>>> aapl.get_call_data() # Get data and set ivars
>>> aapl.calls # Doesn't throw AttributeError
Also note that aapl.calls will always be the calls for the next
expiry. If the user calls this method with a different month
or year, the ivar will be named callsYYMMDD where YY, MM and DD are,
respectively, two digit representations of the year, month and day
for the expiry of the options.
"""
expiry = self._try_parse_dates(year, month, expiry)
return self._get_data_in_date_range(expiry, call=True, put=False)
def get_put_data(self, month=None, year=None, expiry=None):
"""
***Experimental***
Gets put data for the stock with the expiration data in the
given month and year
Parameters
----------
month : number, int, optional(default=None)
The month the options expire. This should be either 1 or 2
digits.
year : number, int, optional(default=None)
The year the options expire. This should be a 4 digit int.
expiry : date-like or convertible or list-like object, optional (default=None)
The date (or dates) when options expire (defaults to current month)
Returns
-------
put_data: pandas.DataFrame
A DataFrame with requested options data.
Index:
Strike: Option strike, int
Expiry: Option expiry, Timestamp
Type: Call or Put, string
Symbol: Option symbol as reported on Yahoo, string
Columns:
Last: Last option price, float
Chg: Change from prior day, float
Bid: Bid price, float
Ask: Ask price, float
Vol: Volume traded, int64
Open_Int: Open interest, int64
            IsNonstandard: True if the deliverable is not 100 shares, otherwise false
Underlying: Ticker of the underlying security, string
Underlying_Price: Price of the underlying security, float64
Quote_Time: Time of the quote, Timestamp
Notes
-----
Note: Format of returned data frame is dependent on Yahoo and may change.
When called, this function will add instance variables named
puts. See the following example:
>>> aapl = Options('aapl') # Create object
>>> aapl.puts # will give an AttributeError
>>> aapl.get_put_data() # Get data and set ivars
>>> aapl.puts # Doesn't throw AttributeError
Also note that aapl.puts will always be the puts for the next
expiry. If the user calls this method with a different month
or year, the ivar will be named putsYYMMDD where YY, MM and DD are,
respectively, two digit representations of the year, month and day
for the expiry of the options.
"""
expiry = self._try_parse_dates(year, month, expiry)
return self._get_data_in_date_range(expiry, put=True, call=False)
def get_near_stock_price(self, above_below=2, call=True, put=False,
month=None, year=None, expiry=None):
"""
***Experimental***
Returns a data frame of options that are near the current stock price.
Parameters
----------
above_below : number, int, optional (default=2)
The number of strike prices above and below the stock price that
should be taken
call : bool
Tells the function whether or not it should be using calls
put : bool
            Tells the function whether or not it should be using puts
month : number, int, optional(default=None)
The month the options expire. This should be either 1 or 2
digits.
year : number, int, optional(default=None)
The year the options expire. This should be a 4 digit int.
expiry : date-like or convertible or list-like object, optional (default=None)
The date (or dates) when options expire (defaults to current month)
Returns
-------
chopped: DataFrame
            The resultant DataFrame chopped down to the 2 * above_below + 1 rows
            desired. If there isn't data as far out as the user has asked for,
            then as much data as is available is returned.
Note: Format of returned data frame is dependent on Yahoo and may change.
"""
expiry = self._try_parse_dates(year, month, expiry)
data = self._get_data_in_date_range(expiry, call=call, put=put)
return self.chop_data(data, above_below, self.underlying_price)
def chop_data(self, df, above_below=2, underlying_price=None):
"""Returns a data frame only options that are near the current stock price."""
if not underlying_price:
try:
underlying_price = self.underlying_price
except AttributeError:
underlying_price = np.nan
max_strike = max(df.index.get_level_values('Strike'))
min_strike = min(df.index.get_level_values('Strike'))
if not np.isnan(underlying_price) and min_strike < underlying_price < max_strike:
start_index = np.where(df.index.get_level_values('Strike')
> underlying_price)[0][0]
get_range = slice(start_index - above_below,
start_index + above_below + 1)
df = df[get_range].dropna(how='all')
return df
def _try_parse_dates(self, year, month, expiry):
"""
Validates dates provided by user. Ensures the user either provided both a month and a year or an expiry.
Parameters
----------
year : int
Calendar year
month : int
Calendar month
expiry : date-like or convertible, (preferred)
Expiry date
Returns
-------
list of expiry dates (datetime.date)
"""
#Checks if the user gave one of the month or the year but not both and did not provide an expiry:
        if ((month is not None and year is None) or (month is None and year is not None)) and expiry is None:
msg = "You must specify either (`year` and `month`) or `expiry` " \
"or none of these options for the next expiry."
raise ValueError(msg)
if expiry is not None:
if hasattr(expiry, '__iter__'):
expiry = [self._validate_expiry(exp) for exp in expiry]
else:
expiry = [self._validate_expiry(expiry)]
if len(expiry) == 0:
raise ValueError('No expiries available for given input.')
elif year is None and month is None:
#No arguments passed, provide next expiry
year = CUR_YEAR
month = CUR_MONTH
expiry = dt.date(year, month, 1)
expiry = [self._validate_expiry(expiry)]
else:
#Year and month passed, provide all expiries in that month
expiry = [expiry for expiry in self.expiry_dates if expiry.year == year and expiry.month == month]
if len(expiry) == 0:
raise ValueError('No expiries available in %s-%s' % (year, month))
return expiry
def _validate_expiry(self, expiry):
"""Ensures that an expiry date has data available on Yahoo
If the expiry date does not have options that expire on that day, return next expiry"""
expiry_dates = self.expiry_dates
expiry = to_datetime(expiry)
if hasattr(expiry, 'date'):
expiry = expiry.date()
if expiry in expiry_dates:
return expiry
else:
index = DatetimeIndex(expiry_dates).order()
return index[index.date >= expiry][0].date()
def get_forward_data(self, months, call=True, put=False, near=False,
above_below=2):
"""
***Experimental***
Gets either call, put, or both data for months starting in the current
month and going out in the future a specified amount of time.
Parameters
----------
months : number, int
How many months to go out in the collection of the data. This is
inclusive.
call : bool, optional (default=True)
Whether or not to collect data for call options
put : bool, optional (default=False)
Whether or not to collect data for put options.
near : bool, optional (default=False)
Whether this function should get only the data near the
current stock price. Uses Options.get_near_stock_price
above_below : number, int, optional (default=2)
The number of strike prices above and below the stock price that
should be taken if the near option is set to True
Returns
-------
pandas.DataFrame
A DataFrame with requested options data.
Index:
Strike: Option strike, int
Expiry: Option expiry, Timestamp
Type: Call or Put, string
Symbol: Option symbol as reported on Yahoo, string
Columns:
Last: Last option price, float
Chg: Change from prior day, float
Bid: Bid price, float
Ask: Ask price, float
Vol: Volume traded, int64
Open_Int: Open interest, int64
            IsNonstandard: True if the deliverable is not 100 shares, otherwise false
Underlying: Ticker of the underlying security, string
Underlying_Price: Price of the underlying security, float64
Quote_Time: Time of the quote, Timestamp
Note: Format of returned data frame is dependent on Yahoo and may change.
"""
warnings.warn("get_forward_data() is deprecated", FutureWarning,
stacklevel=2)
end_date = dt.date.today() + MonthEnd(months)
dates = (date for date in self.expiry_dates if date <= end_date.date())
data = self._get_data_in_date_range(dates, call=call, put=put)
if near:
data = self.chop_data(data, above_below=above_below)
return data
def get_all_data(self, call=True, put=True):
"""
***Experimental***
Gets either call, put, or both data for all available months starting
in the current month.
Parameters
----------
call : bool, optional (default=True)
Whether or not to collect data for call options
put : bool, optional (default=True)
Whether or not to collect data for put options.
Returns
-------
pandas.DataFrame
A DataFrame with requested options data.
Index:
Strike: Option strike, int
Expiry: Option expiry, Timestamp
Type: Call or Put, string
Symbol: Option symbol as reported on Yahoo, string
Columns:
Last: Last option price, float
Chg: Change from prior day, float
Bid: Bid price, float
Ask: Ask price, float
Vol: Volume traded, int64
Open_Int: Open interest, int64
            IsNonstandard: True if the deliverable is not 100 shares, otherwise false
Underlying: Ticker of the underlying security, string
Underlying_Price: Price of the underlying security, float64
Quote_Time: Time of the quote, Timestamp
Note: Format of returned data frame is dependent on Yahoo and may change.
"""
try:
expiry_dates = self.expiry_dates
except AttributeError:
expiry_dates, _ = self._get_expiry_dates_and_links()
return self._get_data_in_date_range(dates=expiry_dates, call=call, put=put)
def _get_data_in_date_range(self, dates, call=True, put=True):
to_ret = Series({'calls': call, 'puts': put})
to_ret = to_ret[to_ret].index
data = []
for name in to_ret:
for expiry_date in dates:
nam = name + self._expiry_to_string(expiry_date)
try: # Try to access on the instance
frame = getattr(self, nam)
except AttributeError:
frame = self._get_option_data(expiry=expiry_date, name=name)
data.append(frame)
return concat(data).sortlevel()
@property
def expiry_dates(self):
"""
Returns a list of available expiry dates
"""
try:
expiry_dates = self._expiry_dates
except AttributeError:
expiry_dates, _ = self._get_expiry_dates_and_links()
return expiry_dates
def _get_expiry_dates_and_links(self):
"""
Gets available expiry dates.
Returns
-------
Tuple of:
List of datetime.date objects
Dict of datetime.date objects as keys and corresponding links
"""
url = self._OPTIONS_BASE_URL.format(sym=self.symbol)
root = self._parse_url(url)
try:
links = root.xpath('//*[@id="options_menu"]/form/select/option')
except IndexError:
raise RemoteDataError('Expiry dates not available')
expiry_dates = [dt.datetime.strptime(element.text, "%B %d, %Y").date() for element in links]
links = [element.attrib['data-selectbox-link'] for element in links]
if len(expiry_dates) == 0:
raise RemoteDataError('Data not available')
expiry_links = dict(zip(expiry_dates, links))
self._expiry_links = expiry_links
self._expiry_dates = expiry_dates
return expiry_dates, expiry_links
def _parse_url(self, url):
"""
Downloads and parses a URL, returns xml root.
"""
try:
from lxml.html import parse
except ImportError:
raise ImportError("Please install lxml if you want to use the "
"{0!r} class".format(self.__class__.__name__))
try:
doc = parse(url)
except _network_error_classes:
raise RemoteDataError("Unable to parse URL "
"{0!r}".format(url))
else:
root = doc.getroot()
if root is None:
raise RemoteDataError("Parsed URL {0!r} has no root"
"element".format(url))
return root
def _process_data(self, frame, type):
"""
Adds columns for Expiry, IsNonstandard (ie: deliverable is not 100 shares)
and Tag (the tag indicating what is actually deliverable, None if standard).
"""
frame.columns = ['Strike', 'Symbol', 'Last', 'Bid', 'Ask', 'Chg', 'PctChg', 'Vol', 'Open_Int', 'IV']
frame["Rootexp"] = frame.Symbol.str[0:-9]
frame["Root"] = frame.Rootexp.str[0:-6]
frame["Expiry"] = to_datetime(frame.Rootexp.str[-6:])
#Removes dashes in equity ticker to map to option ticker.
#Ex: BRK-B to BRKB140517C00100000
frame["IsNonstandard"] = frame['Root'] != self.symbol.replace('-', '')
del frame["Rootexp"]
frame["Underlying"] = self.symbol
try:
frame['Underlying_Price'] = self.underlying_price
frame["Quote_Time"] = self.quote_time
except AttributeError:
frame['Underlying_Price'] = np.nan
frame["Quote_Time"] = np.nan
frame.rename(columns={'Open Int': 'Open_Int'}, inplace=True)
frame['Type'] = type
frame.set_index(['Strike', 'Expiry', 'Type', 'Symbol'], inplace=True)
return frame
|
py | 1a4c45807610aea3316f75431d9b7d722cc77970 | '''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GLES2 import _types as _cs
# End users want this...
from OpenGL.raw.GLES2._types import *
from OpenGL.raw.GLES2 import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GLES2_OES_geometry_shader'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GLES2,'GLES2_OES_geometry_shader',error_checker=_errors._error_checker)
GL_FIRST_VERTEX_CONVENTION_OES=_C('GL_FIRST_VERTEX_CONVENTION_OES',0x8E4D)
GL_FRAMEBUFFER_ATTACHMENT_LAYERED_OES=_C('GL_FRAMEBUFFER_ATTACHMENT_LAYERED_OES',0x8DA7)
GL_FRAMEBUFFER_DEFAULT_LAYERS_OES=_C('GL_FRAMEBUFFER_DEFAULT_LAYERS_OES',0x9312)
GL_FRAMEBUFFER_INCOMPLETE_LAYER_TARGETS_OES=_C('GL_FRAMEBUFFER_INCOMPLETE_LAYER_TARGETS_OES',0x8DA8)
GL_GEOMETRY_LINKED_INPUT_TYPE_OES=_C('GL_GEOMETRY_LINKED_INPUT_TYPE_OES',0x8917)
GL_GEOMETRY_LINKED_OUTPUT_TYPE_OES=_C('GL_GEOMETRY_LINKED_OUTPUT_TYPE_OES',0x8918)
GL_GEOMETRY_LINKED_VERTICES_OUT_OES=_C('GL_GEOMETRY_LINKED_VERTICES_OUT_OES',0x8916)
GL_GEOMETRY_SHADER_BIT_OES=_C('GL_GEOMETRY_SHADER_BIT_OES',0x00000004)
GL_GEOMETRY_SHADER_INVOCATIONS_OES=_C('GL_GEOMETRY_SHADER_INVOCATIONS_OES',0x887F)
GL_GEOMETRY_SHADER_OES=_C('GL_GEOMETRY_SHADER_OES',0x8DD9)
GL_LAST_VERTEX_CONVENTION_OES=_C('GL_LAST_VERTEX_CONVENTION_OES',0x8E4E)
GL_LAYER_PROVOKING_VERTEX_OES=_C('GL_LAYER_PROVOKING_VERTEX_OES',0x825E)
GL_LINES_ADJACENCY_OES=_C('GL_LINES_ADJACENCY_OES',0x000A)
GL_LINE_STRIP_ADJACENCY_OES=_C('GL_LINE_STRIP_ADJACENCY_OES',0x000B)
GL_MAX_COMBINED_GEOMETRY_UNIFORM_COMPONENTS_OES=_C('GL_MAX_COMBINED_GEOMETRY_UNIFORM_COMPONENTS_OES',0x8A32)
GL_MAX_FRAMEBUFFER_LAYERS_OES=_C('GL_MAX_FRAMEBUFFER_LAYERS_OES',0x9317)
GL_MAX_GEOMETRY_ATOMIC_COUNTERS_OES=_C('GL_MAX_GEOMETRY_ATOMIC_COUNTERS_OES',0x92D5)
GL_MAX_GEOMETRY_ATOMIC_COUNTER_BUFFERS_OES=_C('GL_MAX_GEOMETRY_ATOMIC_COUNTER_BUFFERS_OES',0x92CF)
GL_MAX_GEOMETRY_IMAGE_UNIFORMS_OES=_C('GL_MAX_GEOMETRY_IMAGE_UNIFORMS_OES',0x90CD)
GL_MAX_GEOMETRY_INPUT_COMPONENTS_OES=_C('GL_MAX_GEOMETRY_INPUT_COMPONENTS_OES',0x9123)
GL_MAX_GEOMETRY_OUTPUT_COMPONENTS_OES=_C('GL_MAX_GEOMETRY_OUTPUT_COMPONENTS_OES',0x9124)
GL_MAX_GEOMETRY_OUTPUT_VERTICES_OES=_C('GL_MAX_GEOMETRY_OUTPUT_VERTICES_OES',0x8DE0)
GL_MAX_GEOMETRY_SHADER_INVOCATIONS_OES=_C('GL_MAX_GEOMETRY_SHADER_INVOCATIONS_OES',0x8E5A)
GL_MAX_GEOMETRY_SHADER_STORAGE_BLOCKS_OES=_C('GL_MAX_GEOMETRY_SHADER_STORAGE_BLOCKS_OES',0x90D7)
GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS_OES=_C('GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS_OES',0x8C29)
GL_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS_OES=_C('GL_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS_OES',0x8DE1)
GL_MAX_GEOMETRY_UNIFORM_BLOCKS_OES=_C('GL_MAX_GEOMETRY_UNIFORM_BLOCKS_OES',0x8A2C)
GL_MAX_GEOMETRY_UNIFORM_COMPONENTS_OES=_C('GL_MAX_GEOMETRY_UNIFORM_COMPONENTS_OES',0x8DDF)
GL_PRIMITIVES_GENERATED_OES=_C('GL_PRIMITIVES_GENERATED_OES',0x8C87)
GL_REFERENCED_BY_GEOMETRY_SHADER_OES=_C('GL_REFERENCED_BY_GEOMETRY_SHADER_OES',0x9309)
GL_TRIANGLES_ADJACENCY_OES=_C('GL_TRIANGLES_ADJACENCY_OES',0x000C)
GL_TRIANGLE_STRIP_ADJACENCY_OES=_C('GL_TRIANGLE_STRIP_ADJACENCY_OES',0x000D)
GL_UNDEFINED_VERTEX_OES=_C('GL_UNDEFINED_VERTEX_OES',0x8260)
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLuint,_cs.GLint)
def glFramebufferTextureOES(target,attachment,texture,level):pass
|
py | 1a4c45c130070c48436b7a7dfea7be39c73cf345 | from flask import Flask
from config import Config
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config.from_object(Config)
db = SQLAlchemy(app)
import routes, models
if __name__ == '__main__':
app.run()
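# Minimal sketch of the companion routes module this script imports (contents
# are illustrative assumptions, not part of this repository; it assumes this
# file is importable as `app`):
#
#   # routes.py
#   from app import app
#
#   @app.route('/')
#   def index():
#       return 'Hello'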
|
py | 1a4c45e713f0f38361d02a09c7db4db8737900a4 | # -*- coding: utf-8 -*-
# @createTime : 2019/10/22 20:59
# @author : Huanglg
# @fileName: BOM.py
# @email: [email protected]
import time
from mesService.lib.OracleLib.OracleDBUtil import Oracle
def print_run_time(func):
def wrapper(*args, **kw):
local_time = time.time()
        result = func(*args, **kw)
        print('current Function [%s] run time is %.2f' % (func.__name__, time.time() - local_time))
        return result
    return wrapper
@print_run_time
def test_oracle():
oracle = Oracle()
product_sql = """select id productid, tt.medium productdesc from product p
left join TEXT_TRANSLATION tt on p.textid = tt.textid and tt.LANGUAGEID = 2052"""
products = oracle.query(product_sql)
print(products)
product_component_sql = """select PC.COMPONENTID,TT.MEDIUM,PC.PRODUCTID PRODUCTID,C.PRODUCTID CPRODUCTID from PRODUCT_COMPONENT PC
left join COMPONENT C on C.ID = PC.COMPONENTID
left join PRODUCT P on P.ID = C.PRODUCTID
left join TEXT_TRANSLATION TT on TT.TEXTID = P.TEXTID and TT.LANGUAGEID = 2052
where PC.PRODUCTID={productid}"""
for product in products:
productid = product['productid']
sql = product_component_sql.format(productid=productid)
product_component = oracle.query(sql)
print(product_component)
if __name__ == '__main__':
test_oracle()
|
py | 1a4c4625461e48f286b05e58a3bdf1a439e10c30 | # coding=utf-8
from distutils.util import convert_path
import os
from fnmatch import fnmatchcase
from setuptools import setup, find_packages
from pip.req import parse_requirements
import uuid
import sys
AUTHOR = 'Nekmo'
EMAIL = '[email protected]'
PLUGIN_NAME = 'userscommands'
DESCRIPTION = ''
WEBSITE = 'http://nekmo.com'
DOWNLOAD_URL = ''
STATUS_LEVEL = 1 # 1:Planning 2:Pre-Alpha 3:Alpha 4:Beta 5:Production/Stable 6:Mature 7:Inactive
CLASSIFIERS = [
'License :: OSI Approved :: MIT License',
# 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
# 'License :: OSI Approved :: BSD License',
]
ROOT_INCLUDE = ['requirements.txt', 'VERSION', 'LICENSE.txt']
SETUP_REQUIRES = ['pip']
##############################################################################
# find_package_data is an Ian Bicking creation.
# Provided as an attribute, so you can append to these instead
# of replicating them:
standard_exclude = ('*.py', '*.pyc', '*~', '.*', '*.bak', '*.swp*')
standard_exclude_directories = ('.*', 'CVS', '_darcs', './build',
'./dist', 'EGG-INFO', '*.egg-info')
def find_package_data(where='.', package='',
exclude=standard_exclude,
exclude_directories=standard_exclude_directories,
only_in_packages=True,
show_ignored=False):
"""
Return a dictionary suitable for use in ``package_data``
in a distutils ``setup.py`` file.
The dictionary looks like::
{'package': [files]}
Where ``files`` is a list of all the files in that package that
don't match anything in ``exclude``.
If ``only_in_packages`` is true, then top-level directories that
are not packages won't be included (but directories under packages
will).
Directories matching any pattern in ``exclude_directories`` will
be ignored; by default directories with leading ``.``, ``CVS``,
and ``_darcs`` will be ignored.
If ``show_ignored`` is true, then all the files that aren't
included in package data are shown on stderr (for debugging
purposes).
Note patterns use wildcards, or can be exact paths (including
leading ``./``), and all searching is case-insensitive.
This function is by Ian Bicking.
"""
out = {}
stack = [(convert_path(where), '', package, only_in_packages)]
while stack:
where, prefix, package, only_in_packages = stack.pop(0)
for name in os.listdir(where):
fn = os.path.join(where, name)
if os.path.isdir(fn):
bad_name = False
for pattern in exclude_directories:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
if show_ignored:
print >> sys.stderr, (
"Directory %s ignored by pattern %s"
% (fn, pattern))
break
if bad_name:
continue
if os.path.isfile(os.path.join(fn, '__init__.py')):
if not package:
new_package = name
else:
new_package = package + '.' + name
stack.append((fn, '', new_package, False))
else:
stack.append(
(fn, prefix + name + '/', package, only_in_packages)
)
elif package or not only_in_packages:
# is a file
bad_name = False
for pattern in exclude:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
if show_ignored:
print >> sys.stderr, (
"File %s ignored by pattern %s"
% (fn, pattern))
break
if bad_name:
continue
out.setdefault(package, []).append(prefix+name)
return out
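# Illustrative use of the helper above (this particular setup script relies on
# find_packages/include_package_data further down instead):
#
#   setup(...,
#         package_data=find_package_data(where='.', package='nekbot'),
#         ...)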
##############################################################################
__dir__ = os.path.abspath(os.path.dirname(__file__))
def get_url(dep):
if hasattr(dep, 'url'):
return dep.url
if dep.link is None:
return
return dep.link.url
VERSION = open('VERSION').read().replace('\n', '') # Please, change VERSION file
requirements = parse_requirements('requirements.txt', session=uuid.uuid1()) # Please, change requirements.txt file
INSTALL_REQUIRES = [str(ir.req) for ir in requirements if not get_url(ir)]
try:
LONG_DESCRIPTION = open('README', 'rt').read() # Please, change README file
except IOError:
LONG_DESCRIPTION = ''
if not DESCRIPTION:
DESCRIPTION = '%s, a plugin for NekBot, a modular and multiprotocol bot written in Python.' % PLUGIN_NAME
STATUS_NAME = ['Planning', 'Pre-Alpha', 'Alpha', 'Beta',
'Production/Stable', 'Mature', 'Inactive'][STATUS_LEVEL - 1]
packages = find_packages(__dir__)
# Prevent include symbolic links
for package_name in tuple(packages):
path = os.path.join(__dir__, package_name.replace('.', '/'))
if not os.path.exists(path):
continue
if not os.path.islink(path):
continue
packages.remove(package_name)
setup(
name='nekbot.plugins.%s' % PLUGIN_NAME,
namespace_packages=['nekbot.plugins'],
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author=AUTHOR,
author_email=EMAIL,
url=WEBSITE,
download_url=DOWNLOAD_URL,
    classifiers=CLASSIFIERS + [
'Development Status :: %i - %s' % (STATUS_LEVEL, STATUS_NAME),
'Intended Audience :: Developers',
'Environment :: Console',
'Topic :: Communications :: Chat',
'Topic :: Communications :: Chat :: Internet Relay Chat',
'Topic :: Communications :: Conferencing',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
    ],
platforms=['linux'],
scripts=[
# 'scripts/myscript.sh'
],
provides=['nekbot.plugins.%s' % PLUGIN_NAME],
install_requires=INSTALL_REQUIRES,
setup_requires=SETUP_REQUIRES,
packages=['nekbot', 'nekbot.plugins', 'nekbot.plugins.%s' % PLUGIN_NAME],
include_package_data=True,
keywords=['nekbot', 'bot', PLUGIN_NAME, 'plugins', 'chat'],
entry_points={
},
zip_safe=False,
) |
py | 1a4c477f99138c81c60682dab86bd61f209b7bce | """ Run with pytest """
def test_simple_cases(testdir):
""" Verify a simple passing test and a simple failing test.
The failing test is marked as xfail to have it skipped. """
testdir.makepyfile(
"""
import pytest
from seleniumbase import BaseCase
class MyTestCase(BaseCase):
def test_passing(self):
self.assert_equal('yes', 'yes')
@pytest.mark.xfail
def test_failing(self):
self.assert_equal('yes', 'no')
"""
)
result = testdir.inline_run("--headless", "--rs")
assert result.matchreport("test_passing").passed
assert result.matchreport("test_failing").skipped
def test_basecase(testdir):
testdir.makepyfile(
"""
from seleniumbase import BaseCase
class MyTest(BaseCase):
def test_basecase(self):
self.open("data:text/html,<p>Hello<br><input></p>")
self.assert_element("html > body") # selector
self.assert_text("Hello", "body p") # text, selector
self.type("input", "Goodbye") # selector, text
self.click("body p") # selector
"""
)
result = testdir.inline_run("--headless")
assert result.matchreport("test_basecase").passed
def test_sb_fixture(testdir):
testdir.makepyfile(
"""
def test_sb_fixture(sb):
sb.open("data:text/html,<p>Hello<br><input></p>")
sb.assert_element("html > body") # selector
sb.assert_text("Hello", "body p") # text, selector
sb.type("input", "Goodbye") # selector, text
sb.click("body p") # selector
"""
)
result = testdir.inline_run("--headless")
assert result.matchreport("test_sb_fixture").passed
def test_request_sb_fixture(testdir):
testdir.makepyfile(
"""
def test_request_sb_fixture(request):
sb = request.getfixturevalue('sb')
sb.open("data:text/html,<p>Hello<br><input></p>")
sb.assert_element("html > body") # selector
sb.assert_text("Hello", "body p") # text, selector
sb.type("input", "Goodbye") # selector, text
sb.click("body p") # selector
sb.tearDown()
"""
)
result = testdir.inline_run("--headless")
assert result.matchreport("test_request_sb_fixture").passed
|
py | 1a4c4855d183b572b67d94124fe074d6341d8594 | import time
from PySide2 import QtWidgets, QtCore
### global classes ############################################################
class SimpleGlobal:
def __init__(self, initial_value=None):
self.value = initial_value
def read(self):
return self.value
def write(self, value):
self.value = value
debug = SimpleGlobal(False)
class PollTimer(SimpleGlobal):
def connect_to_timeout(self, slot):
QtWidgets.QAction.connect(self.value, QtCore.SIGNAL("timeout()"), slot)
poll_timer = PollTimer()
slack_poll_timer = PollTimer()
class logger: # must come before other globals
def __init__(self):
pass
def load(self):
import yaqc_cmds.project.logging_handler as logging_handler
self.value = logging_handler.log
if debug.read():
self.log("info", "Debug", "Yaqc_cmds is in debug mode")
def log(self, level, name, message="", origin="name"):
"""
wrapper of logging method for Yaqc_cmds
accepts strings
levels: debug, info, warning, error, critical
"""
self.value(level, name, message, origin)
logger = logger()
### other globals #############################################################
# alphabetical
app = SimpleGlobal()
colors_dict = SimpleGlobal()
hardware_advanced_box = SimpleGlobal()
hardware_initialized = SimpleGlobal(False)
google_drive_control = SimpleGlobal()
google_drive_enabled = SimpleGlobal()
class hardware_waits:
def __init__(self):
"""
holds value, a list of hardware wait_until_still methods
"""
self.value = []
def add(self, method):
self.value.append(method)
def wait(self):
for method in self.value:
method()
hardware_waits = hardware_waits()
class hardware_widget(SimpleGlobal):
def __init__(self, initial_value=None):
super().__init__(initial_value)
self.number_of_widgets = 0
def write(self, value):
super().write(value)
self.value.setLayout(QtWidgets.QVBoxLayout())
self.value.layout().setMargin(5)
self.value.layout().addStretch(1)
def add_to(self, widget):
self.value.layout().takeAt(self.number_of_widgets)
self.value.layout().addWidget(widget)
self.number_of_widgets += 1
self.value.layout().addStretch(1)
hardware_widget = hardware_widget()
main_thread = SimpleGlobal(QtCore.QThread.currentThread())
main_window = SimpleGlobal()
scan_thread = SimpleGlobal()
class QueueControl(QtCore.QObject):
def __init__(self):
self.value = None
self.widgets_to_disable = []
def read(self):
return self.value
def write(self, value):
for widget in self.widgets_to_disable:
try:
widget.setDisabled(value)
except RuntimeError:
# widget has been deleted, probably
self.widgets_to_disable.remove(widget)
self.value = value
main_window.read().queue_control.emit()
def disable_when_true(self, widget):
self.widgets_to_disable.append(widget)
queue_control = QueueControl()
class progress_bar:
def __init__(self):
self.value = None
def write(self, value):
self.value = value
def give_time_display_elements(self, time_elapsed, time_remaining):
self.time_elapsed = time_elapsed
self.time_remaining = time_remaining
def begin_new_scan_timer(self):
self.start_time = time.time()
def set_fraction(self, fraction):
self.value.setValue(fraction * 100)
# time elapsed
time_elapsed = time.time() - self.start_time
m, s = divmod(time_elapsed, 60)
h, m = divmod(m, 60)
self.time_elapsed.setText("%02d:%02d:%02d" % (h, m, s))
# time remaining
if fraction == 0:
self.time_remaining.setText("??:??:??")
else:
time_remaining = (time_elapsed / fraction) - time_elapsed
m, s = divmod(time_remaining, 60)
h, m = divmod(m, 60)
self.time_remaining.setText("%02d:%02d:%02d" % (h, m, s))
progress_bar = progress_bar()
class shutdown(SimpleGlobal):
"""
holds the reference of MainWindow.shutdown Qt signal
    during startup, add your shutdown method to this object using the 'add_method' method; it will be called upon shutdown.
your method must not have any arguments
"""
def __init__(self, initial_value=None):
super().__init__(initial_value)
self.methods = []
def add_method(self, method):
self.methods.append(method)
def fire(self):
for method in self.methods:
method()
main_window.read().close()
shutdown = shutdown()
slack_control = SimpleGlobal()
slack_enabled = SimpleGlobal()
system_name = SimpleGlobal()
version = SimpleGlobal()
|
py | 1a4c49406d60e4f82e35ae88975d750b4e33def8 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkwaf_openapi.endpoint import endpoint_data
class DescribeInstanceInfoRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'waf-openapi', '2019-09-10', 'DescribeInstanceInfo','waf')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceGroupId(self):
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self,ResourceGroupId):
self.add_query_param('ResourceGroupId',ResourceGroupId)
def get_InstanceId(self):
return self.get_query_params().get('InstanceId')
def set_InstanceId(self,InstanceId):
self.add_query_param('InstanceId',InstanceId)
def get_InstanceSource(self):
return self.get_query_params().get('InstanceSource')
def set_InstanceSource(self,InstanceSource):
self.add_query_param('InstanceSource',InstanceSource) |
py | 1a4c49e9d6793761a19b68869b0e1cf477a0a180 | # GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Component:
DESCRIPTION = "Lookup ZeuS hosts by hostname"
class Input:
HOST = "host"
class Output:
AS_NAME = "as_name"
AS_NUM = "as_num"
BINARY_URLS = "binary_urls"
CONFIG_URLS = "config_urls"
COUNTRY = "country"
DATE_ADDED = "date_added"
DOMAIN_HISTORY = "domain_history"
DROP_URLS = "drop_urls"
FAKE_URLS = "fake_urls"
FOUND = "found"
HOST = "host"
IP = "ip"
LAST_CHECKED = "last_checked"
LAST_UPDATED = "last_updated"
LEVEL = "level"
MALWARE = "malware"
NAMESERVERS = "nameservers"
REGISTRAR = "registrar"
SBL = "sbl"
STATUS = "status"
UPTIME = "uptime"
class HostInput(komand.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"host": {
"type": "string",
"title": "Hostname",
"description": "Host to search for",
"order": 1
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class HostOutput(komand.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"as_name": {
"type": "string",
"title": "AS Name",
"description": "Autonomous system name",
"order": 9
},
"as_num": {
"type": "string",
"title": "AS Number",
"description": "Autonomous system number",
"order": 8
},
"binary_urls": {
"type": "array",
"title": "Binary URLs",
"description": "ZeuS binary URLs on this C\\u0026C",
"items": {
"$ref": "#/definitions/binary_url"
},
"order": 18
},
"config_urls": {
"type": "array",
"title": "Config URLs",
"description": "ZeuS config URLs on this C\\u0026C",
"items": {
"$ref": "#/definitions/config_url"
},
"order": 17
},
"country": {
"type": "string",
"title": "Country",
"description": "Country",
"order": 10
},
"date_added": {
"type": "string",
"title": "Date Added",
"displayType": "date",
"description": "Date added",
"format": "date-time",
"order": 14
},
"domain_history": {
"type": "array",
"title": "Domain History",
"description": "Domain history",
"items": {
"$ref": "#/definitions/domain_history"
},
"order": 21
},
"drop_urls": {
"type": "array",
"title": "Dropzones",
"description": "ZeuS drop URLs on this C\\u0026C",
"items": {
"$ref": "#/definitions/drop_url"
},
"order": 19
},
"fake_urls": {
"type": "array",
"title": "Fake URLs",
"description": "ZeuS fake URLs on this C\\u0026C",
"items": {
"$ref": "#/definitions/fake_url"
},
"order": 20
},
"found": {
"type": "boolean",
"title": "Results Found",
"description": "Results found",
"order": 1
},
"host": {
"type": "string",
"title": "Hostname",
"description": "Hostname",
"order": 6
},
"ip": {
"type": "string",
"title": "IP Address",
"description": "IP Address",
"order": 3
},
"last_checked": {
"type": "string",
"title": "Last Checked",
"displayType": "date",
"description": "Last checked",
"format": "date-time",
"order": 15
},
"last_updated": {
"type": "string",
"title": "Last Updated",
"displayType": "date",
"description": "Last updated",
"format": "date-time",
"order": 16
},
"level": {
"type": "integer",
"title": "Level",
"description": "Level",
"order": 11
},
"malware": {
"type": "string",
"title": "Malware Name",
"description": "Malware name",
"order": 2
},
"nameservers": {
"type": "array",
"title": "Nameserver(s)",
"description": "Nameserver(s)",
"items": {
"type": "string"
},
"order": 13
},
"registrar": {
"type": "string",
"title": "Registrar",
"description": "Registrar",
"order": 12
},
"sbl": {
"type": "string",
"title": "SBL Number",
"description": "Spamhaus Block List number",
"order": 7
},
"status": {
"type": "string",
"title": "Status",
"description": "Status",
"order": 4
},
"uptime": {
"type": "string",
"title": "Uptime",
"description": "Uptime",
"order": 5
}
},
"definitions": {
"binary_url": {
"type": "object",
"title": "binary_url",
"properties": {
"anubis": {
"type": "string",
"title": "Anubis",
"description": "Anubis",
"order": 6
},
"date": {
"type": "string",
"title": "Date",
"description": "Date",
"order": 1
},
"file": {
"type": "string",
"title": "File Download",
"displayType": "bytes",
"description": "File download",
"format": "bytes",
"order": 9
},
"filesize": {
"type": "string",
"title": "Filesize",
"description": "Filesize",
"order": 4
},
"http_status": {
"type": "string",
"title": "HTTP Status",
"description": "HTTP status",
"order": 8
},
"md5": {
"type": "string",
"title": "MD5 Hash",
"description": "MD5 hash",
"order": 5
},
"status": {
"type": "string",
"title": "Status",
"description": "Status",
"order": 3
},
"url": {
"type": "string",
"title": "Binary URL",
"description": "Binary URL",
"order": 2
},
"virustotal": {
"type": "string",
"title": "Virsustotal",
"description": "Virustotal",
"order": 7
}
}
},
"config_url": {
"type": "object",
"title": "config_url",
"properties": {
"builder": {
"type": "string",
"title": "Builder",
"description": "Builder",
"order": 5
},
"date": {
"type": "string",
"title": "Date",
"displayType": "date",
"description": "Date",
"format": "date-time",
"order": 1
},
"file": {
"type": "string",
"title": "File Download",
"displayType": "bytes",
"description": "File download",
"format": "bytes",
"order": 9
},
"filesize": {
"type": "string",
"title": "Filesize",
"description": "Filesize",
"order": 6
},
"http_status": {
"type": "string",
"title": "HTTP Status",
"description": "HTTP status",
"order": 8
},
"md5": {
"type": "string",
"title": "MD5 Hash",
"description": "MD5 hash",
"order": 7
},
"status": {
"type": "string",
"title": "Status",
"description": "Status",
"order": 3
},
"url": {
"type": "string",
"title": "Config URL",
"description": "Config URL",
"order": 2
},
"version": {
"type": "string",
"title": "Version",
"description": "Version",
"order": 4
}
}
},
"domain_history": {
"type": "object",
"title": "domain_history",
"properties": {
"as_name": {
"type": "string",
"title": "AS Name",
"description": "Autonomous system name",
"order": 5
},
"as_num": {
"type": "string",
"title": "AS Num",
"description": "Autonomous system number",
"order": 4
},
"changedate": {
"type": "string",
"title": "Changedate",
"description": "Changedate",
"order": 1
},
"country": {
"type": "string",
"title": "Country",
"description": "Country",
"order": 6
},
"host": {
"type": "string",
"title": "Host",
"description": "Hostname",
"order": 2
},
"ip": {
"type": "string",
"title": "IP",
"description": "IP address",
"order": 3
}
}
},
"drop_url": {
"type": "object",
"title": "drop_url",
"properties": {
"date": {
"type": "string",
"title": "Date",
"displayType": "date",
"description": "Date",
"format": "date-time",
"order": 1
},
"http_status": {
"type": "string",
"title": "HTTP Status",
"description": "HTTP status",
"order": 4
},
"status": {
"type": "string",
"title": "Status",
"description": "Status",
"order": 3
},
"url": {
"type": "string",
"title": "Drop URL",
"description": "Drop URL",
"order": 2
}
}
},
"fake_url": {
"type": "object",
"title": "fake_url",
"properties": {
"md5": {
"type": "string",
"title": "ZeuS Config MD5",
"description": "ZeuS config MD5",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Protocol",
"order": 3
},
"url": {
"type": "string",
"title": "Url",
"description": "Fake URL",
"order": 2
}
}
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
|
py | 1a4c4a43ec1b7a96997c4747714da2c65108539d | #Author: Thy H. Nguyen
import turtle
wn = turtle.Screen()
wn.bgcolor("#E0FFFF")
mom = turtle.Turtle()
mom.color("#0000CD")
mom.shape("circle")
thy = int(input())
i=1
while i < thy:
mom.right(10)
mom.forward(100)
mom.stamp()
mom.backward(thy)
mom.dot()
i +=1
wn.exitonclick()
|
py | 1a4c4ac0780c7f68d68983bede0d9bff49c02d25 | from __future__ import unicode_literals
import re
CHUNK_RANGE_RE = re.compile(
r'^@@ -(?P<orig_start>\d+)(,(?P<orig_len>\d+))? '
r'\+(?P<new_start>\d+)(,(?P<new_len>\d+))? @@',
re.M)
def filter_interdiff_opcodes(opcodes, filediff_data, interfilediff_data):
"""Filters the opcodes for an interdiff to remove unnecessary lines.
An interdiff may contain lines of code that have changed as the result of
updates to the tree between the time that the first and second diff were
created. This leads to some annoyances when reviewing.
This function will filter the opcodes to remove as much of this as
possible. It will only output non-"equal" opcodes if it falls into the
ranges of lines dictated in the uploaded diff files.
"""
def _find_range_info(diff):
lines = diff.splitlines()
process_changes = False
ranges = []
chunk_start = None
chunk_len = 0
lines_of_context = 0
# Look through the chunks of the diff, trying to find the amount
# of context shown at the beginning of each chunk. Though this
# will usually be 3 lines, it may be fewer or more, depending
# on file length and diff generation settings.
for line in lines:
if process_changes:
if line.startswith((b'-', b'+')):
# We've found the first change in the chunk. We now
# know how many lines of context we have.
#
# We reduce the indexes by 1 because the chunk ranges
# in diffs start at 1, and we want a 0-based index.
start = chunk_start - 1 + lines_of_context
ranges.append((start, start + chunk_len))
process_changes = False
continue
else:
lines_of_context += 1
# This was not a change within a chunk, or we weren't processing,
# so check to see if this is a chunk header instead.
m = CHUNK_RANGE_RE.match(line)
if m:
# It is a chunk header. Reset the state for the next range,
# and pull the line number and length from the header.
chunk_start = int(m.group('new_start'))
chunk_len = int(m.group('new_len') or '1')
process_changes = True
lines_of_context = 0
return ranges
def _is_range_valid(line_range, tag, i1, i2):
return (line_range is not None and
i1 >= line_range[0] and
(tag == 'delete' or i1 != i2))
orig_ranges = _find_range_info(filediff_data)
new_ranges = _find_range_info(interfilediff_data)
orig_range_i = 0
new_range_i = 0
if orig_ranges:
orig_range = orig_ranges[orig_range_i]
else:
orig_range = None
if new_ranges:
new_range = new_ranges[new_range_i]
else:
new_range = None
if not orig_range and not new_range:
# There's nothing in here, or it's not a unified diff. Just yield
# what we get.
for tag, i1, i2, j1, j2 in opcodes:
yield tag, i1, i2, j1, j2
return
for tag, i1, i2, j1, j2 in opcodes:
while orig_range and i1 > orig_range[1]:
# We've left the range of the current chunk to consider in the
# original diff. Move on to the next one.
orig_range_i += 1
if orig_range_i < len(orig_ranges):
orig_range = orig_ranges[orig_range_i]
else:
orig_range = None
while new_range and j1 > new_range[1]:
# We've left the range of the current chunk to consider in the
# new diff. Move on to the next one.
new_range_i += 1
if new_range_i < len(new_ranges):
new_range = new_ranges[new_range_i]
else:
new_range = None
# See if the chunk we're looking at is in the range of the chunk in
# one of the uploaded diffs. If so, allow it through.
orig_starts_valid = _is_range_valid(orig_range, tag, i1, i2)
new_starts_valid = _is_range_valid(new_range, tag, j1, j2)
valid_chunk = orig_starts_valid or new_starts_valid
if valid_chunk:
# This chunk is valid. It may only be a portion of the real
# chunk, though. We'll need to split it up into a known valid
# segment first, and yield that.
if orig_range:
cap_i2 = orig_range[1] + 1
else:
cap_i2 = i2
if new_range:
cap_j2 = new_range[1] + 1
else:
cap_j2 = j2
if orig_starts_valid:
valid_i2 = min(i2, cap_i2)
else:
valid_i2 = i2
if new_starts_valid:
valid_j2 = min(j2, cap_j2)
else:
valid_j2 = j2
if tag in ('equal', 'replace'):
# We need to take care to not let the replace lines have
# differing ranges for the orig and modified files. We want the
# replace to take up the full bounds of the two sides, but
# capped to the valid chunk range.
#
# For this, we need to pick a consistent value for the length
# of the range. We know at least one side will be within
# bounds, since we have a valid chunk and at least one is
# capped to be <= the end of the range.
#
# If one side is out of bounds of the range, the other range
# will win. If both are in bounds, the largest wins.
i_diff = valid_i2 - i1
j_diff = valid_j2 - j1
if valid_i2 > cap_i2:
# Sanity-check that valid_j2 is in bounds. We don't need
# to check this in the following conditionals, though,
# since that's covered by the conditionals themselves.
assert valid_j2 <= cap_j2
max_cap = j_diff
elif valid_j2 > cap_j2:
max_cap = i_diff
else:
max_cap = max(i_diff, j_diff)
# Set each valid range to be the same length.
valid_i2 = i1 + max_cap
valid_j2 = j1 + max_cap
# Update the caps, so that we'll process whatever we've
# chopped off.
cap_i2 = valid_i2
cap_j2 = valid_j2
yield tag, i1, valid_i2, j1, valid_j2
if valid_i2 == i2 and valid_j2 == j2:
continue
# There were more parts of this range remaining. We know they're
# all invalid, so let's update i1 and j1 to point to the start
# of those invalid ranges, and mark them.
if orig_range is not None and i2 + 1 > cap_i2:
i1 = cap_i2
if new_range is not None and j2 + 1 > cap_j2:
j1 = cap_j2
valid_chunk = False
if not valid_chunk:
# Turn this into an "filtered-equal" chunk. The left-hand and
# right-hand side of the diffs will look different, which may be
# noticeable, but it will still help the user pay attention to
# what's actually changed that they care about.
#
# These will get turned back into "equal" chunks in the
# post-processing step.
yield 'filtered-equal', i1, i2, j1, j2
def post_process_filtered_equals(opcodes):
"""Post-processes filtered-equal and equal chunks from interdiffs.
Any filtered-out "filtered-equal" chunks will get turned back into "equal"
chunks and merged into any prior equal chunks. Likewise, simple "equal"
chunks will also get merged.
"equal" chunks that have any indentation information will remain
their own chunks, with nothing merged in.
"""
cur_chunk = None
for tag, i1, i2, j1, j2, meta in opcodes:
if ((tag == 'equal' and not meta.get('indentation_changes')) or
tag == 'filtered-equal'):
# We either have a plain equal chunk without any indentation
# changes, or a filtered-equal chunk. In these cases, we can
# safely merge the chunks together and transform them into
# an "equal" chunk.
if cur_chunk:
i1 = cur_chunk[1]
j1 = cur_chunk[3]
meta = cur_chunk[5]
cur_chunk = ('equal', i1, i2, j1, j2, meta)
else:
# This is some sort of changed chunk (insert, delete, replace,
# or equal with indentation changes). Yield the previous chunk
# we were working with, if any, and then yield the current chunk.
if cur_chunk:
yield cur_chunk
cur_chunk = None
yield tag, i1, i2, j1, j2, meta
if cur_chunk:
yield cur_chunk
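# Minimal usage sketch (reader's addition, hypothetical data; assumes Python 2, matching
# the module's mixed unicode/bytes handling): run SequenceMatcher-style opcodes through
# the interdiff filter, then merge the resulting "filtered-equal" chunks back into
# plain "equal" chunks.
if __name__ == '__main__':
    diff = b'@@ -1,3 +1,3 @@\n-a\n+b\n c\n'
    opcodes = [('replace', 0, 1, 0, 1), ('equal', 1, 3, 1, 3)]
    filtered = filter_interdiff_opcodes(opcodes, diff, diff)
    with_meta = [(tag, i1, i2, j1, j2, {}) for tag, i1, i2, j1, j2 in filtered]
    print(list(post_process_filtered_equals(with_meta)))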
|
py | 1a4c4b5675b1aa39f68af08ae2bb6daddda8e6d2 | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# author: bigfoolliu
"""
Pyro4 client: calls the remote object exposed by the server.
"""
import Pyro4
def main():
uri = input("What is the Pyro uri of the greeting object?(help: 输入server启动时对应的uri) ").strip()
name = input("What is your name? ").strip()
print(f'uri:{uri}, name:{name}')
server = Pyro4.Proxy(uri) # 获取server
print(server.welcomeMessage(name))
if __name__ == '__main__':
main()
|
py | 1a4c4b8f3fed130ba46b050624e3d4ba4d1f8e7d | import torch
import torch.nn as nn
import neat.activations as a
from torch import autograd
class FeedForwardNet(nn.Module):
def __init__(self, genome, config):
super(FeedForwardNet, self).__init__()
self.genome = genome
self.units = self.build_units()
self.lin_modules = nn.ModuleList()
self.config = config
self.activation = a.Activations().get(config.ACTIVATION)
for unit in self.units:
self.lin_modules.append(unit.linear)
def forward(self, x):
outputs = dict()
input_units = [u for u in self.units if u.ref_node.type == 'input']
output_units = [u for u in self.units if u.ref_node.type == 'output']
bias_units = [u for u in self.units if u.ref_node.type == 'bias']
stacked_units = self.genome.order_units(self.units)
# Set input values
for u in input_units:
outputs[u.ref_node.id] = x[0][u.ref_node.id]
# Set bias value
for u in bias_units:
outputs[u.ref_node.id] = torch.ones((1, 1)).to(device)[0][0]
# Compute through directed topology
while len(stacked_units) > 0:
current_unit = stacked_units.pop()
if current_unit.ref_node.type != 'input' and current_unit.ref_node.type != 'bias':
# Build input vector to current node
inputs_ids = self.genome.get_inputs_ids(current_unit.ref_node.id)
in_vec = autograd.Variable(torch.zeros((1, len(inputs_ids)), device=device, requires_grad=True))
for i, input_id in enumerate(inputs_ids):
in_vec[0][i] = outputs[input_id]
# Compute output of current node
linear_module = self.lin_modules[self.units.index(current_unit)]
if linear_module is not None: # TODO: Can this be avoided?
scaled = self.config.SCALE_ACTIVATION * linear_module(in_vec)
out = self.activation(scaled)
else:
out = torch.zeros((1, 1))
# Add to outputs dictionary
outputs[current_unit.ref_node.id] = out
# Build output vector
output = autograd.Variable(torch.zeros((1, len(output_units)), device=device, requires_grad=True))
for i, u in enumerate(output_units):
output[0][i] = outputs[u.ref_node.id]
return output
def build_units(self):
units = []
for n in self.genome.node_genes:
in_genes = self.genome.get_connections_in(n.id)
num_in = len(in_genes)
weights = [g.weight for g in in_genes]
new_unit = Unit(n, num_in)
new_unit.set_weights(weights)
units.append(new_unit)
return units
class Unit:
def __init__(self, ref_node, num_in_features):
self.ref_node = ref_node
self.linear = self.build_linear(num_in_features)
def set_weights(self, weights):
if self.ref_node.type != 'input' and self.ref_node.type != 'bias':
weights = torch.cat(weights).unsqueeze(0)
for p in self.linear.parameters():
p.data = weights
def build_linear(self, num_in_features):
if self.ref_node.type == 'input' or self.ref_node.type == 'bias':
return None
return nn.Linear(num_in_features, 1, False)
def __str__(self):
return 'Reference Node: ' + str(self.ref_node) + '\n'
# TODO: Multiple GPU support get from config
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
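# Note (reader's inference, not part of the original source): forward() indexes
# x[0][input_node.id], so the network expects a single-row input tensor of shape
# (1, num_input_nodes) and returns an output tensor of shape (1, num_output_nodes).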
|
py | 1a4c4ba8198b0091ddeab4d1e7791d78770e4b49 | '''
This script helps with creating and managing experiments.
Possible commands:
- launch: launch an experiment loading its specification from a CSV file
- view: list the experiments which are still running
- stop: stop all the runners of the experiment
'''
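# Illustrative invocations (reader's addition; the script name and paths are hypothetical,
# the flags come from the argparse setup below):
#   python experiment.py --command launch --name my_exp --dir ./experiments --condaenv myenv
#   python experiment.py --command view --name my_exp --dir ./sacred_runs
#   python experiment.py --command stop --name my_exp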
import pandas as pd
import argparse, os, sys, re
from multiprocessing import Pool
from screenutils import Screen, list_screens
from datetime import datetime
class Screener(object):
def command_sender(self, zipped_pair):
screen, command = zipped_pair
screen.send_commands(command)
def run(self, commands, name='s'):
n_screens = len(commands)
screens = [Screen(name+'_%d' % (i+1), True) for i in range(n_screens)]
p = Pool(n_screens)
p.map(self.command_sender, zip(screens, commands))
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--command', help='Command to execute.', type=str, default='launch', choices=['launch', 'view', 'stop'])
# Experiment selection
parser.add_argument('--name', help='Name of the experiment', type=str, default=None)
parser.add_argument('--dir', help='Directory from which to load the experiment (to launch).', type=str, default=None)
# Env
parser.add_argument('--condaenv', help='Conda environment to activate.', type=str, default=None)
parser.add_argument('--pythonv', help='Python version to use', type=str, default='python3')
parser.add_argument('--pythonpath', help='Pythonpath to use for script.', type=str, default=None)
parser.add_argument('--cuda_devices', help='CUDA visible devices.', type=str, default='')
# Sacred
parser.add_argument('--sacred', action='store_true', default=False, help='Enable sacred.')
parser.add_argument('--sacred_dir', help='Dir used by sacred to log.', type=str, default=None)
parser.add_argument('--sacred_slack', help='Config file for slack.', type=str, default=None)
parser.add_argument('--dirty', action='store_true', default=False, help='Enable sacred dirty running.')
args = parser.parse_args()
if args.command == 'launch':
assert args.name is not None, "Provide an experiment name."
assert args.dir is not None, "Provide a directory to load the experiment."
# Load experiment
experiment_path = args.dir + '/' + args.name + '.csv'
experiment = pd.read_csv(experiment_path)
# Start build base command
cmd_base = ''
# Set env variables
cmd_base += 'export CUDA_VISIBLE_DEVICES=' + args.cuda_devices + ' && '
cmd_base += 'export EXPERIMENT_NAME=' + args.name + ' && '
if args.sacred_dir and args.sacred:
cmd_base += 'export SACRED_RUNS_DIRECTORY=' + args.sacred_dir + ' && '
if args.sacred_slack and args.sacred:
cmd_base += 'export SACRED_SLACK_CONFIG=' + args.sacred_slack + ' && '
if args.pythonpath:
cmd_base += "export PYTHONPATH='PYTHONPATH:" + args.pythonpath + "' && "
if args.condaenv:
cmd_base += 'source activate ' + args.condaenv + ' && '
# Parse the CSV
param_cols = list(experiment)
param_cols.remove('script')
# Build the commands
cmd_base += args.pythonv + ' '
cmds = []
for index, row in experiment.iterrows():
# Get the script, check if we need to use sacred (just append _sacred to script name)
script = row['script']
if args.sacred:
script += '_sacred'
script = 'baselines/' + script + '.py '
_c = cmd_base + script
# Check if dirty and if to use with
if args.sacred and not args.dirty:
_c += '-e '
if args.sacred and len(param_cols) > 0:
_c += 'with '
# Add experiment_name to params
if args.sacred:
_c += 'experiment_name=' + args.name + ' '
else:
_c += '--experiment_name=' + args.name + ' '
# Params
for p in param_cols:
if args.sacred:
_c += str(p).strip() + '=' + str(row[p]).strip() + ' '
else:
_c += '--' + str(p).strip() + '=' + str(row[p]).strip() + ' '
# Add the exit command to terminate the experiment
_c += '&& exit'
cmds.append(_c)
scr = Screener()
scr.run(cmds, name=args.name)
elif args.command == 'view':
from baselines.common.sacred_utils import load_runs, filter_runs
from baselines.common import colorize
assert args.name is not None, "Provide an experiment name."
assert args.dir is not None, "Provide a directory for experiment."
rule = re.compile(args.name + '_*')
# Get all screens
all_active_screens = 0
for s in list_screens():
if rule.match(s.name):
all_active_screens += 1
# Load runs to get active ones
runs = load_runs(args.dir)
running_runs = filter_runs({'run.status': 'RUNNING'}, runs)
print(colorize("==========================================", color='red'))
max_eta, max_duration = None, None
for key in running_runs.keys():
run = running_runs[key]
print(colorize('Run:', color='blue'), "{0} ({1})".format(key, run['config']['env']))
print("\t" + colorize("Steps:", color='blue') +
"{0}/{1}".format(len(run['metrics']['EpRewMean']['steps'])+1, run['config']['max_iters']) +
"\t\t" + colorize("Reward:", color='blue') + "{0}".format(run['metrics']['EpRewMean']['values'][-1]) +
"\t\t" + colorize("Seed:", color='blue') + "{0}".format(run['config']['seed']) +
"\t\t" + colorize("Delta:", color='blue') + "{0}".format(run['config']['delta']))
completion = (len(run['metrics']['EpRewMean']['steps'])+1) / run['config']['max_iters']
start_time = datetime.strptime(run['run']['start_time'], '%Y-%m-%dT%H:%M:%S.%f')
duration = datetime.utcnow() - start_time
eta = duration * (1 - completion) / completion
max_eta = max(eta, max_eta) if max_eta is not None else eta
max_duration = max(duration, max_duration) if max_duration is not None else duration
if len(running_runs.keys()) == 0:
print(colorize("Done.", color='red'))
else:
t = max_eta.total_seconds()
d = max_duration.total_seconds()
print(colorize("==========================================", color='red'))
print(colorize("Active screens: {0}".format(all_active_screens), color='red'))
print(colorize("Active runs: {0}".format(len(running_runs.keys())), color='red'))
print(colorize("Elapsed time: {0} hours, {1} minutes, {2} seconds".format(int(d // 3600), int((d%3600)//60), int(d%3600)%60), color='red'))
print(colorize("ETA: {0} hours, {1} minutes, {2} seconds".format(int(t // 3600), int((t%3600)//60), int(t%3600)%60), color='red'))
print(colorize("==========================================", color='red'))
elif args.command == 'stop':
assert args.name is not None, "Provide an experiment name."
rule = re.compile(args.name + '_*')
# Get all screens
for s in list_screens():
if rule.match(s.name):
print("Stopping", s.name)
s.kill()
else:
raise Exception('Unrecognized command.')
|
py | 1a4c4c490fdec6236d86464e16fcd997320fa047 | #!/usr/bin/env python3
# FreeRTOS Common IO V0.1.2
# Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# http://aws.amazon.com/freertos
# http://www.FreeRTOS.org
import serial
from time import sleep
import csv
import os, sys
import argparse
import threading
import socket
import re
scriptdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(scriptdir)
if parentdir not in sys.path:
print("Script Dir: %s" % scriptdir)
print("Parent Dir: %s" % parentdir)
sys.path.append(parentdir)
from test_iot_test_template import test_template
class TestI2cMasterAssisted(test_template):
"""
Test class for i2c master tests.
"""
def __init__(self, serial, ip, login, pwd, csv_handler):
self._func_list = [self.test_IotI2CWriteSyncAssisted,
self.test_IotI2CWriteAsyncAssisted,
self.test_IotI2CReadSyncAssisted,
self.test_IotI2CReadAsyncAssisted
]
self._serial = serial
self._ip = ip
self._login = login
self._pwd = pwd
self._cr = csv_handler
shell_script = "%s/test_iot_runonPI_i2c_master.sh" % scriptdir
port = 50007
def i2c_write_test(self, cmd):
"""
Test body of write test.
:param cmd: iot test cmd
:return:
"""
t_shell = threading.Thread(target=self.run_shell_script,
args=(" ".join([self.shell_script, self._ip, self._login, self._pwd, '-s']),))
t_shell.start()
socket.setdefaulttimeout(10)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
time_out = 10
# Wait until connection with the process on rpi is established.
while s.connect_ex((self._ip, self.port)) != 0 and time_out > 0:
time_out -= 1
sleep(1)
if time_out == 0:
print("Socket connection cannot be established")
s.close()
return "Fail"
self._serial.reset_input_buffer()
self._serial.write('\r\n'.encode('utf-8'))
self._serial.write(cmd.encode('utf-8'))
self._serial.write('\r\n'.encode('utf-8'))
res = self._serial.read_until(terminator=serial.to_bytes([ord(c) for c in 'Ignored '])).decode('utf-8')
w_bytes = []
for x in re.sub(r'\r', '', res).split('\n'):
if x.find('IGNORE') != -1:
w_bytes = [s for s in x.split(',') if len(s) == 2]
break
# Retrieve bytes read by rpi.
s.sendall(b's')
try:
r_bytes = s.recv(1024)
except:
print("No data received from rpi.\n", repr(res))
s.close()
return 'Fail'
r_bytes = ["{:02X}".format(b) for b in r_bytes]
# End process on the rpi.
s.sendall(b'E')
t_shell.join()
s.close()
# Compare read and write bytes.
if self.compare_host_dut_result(r_bytes, w_bytes) == -1:
print(repr(res))
return "Fail"
return 'Pass'
def test_IotI2CWriteSyncAssisted(self):
return self.i2c_write_test("iot_tests test 11 1")
def test_IotI2CWriteAsyncAssisted(self):
return self.i2c_write_test("iot_tests test 11 2")
def i2c_read_test(self, cmd):
"""
Test body for read test. The i2c slave callback function in the rpi library is only called after i2c stop. The
register address cannot be read by rpi before restart so the data to send can only be loaded to rpi fifo after
stop. As a result, the first read from host is always the data loaded from last request or some random value if
fifo is never loaded before.
The solution with the current rpi library is to read rpi twice. Compare the second dut read data with the first
rpi send data.
:param cmd: iot test cmd
:return:
"""
w_bytes, r_bytes = ([] for i in range(2))
t_shell = threading.Thread(target=self.run_shell_script,
args=(" ".join([self.shell_script, self._ip, self._login, self._pwd, '-s']),))
t_shell.start()
socket.setdefaulttimeout(10)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
time_out = 10
# Wait until connection with the process on rpi is established.
while s.connect_ex((self._ip, self.port)) != 0 and time_out > 0:
time_out -= 1
sleep(1)
if time_out == 0:
print("Socket connection cannot be established")
s.close()
return "Fail"
for i in range(2):
self._serial.reset_input_buffer()
self._serial.write('\r\n'.encode('utf-8'))
self._serial.write(cmd.encode('utf-8'))
self._serial.write('\r\n'.encode('utf-8'))
res = self._serial.read_until(terminator=serial.to_bytes([ord(c) for c in 'Ignored '])).decode('utf-8')
for x in re.sub(r'\r', '', res).split('\n'):
if x.find('IGNORE') != -1:
r_bytes.append([s for s in x.split(',') if len(s) == 2])
break
# Retrieve bytes sent by rpi
s.sendall(b's')
try:
data = s.recv(1024)
except:
print("No data from pi")
s.close()
return 'Fail'
w_bytes.append(["{:02X}".format(b) for b in data])
# Exit if failed to read bytes from DUT.
if len(r_bytes) != i + 1:
print("No data read by DUT.\n", repr(res))
break
# End process on the rpi.
s.sendall(b'E')
t_shell.join()
s.close()
if len(r_bytes) != 2 or len(w_bytes) != 2:
print("Write and read different number of bytes.\npi:", w_bytes, "\ndut:", r_bytes)
return 'Fail'
# Compare read and write bytes.
if self.compare_host_dut_result(w_bytes[0], r_bytes[1]) == -1:
print(repr(res))
return "Fail"
return 'Pass'
def test_IotI2CReadSyncAssisted(self):
return self.i2c_read_test("iot_tests test 11 3")
def test_IotI2CReadAsyncAssisted(self):
return self.i2c_read_test("iot_tests test 11 4")
# unit test
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--ip', nargs=1, default=[''], help='ip address of rpi')
parser.add_argument('-l', '--login_name', nargs=1, default=[''], help='login name of rpi')
parser.add_argument('-s', '--password', nargs=1, default=[''], help='password of rpi')
parser.add_argument('-p', '--port', nargs=1, default=[''], help='serial port of connected platform')
args = parser.parse_args()
try:
serial_port = serial.Serial(port=args.port[0], timeout=5)
except Exception as e:
print(e)
exit()
rpi_ip = args.ip[0]
rpi_login = args.login_name[0]
rpi_pwd = args.password[0]
    with open(os.path.join(scriptdir, 'test_result.csv'), 'w', newline='') as csvfile:
field_name = ['test name', 'test result']
writer = csv.DictWriter(csvfile, fieldnames=field_name)
writer.writeheader()
t_handler = TestI2cMasterAssisted(serial_port, rpi_ip, rpi_login, rpi_pwd, writer)
t_handler.auto_run()
serial_port.close()
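    # Example invocation of this unit-test block (reader's addition, hypothetical values):
    #   python3 <this_script>.py -i 192.168.1.50 -l pi -s raspberry -p /dev/ttyUSB0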
|
py | 1a4c4cb8c645716cbb0914d2a30188d20f83fde5 | # -*- coding: utf-8 -*-
"""
Dummy conftest.py for aleph_client.
If you don't know what this is for, just leave it empty.
Read more about conftest.py under:
https://pytest.org/latest/plugins.html
"""
# import pytest
|
py | 1a4c4d0cf6ffd99dacc8885c267d60d6da042a5c | from datetime import datetime, date
import re
'''
Each type of activity is mapped to a constant used to compute the daily calorie requirement.
'''
fattore_metabolismo = {"Sedentaria": 1.2, "Leggera": 1.375, "Moderata": 1.55, "Attiva": 1.725, "Molto attiva": 1.9}
'''
The Utente class models a user through data such as name, sex, age,
height, weight, the kind of physical activity they do and the pathologies they have.
Some pathologies have not been fully implemented (e.g. cholesterol).
'''
class Utente:
'''
Utente(int) --> Utente
    Constructor of the Utente class.
    It takes an integer chat_id that uniquely identifies
    a single user.
'''
def __init__(self, chat_id):
self.chat_id = chat_id
'''
    set_utente(string, string, string, date, int, int, string, bool, bool, bool) --> void
    Completes the user record by storing the related personal data.
'''
def set_utente(self, nome, sesso, data, altezza, peso, attivita, b_iper, nefropatia, anemia_sideropenica):
self.nome=nome
self.sesso=sesso
self.set_data(data)
self.altezza=int(altezza)
self.peso=int(peso)
self.attivita=attivita
#self.diabete = b_diab
#self.colesterolo = b_cole
self.iper_tens = bool(int(b_iper))
#self.ipo_tens = b_ipo
self.nefropatia = bool(int(nefropatia))
self.anemia_sideropenica = bool(int(anemia_sideropenica))
'''
set_chat_id(int) --> void
'''
def set_chat_id(self, chat_id):
self.chat_id = chat_id
'''
    set_nome(string) --> void
'''
def set_nome(self, nome):
self.nome = nome
'''
    set_sesso(string) --> void
'''
def set_sesso(self, sesso):
self.sesso = sesso
'''
    set_data(date) --> void
'''
def set_data(self, data_nascita):
try:
self.data_nascita = datetime.strptime(data_nascita, "%d/%m/%Y")
except:
try:
self.data_nascita = datetime.strptime(data_nascita, "%Y-%m-%d")
except:
return False
'''
set_altezza(int) --> void
'''
def set_altezza(self, altezza):
self.altezza = altezza
'''
set_peso(int) --> void
'''
def set_peso(self, peso):
self.peso = peso
'''
    set_attivita(string) --> void
'''
def set_attivita(self, attivita):
self.attivita = attivita
'''
set_diabete(bool) --> void
'''
def set_diabete(self, b):
self.diabete = bool(b)
'''
set_colesterolo(bool) --> void
'''
def set_colesterolo(self, b):
self.colesterolo = bool(b)
'''
set_iper_tens(bool) --> void
'''
def set_iper_tens(self, b):
self.iper_tens = bool(b)
'''
set_ipo_tens(bool) --> void
'''
def set_ipo_tens(self, b):
self.ipo_tens = bool(b)
'''
set_nefropatia(bool) --> void
'''
def set_nefropatia(self, nefropatia):
self.nefropatia = bool(nefropatia)
'''
set_anemia_sideropenica(bool) --> void
'''
def set_anemia_sideropenica(self, anemia_sideropenica):
self.anemia_sideropenica = bool(anemia_sideropenica)
'''
get_chat_id() --> int
'''
def get_chat_id(self):
return self.chat_id
'''
    get_nome() --> string
'''
def get_nome(self):
return self.nome
'''
    get_sesso() --> string
'''
def get_sesso(self):
return self.sesso
'''
    get_data() --> date
'''
def get_data(self):
return self.data_nascita
'''
get_eta() --> int
    Returns the user's age, computed from the date of birth.
'''
def get_eta(self):
if date.today().month <= self.data_nascita.month:
return date.today().year-1 - self.data_nascita.year
else:
return date.today().year - self.data_nascita.year
'''
get_altezza() --> int
'''
def get_altezza(self):
return self.altezza
'''
get_peso() --> int
'''
def get_peso(self):
return self.peso
'''
    get_attivita() --> string
'''
def get_attivita(self):
return self.attivita
'''
get_diabete() --> bool
'''
def get_diabete(self):
return self.diabete
'''
get_colesterolo() --> bool
'''
def get_colesterolo(self):
return self.colesterolo
'''
get_iper_tens() --> bool
'''
def get_iper_tens(self):
return self.iper_tens
'''
get_ipo_tens() --> bool
'''
def get_ipo_tens(self):
return self.ipo_tens
'''
get_nefropatia() --> bool
'''
def get_nefropatia(self):
return bool(self.nefropatia)
'''
get_anemia_sideropenica() --> bool
'''
def get_anemia_sideropenica(self):
return bool(self.anemia_sideropenica)
'''
    fabbisogno_calorico() --> float
    Computes the user's daily calorie requirement from sex, height, weight, age
    and the kind of activity they do (Mifflin-St Jeor BMR scaled by the activity factor).
'''
def fabbisogno_calorico(self):
if self.sesso == "Maschio":
return ((self.peso*10) + (self.altezza*6.25) - (5*self.get_eta()) + 5) * fattore_metabolismo[self.attivita]
elif self.sesso == "Femmina":
return ((self.peso*10) + (self.altezza*6.25) - (5*self.get_eta())-161) * fattore_metabolismo[self.attivita]
'''
    __str__() --> string
'''
def __str__(self):
return "Nome: " + str(self.nome) + "\nSesso: " + str(self.sesso) + \
"\nEtà: " + str(self.get_eta()) + "\nAltezza: " + str(self.altezza) + "cm\nPeso: " + str(self.peso) +\
"kg\nAttività fisica: " + str(self.attivita) + "\nFabbisogno calorico: " +\
str(round(self.fabbisogno_calorico(), 2))
'''
    can_eat(Food) --> string
    Takes a Food object and, based on the user's pathologies, determines
    whether or not the user may eat the dish.
    Returns a string explaining whether the dish can be eaten.
'''
def can_eat(self, cibo):
feedback = ""
if self.get_nefropatia():
risposta = cibo.can_eat_nefropatia(self)
if risposta == "Sconsigliato":
feedback = "Puoi mangiare questo piatto ma ti sconsigliamo di mangiare altri piatti contenente troppe proteine o sodio."
elif risposta == "Proibito":
return "Questo piatto contiene troppe proteine e sodio per il tuo metabolismo, ti consigliamo di non mangiarlo."
elif risposta == "No_info":
return "Non ho abbastanza informazioni su questa pietanza."
elif self.get_iper_tens():
risposta = cibo.can_eat_iperteso()
if risposta == "Sconsigliato":
feedback = "Puoi mangiare questo piatto ma ti sconsigliamo di mangiare altri piatti contenente troppo sodio."
elif risposta == "Proibito":
return "Questo piatto contiene troppo sodio per il tuo metabolismo, ti consigliamo di non mangiarlo."
elif risposta == "No_info":
return "Non ho abbastanza informazioni su questa pietanza."
if self.get_anemia_sideropenica():
risposta = cibo.can_eat_anemico(self)
if risposta == "Sconsigliato":
feedback = "Puoi mangiare questo piatto ma ti consigliamo di mangiare anche piatti aventi più ferro."
elif risposta == "No_info":
return "Non ho abbastanza informazioni su questa pietanza."
if feedback == "":
return "Puoi mangiare tranquillamente questo cibo!"
else:
return feedback
'''
controllo_nome(string) --> bool | controllo_nome(string) --> string
Uses a regular expression to return the user's name in a clean form:
any character that cannot appear in a name is removed.
'''
def controllo_nome(nome):
regex = re.findall("\D+", nome)
char = ""
if regex:
for c in regex:
char += c
return char
return False
'''
controllo_formato_data(date) --> bool | controllo_formato_data(date) --> date
Returns the date if it was entered in the expected format (and the user is older than 13), otherwise returns False.
'''
def controllo_formato_data(data):
try:
n_data = datetime.strptime(data, "%d/%m/%Y")
if (date.today().year - n_data.year) > 13:
return data
return False
except ValueError:
return False
'''
controllo_cifre(string) --> bool | controllo_cifre(string) --> float
Extracts the numeric value from the input string and returns it as a float, or False if no number is found.
'''
def controllo_cifre(numero):
regex = re.findall("[\d]*\.[\d]*|\d+", numero)
char = ""
if regex:
for c in regex:
char += c
return float(char)
return False
'''
controllo_altezza(string) --> bool | controllo_altezza(string) --> float
Checks that the height lies within the allowed limits (150-210 cm).
'''
def controllo_altezza(altezza):
h = controllo_cifre(altezza)
if 150 < h < 210:
return h
else:
return False
'''
controllo_peso(string) --> bool | controllo_peso(string) --> float
Checks that the weight lies within the allowed limits (40-150 kg).
'''
def controllo_peso(peso):
w = controllo_cifre(peso)
if 40 < w < 150:
return w
else:
return False
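# Minimal usage sketch (reader's addition, hypothetical values): build a user and print
# the profile; the last field of __str__ is the daily calorie requirement computed by
# fabbisogno_calorico() (Mifflin-St Jeor BMR scaled by the activity factor).
if __name__ == '__main__':
    utente = Utente(chat_id=1)
    utente.set_utente("Mario", "Maschio", "01/01/1990", 180, 75, "Moderata", 0, 0, 0)
    print(utente)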
|
py | 1a4c4f67394787bf0abafe8f63254fd72d1540f2 | '''
Implements the targetcli target related UI.
This file is part of targetcli.
Copyright (c) 2011-2014 by Datera, Inc
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
'''
from ui_node import UINode, UIRTSLibNode
from ui_backstore import dedup_so_name
from rtslib import RTSLibError, RTSLibBrokenLink, utils
from rtslib import NodeACL, NetworkPortal, MappedLUN
from rtslib import Target, TPG, LUN
class UIFabricModule(UIRTSLibNode):
'''
A fabric module UI.
'''
def __init__(self, fabric_module, parent):
UIRTSLibNode.__init__(self, fabric_module.name, fabric_module, parent)
self.cfs_cwd = fabric_module.path
self.refresh()
if self.rtsnode.has_feature('discovery_auth'):
for param in ['userid', 'password',
'mutual_userid', 'mutual_password',
'enable']:
self.define_config_group_param('discovery_auth',
param, 'string')
self.refresh()
def ui_getgroup_discovery_auth(self, auth_attr):
'''
This is the backend method for getting discovery_auth attributes.
@param auth_attr: The auth attribute to get the value of.
@type auth_attr: str
@return: The auth attribute's value
@rtype: str
'''
value = None
if auth_attr == 'password':
value = self.rtsnode.discovery_password
elif auth_attr == 'userid':
value = self.rtsnode.discovery_userid
elif auth_attr == 'mutual_password':
value = self.rtsnode.discovery_mutual_password
elif auth_attr == 'mutual_userid':
value = self.rtsnode.discovery_mutual_userid
elif auth_attr == 'enable':
value = self.rtsnode.discovery_enable_auth
return value
def ui_setgroup_discovery_auth(self, auth_attr, value):
'''
This is the backend method for setting discovery auth attributes.
@param auth_attr: The auth attribute to set the value of.
@type auth_attr: str
@param value: The auth's value
@type value: str
'''
self.assert_root()
if value is None:
value = ''
if auth_attr == 'password':
self.rtsnode.discovery_password = value
elif auth_attr == 'userid':
self.rtsnode.discovery_userid = value
elif auth_attr == 'mutual_password':
self.rtsnode.discovery_mutual_password = value
elif auth_attr == 'mutual_userid':
self.rtsnode.discovery_mutual_userid = value
elif auth_attr == 'enable':
self.rtsnode.discovery_enable_auth = value
def refresh(self):
self._children = set([])
for target in self.rtsnode.targets:
self.shell.log.debug("Found target %s under fabric module %s."
% (target.wwn, target.fabric_module))
if target.has_feature('tpgts'):
UIMultiTPGTarget(target, self)
else:
UITarget(target, self)
def summary(self):
no_targets = len(self._children)
if no_targets != 1:
msg = "%d Targets" % no_targets
else:
msg = "%d Target" % no_targets
return (msg, None)
def ui_command_create(self, wwn=None):
'''
Creates a new target. The I{wwn} format depends on the transport(s)
        supported by the fabric module. If the I{wwn} is omitted, then a
target will be created using either a randomly generated WWN of the
proper type, or the first unused WWN in the list of possible WWNs if
one is available. If WWNs are constrained to a list (i.e. for hardware
targets addresses) and all WWNs are in use, the target creation will
        fail. Use the B{info} command to get more information about WWN type
and possible values.
SEE ALSO
========
B{info}
'''
self.assert_root()
target = Target(self.rtsnode, wwn, mode='create')
wwn = target.wwn
if target.has_feature('tpgts'):
ui_target = UIMultiTPGTarget(target, self)
self.shell.log.info("Created target %s." % wwn)
return ui_target.ui_command_create()
else:
ui_target = UITarget(target, self)
self.shell.log.info("Created target %s." % wwn)
return self.new_node(ui_target)
def ui_complete_create(self, parameters, text, current_param):
'''
Parameter auto-completion method for user command create.
@param parameters: Parameters on the command line.
@type parameters: dict
@param text: Current text of parameter being typed by the user.
@type text: str
@param current_param: Name of parameter to complete.
@type current_param: str
@return: Possible completions
@rtype: list of str
'''
spec = self.rtsnode.spec
if current_param == 'wwn' and spec['wwn_list'] is not None:
existing_wwns = [child.wwn for child in self.rtsnode.targets]
completions = [wwn for wwn in spec['wwn_list']
if wwn.startswith(text)
if wwn not in existing_wwns]
else:
completions = []
if len(completions) == 1:
return [completions[0] + ' ']
else:
return completions
def ui_command_delete(self, wwn):
'''
Recursively deletes the target with the specified I{wwn}, and all
objects hanging under it.
SEE ALSO
========
B{create}
'''
self.assert_root()
target = Target(self.rtsnode, wwn, mode='lookup')
target.delete()
self.shell.log.info("Deleted Target %s." % wwn)
self.refresh()
def ui_complete_delete(self, parameters, text, current_param):
'''
Parameter auto-completion method for user command delete.
@param parameters: Parameters on the command line.
@type parameters: dict
@param text: Current text of parameter being typed by the user.
@type text: str
@param current_param: Name of parameter to complete.
@type current_param: str
@return: Possible completions
@rtype: list of str
'''
if current_param == 'wwn':
wwns = [child.name for child in self.children]
completions = [wwn for wwn in wwns if wwn.startswith(text)]
else:
completions = []
if len(completions) == 1:
return [completions[0] + ' ']
else:
return completions
def ui_command_info(self):
'''
Displays information about the fabric module, notably the supported
transports(s) and accepted B{wwn} format(s), as long as supported
features.
'''
spec = self.rtsnode.spec
self.shell.log.info("Fabric module name: %s" % self.name)
self.shell.log.info("ConfigFS path: %s" % self.rtsnode.path)
if spec['wwn_list'] is not None:
self.shell.log.info("Allowed WWNs list (%s type): %s"
% (spec['wwn_type'],
', '.join(spec['wwn_list'])))
else:
self.shell.log.info("Supported WWN type: %s" % spec['wwn_type'])
self.shell.log.info("Fabric module specfile: %s"
% self.rtsnode.spec_file)
self.shell.log.info("Fabric module features: %s"
% ', '.join(spec['features']))
self.shell.log.info("Corresponding kernel module: %s"
% spec['kernel_module'])
def ui_command_version(self):
'''
Displays the target fabric module version.
'''
version = "Target fabric module %s: %s" \
% (self.rtsnode.name, self.rtsnode.version)
self.shell.con.display(version.strip())
class UIMultiTPGTarget(UIRTSLibNode):
'''
A generic target UI that has multiple TPGs.
'''
def __init__(self, target, parent):
UIRTSLibNode.__init__(self, target.wwn, target, parent)
self.cfs_cwd = target.path
self.refresh()
def refresh(self):
self._children = set([])
for tpg in self.rtsnode.tpgs:
UITPG(tpg, self)
def summary(self):
if not self.rtsnode.fabric_module.is_valid_wwn(self.rtsnode.wwn):
description = "INVALID WWN"
is_healthy = False
else:
is_healthy = None
no_tpgs = len(self._children)
if no_tpgs != 1:
description = "%d TPGs" % no_tpgs
else:
description = "%d TPG" % no_tpgs
return (description, is_healthy)
def ui_command_create(self, tag=None):
'''
Creates a new Target Portal Group within the target. The I{tag} must be
a strictly positive integer value. If omitted, the next available
Target Portal Group Tag (TPG) will be used.
SEE ALSO
========
B{delete}
'''
self.assert_root()
if tag is None:
tags = [tpg.tag for tpg in self.rtsnode.tpgs]
for index in range(1048576):
if index not in tags and index > 0:
tag = index
break
if tag is None:
self.shell.log.error("Cannot find an available TPG Tag.")
return
else:
self.shell.log.info("Selected TPG Tag %d." % tag)
else:
try:
tag = int(tag)
except ValueError:
self.shell.log.error("The TPG Tag must be an integer value.")
return
else:
if tag < 1:
self.shell.log.error("The TPG Tag must be >0.")
return
tpg = TPG(self.rtsnode, tag, mode='create')
if self.shell.prefs['auto_enable_tpgt']:
tpg.enable = True
self.shell.log.info("Created TPG %s." % tpg.tag)
ui_tpg = UITPG(tpg, self)
return self.new_node(ui_tpg)
def ui_command_delete(self, tag):
'''
Deletes the Target Portal Group with TPG I{tag} from the target. The
I{tag} must be a positive integer matching an existing TPG.
SEE ALSO
========
B{create}
'''
self.assert_root()
tpg = TPG(self.rtsnode, tag, mode='lookup')
tpg.delete()
self.shell.log.info("Deleted TPG %s." % tag)
self.refresh()
def ui_complete_delete(self, parameters, text, current_param):
'''
Parameter auto-completion method for user command delete.
@param parameters: Parameters on the command line.
@type parameters: dict
@param text: Current text of parameter being typed by the user.
@type text: str
@param current_param: Name of parameter to complete.
@type current_param: str
@return: Possible completions
@rtype: list of str
'''
if current_param == 'tag':
tags = [child.name[4:] for child in self.children]
completions = [tag for tag in tags if tag.startswith(text)]
else:
completions = []
if len(completions) == 1:
return [completions[0] + ' ']
else:
return completions
class UITPG(UIRTSLibNode):
'''
A generic TPG UI.
'''
def __init__(self, tpg, parent):
name = "tpg%d" % tpg.tag
UIRTSLibNode.__init__(self, name, tpg, parent)
self.cfs_cwd = tpg.path
self.refresh()
UILUNs(tpg, self)
if tpg.has_feature('acls'):
UINodeACLs(self.rtsnode, self)
if tpg.has_feature('nps'):
UIPortals(self.rtsnode, self)
def summary(self):
if self.rtsnode.has_feature('nexus'):
description = ("%s" % self.rtsnode.nexus, True)
elif self.rtsnode.enable:
description = ("enabled", True)
else:
description = ("disabled", False)
return description
def ui_command_enable(self):
'''
Enables the TPG.
SEE ALSO
========
B{disable status}
'''
self.assert_root()
if self.rtsnode.enable:
self.shell.log.info("The TPG is already enabled.")
else:
self.rtsnode.enable = True
self.shell.log.info("The TPG has been enabled.")
def ui_command_disable(self):
'''
Disables the TPG.
SEE ALSO
========
B{enable status}
'''
self.assert_root()
if self.rtsnode.enable:
self.rtsnode.enable = False
self.shell.log.info("The TPG has been disabled.")
else:
self.shell.log.info("The TPG is already disabled.")
class UITarget(UITPG):
'''
A generic target UI merged with its only TPG.
'''
def __init__(self, target, parent):
UITPG.__init__(self, TPG(target, 1), parent)
self._name = target.wwn
self.target = target
self.rtsnode.enable = True
def summary(self):
if not self.target.fabric_module.is_valid_wwn(self.target.wwn):
return ("INVALID WWN", False)
else:
return UITPG.summary(self)
class UINodeACLs(UINode):
'''
A generic UI for node ACLs.
'''
def __init__(self, tpg, parent):
UINode.__init__(self, "acls", parent)
self.tpg = tpg
self.cfs_cwd = "%s/acls" % tpg.path
self.refresh()
def refresh(self):
self._children = set([])
for node_acl in self.tpg.node_acls:
UINodeACL(node_acl, self)
def summary(self):
no_acls = len(self._children)
if no_acls != 1:
msg = "%d ACLs" % no_acls
else:
msg = "%d ACL" % no_acls
return (msg, None)
def ui_command_create(self, wwn, add_mapped_luns=None):
'''
Creates a Node ACL for the initiator node with the specified I{wwn}.
The node's I{wwn} must match the expected WWN Type of the target's
fabric module.
If I{add_mapped_luns} is omitted, the global parameter
B{auto_add_mapped_luns} will be used, else B{true} or B{false} are
accepted. If B{true}, then after creating the ACL, mapped LUNs will be
automatically created for all existing LUNs.
SEE ALSO
========
B{delete}
'''
self.assert_root()
spec = self.tpg.parent_target.fabric_module.spec
if not utils.is_valid_wwn(spec['wwn_type'], wwn):
self.shell.log.error("'%s' is not a valid %s WWN."
% (wwn, spec['wwn_type']))
return
add_mapped_luns = \
self.ui_eval_param(add_mapped_luns, 'bool',
self.shell.prefs['auto_add_mapped_luns'])
try:
node_acl = NodeACL(self.tpg, wwn, mode="create")
except RTSLibError, msg:
self.shell.log.error(msg)
return
else:
self.shell.log.info("Created Node ACL for %s"
% node_acl.node_wwn)
ui_node_acl = UINodeACL(node_acl, self)
if add_mapped_luns:
for lun in self.tpg.luns:
MappedLUN(node_acl, lun.lun, lun.lun, write_protect=False)
self.shell.log.info("Created mapped LUN %d." % lun.lun)
self.refresh()
return self.new_node(ui_node_acl)
def ui_command_delete(self, wwn):
'''
Deletes the Node ACL with the specified I{wwn}.
SEE ALSO
========
B{create}
'''
self.assert_root()
node_acl = NodeACL(self.tpg, wwn, mode='lookup')
node_acl.delete()
self.shell.log.info("Deleted Node ACL %s." % wwn)
self.refresh()
def ui_complete_delete(self, parameters, text, current_param):
'''
Parameter auto-completion method for user command delete.
@param parameters: Parameters on the command line.
@type parameters: dict
@param text: Current text of parameter being typed by the user.
@type text: str
@param current_param: Name of parameter to complete.
@type current_param: str
@return: Possible completions
@rtype: list of str
'''
if current_param == 'wwn':
wwns = [acl.node_wwn for acl in self.tpg.node_acls]
completions = [wwn for wwn in wwns if wwn.startswith(text)]
else:
completions = []
if len(completions) == 1:
return [completions[0] + ' ']
else:
return completions
class UINodeACL(UIRTSLibNode):
'''
A generic UI for a node ACL.
'''
def __init__(self, node_acl, parent):
UIRTSLibNode.__init__(self, node_acl.node_wwn, node_acl, parent)
self.define_config_group_param(
'attribute', 'tcq_depth', 'string', "Command queue depth.", True)
self.cfs_cwd = node_acl.path
self.refresh()
def ui_getgroup_attribute(self, attribute):
'''
This is the backend method for getting attributes.
@param attribute: The attribute to get the value of.
@type attribute: str
@return: The attribute's value
@rtype: arbitrary
'''
if attribute == 'tcq_depth':
return self.rtsnode.tcq_depth
else:
return self.rtsnode.get_attribute(attribute)
def ui_setgroup_attribute(self, attribute, value):
'''
This is the backend method for setting attributes.
@param attribute: The attribute to set the value of.
@type attribute: str
@param value: The attribute's value
@type value: arbitrary
'''
self.assert_root()
if attribute == 'tcq_depth':
self.rtsnode.tcq_depth = value
else:
self.rtsnode.set_attribute(attribute, value)
def refresh(self):
self._children = set([])
for mlun in self.rtsnode.mapped_luns:
UIMappedLUN(mlun, self)
def summary(self):
no_mluns = len(self._children)
if no_mluns != 1:
msg = "%d Mapped LUNs" % no_mluns
else:
msg = "%d Mapped LUN" % no_mluns
return (msg, None)
def ui_command_create(self, mapped_lun, tpg_lun, write_protect=None):
'''
Creates a mapping to one of the TPG LUNs for the initiator referenced
by the ACL. The provided I{tpg_lun} will appear to that initiator as
LUN I{mapped_lun}. If the I{write_protect} flag is set to B{1}, the
initiator will not have write access to the Mapped LUN.
SEE ALSO
========
B{delete}
'''
self.assert_root()
try:
tpg_lun = int(tpg_lun)
mapped_lun = int(mapped_lun)
except ValueError:
self.shell.log.error("Incorrect LUN value.")
return
mlun = MappedLUN(self.rtsnode, mapped_lun, tpg_lun, write_protect)
ui_mlun = UIMappedLUN(mlun, self)
self.shell.log.info("Created Mapped LUN %s." % mlun.mapped_lun)
return self.new_node(ui_mlun)
def ui_command_delete(self, mapped_lun):
'''
Deletes the specified I{mapped_lun}.
SEE ALSO
========
B{create}
'''
self.assert_root()
mlun = MappedLUN(self.rtsnode, mapped_lun)
mlun.delete()
self.shell.log.info("Deleted Mapped LUN %s." % mapped_lun)
self.refresh()
def ui_complete_delete(self, parameters, text, current_param):
'''
Parameter auto-completion method for user command delete.
@param parameters: Parameters on the command line.
@type parameters: dict
@param text: Current text of parameter being typed by the user.
@type text: str
@param current_param: Name of parameter to complete.
@type current_param: str
@return: Possible completions
@rtype: list of str
'''
if current_param == 'mapped_lun':
mluns = [str(mlun.mapped_lun) for mlun in self.rtsnode.mapped_luns]
completions = [mlun for mlun in mluns if mlun.startswith(text)]
else:
completions = []
if len(completions) == 1:
return [completions[0] + ' ']
else:
return completions
class UIMappedLUN(UIRTSLibNode):
'''
A generic UI for MappedLUN objects.
'''
def __init__(self, mapped_lun, parent):
name = "mapped_lun%d" % mapped_lun.mapped_lun
UIRTSLibNode.__init__(self, name, mapped_lun, parent)
self.cfs_cwd = mapped_lun.path
self.refresh()
def summary(self):
mapped_lun = self.rtsnode
is_healthy = True
try:
tpg_lun = mapped_lun.tpg_lun
except RTSLibBrokenLink:
description = "BROKEN LUN LINK"
is_healthy = False
else:
if mapped_lun.write_protect:
access_mode = 'ro'
else:
access_mode = 'rw'
description = "lun%d (%s)" % (tpg_lun.lun, access_mode)
return (description, is_healthy)
class UILUNs(UINode):
'''
A generic UI for TPG LUNs.
'''
def __init__(self, tpg, parent):
UINode.__init__(self, "luns", parent)
self.cfs_cwd = "%s/lun" % tpg.path
self.tpg = tpg
self.refresh()
def refresh(self):
self._children = set([])
for lun in self.tpg.luns:
UILUN(lun, self)
def summary(self):
no_luns = len(self._children)
if no_luns != 1:
msg = "%d LUNs" % no_luns
else:
msg = "%d LUN" % no_luns
return (msg, None)
def ui_command_create(self, storage_object, lun=None,
add_mapped_luns=None):
'''
Creates a new LUN in the Target Portal Group, attached to a storage
object. If the I{lun} parameter is omitted, the first available LUN in
the TPG will be used. If present, it must be a number greater than 0.
Alternatively, the syntax I{lunX} where I{X} is a positive number is
also accepted.
The I{storage_object} must be the path of an existing storage object,
i.e. B{/backstore/pscsi0/mydisk} to reference the B{mydisk} storage
object of the virtual HBA B{pscsi0}.
If I{add_mapped_luns} is omitted, the global parameter
B{auto_add_mapped_luns} will be used, else B{true} or B{false} are
accepted. If B{true}, then after creating the LUN, mapped LUNs will be
automatically created for all existing node ACLs, mapping the new LUN.
SEE ALSO
========
B{delete}
'''
self.assert_root()
if lun is None:
luns = [lun.lun for lun in self.tpg.luns]
for index in range(1048576):
if index not in luns:
lun = index
break
if lun is None:
self.shell.log.error("Cannot find an available LUN.")
return
else:
self.shell.log.info("Selected LUN %d." % lun)
else:
try:
if lun.startswith('lun'):
lun = lun[3:]
lun = int(lun)
except ValueError:
self.shell.log.error("The LUN must be an integer value.")
return
else:
if lun < 0:
self.shell.log.error("The LUN cannot be negative.")
return
add_mapped_luns = \
self.ui_eval_param(add_mapped_luns, 'bool',
self.shell.prefs['auto_add_mapped_luns'])
try:
storage_object = self.get_node(storage_object).rtsnode
except ValueError:
self.shell.log.error("Invalid storage object %s." % storage_object)
return
lun_object = LUN(self.tpg, lun, storage_object)
self.shell.log.info("Created LUN %s." % lun_object.lun)
ui_lun = UILUN(lun_object, self)
if add_mapped_luns:
for acl in self.tpg.node_acls:
mapped_lun = lun
existing_mluns = [mlun.mapped_lun for mlun in acl.mapped_luns]
if mapped_lun in existing_mluns:
tentative_mlun = 0
while mapped_lun == lun:
if tentative_mlun not in existing_mluns:
mapped_lun = tentative_mlun
self.shell.log.warning(
"Mapped LUN %d already " % lun
+ "exists in ACL %s, using %d instead."
% (acl.node_wwn, mapped_lun))
else:
tentative_mlun += 1
mlun = MappedLUN(acl, mapped_lun, lun, write_protect=False)
self.shell.log.info("Created mapped LUN %d in node ACL %s"
% (mapped_lun, acl.node_wwn))
self.parent.refresh()
return self.new_node(ui_lun)
def ui_complete_create(self, parameters, text, current_param):
'''
Parameter auto-completion method for user command create.
@param parameters: Parameters on the command line.
@type parameters: dict
@param text: Current text of parameter being typed by the user.
@type text: str
@param current_param: Name of parameter to complete.
@type current_param: str
@return: Possible completions
@rtype: list of str
'''
if current_param == 'storage_object':
storage_objects = []
for backstore in self.get_node('/backstores').children:
for storage_object in backstore.children:
storage_objects.append(storage_object.path)
completions = [so for so in storage_objects if so.startswith(text)]
else:
completions = []
if len(completions) == 1:
return [completions[0] + ' ']
else:
return completions
def ui_command_delete(self, lun):
'''
Deletes the supplied LUN from the Target Portal Group. The I{lun} must
be a positive number matching an existing LUN.
Alternatively, the syntax I{lunX} where I{X} is a positive number is
also accepted.
SEE ALSO
========
B{create}
'''
self.assert_root()
if lun.lower().startswith("lun"):
lun = lun[3:]
try:
lun = int(lun)
lun_object = LUN(self.tpg, lun)
except:
raise RTSLibError("Invalid LUN")
lun_object.delete()
self.shell.log.info("Deleted LUN %s." % lun)
# Refresh the TPG as we need to also refresh acls MappedLUNs
self.parent.refresh()
def ui_complete_delete(self, parameters, text, current_param):
'''
Parameter auto-completion method for user command delete.
@param parameters: Parameters on the command line.
@type parameters: dict
@param text: Current text of parameter being typed by the user.
@type text: str
@param current_param: Name of parameter to complete.
@type current_param: str
@return: Possible completions
@rtype: list of str
'''
if current_param == 'lun':
luns = [str(lun.lun) for lun in self.tpg.luns]
completions = [lun for lun in luns if lun.startswith(text)]
else:
completions = []
if len(completions) == 1:
return [completions[0] + ' ']
else:
return completions
class UILUN(UIRTSLibNode):
'''
A generic UI for LUN objects.
'''
def __init__(self, lun, parent):
name = "lun%d" % lun.lun
UIRTSLibNode.__init__(self, name, lun, parent)
self.cfs_cwd = lun.path
self.refresh()
def summary(self):
lun = self.rtsnode
is_healthy = True
try:
storage_object = lun.storage_object
except RTSLibBrokenLink:
description = "BROKEN STORAGE LINK"
is_healthy = False
else:
backstore = storage_object.backstore
if backstore.plugin.startswith("rd"):
path = "ramdisk"
else:
path = storage_object.udev_path
if self.shell.prefs['legacy_hba_view']:
description = "%s%s/%s (%s)" % (backstore.plugin,
backstore.index,
storage_object.name, path)
else:
description = "%s/%s (%s)" % (backstore.plugin,
dedup_so_name(storage_object),
path)
return (description, is_healthy)
class UIPortals(UINode):
'''
A generic UI for TPG network portals.
'''
def __init__(self, tpg, parent):
UINode.__init__(self, "portals", parent)
self.tpg = tpg
self.cfs_cwd = "%s/np" % tpg.path
self.refresh()
def refresh(self):
self._children = set([])
for portal in self.tpg.network_portals:
UIPortal(portal, self)
def summary(self):
no_portals = len(self._children)
if no_portals != 1:
msg = "%d Portals" % no_portals
else:
msg = "%d Portal" % no_portals
return (msg, None)
def ui_command_create(self, ip_address=None, ip_port=None):
'''
Creates a Network Portal with specified I{ip_address} and I{ip_port}.
If I{ip_port} is omitted, the default port for the target fabric will
be used. If I{ip_address} is omitted, the first IP address found
matching the local hostname will be used.
SEE ALSO
========
B{delete}
'''
self.assert_root()
if ip_port is None:
# FIXME: Add a specfile parameter to determine that
ip_port = 3260
self.shell.log.info("Using default IP port %d" % ip_port)
if ip_address is None:
if not ip_address:
ip_address = utils.get_main_ip()
if ip_address:
self.shell.log.info("Automatically selected IP address %s."
% ip_address)
else:
self.shell.log.error("Cannot find a usable IP address to "
+ "create the Network Portal.")
return
elif ip_address not in utils.list_eth_ips():
self.shell.log.error("IP address does not exist: %s" % ip_address)
return
try:
ip_port = int(ip_port)
except ValueError:
self.shell.log.error("The ip_port must be an integer value.")
return
portal = NetworkPortal(self.tpg, ip_address, ip_port, mode='create')
self.shell.log.info("Created network portal %s:%d."
% (ip_address, ip_port))
ui_portal = UIPortal(portal, self)
return self.new_node(ui_portal)
def ui_complete_create(self, parameters, text, current_param):
'''
Parameter auto-completion method for user command create.
@param parameters: Parameters on the command line.
@type parameters: dict
@param text: Current text of parameter being typed by the user.
@type text: str
@param current_param: Name of parameter to complete.
@type current_param: str
@return: Possible completions
@rtype: list of str
'''
if current_param == 'ip_address':
completions = [addr for addr in utils.list_eth_ips()
if addr.startswith(text)]
else:
completions = []
if len(completions) == 1:
return [completions[0] + ' ']
else:
return completions
def ui_command_delete(self, ip_address, ip_port):
'''
Deletes the Network Portal with specified I{ip_address} and I{ip_port}.
SEE ALSO
========
B{create}
'''
self.assert_root()
portal = NetworkPortal(self.tpg, ip_address, ip_port, mode='lookup')
portal.delete()
self.shell.log.info("Deleted network portal %s:%s"
% (ip_address, ip_port))
self.refresh()
def ui_complete_delete(self, parameters, text, current_param):
'''
Parameter auto-completion method for user command delete.
@param parameters: Parameters on the command line.
@type parameters: dict
@param text: Current text of parameter being typed by the user.
@type text: str
@param current_param: Name of parameter to complete.
@type current_param: str
@return: Possible completions
@rtype: list of str
'''
completions = []
# TODO: Check if a dict comprehension is acceptable here with supported
# XXX: python versions.
portals = {}
all_ports = set([])
for portal in self.tpg.network_portals:
all_ports.add(str(portal.port))
if not portal.ip_address in portals:
portals[portal.ip_address] = []
portals[portal.ip_address].append(str(portal.port))
if current_param == 'ip_address':
if 'ip_port' in parameters:
port = parameters['ip_port']
completions = [addr for addr in portals
if port in portals[addr]
if addr.startswith(text)]
else:
completions = [addr for addr in portals
if addr.startswith(text)]
elif current_param == 'ip_port':
if 'ip_address' in parameters:
addr = parameters['ip_address']
if addr in portals:
completions = [port for port in portals[addr]
if port.startswith(text)]
else:
completions = [port for port in all_ports
if port.startswith(text)]
if len(completions) == 1:
return [completions[0] + ' ']
else:
return completions
class UIPortal(UIRTSLibNode):
'''
A generic UI for a network portal.
'''
def __init__(self, portal, parent):
name = "%s:%s" % (portal.ip_address, portal.port)
UIRTSLibNode.__init__(self, name, portal, parent)
self.cfs_cwd = portal.path
self.portal = portal
self.refresh()
def summary(self):
if self.portal._get_iser_attr():
return ('OK, iser enabled', True)
else:
return ('OK, iser disabled', True)
def ui_command_iser_enable(self):
'''
        Enables iser operation on a network portal.
'''
if self.portal._get_iser_attr() == True:
self.shell.log.info("iser operation has already been enabled")
else:
self.portal._set_iser_attr(True)
self.shell.log.info("iser operation has been enabled")
def ui_command_iser_disable(self):
'''
        Disables iser operation on a network portal.
'''
if self.portal._get_iser_attr() == False:
self.shell.log.info("iser operation has already been disabled")
else:
self.portal._set_iser_attr(False)
self.shell.log.info("iser operation has been disabled")
|
py | 1a4c51a4117d07b32e172dc2bca2afa48b7551f8 | from app.exceptions.services.base_service_exception import BaseServiceException
class ActorServiceException(BaseServiceException):
def __init__(self, status_code: int, message: str) -> None:
super().__init__(status_code=status_code, message=message)
|