ext (stringclasses, 9 values) | sha (stringlengths, 40) | content (stringlengths, 3 to 1.04M)
---|---|---|
py | 1a4e474aa57a66174ffe3ab5a213a71c2dd9ca7f | #%%
import numpy as np
import pandas as pd
import tqdm
import vdj.io
import vdj.bayes
import vdj.stats
# Load data and stan model
data = pd.read_csv('../../data/compiled_dwell_times.csv')
model = vdj.bayes.StanModel('../stan/pooled_exponential_sum.stan', force_compile=True)
#%%
# Iterate through the data and fit while storing thinned samples
samps_df = []
stats_df = []
for g, d in tqdm.tqdm(data.groupby(['mutant', 'salt'])):
_, samples = model.sample({'N':len(d), 'dwell':d['dwell_time_min']},
control=dict(adapt_delta=0.9), iter=5000)
stats = model.summary()
# Parse the mutant
mut = vdj.io.mutation_parser(g[0])
# Add identifiers and append
samples['mutant'] = g[0]
samples['seq'] = mut['seq']
samples['n_muts'] = mut['n_muts']
samples['salt'] = g[1]
stats['mutant'] = g[0]
stats['seq'] = mut['seq']
stats['n_muts'] = mut['n_muts']
stats['salt'] = g[1]
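    # Thin the chains by keeping every 10th posterior draw to reduce the size of the saved CSV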
samps_df.append(samples.iloc[::10])
stats_df.append(stats)
# Concatenate and save the dataframes
samps = pd.concat(samps_df)
samps.to_csv('../../data/sum_expon_samples.csv', index=False)
stats = pd.concat(stats_df)
stats.to_csv('../../data/sum_expon_summary.csv', index=False)
#%%
|
py | 1a4e480616cd2943a62553af94c48c3774d6f302 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements triplet loss."""
import tensorflow as tf
from tensorflow_addons.losses import metric_learning
from tensorflow_addons.utils.keras_utils import LossFunctionWrapper
from tensorflow_addons.utils.types import FloatTensorLike, TensorLike
from typeguard import typechecked
from typing import Optional, Union, Callable
def _masked_maximum(data, mask, dim=1):
"""Computes the axis wise maximum over chosen elements.
Args:
data: 2-D float `Tensor` of size [n, m].
mask: 2-D Boolean `Tensor` of size [n, m].
dim: The dimension over which to compute the maximum.
Returns:
masked_maximums: N-D `Tensor`.
The maximized dimension is of size 1 after the operation.
"""
axis_minimums = tf.math.reduce_min(data, dim, keepdims=True)
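    # Subtracting the row-wise minimum before applying the mask ensures that
    # masked-out (zeroed) entries can never exceed the true maximum of the
    # selected elements; the minimum is added back after the reduction.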
masked_maximums = (
tf.math.reduce_max(
tf.math.multiply(data - axis_minimums, mask), dim, keepdims=True
)
+ axis_minimums
)
return masked_maximums
def _masked_minimum(data, mask, dim=1):
"""Computes the axis wise minimum over chosen elements.
Args:
data: 2-D float `Tensor` of size [n, m].
mask: 2-D Boolean `Tensor` of size [n, m].
dim: The dimension over which to compute the minimum.
Returns:
masked_minimums: N-D `Tensor`.
The minimized dimension is of size 1 after the operation.
"""
axis_maximums = tf.math.reduce_max(data, dim, keepdims=True)
masked_minimums = (
tf.math.reduce_min(
tf.math.multiply(data - axis_maximums, mask), dim, keepdims=True
)
+ axis_maximums
)
return masked_minimums
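# Illustrative sketch (not part of the original module): for a toy distance row
#   data = tf.constant([[1., 5., 3.]])
#   mask = tf.constant([[1., 0., 1.]])
# _masked_maximum(data, mask) gives [[3.]] and _masked_minimum(data, mask) gives
# [[1.]], i.e. the extrema taken only over the columns selected by the mask.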
@tf.keras.utils.register_keras_serializable(package="Addons")
@tf.function
def triplet_semihard_loss(
y_true: TensorLike,
y_pred: TensorLike,
margin: FloatTensorLike = 1.0,
distance_metric: Union[str, Callable] = "L2",
) -> tf.Tensor:
"""Computes the triplet loss with semi-hard negative mining.
Args:
y_true: 1-D integer `Tensor` with shape [batch_size] of
multiclass integer labels.
y_pred: 2-D float `Tensor` of embedding vectors. Embeddings should
be l2 normalized.
margin: Float, margin term in the loss definition.
distance_metric: str or function, determines distance metric:
"L1" for l1-norm distance
"L2" for l2-norm distance
"angular" for cosine similarity
A custom function returning a 2d adjacency
matrix of a chosen distance metric can
also be passed here. e.g.
def custom_distance(batch):
batch = 1 - batch @ batch.T
return batch
triplet_semihard_loss(labels, batch,
distance_metric=custom_distance
)
Returns:
triplet_loss: float scalar with dtype of y_pred.
"""
labels, embeddings = y_true, y_pred
convert_to_float32 = (
embeddings.dtype == tf.dtypes.float16 or embeddings.dtype == tf.dtypes.bfloat16
)
precise_embeddings = (
tf.cast(embeddings, tf.dtypes.float32) if convert_to_float32 else embeddings
)
# Reshape label tensor to [batch_size, 1].
lshape = tf.shape(labels)
labels = tf.reshape(labels, [lshape[0], 1])
# Build pairwise squared distance matrix
if distance_metric == "L1":
pdist_matrix = metric_learning.pairwise_distance(
precise_embeddings, squared=False
)
elif distance_metric == "L2":
pdist_matrix = metric_learning.pairwise_distance(
precise_embeddings, squared=True
)
elif distance_metric == "angular":
pdist_matrix = metric_learning.angular_distance(precise_embeddings)
else:
pdist_matrix = distance_metric(precise_embeddings)
# Build pairwise binary adjacency matrix.
adjacency = tf.math.equal(labels, tf.transpose(labels))
# Invert so we can select negatives only.
adjacency_not = tf.math.logical_not(adjacency)
batch_size = tf.size(labels)
# Compute the mask.
pdist_matrix_tile = tf.tile(pdist_matrix, [batch_size, 1])
mask = tf.math.logical_and(
tf.tile(adjacency_not, [batch_size, 1]),
tf.math.greater(
pdist_matrix_tile, tf.reshape(tf.transpose(pdist_matrix), [-1, 1])
),
)
mask_final = tf.reshape(
tf.math.greater(
tf.math.reduce_sum(
tf.cast(mask, dtype=tf.dtypes.float32), 1, keepdims=True
),
0.0,
),
[batch_size, batch_size],
)
mask_final = tf.transpose(mask_final)
adjacency_not = tf.cast(adjacency_not, dtype=tf.dtypes.float32)
mask = tf.cast(mask, dtype=tf.dtypes.float32)
# negatives_outside: smallest D_an where D_an > D_ap.
negatives_outside = tf.reshape(
_masked_minimum(pdist_matrix_tile, mask), [batch_size, batch_size]
)
negatives_outside = tf.transpose(negatives_outside)
# negatives_inside: largest D_an.
negatives_inside = tf.tile(
_masked_maximum(pdist_matrix, adjacency_not), [1, batch_size]
)
semi_hard_negatives = tf.where(mask_final, negatives_outside, negatives_inside)
loss_mat = tf.math.add(margin, pdist_matrix - semi_hard_negatives)
mask_positives = tf.cast(adjacency, dtype=tf.dtypes.float32) - tf.linalg.diag(
tf.ones([batch_size])
)
# In lifted-struct, the authors multiply 0.5 for upper triangular
# in semihard, they take all positive pairs except the diagonal.
num_positives = tf.math.reduce_sum(mask_positives)
triplet_loss = tf.math.truediv(
tf.math.reduce_sum(
tf.math.maximum(tf.math.multiply(loss_mat, mask_positives), 0.0)
),
num_positives,
)
if convert_to_float32:
return tf.cast(triplet_loss, embeddings.dtype)
else:
return triplet_loss
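# Illustrative usage sketch (assumed, not from the original source): the function
# expects integer class labels and (ideally l2-normalized) embedding vectors.
#   labels = tf.constant([0, 0, 1, 1])
#   embeddings = tf.nn.l2_normalize(tf.random.normal([4, 8]), axis=1)
#   loss = triplet_semihard_loss(labels, embeddings, margin=1.0)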
@tf.keras.utils.register_keras_serializable(package="Addons")
@tf.function
def triplet_hard_loss(
y_true: TensorLike,
y_pred: TensorLike,
margin: FloatTensorLike = 1.0,
soft: bool = False,
distance_metric: Union[str, Callable] = "L2",
) -> tf.Tensor:
"""Computes the triplet loss with hard negative and hard positive mining.
Args:
y_true: 1-D integer `Tensor` with shape [batch_size] of
multiclass integer labels.
y_pred: 2-D float `Tensor` of embedding vectors. Embeddings should
be l2 normalized.
margin: Float, margin term in the loss definition.
soft: Boolean, if set, use the soft margin version.
distance_metric: str or function, determines distance metric:
"L1" for l1-norm distance
"L2" for l2-norm distance
"angular" for cosine similarity
A custom function returning a 2d adjacency
matrix of a chosen distance metric can
also be passed here. e.g.
def custom_distance(batch):
batch = 1 - batch @ batch.T
return batch
triplet_hard_loss(labels, batch,
distance_metric=custom_distance
)
Returns:
triplet_loss: float scalar with dtype of y_pred.
"""
labels, embeddings = y_true, y_pred
convert_to_float32 = (
embeddings.dtype == tf.dtypes.float16 or embeddings.dtype == tf.dtypes.bfloat16
)
precise_embeddings = (
tf.cast(embeddings, tf.dtypes.float32) if convert_to_float32 else embeddings
)
# Reshape label tensor to [batch_size, 1].
lshape = tf.shape(labels)
labels = tf.reshape(labels, [lshape[0], 1])
# Build pairwise squared distance matrix.
if distance_metric == "L1":
pdist_matrix = metric_learning.pairwise_distance(
precise_embeddings, squared=False
)
elif distance_metric == "L2":
pdist_matrix = metric_learning.pairwise_distance(
precise_embeddings, squared=True
)
elif distance_metric == "angular":
pdist_matrix = metric_learning.angular_distance(precise_embeddings)
else:
pdist_matrix = distance_metric(precise_embeddings)
# Build pairwise binary adjacency matrix.
adjacency = tf.math.equal(labels, tf.transpose(labels))
# Invert so we can select negatives only.
adjacency_not = tf.math.logical_not(adjacency)
adjacency_not = tf.cast(adjacency_not, dtype=tf.dtypes.float32)
# hard negatives: smallest D_an.
hard_negatives = _masked_minimum(pdist_matrix, adjacency_not)
batch_size = tf.size(labels)
adjacency = tf.cast(adjacency, dtype=tf.dtypes.float32)
mask_positives = tf.cast(adjacency, dtype=tf.dtypes.float32) - tf.linalg.diag(
tf.ones([batch_size])
)
# hard positives: largest D_ap.
hard_positives = _masked_maximum(pdist_matrix, mask_positives)
if soft:
triplet_loss = tf.math.log1p(tf.math.exp(hard_positives - hard_negatives))
else:
triplet_loss = tf.maximum(hard_positives - hard_negatives + margin, 0.0)
# Get final mean triplet loss
triplet_loss = tf.reduce_mean(triplet_loss)
if convert_to_float32:
return tf.cast(triplet_loss, embeddings.dtype)
else:
return triplet_loss
@tf.keras.utils.register_keras_serializable(package="Addons")
class TripletSemiHardLoss(LossFunctionWrapper):
"""Computes the triplet loss with semi-hard negative mining.
The loss encourages the positive distances (between a pair of embeddings
with the same labels) to be smaller than the minimum negative distance
among those negatives that are at least greater than the positive distance
plus the margin constant (the so-called semi-hard negatives) in the mini-batch.
If no such negative exists, the largest negative distance is used instead.
See: https://arxiv.org/abs/1503.03832.
We expect labels `y_true` to be provided as 1-D integer `Tensor` with shape
[batch_size] of multi-class integer labels. And embeddings `y_pred` must be
2-D float `Tensor` of l2 normalized embedding vectors.
Args:
margin: Float, margin term in the loss definition. Default value is 1.0.
name: Optional name for the op.
"""
@typechecked
def __init__(
self,
margin: FloatTensorLike = 1.0,
distance_metric: Union[str, Callable] = "L2",
name: Optional[str] = None,
**kwargs
):
super().__init__(
triplet_semihard_loss,
name=name,
reduction=tf.keras.losses.Reduction.NONE,
margin=margin,
distance_metric=distance_metric,
)
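# Illustrative Keras usage sketch (assumed, not from the original source; `model`
# and `train_dataset` are placeholders):
#   model.compile(optimizer=tf.keras.optimizers.Adam(0.001),
#                 loss=TripletSemiHardLoss(margin=1.0))
#   model.fit(train_dataset, epochs=5)
# Reduction.NONE is passed to the wrapper because the wrapped function already
# returns a scalar averaged over the valid positive pairs in the batch.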
@tf.keras.utils.register_keras_serializable(package="Addons")
class TripletHardLoss(LossFunctionWrapper):
"""Computes the triplet loss with hard negative and hard positive mining.
The loss encourages the maximum positive distance (between a pair of embeddings
with the same labels) to be smaller than the minimum negative distance plus the
margin constant in the mini-batch.
The loss selects the hardest positive and the hardest negative samples
within the batch when forming the triplets for computing the loss.
See: https://arxiv.org/pdf/1703.07737.
We expect labels `y_true` to be provided as 1-D integer `Tensor` with shape
[batch_size] of multi-class integer labels. And embeddings `y_pred` must be
2-D float `Tensor` of l2 normalized embedding vectors.
Args:
margin: Float, margin term in the loss definition. Default value is 1.0.
soft: Boolean, if set, use the soft margin version. Default value is False.
name: Optional name for the op.
"""
@typechecked
def __init__(
self,
margin: FloatTensorLike = 1.0,
soft: bool = False,
distance_metric: Union[str, Callable] = "L2",
name: Optional[str] = None,
**kwargs
):
super().__init__(
triplet_hard_loss,
name=name,
reduction=tf.keras.losses.Reduction.NONE,
margin=margin,
soft=soft,
distance_metric=distance_metric,
)
|
py | 1a4e490729d08b048611bbcd007c4a38ebc0a657 | from . import db
from werkzeug.security import generate_password_hash,check_password_hash
from flask_login import UserMixin
from datetime import datetime
from . import login_manager
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class Movie:
'''
Movie class to define Movie Objects
'''
def __init__(self,id,title,overview,poster,vote_average,vote_count):
self.id =id
self.title = title
self.overview = overview
self.poster = "https://image.tmdb.org/t/p/w500/" + poster
self.vote_average = vote_average
self.vote_count = vote_count
class Review(db.Model):
'''
Review class to define review Objects
'''
__tablename__ = 'reviews'
id = db.Column(db.Integer,primary_key = True)
movie_id = db.Column(db.Integer)
movie_title = db.Column(db.String)
image_path = db.Column(db.String)
movie_review = db.Column(db.String)
posted = db.Column(db.DateTime,default=datetime.utcnow)
user_id = db.Column(db.Integer,db.ForeignKey("users.id"))
all_reviews = []
def save_review(self):
db.session.add(self)
db.session.commit()
@classmethod
def clear_reviews(cls):
Review.all_reviews.clear()
@classmethod
def get_reviews(cls,id):
reviews = Review.query.filter_by(movie_id=id).all()
return reviews
class User(UserMixin,db.Model):
'''
User class to define user Objects
'''
__tablename__ = 'users'
id = db.Column(db.Integer,primary_key = True)
username = db.Column(db.String(255),index = True)
email = db.Column(db.String(255),unique = True,index = True)
role_id = db.Column(db.Integer,db.ForeignKey('roles.id'))
bio = db.Column(db.String(255))
profile_pic_path = db.Column(db.String())
password_hash = db.Column(db.String(255))
reviews = db.relationship('Review',backref = 'user',lazy = "dynamic")
@property
def password(self):
raise AttributeError('You cannot read the password attribute')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self,password):
return check_password_hash(self.password_hash,password)
def __repr__(self):
return f'User {self.username}'
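# Illustrative sketch (assumed, not from the original app): the write-only
# `password` property stores only a salted hash, which `verify_password` checks.
#   user = User(username='jane', email='jane@example.com')
#   user.password = 'secret123'        # stores generate_password_hash('secret123')
#   user.verify_password('secret123')  # True
#   user.password                      # raises AttributeError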
class Role(db.Model):
'''
Role class to define roles Objects
'''
__tablename__ = 'roles'
id = db.Column(db.Integer,primary_key = True)
name = db.Column(db.String(255))
users = db.relationship('User',backref = 'role',lazy="dynamic")
def __repr__(self):
return f'User {self.name}'
class Genres:
'''
Genre class to define genre Objects
'''
def __init__(self,id,name):
self.id = id
self.name = name
class Trailer:
'''
Trailer class to define trailer Objects
'''
def __init__(self,key):
self.key= key
|
py | 1a4e4b506de67098a7e8b7f18581efe1d9064a9d | """empty message
Revision ID: 9b9102347500
Revises: 7ede01846a31
Create Date: 2019-08-14 12:26:40.368422
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '9b9102347500'
down_revision = '7ede01846a31'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('study', sa.Column('image_url', sa.String(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('study', 'image_url')
# ### end Alembic commands ###
|
py | 1a4e4cb57fdfab33ea96b8d0ed98755ca3f54138 | # Copyright 2020 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains an example test.
Tests should be placed in ``src/tests``, in modules that mirror your
project's structure, and in files named test_*.py. They are simply functions
named ``test_*`` which test a unit of logic.
To run the tests, run ``kedro test``.
"""
from pathlib import Path
import pytest
from umoja.run import ProjectContext
@pytest.fixture
def project_context():
return ProjectContext(str(Path.cwd()))
class TestProjectContext:
def test_project_name(self, project_context):
assert project_context.project_name == "umoja"
def test_project_version(self, project_context):
assert project_context.project_version == "0.15.8"
|
py | 1a4e4cd38391a06c246e579ab13c09a60ffe8f70 | import os
from app import create_app
config_name = os.environ.get('APP_SETTINGS')
app = create_app(config_name)
if __name__ == '__main__':
port = int(os.environ.get('PORT', 5000))
app.run('', port=port)
|
py | 1a4e4cefdb235e71831e76eaccf61179472c578b | """market_access_python_frontend URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import include, path
urlpatterns = []
if settings.DEBUG and settings.DJANGO_ENV == "local":
urlpatterns += [
path("admin/", admin.site.urls),
]
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += [
path("", include("users.urls", namespace="users")),
path("", include("barriers.urls", namespace="barriers")),
path("", include("reports.urls", namespace="reports")),
path("", include("core.urls", namespace="core")),
path("", include("healthcheck.urls", namespace="healthcheck")),
]
|
py | 1a4e4d12f57a8603b32a895c73e7efcb2bc7573b | import flavio
from wilson import Wilson
import wcxf
from flavio.statistics.likelihood import Likelihood, FastLikelihood
from flavio.statistics.probability import NormalDistribution
from flavio.statistics.functions import pull, pvalue
import warnings
import pandas as pd
import numpy as np
from collections import OrderedDict
from math import ceil
from .util import tree, get_datapath
from . import ckm
from multipledispatch import dispatch
from copy import copy
import os
# by default, smelli uses leading log accuracy for SMEFT running!
Wilson.set_default_option('smeft_accuracy', 'leadinglog')
class GlobalLikelihood(object):
"""Class that provides a global likelihood in SMEFT Wilson
coefficient space.
User methods:
- `log_likelihood`: return an instance of LikelihoodResult
given a dictionary of Wilson coefficients at a given scale
- `log_likelihood_wcxf`: return an instance of LikelihoodResult
given the path to a WCxf file
- `log_likelihood_wilson`: return an instance of LikelihoodResult
given an instance of `wilson.Wilson`
Utility methods:
- `make_measurement`: compute the SM covariances. Note that it is only
necessary to call this method when changes to the default
parameters/uncertainties have been made
- `save_sm_covariances`, `load_sm_covariances`: Save the calculated SM
covariances or load them from data files
- `save_exp_covariances`, `load_exp_covariances`: Save the calculated
experimental central values and covariances or load them from data files
"""
_default_bases = {'SMEFT': 'Warsaw', 'WET': 'flavio'}
_fast_likelihoods_yaml = [
'fast_likelihood_quarks.yaml',
'fast_likelihood_leptons.yaml'
]
_likelihoods_yaml = [
'likelihood_ewpt.yaml',
'likelihood_lept.yaml',
'likelihood_rd_rds.yaml',
'likelihood_lfu_fccc.yaml',
'likelihood_lfu_fcnc.yaml',
'likelihood_bcpv.yaml',
'likelihood_bqnunu.yaml',
'likelihood_lfv.yaml',
'likelihood_zlfv.yaml',
]
def __init__(self, eft='SMEFT', basis=None,
par_dict=None,
include_likelihoods=None,
exclude_likelihoods=None,
Nexp=5000,
exp_cov_folder=None,
sm_cov_folder=None,
custom_likelihoods=None,
fix_ckm=False):
"""Initialize the likelihood.
Optionally, a dictionary of parameters can be passed as `par_dict`.
If not given (or not complete), flavio default parameter values will
be used. Note that the CKM elements in `par_dict` will be ignored as
the "true" CKM elements will be extracted for each parameter point
from the measurement of four input observables:
- `'RKpi(P+->munu)'`
- `'BR(B+->taunu)'`
- `'BR(B->Xcenu)'`
- `'DeltaM_d/DeltaM_s'`
Parameters:
- eft: a WCxf EFT, must be one of 'SMEFT' (default) or 'WET'.
- basis: a WCxf basis, defaults to 'Warsaw' for SMEFT and 'flavio'
for WET.
- include_likelihoods: a list of strings specifying the likelihoods
to be included (default: all of them). Note that this cannot be used
to add likelihoods.
- exclude_likelihoods: a list of strings specifying the likelihoods
to be excluded (default: none of them).
- Nexp: number of random evaluations of the experimental likelihood
used to extract the covariance matrix for "fast likelihood"
instances. Defaults to 5000.
- exp_cov_folder: directory containing saved experimental
covariances. The data files have to be in the format exported by
`save_exp_covariances`.
- sm_cov_folder: directory containing saved SM
covariances. The data files have to be in the format exported by
`save_sm_covariances`.
- custom_likelihoods: a dictionary in which each value is a list of
observables and each key is a string that serves as user-defined
name. For each item of the dictionary, a custom likelihood will be
computed.
- fix_ckm: If False (default), automatically determine the CKM elements
in the presence of new physics in processes used to determine these
elements in the SM. If set to True, the CKM elements are fixed to
their SM values, which can lead to inconsistent results, but also
to a significant speedup in specific cases.
"""
self.eft = eft
self.basis = basis or self._default_bases[self.eft]
par_dict = par_dict or {} # initialize empty if not given
# take missing parameters from flavio defaults
self.par_dict_default = flavio.default_parameters.get_central_all()
self.par_dict_default.update(par_dict)
self._par_dict_sm = None
self.fix_ckm = fix_ckm
self.likelihoods = {}
self.fast_likelihoods = {}
self._custom_likelihoods_dict = custom_likelihoods or {}
self.custom_likelihoods = {}
self._load_likelihoods(include_likelihoods=include_likelihoods,
exclude_likelihoods=exclude_likelihoods)
self._Nexp = Nexp
if exp_cov_folder is not None:
self.load_exp_covariances(exp_cov_folder)
self._sm_cov_loaded = False
try:
if sm_cov_folder is None:
self.load_sm_covariances(get_datapath('smelli', 'data/cache'))
else:
self.load_sm_covariances(sm_cov_folder)
self._sm_cov_loaded = True
self.make_measurement()
except (KeyboardInterrupt, SystemExit):
raise
except:
warnings.warn("There was a problem loading the SM covariances. "
"Please recompute them with `make_measurement`.")
self._log_likelihood_sm = None
self._obstable_sm = None
def _load_likelihoods(self,
include_likelihoods=None,
exclude_likelihoods=None):
if include_likelihoods is not None and exclude_likelihoods is not None:
raise ValueError("include_likelihoods and exclude_likelihoods "
"should not be specified simultaneously.")
for fn in self._fast_likelihoods_yaml:
if include_likelihoods is not None and fn not in include_likelihoods:
continue
if exclude_likelihoods is not None and fn in exclude_likelihoods:
continue
with open(self._get_likelihood_path(fn), 'r') as f:
L = FastLikelihood.load(f)
self.fast_likelihoods[fn] = L
for fn in self._likelihoods_yaml:
if include_likelihoods is not None and fn not in include_likelihoods:
continue
if exclude_likelihoods is not None and fn in exclude_likelihoods:
continue
if self.eft != 'SMEFT' and fn in ['likelihood_ewpt.yaml',
'likelihood_zlfv.yaml',]:
continue
with open(self._get_likelihood_path(fn), 'r') as f:
L = Likelihood.load(f)
self.likelihoods[fn] = L
for name, observables in self._custom_likelihoods_dict.items():
L = CustomLikelihood(self, observables)
self.custom_likelihoods['custom_' + name] = L
def _get_likelihood_path(self, name):
"""Return a path for the likelihood specified by `name`.
If a YAML file with that name is found in the package's data
directory, that is used. Otherwise, `name` is assumed to be a path.
Raises `FileNotFoundError` if path does not exists.
"""
path = get_datapath('smelli', 'data/yaml/' + name)
if os.path.exists(path):
return path
path = get_datapath('smelli', 'data/yaml/' + name + '.yaml')
if os.path.exists(path):
return path
if os.path.exists(name):
return name
if os.path.exists(name + '.yaml'):
return name + '.yaml'
else:
raise FileNotFoundError("Likelihood YAML file '{}' was not found".format(name))
def make_measurement(self, *args, **kwargs):
"""Initialize the likelihood by producing a pseudo-measurement containing both
experimental uncertainties as well as theory uncertainties stemming
from nuisance parameters.
Optional parameters:
- `N`: number of random computations for the SM covariance (computing
time is proportional to it; more means less random fluctuations.)
- `Nexp`: number of random computations for the experimental covariance.
This is much less expensive than the theory covariance, so a large
number can be afforded (default: 5000).
- `threads`: number of parallel threads for the SM
covariance computation. Defaults to 1 (no parallelization).
- `force`: if True, will recompute SM covariance even if it
already has been computed. Defaults to False.
- `force_exp`: if True, will recompute experimental central values and
covariance even if they have already been computed. Defaults to False.
"""
if 'Nexp' not in kwargs:
kwargs['Nexp'] = self._Nexp
for name, flh in self.fast_likelihoods.items():
flh.make_measurement(*args, **kwargs)
self._sm_cov_loaded = True
def save_sm_covariances(self, folder):
for name, flh in self.fast_likelihoods.items():
filename = os.path.join(folder, name + '.p')
flh.sm_covariance.save(filename)
def load_sm_covariances(self, folder):
for name, flh in self.fast_likelihoods.items():
filename = os.path.join(folder, name + '.p')
flh.sm_covariance.load(filename)
def save_exp_covariances(self, folder):
for name, flh in self.fast_likelihoods.items():
filename = os.path.join(folder, name + '.p')
flh.exp_covariance.save(filename)
def load_exp_covariances(self, folder):
for name, flh in self.fast_likelihoods.items():
filename = os.path.join(folder, name + '.p')
flh.exp_covariance.load(filename)
@property
def log_likelihood_sm(self):
if self._log_likelihood_sm is None:
self._log_likelihood_sm = self._log_likelihood(self.par_dict_sm, flavio.WilsonCoefficients())
return self._log_likelihood_sm
def _check_sm_cov_loaded(self):
"""Check if the SM covariances have been computed or loaded."""
if not self._sm_cov_loaded:
raise ValueError("Please load or compute the SM covariances first"
" by calling `make_measurement`.")
def get_ckm_sm(self):
scheme = ckm.CKMSchemeRmuBtaunuBxlnuDeltaM()
Vus, Vcb, Vub, delta = scheme.ckm_np(w=None)
return {'Vus': Vus, 'Vcb': Vcb, 'Vub': Vub, 'delta': delta}
@property
def par_dict_sm(self):
"""Return the dictionary of parameters where the four CKM parameters
`Vus`, `Vcb`, `Vub`, `delta` have been replaced by their
"true" values extracted assuming the SM.
They should be almost (but not exactly) equal to the default
flavio CKM parameters."""
if self._par_dict_sm is None:
par_dict_sm = self.par_dict_default.copy()
par_dict_sm.update(self.get_ckm_sm())
self._par_dict_sm = par_dict_sm
return self._par_dict_sm
@property
def obstable_sm(self):
self._check_sm_cov_loaded()
if self._obstable_sm is None:
info = tree() # nested dict
for flh_name, flh in self.fast_likelihoods.items():
# loop over fast likelihoods: they only have a single "measurement"
m = flh.pseudo_measurement
ml = flh.full_measurement_likelihood
pred_sm = ml.get_predictions_par(self.par_dict_sm,
flavio.WilsonCoefficients())
sm_cov = flh.sm_covariance.get(force=False)
_, exp_cov = flh.exp_covariance.get(force=False)
inspire_dict = self._get_inspire_dict(flh.observables, ml)
for i, obs in enumerate(flh.observables):
info[obs]['lh_name'] = flh_name
info[obs]['name'] = obs if isinstance(obs, str) else obs[0]
info[obs]['th. unc.'] = np.sqrt(sm_cov[i, i])
info[obs]['experiment'] = m.get_central(obs)
info[obs]['exp. unc.'] = np.sqrt(exp_cov[i, i])
info[obs]['exp. PDF'] = NormalDistribution(m.get_central(obs), np.sqrt(exp_cov[i, i]))
info[obs]['inspire'] = sorted(set(inspire_dict[obs]))
info[obs]['ll_sm'] = m.get_logprobability_single(obs, pred_sm[obs])
info[obs]['ll_central'] = m.get_logprobability_single(obs, m.get_central(obs))
for lh_name, lh in self.likelihoods.items():
# loop over "normal" likelihoods
ml = lh.measurement_likelihood
pred_sm = ml.get_predictions_par(self.par_dict_sm,
flavio.WilsonCoefficients())
inspire_dict = self._get_inspire_dict(lh.observables, ml)
for i, obs in enumerate(lh.observables):
obs_dict = flavio.Observable.argument_format(obs, 'dict')
obs_name = obs_dict.pop('name')
with warnings.catch_warnings():
warnings.simplefilter("ignore")
p_comb = flavio.combine_measurements(
obs_name,
include_measurements=ml.get_measurements,
**obs_dict)
info[obs]['experiment'] = p_comb.central_value
info[obs]['exp. unc.'] = max(p_comb.error_left, p_comb.error_right)
info[obs]['exp. PDF'] = p_comb
info[obs]['inspire'] = sorted(set(inspire_dict[obs]))
info[obs]['th. unc.'] = 0
info[obs]['lh_name'] = lh_name
info[obs]['name'] = obs if isinstance(obs, str) else obs[0]
info[obs]['ll_sm'] = p_comb.logpdf([pred_sm[obs]])
info[obs]['ll_central'] = p_comb.logpdf([p_comb.central_value])
self._obstable_sm = info
return self._obstable_sm
def get_wilson(self, wc_dict, scale):
return Wilson(wc_dict, scale=scale, eft=self.eft, basis=self.basis)
def _log_likelihood(self, par_dict, w):
"""Return the log-likelihood as a dictionary for an instance of
`wilson.Wilson`."""
ll = {}
for name, flh in self.fast_likelihoods.items():
ll[name] = flh.log_likelihood(par_dict, w, delta=True)
for name, lh in self.likelihoods.items():
ll[name] = lh.log_likelihood(par_dict, w, delta=True)
for name, clh in self.custom_likelihoods.items():
ll[name] = clh.log_likelihood(par_dict, w, delta=True)
return ll
@dispatch(dict)
def parameter_point(self, wc_dict, scale=None):
"""Choose a point in parameter space by providing a dictionary of
Wilson coefficient values (with keys corresponding to WCxf Wilson
coefficient names) and the input scale."""
if not scale:
raise ValueError("You need to provide a scale")
w = self.get_wilson(wc_dict, scale)
return GlobalLikelihoodPoint(self, w, fix_ckm=self.fix_ckm)
@dispatch(dict, (int, float))
def parameter_point(self, wc_dict, scale):
"""Choose a point in parameter space by providing a dictionary of
Wilson coefficient values (with keys corresponding to WCxf Wilson
coefficient names) and the input scale."""
w = self.get_wilson(wc_dict, scale)
return GlobalLikelihoodPoint(self, w, fix_ckm=self.fix_ckm)
@dispatch(str)
def parameter_point(self, filename):
"""Choose a point in parameter space by providing the path to a WCxf
file."""
with open(filename, 'r') as f:
wc = wcxf.WC.load(f)
w = Wilson.from_wc(wc)
return GlobalLikelihoodPoint(self, w, fix_ckm=self.fix_ckm)
@dispatch(Wilson)
def parameter_point(self, w):
"""Choose a point in parameter space by providing an instance
of `wilson.Wilson`."""
return GlobalLikelihoodPoint(self, w, fix_ckm=self.fix_ckm)
@staticmethod
def _get_inspire_dict(observables, ml):
inspire_dict = {}
obs_set = set(observables)
for m_name in ml.get_measurements:
m_obj = flavio.Measurement[m_name]
for obs in set(m_obj.all_parameters) & obs_set:
if obs in inspire_dict:
inspire_dict[obs].append(m_obj.inspire)
else:
inspire_dict[obs]=[m_obj.inspire]
return inspire_dict
def number_observations_dict(self, exclude_observables=None):
"""Get a dictionary of the number of "observations" for each
sublikelihood.
Here, an "observation" is defined as an individual measurement
of an observable. Thus, the number of observations is always
>= the number of observables.
"""
nobs_dict = {}
for name, flh in self.fast_likelihoods.items():
nobs_dict[name] = len(set(flh.observables) - set(exclude_observables or []))
for name, lh in self.likelihoods.items():
ml = lh.measurement_likelihood
nobs_dict[name] = ml.get_number_observations(
exclude_observables=exclude_observables
)
for name, clh in self.custom_likelihoods.items():
nobs_dict[name] = clh.get_number_observations()
nobs_dict['global'] = sum([v for k, v in nobs_dict.items() if 'custom_' not in k])
return nobs_dict
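# Illustrative usage sketch (assumed, not from the original source; the Wilson
# coefficient name and value below are placeholders):
#   gl = GlobalLikelihood()                        # SMEFT, Warsaw basis by default
#   pt = gl.parameter_point({'lq3_2223': 1e-9}, scale=1000)   # scale in GeV
#   pt.log_likelihood_global()                     # delta log-likelihood vs. the SM
#   pt.obstable()                                  # per-observable pulls as a DataFrame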
class CustomLikelihood(object):
def __init__(self, likelihood, observables):
self.likelihood = likelihood
self.observables = observables
self.exclude_obs = self._get_exclude_obs_dict()
def _get_exclude_obs_dict(self):
"""Get a dictionary with observables to be excluded from each
(Fast)Likelihood instance."""
exclude_obs = {}
for lhs_or_flhs in (self.likelihood.likelihoods,
self.likelihood.fast_likelihoods):
for lh_name, lh in lhs_or_flhs.items():
exclude_observables = set(lh.observables) - set(self.observables)
if set(lh.observables) != exclude_observables:
exclude_obs[lh_name] = exclude_observables
return exclude_obs
def log_likelihood(self, par_dict, wc_obj, delta=False):
custom_log_likelihood = 0
for lh_name, exclude_observables in self.exclude_obs.items():
lh = (self.likelihood.fast_likelihoods.get(lh_name)
or self.likelihood.likelihoods.get(lh_name))
custom_log_likelihood += lh.log_likelihood(
par_dict, wc_obj, delta=delta,
exclude_observables=exclude_observables
)
return custom_log_likelihood
def get_number_observations(self):
"""Get the number of observations, defined as individual measurements
of observables."""
nobs = 0
for llh_name, exclude_observables in self.exclude_obs.items():
if llh_name in self.likelihood.fast_likelihoods:
flh = self.likelihood.fast_likelihoods[llh_name]
nobs += len(set(flh.observables) - set(exclude_observables or []))
else:
lh = self.likelihood.likelihoods[llh_name]
ml = lh.measurement_likelihood
nobs += ml.get_number_observations(
exclude_observables=exclude_observables)
return nobs
class GlobalLikelihoodPoint(object):
"""Class representing the properties of the likelihood function at a
specific point in parameter space.
Attributes:
- `log_likelihood_dict`: dictionary with individual contributions
to the log-likelihood
- `value`: Return the numerical values of the global log-likelihood
compared to the SM value (can also be accessed with `float(self)`)
Methods:
- `get_obstable`: return a pandas data frame with the values and pulls
for each individual observable, given the Wilson coefficients
"""
def __init__(self, likelihood, w,
fix_ckm=False):
"""Initialize the `GlobalLikelihoodPoint` instance.
Parameters:
- likelihood: an instance of `GlobalLikelihood`
- w: an instance of `wilson.Wilson`
- fix_ckm: If False (default), automatically determine the CKM elements
in the presence of new physics in processes used to determine these
elements in the SM. If set to True, the CKM elements are fixed to
their SM values, which can lead to inconsistent results, but also
to a significant speedup in specific cases.
"""
self.likelihood = likelihood
likelihood._check_sm_cov_loaded()
self.w_input = w
self.fix_ckm = fix_ckm
self._w = None
self._obstable_tree_cache = None
self._log_likelihood_dict = None
self._par_dict_np = None
@property
def w(self):
if self._w is None:
w = self.w_input
opt = w.get_option('parameters')
par = self.par_dict_np
for p in ['Vus', 'Vcb', 'Vub', 'delta']:
opt[p] = par[p]
w.set_option('parameters', opt)
self._w = w
return self._w
def get_ckm_np(self):
"""return the values of the four "true" CKM parameters
`Vus`, `Vcb`, `Vub`, `delta`, extracted from the four input observables
for this parameter point in Wilson coefficient space."""
# the default 4-observable scheme
scheme = ckm.CKMSchemeRmuBtaunuBxlnuDeltaM()
try:
Vus, Vcb, Vub, delta = scheme.ckm_np(self.w_input)
except ValueError:
# this happens mostly when the formulas result in |cos(delta)| > 1
raise ValueError("The extraction of CKM elements failed. Too large NP effects?")
return {'Vus': Vus, 'Vcb': Vcb, 'Vub': Vub, 'delta': delta}
@property
def par_dict_np(self):
"""Return the dictionary of parameters where the four CKM parameters
`Vus`, `Vcb`, `Vub`, `delta` have been replaced by their
"true" values as extracted from the four input observables.
Note that if `fix_ckm` is set to `True`, this method actually
returns the SM values."""
if self.fix_ckm:
return self.likelihood.par_dict_sm
if self._par_dict_np is None:
par_dict_np = self.likelihood.par_dict_default.copy()
par_dict_np.update(self.get_ckm_np())
self._par_dict_np = par_dict_np
return self._par_dict_np
def _delta_log_likelihood(self):
"""Compute the delta log likelihood for the individual likelihoods"""
ll = self.likelihood._log_likelihood(self.par_dict_np, self.w)
for name in ll:
ll[name] -= self.likelihood.log_likelihood_sm[name]
ll['global'] = sum([v for k, v in ll.items() if 'custom_' not in k])
return ll
def log_likelihood_dict(self):
"""Return a dictionary with the delta log likelihood values
for the individual contributions.
Cached after the first call."""
if self._log_likelihood_dict is None:
self._log_likelihood_dict = self._delta_log_likelihood()
return self._log_likelihood_dict
def log_likelihood_global(self):
"""Return the value of the global delta log likelihood.
Cached after the first call. Corresponds to the `global` key of
the dictionary returned by `log_likelihood_dict`."""
return self.log_likelihood_dict()['global']
def pvalue_dict(self, n_par=0):
r"""Dictionary of $p$ values of sublikelihoods given the number `n_par`
of free parameters (default 0)."""
nobs = self.likelihood.number_observations_dict()
chi2 = self.chi2_dict()
return {k: pvalue(chi2[k], dof=max(1, nobs[k] - n_par)) for k in chi2}
def chi2_dict(self):
r"""Dictionary of total $\chi^2$ values of each sublikelihood.
$$\chi^2 = -2 (\ln L + \ln L_\text{SM})$$
"""
ll = self.log_likelihood_dict()
llsm = self.likelihood._log_likelihood_sm.copy()
llsm['global'] = sum([v for k, v in llsm.items() if 'custom_' not in k])
return {k: -2 * (ll[k] + llsm[k]) for k in ll}
@property
def _obstable_tree(self):
if not self._obstable_tree_cache:
llh = self.likelihood
info = copy(llh.obstable_sm)
for flh_name, flh in llh.fast_likelihoods.items():
# loop over fast likelihoods: they only have a single "measurement"
m = flh.pseudo_measurement
ml = flh.full_measurement_likelihood
pred = ml.get_predictions_par(self.par_dict_np, self.w)
for i, obs in enumerate(flh.observables):
info[obs]['theory'] = pred[obs]
ll_central = info[obs]['ll_central']
ll_sm = info[obs]['ll_sm']
ll = m.get_logprobability_single(obs, pred[obs])
# DeltaChi2 is -2*DeltaLogLikelihood
info[obs]['pull exp.'] = pull(-2 * (ll - ll_central), dof=1)
s = -1 if ll > ll_sm else 1
info[obs]['pull SM'] = s * pull(-2 * (ll - ll_sm), dof=1)
for lh_name, lh in llh.likelihoods.items():
# loop over "normal" likelihoods
ml = lh.measurement_likelihood
pred = ml.get_predictions_par(self.par_dict_np, self.w)
for i, obs in enumerate(lh.observables):
info[obs]['theory'] = pred[obs]
ll_central = info[obs]['ll_central']
ll_sm = info[obs]['ll_sm']
p_comb = info[obs]['exp. PDF']
ll = p_comb.logpdf([pred[obs]])
info[obs]['pull exp.'] = pull(-2 * (ll - ll_central), dof=1)
s = -1 if ll > ll_sm else 1
info[obs]['pull SM'] = s * pull(-2 * (ll - ll_sm), dof=1)
self._obstable_tree_cache = info
return self._obstable_tree_cache
def obstable(self, min_pull_exp=0, sort_by='pull exp.', ascending=None,
min_val=None, max_val=None):
r"""Return a pandas data frame with the central values and uncertainties
as well as the pulls with respect to the experimental and the SM values for each observable.
The pull is defined as $\sqrt{|-2\ln L|}$. Note that the global
likelihood is *not* simply proportional to the sum of squared pulls
due to correlations.
"""
sort_keys = ['name', 'exp. unc.', 'experiment', 'pull SM', 'pull exp.',
'th. unc.', 'theory']
if sort_by not in sort_keys:
raise ValueError(
"'{}' is not an allowed value for sort_by. Allowed values are "
"'{}', and '{}'.".format(sort_by, "', '".join(sort_keys[:-1]),
sort_keys[-1])
)
info = self._obstable_tree
subset = None
if sort_by == 'pull exp.':
# if sorted by pull exp., use descending order as default
if ascending is None:
ascending = False
if min_val is not None:
min_val = max(min_pull_exp, min_val)
else:
min_val = min_pull_exp
elif min_pull_exp != 0:
subset = lambda row: row['pull exp.'] >= min_pull_exp
# if sorted not by pull exp., use ascending order as default
if ascending is None:
ascending = True
info = self._obstable_filter_sort(info, sortkey=sort_by,
ascending=ascending,
min_val=min_val, max_val=max_val,
subset=subset)
# create DataFrame
df = pd.DataFrame(info).T
# if df has length 0 (e.g. if min_pull is very large) there are no
# columns that could be removed
if len(df) >0:
# remove columns that are only used internal and should not be
# included in obstable
del(df['inspire'])
del(df['lh_name'])
del(df['name'])
del(df['exp. PDF'])
del(df['ll_central'])
del(df['ll_sm'])
return df
@staticmethod
def _obstable_filter_sort(info, sortkey='name', ascending=True,
min_val=None, max_val=None,
subset=None, max_rows=None):
# impose min_val and max_val
if min_val is not None:
info = {obs:row for obs,row in info.items()
if row[sortkey] >= min_val}
if max_val is not None:
info = {obs:row for obs,row in info.items()
if row[sortkey] <= max_val}
# get only subset:
if subset is not None:
info = {obs:row for obs,row in info.items() if subset(row)}
# sort
info = OrderedDict(sorted(info.items(), key=lambda x: x[1][sortkey],
reverse=(not ascending)))
# restrict number of rows per tabular to max_rows
if max_rows is None or len(info)<=max_rows:
return info
else:
info_list = []
for n in range(ceil(len(info)/max_rows)):
info_n = OrderedDict((obs,row)
for i,(obs,row) in enumerate(info.items())
if i>=n*max_rows and i<(n+1)*max_rows)
info_list.append(info_n)
return info_list
|
py | 1a4e4d164de5c0cbc58460ec1391dc59f894aac2 | #!/usr/bin/python2.7
# -*- coding:utf-8 -*-
# Author: NetworkRanger
# Date: 2019/1/28 2:40 PM
from email.header import Header
from email.mime.text import MIMEText
from email.utils import parseaddr, formataddr
import smtplib
def _format_addr(s):
name, addr = parseaddr(s)
return formataddr((Header(name, 'utf-8').encode(), addr))
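# Illustrative note (assumed, not from the original script): _format_addr wraps the
# display name in a Header so that non-ASCII names (e.g. '管理员') are RFC 2047
# encoded before being recombined with the address via formataddr.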
# Sender address
from_addr = '[email protected]'
# Email account password
password = 'pass'
# Recipient address
to_addr = '[email protected]'
# NetEase 163 mail SMTP server address
smtp_server = 'smtp.163.com'
# Compose the email message
msg = MIMEText('Python爬虫运行异常,异常信息为遇到HTTP 403', 'plain', 'utf-8')  # body: "Python crawler error: encountered HTTP 403"
msg['From'] = _format_addr('一号爬虫 <%s>' % from_addr)  # "Crawler No. 1"
msg['To'] = _format_addr('管理员 <%s>' % to_addr)  # "Administrator"
msg['Subject'] = Header('一号爬虫运行状态', 'utf-8').encode()  # "Crawler No. 1 running status"
# Send the email
server = smtplib.SMTP(smtp_server, 25)
server.login(from_addr, password)
server.sendmail(from_addr, [to_addr], msg.as_string())
server.quit()
|
py | 1a4e4de85e36e27779cc1370271bc79678c865fb | import pexpect
import argparse
import os
import os.path
import subprocess
import sys
class RepositorySet:
def __init__(self, repository_root, repositories):
self.repository_root = repository_root
self.repositories = repositories
class Repository:
def __init__(self, name, origin_url, remote_urls):
self.name = name
self.origin_url = origin_url
self.remote_urls = remote_urls
class Credentials:
def __init__(self, username, password):
self.username = username
self.password = password
class Command:
def __init__(self, name, repository_root, username='', password=''):
self.name = name
self.repository_root = repository_root
self.credentials = Credentials(username, password)
def is_list(self):
return self.name == 'list'
def is_update_local(self):
return self.name == 'update-local'
def is_update_remote(self):
return self.name == 'update-remote'
def is_git_repo(name):
old_working_dir = os.getcwd()
ret = None
if os.path.isdir(name):
os.chdir(name)
result = subprocess.run(
['git', 'rev-parse', '--is-inside-work-tree'],
capture_output=True,
text=True
)
if result.returncode == 0:
ret = True
else:
ret = False
else:
ret = False
os.chdir(old_working_dir)
assert ret is not None
return ret
def get_url_by_label(label):
result = subprocess.run(
['git', 'remote', 'get-url', label],
capture_output=True,
text=True
)
if result.returncode == 0:
return result.stdout.rstrip()
else:
raise ValueError(f'The git repository does not have a URL named {label}')
def remote_url_labels():
result = subprocess.run(
['git', 'remote'],
capture_output=True,
text=True
)
remote_labels = result.stdout.rstrip().split('\n')
return remote_labels
def remote_urls(labels, exclude=['origin']):
urls = {}
for label in (label for label in labels if label not in exclude):
try:
url = get_url_by_label(label)
except ValueError:
url = ''
urls[label] = url
return urls
def scan_repository_root(repository_root):
repositories = {}
old_working_dir = os.getcwd()
os.chdir(repository_root)
for name in (name for name in os.listdir(repository_root) if is_git_repo(name)):
os.chdir(name)
try:
origin_url = get_url_by_label('origin')
except ValueError:
origin_url = ''
labels = remote_url_labels()
urls = remote_urls(labels)
repository = Repository(name, origin_url, urls)
repositories[name] = repository
os.chdir(os.path.pardir)
os.chdir(old_working_dir)
return RepositorySet(repository_root, repositories)
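# Illustrative sketch (assumed): scanning a root such as ~/forks containing the
# checkouts foo/ and bar/ yields a RepositorySet whose `repositories` dict maps
# 'foo' and 'bar' to Repository objects holding their origin URL and any other
# remote URLs keyed by remote name.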
def run_list(repository_set):
print(f'Found {len(repository_set.repositories)} Git repositories in `{repository_set.repository_root}`\n')
for repo in repository_set.repositories.values():
repo_path = os.path.join(repository_set.repository_root, repo.name)
print(f'Repository: {repo_path}')
print(f'Origin: {repo.origin_url}')
print(f'Remote URLs: {repo.remote_urls}\n')
def run_update_local(repository_set):
def git_pull():
return subprocess.run(
['git', 'pull', 'origin'],
capture_output=True,
text=True
)
old_working_dir = os.getcwd()
os.chdir(repository_set.repository_root)
for repository in repository_set.repositories.values():
os.chdir(repository.name)
result = git_pull()
if result.returncode == 0:
print(f'The repository `{repository.name}` has been updated successfully.')
else:
print(f'An error occurred in updating the repository `{repository.name}`')
print(f'{result.stderr}')
print(f'{result.stdout}')
os.chdir(os.path.pardir)
os.chdir(old_working_dir)
def run_update_remote(repository_set, credentials):
def git_push(label):
return pexpect.run(
f'git push {label} --all',
withexitstatus=1,
events={
'(?i)Username for': f'{credentials.username}\n',
'(?i)Password for': f'{credentials.password}\n'
}
)
old_working_dir = os.getcwd()
os.chdir(repository_set.repository_root)
for repository in repository_set.repositories.values():
os.chdir(repository.name)
for label, remote_url in repository.remote_urls.items():
command_output, exit_status = git_push(label)
if exit_status == 0:
print(
f'The remote copy of repository of `{repository.name}` with '
f'the name `{label}` and the URL `{remote_url}` has been '
f'updated successfully.'
)
else:
print(
f'An error occurred in updating the remote copy of the '
f'repository `{repository.name}` to the URL named `{label}` at URL `{remote_url}`.'
)
print(command_output)
os.chdir(os.path.pardir)
os.chdir(old_working_dir)
def run_command(command, repository_set):
if command.is_list():
run_list(repository_set)
elif command.is_update_local():
run_update_local(repository_set)
elif command.is_update_remote():
run_update_remote(repository_set, command.credentials)
else:
raise ValueError(f'The command name `{command.name}` is not a valid command.')
def arg_parser():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(
title='subcommands',
description='valid subcommands',
help='subcommand help'
)
# Subparser for the list command.
parser_list = subparsers.add_parser(
'list',
help='Search a directory for git repositories'
)
parser_list.add_argument(
'path', type=str,
help='The path to the git repository directory'
)
# Subparser for the update-local command.
parser_update_local = subparsers.add_parser(
'update-local',
help='Update the local copies of each git repository'
)
parser_update_local.add_argument(
'path', type=str,
help='The path to the git repository directory'
)
# Subparser for the update-remote command.
parser_update_remote = subparsers.add_parser(
'update-remote',
help='Update the remote copies of each git repository'
)
parser_update_remote.add_argument(
'-u', '--username',
help='Username for remote git repositories'
)
parser_update_remote.add_argument(
'-p', '--password',
help='Password or personal access token for remote git repositories'
)
parser_update_remote.add_argument(
'path', type=str,
help='The path to the git repository directory'
)
return parser
def parse_args(args):
command_args = arg_parser().parse_args(args[1:])
if args[1] == 'list':
path = command_args.path
return Command(args[1], path)
elif args[1] == 'update-local':
path = command_args.path
return Command(args[1], path)
elif args[1] == 'update-remote':
username = command_args.username
password = command_args.password
path = command_args.path
return Command(args[1], path, username, password)
else:
raise ValueError(f'The argument `{args[1]}` is not a valid command name.')
def usage():
return ''.join((
'USAGE:\n',
'List the git repositories in a directory\n',
'`upfork list /path/to/git/repository/forks/`\n',
'Update the local copies of the git repositories in a directory\n',
'`upfork update-local /path/to/git/repository/forks/`\n',
'Update the remote copies of the git repositories in a directory\n',
'`upfork update-remote /path/to/git/repository/forks/`\n'
))
def main():
if len(sys.argv) < 3:
sys.exit(usage())
try:
command = parse_args(sys.argv)
except:
sys.exit(usage())
if not os.path.exists(command.repository_root):
sys.exit(f'Path does not exist: {command.repository_root}')
repository_set = scan_repository_root(command.repository_root)
run_command(command, repository_set)
|
py | 1a4e4f6daf6063d81a8fb49f6cd1c121619d5df6 | #!/usr/bin/env python3
# Copyright 2018 The SwiftShader Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from subprocess import run
import argparse
import multiprocessing
import os
import re
import shutil
LLVM_DIR = os.path.abspath(os.path.join('..', 'llvm'))
LLVM_CONFIGS = os.path.abspath(os.path.join('..', 'configs'))
LLVM_OBJS = os.path.join(os.getcwd(), 'llvm_objs')
LLVM_TARGETS = [
('AArch64', ('__aarch64__',)),
('ARM', ('__arm__',)),
('X86', ('__i386__', '__x86_64__')),
('Mips', ('__mips__',)),
]
LLVM_TRIPLES = {
'android': [
('__x86_64__', 'x86_64-linux-android'),
('__i386__', 'i686-linux-android'),
('__arm__', 'armv7-linux-androideabi'),
('__aarch64__', 'aarch64-linux-android'),
],
'linux': [
('__x86_64__', 'x86_64-unknown-linux-gnu'),
('__i386__', 'i686-pc-linux-gnu'),
('__arm__', 'armv7-linux-gnueabihf'),
('__aarch64__', 'aarch64-linux-gnu'),
('__mips__', 'mipsel-linux-gnu'),
('__mips64', 'mips64el-linux-gnuabi64'),
],
'darwin': [
('__x86_64__', 'x86_64-apple-darwin'),
],
'windows': [
('__x86_64__', 'x86_64-pc-win32'),
('__i386__', 'i686-pc-win32'),
('__arm__', 'armv7-pc-win32'),
('__aarch64__', 'aarch64-pc-win32'),
('__mips__', 'mipsel-pc-win32'),
('__mips64', 'mips64el-pc-win32'),
],
}
LLVM_OPTIONS = [
'-DCMAKE_BUILD_TYPE=Release',
'-DLLVM_TARGETS_TO_BUILD=' + ';'.join(t[0] for t in LLVM_TARGETS),
'-DLLVM_ENABLE_THREADS=OFF',
'-DLLVM_ENABLE_TERMINFO=OFF',
'-DLLVM_ENABLE_LIBXML2=OFF',
'-DLLVM_ENABLE_LIBEDIT=OFF',
'-DLLVM_ENABLE_LIBPFM=OFF',
'-DLLVM_ENABLE_ZLIB=OFF',
]
def _parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('name', help='destination name',
choices=['android', 'linux', 'darwin', 'windows'])
parser.add_argument('-j', '--jobs', help='parallel compilation', type=int)
return parser.parse_args()
def build_llvm(name, num_jobs):
"""Build LLVM and get all generated files."""
if num_jobs is None:
num_jobs = multiprocessing.cpu_count()
"""On Windows we need to have CMake generate build files for the 64-bit
Visual Studio host toolchain."""
host = '-Thost=x64' if name == 'windows' else ''
os.makedirs(LLVM_OBJS, exist_ok=True)
run(['cmake', host, LLVM_DIR] + LLVM_OPTIONS, cwd=LLVM_OBJS)
run(['cmake', '--build', '.', '-j', str(num_jobs)], cwd=LLVM_OBJS)
def list_files(src_base, src, dst_base, suffixes):
"""Enumerate the files that are under `src` and end with one of the
`suffixes` and yield the source path and the destination path."""
src_base = os.path.abspath(src_base)
src = os.path.join(src_base, src)
for base_dir, dirnames, filenames in os.walk(src):
for filename in filenames:
if os.path.splitext(filename)[1] in suffixes:
relative = os.path.relpath(base_dir, src_base)
yield (os.path.join(base_dir, filename),
os.path.join(dst_base, relative, filename))
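# Illustrative note (assumed): list_files(LLVM_OBJS, 'lib/IR', dst, {'.inc'}) yields
# (source, destination) pairs for every .inc file under <llvm_objs>/lib/IR, with the
# path relative to src_base preserved under dst.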
def copy_common_generated_files(dst_base):
"""Copy platform-independent generated files."""
suffixes = {'.inc', '.h', '.def'}
subdirs = [
os.path.join('include', 'llvm', 'IR'),
os.path.join('include', 'llvm', 'Support'),
os.path.join('lib', 'IR'),
os.path.join('lib', 'Target', 'AArch64'),
os.path.join('lib', 'Target', 'ARM'),
os.path.join('lib', 'Target', 'X86'),
os.path.join('lib', 'Target', 'Mips'),
os.path.join('lib', 'Transforms', 'InstCombine'),
]
for subdir in subdirs:
for src, dst in list_files(LLVM_OBJS, subdir, dst_base, suffixes):
os.makedirs(os.path.dirname(dst), exist_ok=True)
shutil.copyfile(src, dst)
def copy_platform_file(platform, src, dst):
"""Copy platform-dependent generated files and add platform-specific
modifications."""
# LLVM configuration patterns to be post-processed.
llvm_target_pattern = re.compile('^LLVM_[A-Z_]+\\(([A-Za-z0-9_]+)\\)$')
llvm_native_pattern = re.compile(
'^#define LLVM_NATIVE_([A-Z]+) (LLVMInitialize)?(.*)$')
llvm_triple_pattern = re.compile('^#define (LLVM_[A-Z_]+_TRIPLE) "(.*)"$')
llvm_define_pattern = re.compile('^#define ([A-Za-z0-9_]+) (.*)$')
# LLVM configurations to be undefined.
undef_names = [
'BACKTRACE_HEADER',
'ENABLE_BACKTRACES',
'ENABLE_CRASH_OVERRIDES',
'HAVE_BACKTRACE',
'HAVE_POSIX_SPAWN',
'HAVE_PTHREAD_GETNAME_NP',
'HAVE_PTHREAD_SETNAME_NP',
'HAVE_TERMIOS_H',
'HAVE_ZLIB_H',
'HAVE__UNWIND_BACKTRACE',
]
# Build architecture-specific conditions.
conds = {}
for arch, defs in LLVM_TARGETS:
conds[arch] = ' || '.join('defined(' + v + ')' for v in defs)
# Get a set of platform-specific triples.
triples = LLVM_TRIPLES[platform]
with open(src, 'r') as src_file:
os.makedirs(os.path.dirname(dst), exist_ok=True)
with open(dst, 'w') as dst_file:
for line in src_file:
if line == '#define LLVM_CONFIG_H\n':
print(line, file=dst_file, end='')
print('', file=dst_file)
print('#if !defined(__i386__) && defined(_M_IX86)', file=dst_file)
print('#define __i386__ 1', file=dst_file)
print('#endif', file=dst_file)
print('', file=dst_file)
print('#if !defined(__x86_64__) && (defined(_M_AMD64) || defined (_M_X64))', file=dst_file)
print('#define __x86_64__ 1', file=dst_file)
print('#endif', file=dst_file)
print('', file=dst_file)
match = llvm_target_pattern.match(line)
if match:
arch = match.group(1)
print('#if ' + conds[arch], file=dst_file)
print(line, file=dst_file, end='')
print('#endif', file=dst_file)
continue
match = llvm_native_pattern.match(line)
if match:
name = match.group(1)
init = match.group(2) or ''
arch = match.group(3)
end = ''
if arch.lower().endswith(name.lower()):
end = arch[-len(name):]
directive = '#if '
for arch, defs in LLVM_TARGETS:
print(directive + conds[arch], file=dst_file)
print('#define LLVM_NATIVE_' + name + ' ' +
init + arch + end, file=dst_file)
directive = '#elif '
print('#else', file=dst_file)
print('#error "unknown architecture"', file=dst_file)
print('#endif', file=dst_file)
continue
match = llvm_triple_pattern.match(line)
if match:
name = match.group(1)
directive = '#if'
for defs, triple in triples:
print(directive + ' defined(' + defs + ')',
file=dst_file)
print('#define ' + name + ' "' + triple + '"',
file=dst_file)
directive = '#elif'
print('#else', file=dst_file)
print('#error "unknown architecture"', file=dst_file)
print('#endif', file=dst_file)
continue
match = llvm_define_pattern.match(line)
if match and match.group(1) in undef_names:
print('/* #undef ' + match.group(1) + ' */', file=dst_file)
continue
print(line, file=dst_file, end='')
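# Worked illustration of the rewriting above (hypothetical header line, shown
# only as a comment): a generated line such as `LLVM_TARGET(AArch64)` gets
# wrapped in the architecture condition assembled from LLVM_TARGETS, e.g.
#   #if defined(__aarch64__)
#   LLVM_TARGET(AArch64)
#   #endif
# (the exact defined() macros come from the LLVM_TARGETS table defined earlier),
# so a single checked-in header can serve every supported architecture.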
def copy_platform_generated_files(platform, dst_base):
"""Copy platform-specific generated files."""
suffixes = {'.inc', '.h', '.def'}
src_dir = os.path.join('include', 'llvm', 'Config')
for src, dst in list_files(LLVM_OBJS, src_dir, dst_base, suffixes):
copy_platform_file(platform, src, dst)
def main():
args = _parse_args()
build_llvm(args.name, args.jobs)
copy_common_generated_files(os.path.join(LLVM_CONFIGS, 'common'))
copy_platform_generated_files(
args.name, os.path.join(LLVM_CONFIGS, args.name))
if __name__ == '__main__':
main()
|
py | 1a4e4f913b585a4e44da7276a8b5f88fb26ad331 | """Serializers for Alquileres (rentals)."""
#Django REST Framework
from rest_framework import serializers
#Model
from maquinaria.alquileres.models import Alquiler
from maquinaria.maquinas.models import Maquina
class AlquilerModelSerializer(serializers.ModelSerializer):
"""Modelo Serializer de Cliente"""
class Meta:
"""Clase Meta"""
model = Alquiler
fields = (
'id', 'cliente',
'maquina', 'fecha_inicio',
'fecha_final', 'precio_alquiler'
)
class Update(serializers.Serializer):
def save(self):
maquina=Maquina.objects.get(id=1)
maquina.estado=False
maquina.save()
|
py | 1a4e514edc8a50704d3a282435fc44a33da929b8 |
from PIL import Image
# from PIL import GifImagePlugin
import cv2
import numpy as np
import os
#root_dir = os.path.dirname('/Users/apple/Desktop/414project/')
input_video = Image.open("./walking.gif")
frame_length = input_video.n_frames
def track_position_per_frame(f_num):
image = cv2.imread('./walking_frames/frame{}.png'.format(f_num), cv2.IMREAD_UNCHANGED)
#make mask of where the transparent bits are
bg_mask = image[:,:,3] == 0
fg_mask = image[:,:,3] != 0
#replace areas of transparency with black and not transparent with white
image[bg_mask] = [0, 0, 0, 0] #black
image[fg_mask] = [255,255,255, 255] #white
#new image without alpha channel...
img = cv2.cvtColor(image, cv2.COLOR_BGRA2BGR)
white_ = [255,255,255]
#zipped = np.argwhere(img == white_)[0]
zipped_coordinates = np.argwhere(np.all(img == white_,axis =2))
#current_position['YX'] = zipped_coordinates[0]
current_position = (zipped_coordinates[-1].tolist())
current_position = [i * 3 for i in current_position]
current_position[0],current_position[1] = current_position[1],current_position[0]
return current_position
# remain to be changed
def draw_points(img, current_position):
# img = cv2.circle(img, (top_X,top_Y), radius=4, color=(0, 0, 255), thickness=-1)
new_img =cv2.circle(img, (current_position[1],current_position[0]), radius=4, color=(0, 0, 255), thickness=-1)
cv2.imshow('foot position',new_img)
cv2.waitKey()
def main(frame_length):
input_video = Image.open("./walking.gif")
frame_length = input_video.n_frames
motion_trail = []
cut_frames = list(range(0, frame_length, 15))
for i in cut_frames:
input_video.seek(i)
input_video.save('./walking_frames/frame{}.png'.format(i))
motion_trail.append(track_position_per_frame(i))
print(motion_trail)
with open("2d_coordinates.txt", 'w') as file:
for row in motion_trail:
s = " ".join(map(str, row))
file.write(s+'\n')
if __name__ == '__main__':
main(frame_length)
|
py | 1a4e52ac701fb957d187cae0e6e5bc90596de634 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
#    the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import time
import sys
import threading
import random
import zmq
def work_routine():
ctx = zmq.Context()
worker = ctx.socket(zmq.DEALER)
worker.setsockopt(zmq.IDENTITY, 'A')
worker.connect('tcp://localhost:5555')
print 'worker[A] init success ...'
random.seed()
total = 0
while True:
try:
request = worker.recv()
if request == 'END':
print 'worker[A] completed %d tasks' % total
break
total += 1
time.sleep(random.randint(1, 100) * 0.001)
except KeyboardInterrupt:
break
worker.close()
if __name__ == '__main__':
ctx = zmq.Context()
broker = ctx.socket(zmq.ROUTER)
broker.bind('tcp://*:5555')
thread = threading.Thread(target=work_routine, args=())
thread.start()
time.sleep(1)
print 'broker init success ...'
for _ in range(10):
broker.send('A', zmq.SNDMORE)
broker.send('this is the workload')
broker.send('A', zmq.SNDMORE)
broker.send('END')
broker.close()
|
py | 1a4e536798599df1fbb957bb6529253a5add4865 | # coding: utf-8
from __future__ import absolute_import
import pytest
try:
import vtk
except Exception:
vtk = None
from six import string_types
from panel.models.vtk import VTKPlot
from panel.pane import Pane, PaneBase, VTK
vtk_available = pytest.mark.skipif(vtk is None, reason="requires vtk")
def make_render_window():
cone = vtk.vtkConeSource()
coneMapper = vtk.vtkPolyDataMapper()
coneMapper.SetInputConnection(cone.GetOutputPort())
coneActor = vtk.vtkActor()
coneActor.SetMapper(coneMapper)
ren = vtk.vtkRenderer()
ren.AddActor(coneActor)
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
return renWin
def test_get_vtk_pane_type_from_url():
url = r'https://raw.githubusercontent.com/Kitware/vtk-js/master/Data/StanfordDragon.vtkjs'
assert PaneBase.get_pane_type(url) is VTK
def test_get_vtk_pane_type_from_file():
file = r'StanfordDragon.vtkjs'
assert PaneBase.get_pane_type(file) is VTK
@vtk_available
def test_get_vtk_pane_type_from_render_window():
renWin = make_render_window()
assert PaneBase.get_pane_type(renWin) is VTK
def test_vtk_pane_from_url(document, comm):
url = r'https://raw.githubusercontent.com/Kitware/vtk-js/master/Data/StanfordDragon.vtkjs'
pane = Pane(url)
# Create pane
model = pane.get_root(document, comm=comm)
assert isinstance(model, VTKPlot)
assert pane._models[model.ref['id']][0] is model
assert isinstance(model.data, string_types)
@vtk_available
def test_vtk_data_array_dump():
from panel.pane.vtk.vtkjs_serializer import _dump_data_array
root_keys = ['ref', 'vtkClass', 'name', 'dataType',
'numberOfComponents', 'size', 'ranges']
renWin = make_render_window()
renderers = list(renWin.GetRenderers())
ren_props = list(renderers[0].GetViewProps())
mapper = ren_props[0].GetMapper()
mapper.Update() # create data
data = mapper.GetInput().GetPoints().GetData()
scDir = []
root = _dump_data_array(scDir, '', 'test', data)
assert len(set(root_keys) - set(root.keys())) == 0
assert len(scDir) == 1
assert isinstance(scDir[0][0], string_types)
assert isinstance(scDir[0][1], bytes)
|
py | 1a4e54206f9359089b12df4737fc26ee7d0f9ae5 | from abc import ABCMeta, abstractmethod
from collections import OrderedDict
from blenderneuron.activity import Activity
class RootGroup:
__metaclass__ = ABCMeta
def __init__(self):
self.name = ""
self.roots = OrderedDict()
self.import_synapses = False
self.interaction_granularity = 'Cell'
self.record_activity = False
self.recording_granularity = 'Cell'
self.record_variable = "v"
self.recording_period = 1.0
self.recording_time_start = 0
self.recording_time_end = 0
self.activity = Activity()
def __str__(self):
return self.name
def clear_activity(self):
# Clear group level activity
self.activity.clear()
# Cell and section level activity
for root in self.roots.values():
root.clear_activity(recursive=True)
# Segment level
for root in self.roots.values():
root.clear_3d_segment_activity()
def to_dict(self,
include_activity=False,
include_root_children=False,
include_coords_and_radii=False):
"""
:param include_activity:
:param include_root_children:
:param include_coords_and_radii:
:return:
"""
result = {
"name": self.name,
"roots": [root.to_dict(include_activity, include_root_children, include_coords_and_radii) for root in self.roots.values()],
"import_synapses": self.import_synapses,
"interaction_granularity": self.interaction_granularity,
"record_activity": self.record_activity,
"recording_granularity": self.recording_granularity,
"record_variable": self.record_variable,
"recording_period": self.recording_period,
"recording_time_start": self.recording_time_start,
"recording_time_end": self.recording_time_end,
}
if include_activity:
result.update({
"activity": self.activity.to_dict(),
})
return result
|
py | 1a4e54f00fcfb5a61c64adcb4d668c291f801cb7 | # Generated by Django 3.2.4 on 2021-06-22 01:38
import uuid
import django.core.validators
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
import djstripe.enums
import djstripe.fields
class Migration(migrations.Migration):
dependencies = [
("djstripe", "0008_2_5"),
]
operations = [
migrations.CreateModel(
name="WebhookEndpoint",
fields=[
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
(
"created",
djstripe.fields.StripeDateTimeField(blank=True, null=True),
),
(
"metadata",
djstripe.fields.JSONField(blank=True, null=True),
),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
(
"api_version",
models.CharField(
blank=True,
help_text="The API version events are rendered as for this webhook endpoint.",
max_length=10,
),
),
(
"enabled_events",
djstripe.fields.JSONField(),
),
(
"secret",
models.CharField(
blank=True,
help_text="The endpoint's secret, used to generate webhook signatures.",
max_length=256,
editable=False,
),
),
(
"status",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.WebhookEndpointStatus, max_length=8
),
),
(
"url",
models.URLField(
help_text="The URL of the webhook endpoint.", max_length=2048
),
),
(
"application",
models.CharField(
blank=True,
help_text="The ID of the associated Connect application.",
max_length=255,
),
),
(
"djstripe_owner_account",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.account",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
(
"djstripe_uuid",
models.UUIDField(
null=True,
unique=True,
default=uuid.uuid4,
help_text="A UUID specific to dj-stripe generated for the endpoint",
),
),
],
options={"get_latest_by": "created", "abstract": False},
),
migrations.CreateModel(
name="UsageRecordSummary",
fields=[
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
(
"created",
djstripe.fields.StripeDateTimeField(blank=True, null=True),
),
(
"period",
djstripe.fields.JSONField(blank=True, null=True),
),
(
"period_end",
djstripe.fields.StripeDateTimeField(blank=True, null=True),
),
(
"period_start",
djstripe.fields.StripeDateTimeField(blank=True, null=True),
),
(
"total_usage",
models.PositiveIntegerField(
help_text="The quantity of the plan to which the customer should be subscribed."
),
),
(
"djstripe_owner_account",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.account",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
(
"invoice",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="usage_record_summaries",
to="djstripe.invoice",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
(
"subscription_item",
djstripe.fields.StripeForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="usage_record_summaries",
to="djstripe.subscriptionitem",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
],
options={"get_latest_by": "created", "abstract": False},
),
migrations.AddField(
model_name="applicationfee",
name="account",
field=djstripe.fields.StripeForeignKey(
default=1,
on_delete=django.db.models.deletion.PROTECT,
related_name="application_fees",
to="djstripe.account",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
preserve_default=False,
),
migrations.AddField(
model_name="customer",
name="deleted",
field=models.BooleanField(
blank=True,
default=False,
help_text="Whether the Customer instance has been deleted upstream in Stripe or not.",
null=True,
),
),
migrations.AddField(
model_name="dispute",
name="balance_transaction",
field=djstripe.fields.StripeForeignKey(
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="disputes",
to="djstripe.balancetransaction",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
migrations.AddField(
model_name="dispute",
name="charge",
field=djstripe.fields.StripeForeignKey(
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="disputes",
to="djstripe.charge",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
migrations.AddField(
model_name="dispute",
name="payment_intent",
field=djstripe.fields.StripeForeignKey(
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="disputes",
to="djstripe.paymentintent",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
migrations.AddField(
model_name="dispute",
name="balance_transactions",
field=djstripe.fields.JSONField(
default=list,
),
),
migrations.AlterField(
model_name="taxrate",
name="percentage",
field=djstripe.fields.StripePercentField(
decimal_places=4,
max_digits=7,
validators=[
django.core.validators.MinValueValidator(1),
django.core.validators.MaxValueValidator(100),
],
),
),
migrations.AlterField(
model_name="transfer",
name="destination",
field=djstripe.fields.StripeIdField(
max_length=255,
null=True,
),
),
migrations.AddField(
model_name="usagerecord",
name="action",
field=djstripe.fields.StripeEnumField(
default="increment",
enum=djstripe.enums.UsageAction,
max_length=9,
),
),
migrations.AddField(
model_name="usagerecord",
name="timestamp",
field=djstripe.fields.StripeDateTimeField(
blank=True,
null=True,
),
),
migrations.AddField(
model_name="webhookeventtrigger",
name="stripe_trigger_account",
field=djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.account",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
migrations.RemoveField(model_name="usagerecord", name="description"),
migrations.RemoveField(model_name="usagerecord", name="metadata"),
migrations.AlterField(
model_name="paymentmethod",
name="type",
field=djstripe.fields.StripeEnumField(
enum=djstripe.enums.PaymentMethodType, max_length=17
),
),
migrations.AddField(
model_name="paymentmethod",
name="acss_debit",
field=djstripe.fields.JSONField(blank=True, null=True),
),
migrations.AddField(
model_name="paymentmethod",
name="afterpay_clearpay",
field=djstripe.fields.JSONField(blank=True, null=True),
),
migrations.AddField(
model_name="paymentmethod",
name="boleto",
field=djstripe.fields.JSONField(blank=True, null=True),
),
migrations.AddField(
model_name="paymentmethod",
name="grabpay",
field=djstripe.fields.JSONField(blank=True, null=True),
),
migrations.AddField(
model_name="paymentmethod",
name="wechat_pay",
field=djstripe.fields.JSONField(blank=True, null=True),
),
migrations.AddField(
model_name="subscription",
name="latest_invoice",
field=djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="+",
to="djstripe.invoice",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
migrations.AlterField(
model_name="customer",
name="delinquent",
field=models.BooleanField(
blank=True,
default=False,
help_text="Whether or not the latest charge for the customer's latest invoice has failed.",
null=True,
),
),
]
|
bzl | 1a4e556f7029dd206ce725b2796637d5db6ffdc0 | # Copyright 2021 The KubeVirt Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def containertag_x_def():
stamp_pkg = "kubevirt.io/kubevirt/tests/conformance"
stamp_var = "containerTag"
return {"%s.%s" % (stamp_pkg, stamp_var): "{%s}" % stamp_var}
|
py | 1a4e567ebfeee02b2caadc5fdd48c0aa7e400f10 | from .trace import (
Trace,
TraceEventDurationBegin,
TraceEventDurationEnd,
TraceEventCounter,
TraceEventInstant,
TraceEventMetadata,
TraceEventFlowStart,
TraceEventFlowEnd,
EventChain,
)
from .analyzer import TraceAnalyzer
|
py | 1a4e591a9b5f6e8d2836b4404be7626c0c1f366e | #!/usr/bin/env python
# coding: utf-8
import logging
import os
import pickle
import numpy as np
import pandas as pd
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
from lightautoml.automl.presets.tabular_presets import TabularAutoML
from lightautoml.dataset.roles import DatetimeRole
from lightautoml.tasks import Task
def test_tabular_automl_preset():
np.random.seed(42)
logging.basicConfig(format='[%(asctime)s] (%(levelname)s): %(message)s', level=logging.DEBUG)
data = pd.read_csv('../example_data/test_data_files/sampled_app_train.csv')
data['BIRTH_DATE'] = (np.datetime64('2018-01-01') + data['DAYS_BIRTH'].astype(np.dtype('timedelta64[D]'))).astype(str)
data['EMP_DATE'] = (np.datetime64('2018-01-01') + np.clip(data['DAYS_EMPLOYED'], None, 0).astype(np.dtype('timedelta64[D]'))
).astype(str)
data['report_dt'] = np.datetime64('2018-01-01')
data['constant'] = 1
data['allnan'] = np.nan
data.drop(['DAYS_BIRTH', 'DAYS_EMPLOYED'], axis=1, inplace=True)
train, test = train_test_split(data, test_size=2000, random_state=42)
roles = {'target': 'TARGET',
DatetimeRole(base_date=True, seasonality=(), base_feats=False): 'report_dt',
}
task = Task('binary', )
automl = TabularAutoML(task=task, timeout=600, general_params={
'use_algos': [['linear_l2', 'lgb', ], ['linear_l2', 'lgb']],
'nested_cv': True,
'skip_conn': True,
}, nested_cv_params={
'cv': 5,
'n_folds': None
})
oof_pred = automl.fit_predict(train, roles=roles)
test_pred = automl.predict(test)
not_nan = np.any(~np.isnan(oof_pred.data), axis=1)
logging.debug('Check scores...')
print('OOF score: {}'.format(roc_auc_score(train[roles['target']].values[not_nan], oof_pred.data[not_nan][:, 0])))
print('TEST score: {}'.format(roc_auc_score(test[roles['target']].values, test_pred.data[:, 0])))
logging.debug('Pickle automl')
with open('automl.pickle', 'wb') as f:
pickle.dump(automl, f)
logging.debug('Load pickled automl')
with open('automl.pickle', 'rb') as f:
automl = pickle.load(f)
logging.debug('Predict loaded automl')
test_pred = automl.predict(test)
logging.debug('TEST score, loaded: {}'.format(roc_auc_score(test['TARGET'].values, test_pred.data[:, 0])))
os.remove('automl.pickle')
|
py | 1a4e592c19af1732f9866c13311b952d1c9a67a7 | #!/usr/bin/python
import sys
import re
with open(sys.argv[1], 'r') as f:
for line in f:
line = re.sub(r'<.*?>', "", line) # Regex
line=line.rstrip() # strips the line break
if len(line) > 0: # insures that there is some text in line
print line
|
py | 1a4e5936813810712811631611b92880c2e626bf | import warnings
from geopy.compat import urlencode
from geopy.exc import GeocoderParseError, GeocoderServiceError
from geopy.geocoders.base import DEFAULT_SENTINEL, Geocoder
from geopy.location import Location
from geopy.util import logger
__all__ = ("Yandex", )
class Yandex(Geocoder):
"""Yandex geocoder.
Documentation at:
https://tech.yandex.com/maps/doc/geocoder/desc/concepts/input_params-docpage/
.. versionadded:: 1.5.0
"""
api_path = '/1.x/'
def __init__(
self,
api_key=None,
lang=None,
timeout=DEFAULT_SENTINEL,
proxies=DEFAULT_SENTINEL,
user_agent=None,
scheme=None,
format_string=None,
ssl_context=DEFAULT_SENTINEL,
):
"""
.. versionchanged:: 1.14.0
Default scheme has been changed from ``http`` to ``https``.
:param str api_key: Yandex API key (not obligatory)
https://tech.yandex.ru/maps/keys/get/
:param str lang: response locale, the following locales are
supported: ``"ru_RU"`` (default), ``"uk_UA"``, ``"be_BY"``,
``"en_US"``, ``"tr_TR"``.
:param int timeout:
See :attr:`geopy.geocoders.options.default_timeout`.
:param dict proxies:
See :attr:`geopy.geocoders.options.default_proxies`.
:param str user_agent:
See :attr:`geopy.geocoders.options.default_user_agent`.
.. versionadded:: 1.12.0
:param str scheme:
See :attr:`geopy.geocoders.options.default_scheme`.
.. versionadded:: 1.14.0
:param str format_string:
See :attr:`geopy.geocoders.options.default_format_string`.
.. versionadded:: 1.14.0
:type ssl_context: :class:`ssl.SSLContext`
:param ssl_context:
See :attr:`geopy.geocoders.options.default_ssl_context`.
.. versionadded:: 1.14.0
"""
super(Yandex, self).__init__(
format_string=format_string,
scheme=scheme,
timeout=timeout,
proxies=proxies,
user_agent=user_agent,
ssl_context=ssl_context,
)
self.api_key = api_key
self.lang = lang
domain = 'geocode-maps.yandex.ru'
self.api = '%s://%s%s' % (self.scheme, domain, self.api_path)
def geocode(self, query, exactly_one=True, timeout=DEFAULT_SENTINEL):
"""
Return a location point by address.
:param str query: The address or query you wish to geocode.
:param bool exactly_one: Return one result or a list of results, if
available.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
:rtype: ``None``, :class:`geopy.location.Location` or a list of them, if
``exactly_one=False``.
"""
params = {
'geocode': self.format_string % query,
'format': 'json'
}
if self.api_key:
params['apikey'] = self.api_key
if self.lang:
params['lang'] = self.lang
if exactly_one:
params['results'] = 1
url = "?".join((self.api, urlencode(params)))
logger.debug("%s.geocode: %s", self.__class__.__name__, url)
return self._parse_json(
self._call_geocoder(url, timeout=timeout),
exactly_one,
)
def reverse(
self,
query,
exactly_one=DEFAULT_SENTINEL,
timeout=DEFAULT_SENTINEL,
kind=None,
):
"""
Return an address by location point.
:param query: The coordinates for which you wish to obtain the
closest human-readable addresses.
:type query: :class:`geopy.point.Point`, list or tuple of ``(latitude,
longitude)``, or string as ``"%(latitude)s, %(longitude)s"``.
:param bool exactly_one: Return one result or a list of results, if
available.
.. versionchanged:: 1.14.0
Default value for ``exactly_one`` was ``False``, which differs
from the conventional default across geopy. Please always pass
this argument explicitly, otherwise you would get a warning.
In geopy 2.0 the default value will become ``True``.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
:param str kind: Type of toponym. Allowed values: `house`, `street`, `metro`,
`district`, `locality`.
.. versionadded:: 1.14.0
:rtype: ``None``, :class:`geopy.location.Location` or a list of them, if
``exactly_one=False``.
"""
if exactly_one is DEFAULT_SENTINEL:
warnings.warn('%s.reverse: default value for `exactly_one` '
'argument will become True in geopy 2.0. '
'Specify `exactly_one=False` as the argument '
'explicitly to get rid of this warning.' % type(self).__name__,
DeprecationWarning, stacklevel=2)
exactly_one = False
try:
point = self._coerce_point_to_string(query, "%(lon)s,%(lat)s")
except ValueError:
raise ValueError("Must be a coordinate pair or Point")
params = {
'geocode': point,
'format': 'json'
}
if self.api_key:
params['apikey'] = self.api_key
if self.lang:
params['lang'] = self.lang
if kind:
params['kind'] = kind
url = "?".join((self.api, urlencode(params)))
logger.debug("%s.reverse: %s", self.__class__.__name__, url)
return self._parse_json(
self._call_geocoder(url, timeout=timeout),
exactly_one
)
def _parse_json(self, doc, exactly_one):
"""
Parse JSON response body.
"""
if doc.get('error'):
raise GeocoderServiceError(doc['error']['message'])
try:
places = doc['response']['GeoObjectCollection']['featureMember']
except KeyError:
raise GeocoderParseError('Failed to parse server response')
def parse_code(place):
"""
Parse each record.
"""
try:
place = place['GeoObject']
except KeyError:
raise GeocoderParseError('Failed to parse server response')
longitude, latitude = [
float(_) for _ in place['Point']['pos'].split(' ')
]
name_elements = ['name', 'description']
location = ', '.join([place[k] for k in name_elements if place.get(k)])
return Location(location, (latitude, longitude), place)
if exactly_one:
try:
return parse_code(places[0])
except IndexError:
return None
else:
return [parse_code(place) for place in places]
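# Hedged usage sketch (not part of geopy itself): it needs network access and,
# depending on your Yandex plan, an API key; the address and coordinates below
# are illustrative values only.
def _example_yandex_usage(api_key=None):
    geolocator = Yandex(api_key=api_key, lang='en_US')
    location = geolocator.geocode('Moscow, Red Square')
    if location is not None:
        print(location.latitude, location.longitude)
    # Reverse lookup; `kind` narrows the toponym type as documented above.
    address = geolocator.reverse('55.7539, 37.6208', exactly_one=True, kind='house')
    print(address)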
|
py | 1a4e59c3299a13bd5db70055f522353816ff1b3e | #!/usr/bin/env python
print "Test"
from kuri_msgs.msg import *
import nav_msgs.msg as nav_msgs
import geometry_msgs.msg as gm
import tf.transformations
import rospy
from geometry_msgs.msg import Pose
from tf.transformations import quaternion_from_euler
from math import *
def send_task():
pub = rospy.Publisher('kuri_msgs/NavTasks', NavTasks, queue_size=1,latch=True)
rospy.init_node('uav1_task_allocater', anonymous=True)
rate = rospy.Rate(10) # 10hz
obj = Object()
objs = Objects()
obj.width = 100
obj.height = 200
obj.pose.pose.position = gm.Point(0,0,10.0)
objs.objects.append(obj)
navtasks = NavTasks()
navtask = NavTask()
path = nav_msgs.Path()
pose = gm.PoseStamped()
pose.header.stamp = rospy.Time.now()
pose.header.frame_id = 'map'
pose.pose.position = gm.Point(0,0,0)
pose.pose.orientation = gm.Quaternion(*quaternion_from_euler(0, 0, 0))
path.poses.append(pose)
navtask.path = path
navtask.uav_id = 1
navtask.uav_name = 'UAV1'
navtask.object = obj
navtasks.tasks.append(navtask)
pub.publish(navtasks)
print "Task Published"
while not rospy.is_shutdown():
#yaw_degrees = 0 # North
#yaw = radians(yaw_degrees)
#quaternion = quaternion_from_euler(0, 0, yaw)
#msg.pose.orientation = SP.Quaternion(*quaternion)
#rospy.loginfo(msg.pose)
#hello_str = "hello world %s" % rospy.get_time()
#rospy.loginfo(hello_str)
#pub.publish(hello_str)
rate.sleep()
if __name__ == '__main__':
try:
send_task()
except rospy.ROSInterruptException:
pass |
py | 1a4e5a2ddf250f09a8fbbed856cded5d8b999190 | # ----------------------------------------------------------------------------------#
# //////////////////////////////////////////////////////////////////////////////////#
# ----------------------------------------------------------------------------------#
#
# Copyright (C) 2018, StepToSky
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1.Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2.Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and / or other materials provided with the distribution.
# 3.Neither the name of StepToSky nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Contacts: www.steptosky.com
#
# ----------------------------------------------------------------------------------#
# //////////////////////////////////////////////////////////////////////////////////#
# ----------------------------------------------------------------------------------#
import os
from conans import ConanFile, CMake, tools
from vcs_info import VcsInfo
class LibConan(ConanFile):
version = "0.2.1"
name = 'sts-semver'
url = 'https://github.com/steptosky/sts-semver'
license = 'BSD 3-Clause'
description = "Cross-platform C++ library for working with Semantic Versioning. https://semver.org"
author = 'StepToSky <[email protected]>'
settings = "os", "compiler", "build_type", "arch"
options = {'shared': [True, False], "fPIC": [True, False]}
default_options = 'shared=False', "fPIC=False", 'gtest:shared=False', 'gtest:build_gmock=True'
exports = 'vcs_info.py', 'vcs_data'
exports_sources = 'CMakeLists.txt', 'src/*', 'src-test/*', 'include/*', 'cmake/*', 'license*'
generators = 'cmake'
build_test_var = "CONAN_BUILD_TESTING"
test_dir_var = "CONAN_TESTING_REPORT_DIR"
vcs_data = VcsInfo()
def configure(self):
if self.settings.compiler == "Visual Studio" and float(str(self.settings.compiler.version)) < 12:
raise Exception("Visual Studio 12 (2013) or higher is required")
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def requirements(self):
if os.getenv(self.build_test_var, "0") == "1":
self.requires('gtest/1.8.0@bincrafters/stable', private=True)
def build(self):
build_testing = os.getenv(self.build_test_var, "0")
test_dir = os.getenv(self.test_dir_var, "")
cmake = CMake(self)
self.vcs_data.setup_cmake(cmake)
cmake.definitions["BUILD_TESTING"] = 'ON' if build_testing == "1" else 'OFF'
if test_dir:
cmake.definitions["TESTING_REPORT_DIR"] = test_dir
cmake.configure()
cmake.build()
cmake.install()
if build_testing == "1":
cmake.test()
def package(self):
self.copy("license*", src=".", dst="licenses", ignore_case=True, keep_path=False)
def package_info(self):
libDir = '%s' % self.settings.build_type
self.cpp_info.libdirs = [libDir]
self.cpp_info.libs = tools.collect_libs(self, libDir)
# ----------------------------------------------------------------------------------#
# //////////////////////////////////////////////////////////////////////////////////#
# ----------------------------------------------------------------------------------#
|
py | 1a4e5aeb97fd3cd61d92b0f9092d0191cbed91e2 | # This is the class of the input root. Do not edit it.
class BST:
def __init__(self, value):
self.value = value
self.left = None
self.right = None
# O(n) time | O(n) space - where n is the number of nodes in the Binary Tree
def branchSums(root):
sums = []
branchSumsHelper(root, 0, sums)
return sums
def branchSumsHelper(node, runningSum, sums):
if node is None:
return
newRunningSum = runningSum + node.value
if node.left is None and node.right is None:
sums.append(newRunningSum)
branchSumsHelper(node.left, newRunningSum, sums)
branchSumsHelper(node.right, newRunningSum, sums)
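# Usage sketch (illustrative tree, not part of the original exercise): for the
# tree 1 -> (2, 3) the root-to-leaf branch sums are [1+2, 1+3] == [3, 4].
if __name__ == "__main__":
    example_root = BST(1)
    example_root.left = BST(2)
    example_root.right = BST(3)
    print(branchSums(example_root))  # [3, 4]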
|
py | 1a4e5c340d8bbd93d0c165fd63df3895508c7feb | from timemachines.skaters.pmd.pmdinclusion import using_pmd
if using_pmd:
from timemachines.skaters.pmd.pmdskaters import pmd_univariate
from timemachines.skatertools.composition.residualshypocratic import quickly_moving_hypocratic_residual_factory
from timemachines.skatertools.utilities.conventions import Y_TYPE, A_TYPE, E_TYPE, T_TYPE
def pmd_univariate_hypocratic(y: Y_TYPE, s: dict, k: int, a: A_TYPE = None, t: T_TYPE = None, e: E_TYPE = None):
return quickly_moving_hypocratic_residual_factory(f=pmd_univariate, y=y,s=s,k=k,a=a,t=t,e=e)
def pmd_exogenous_hypocratic(y: Y_TYPE, s: dict, k: int, a: A_TYPE = None, t: T_TYPE = None, e: E_TYPE = None):
return quickly_moving_hypocratic_residual_factory(f=pmd_univariate, y=y,s=s,k=k,a=a,t=t,e=e)
PMD_SKATERS_COMPOSED = [ pmd_univariate_hypocratic, pmd_exogenous_hypocratic ]
else:
PMD_SKATERS_COMPOSED = []
|
py | 1a4e5c5eb7e021ab59c8dfd6e127c673c09c76dc |
import torch.nn as nn
import torch.nn.functional as F
from im2mesh.layers import (
ResnetBlockFC, CResnetBlockConv1d,
CBatchNorm1d, CBatchNorm1d_legacy,
ResnetBlockConv1d
)
class Decoder(nn.Module):
''' Decoder class.
It does not perform any form of normalization.
Args:
dim (int): input dimension
z_dim (int): dimension of latent code z
c_dim (int): dimension of latent conditioned code c
hidden_size (int): hidden size of Decoder network
leaky (bool): whether to use leaky ReLUs
'''
def __init__(self, dim=3, z_dim=128, c_dim=128,
hidden_size=128, leaky=False):
super().__init__()
self.z_dim = z_dim
self.c_dim = c_dim
# Submodules
self.fc_p = nn.Linear(dim, hidden_size)
if not z_dim == 0:
self.fc_z = nn.Linear(z_dim, hidden_size)
if not c_dim == 0:
self.fc_c = nn.Linear(c_dim, hidden_size)
self.block0 = ResnetBlockFC(hidden_size)
self.block1 = ResnetBlockFC(hidden_size)
self.block2 = ResnetBlockFC(hidden_size)
self.block3 = ResnetBlockFC(hidden_size)
self.block4 = ResnetBlockFC(hidden_size)
self.fc_out = nn.Linear(hidden_size, 1)
if not leaky:
self.actvn = F.relu
else:
self.actvn = lambda x: F.leaky_relu(x, 0.2)
def forward(self, p, z, c=None, **kwargs):
net = self.fc_p(p)
if self.z_dim != 0:
net_z = self.fc_z(z) #.unsqueeze(1)
net = net + net_z
if self.c_dim != 0:
net_c = self.fc_c(c).unsqueeze(1)
net = net + net_c
# print("net.shape", net.shape)
net = self.block0(net)
net = self.block1(net)
net = self.block2(net)
net = self.block3(net)
net = self.block4(net)
out = self.fc_out(self.actvn(net))
# print("out.shape", out.shape)
out = out.squeeze(-1)
return out
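# Shape sketch for the plain Decoder above (added for illustration, not part of
# the original module): condition on a global code c only, with z_dim=0.
def _decoder_shape_example():
    import torch
    dec = Decoder(dim=3, z_dim=0, c_dim=128, hidden_size=128)
    p = torch.rand(4, 1024, 3)   # query points, (batch, T, dim)
    c = torch.rand(4, 128)       # global conditioning code, (batch, c_dim)
    out = dec(p, None, c)        # occupancy logits, shape (4, 1024)
    return out.shape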
class DecoderCBatchNorm(nn.Module):
''' Decoder with conditional batch normalization (CBN) class.
Args:
dim (int): input dimension
z_dim (int): dimension of latent code z
c_dim (int): dimension of latent conditioned code c
hidden_size (int): hidden size of Decoder network
leaky (bool): whether to use leaky ReLUs
legacy (bool): whether to use the legacy structure
'''
def __init__(self, dim=3, z_dim=128, c_dim=128,
hidden_size=256, leaky=False, legacy=False):
super().__init__()
self.z_dim = z_dim
if not z_dim == 0:
self.fc_z = nn.Linear(z_dim, hidden_size)
self.fc_p = nn.Conv1d(dim, hidden_size, 1)
self.block0 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
self.block1 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
self.block2 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
self.block3 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
self.block4 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
if not legacy:
self.bn = CBatchNorm1d(c_dim, hidden_size)
else:
self.bn = CBatchNorm1d_legacy(c_dim, hidden_size)
self.fc_out = nn.Conv1d(hidden_size, 1, 1)
if not leaky:
self.actvn = F.relu
else:
self.actvn = lambda x: F.leaky_relu(x, 0.2)
def forward(self, p, z, c, **kwargs):
p = p.transpose(1, 2)
batch_size, D, T = p.size()
net = self.fc_p(p)
if self.z_dim != 0:
net_z = self.fc_z(z).unsqueeze(2)
net = net + net_z
net = self.block0(net, c)
net = self.block1(net, c)
net = self.block2(net, c)
net = self.block3(net, c)
net = self.block4(net, c)
out = self.fc_out(self.actvn(self.bn(net, c)))
out = out.squeeze(1)
return out
class DecoderCBatchNorm2(nn.Module):
''' Decoder with CBN class 2.
It differs from the previous one in that the number of blocks can be
chosen.
Args:
dim (int): input dimension
z_dim (int): dimension of latent code z
c_dim (int): dimension of latent conditioned code c
hidden_size (int): hidden size of Decoder network
leaky (bool): whether to use leaky ReLUs
n_blocks (int): number of ResNet blocks
'''
def __init__(self, dim=3, z_dim=0, c_dim=128,
hidden_size=256, n_blocks=5):
super().__init__()
self.z_dim = z_dim
if z_dim != 0:
self.fc_z = nn.Linear(z_dim, c_dim)
self.conv_p = nn.Conv1d(dim, hidden_size, 1)
self.blocks = nn.ModuleList([
CResnetBlockConv1d(c_dim, hidden_size) for i in range(n_blocks)
])
self.bn = CBatchNorm1d(c_dim, hidden_size)
self.conv_out = nn.Conv1d(hidden_size, 1, 1)
self.actvn = nn.ReLU()
def forward(self, p, z, c, **kwargs):
p = p.transpose(1, 2)
batch_size, D, T = p.size()
net = self.conv_p(p)
if self.z_dim != 0:
c = c + self.fc_z(z)
for block in self.blocks:
net = block(net, c)
out = self.conv_out(self.actvn(self.bn(net, c)))
out = out.squeeze(1)
return out
class DecoderCBatchNormNoResnet(nn.Module):
''' Decoder CBN with no ResNet blocks class.
Args:
dim (int): input dimension
z_dim (int): dimension of latent code z
c_dim (int): dimension of latent conditioned code c
hidden_size (int): hidden size of Decoder network
leaky (bool): whether to use leaky ReLUs
'''
def __init__(self, dim=3, z_dim=128, c_dim=128,
hidden_size=256, leaky=False):
super().__init__()
self.z_dim = z_dim
if not z_dim == 0:
self.fc_z = nn.Linear(z_dim, hidden_size)
self.fc_p = nn.Conv1d(dim, hidden_size, 1)
self.fc_0 = nn.Conv1d(hidden_size, hidden_size, 1)
self.fc_1 = nn.Conv1d(hidden_size, hidden_size, 1)
self.fc_2 = nn.Conv1d(hidden_size, hidden_size, 1)
self.fc_3 = nn.Conv1d(hidden_size, hidden_size, 1)
self.fc_4 = nn.Conv1d(hidden_size, hidden_size, 1)
self.bn_0 = CBatchNorm1d(c_dim, hidden_size)
self.bn_1 = CBatchNorm1d(c_dim, hidden_size)
self.bn_2 = CBatchNorm1d(c_dim, hidden_size)
self.bn_3 = CBatchNorm1d(c_dim, hidden_size)
self.bn_4 = CBatchNorm1d(c_dim, hidden_size)
self.bn_5 = CBatchNorm1d(c_dim, hidden_size)
self.fc_out = nn.Conv1d(hidden_size, 1, 1)
if not leaky:
self.actvn = F.relu
else:
self.actvn = lambda x: F.leaky_relu(x, 0.2)
def forward(self, p, z, c, **kwargs):
p = p.transpose(1, 2)
batch_size, D, T = p.size()
net = self.fc_p(p)
if self.z_dim != 0:
net_z = self.fc_z(z).unsqueeze(2)
net = net + net_z
net = self.actvn(self.bn_0(net, c))
net = self.fc_0(net)
net = self.actvn(self.bn_1(net, c))
net = self.fc_1(net)
net = self.actvn(self.bn_2(net, c))
net = self.fc_2(net)
net = self.actvn(self.bn_3(net, c))
net = self.fc_3(net)
net = self.actvn(self.bn_4(net, c))
net = self.fc_4(net)
net = self.actvn(self.bn_5(net, c))
out = self.fc_out(net)
out = out.squeeze(1)
return out
class DecoderBatchNorm(nn.Module):
''' Decoder with batch normalization class.
Args:
dim (int): input dimension
z_dim (int): dimension of latent code z
c_dim (int): dimension of latent conditioned code c
hidden_size (int): hidden size of Decoder network
leaky (bool): whether to use leaky ReLUs
'''
def __init__(self, dim=3, z_dim=128, c_dim=128,
hidden_size=256, leaky=False):
super().__init__()
self.z_dim = z_dim
self.c_dim = c_dim
# Submodules
if not z_dim == 0:
self.fc_z = nn.Linear(z_dim, hidden_size)
if self.c_dim != 0:
self.fc_c = nn.Linear(c_dim, hidden_size)
self.fc_p = nn.Conv1d(dim, hidden_size, 1)
self.block0 = ResnetBlockConv1d(hidden_size)
self.block1 = ResnetBlockConv1d(hidden_size)
self.block2 = ResnetBlockConv1d(hidden_size)
self.block3 = ResnetBlockConv1d(hidden_size)
self.block4 = ResnetBlockConv1d(hidden_size)
self.bn = nn.BatchNorm1d(hidden_size)
self.fc_out = nn.Conv1d(hidden_size, 1, 1)
if not leaky:
self.actvn = F.relu
else:
self.actvn = lambda x: F.leaky_relu(x, 0.2)
def forward(self, p, z, c, **kwargs):
net = self.fc_p(p)
if self.z_dim != 0:
net_z = self.fc_z(z).unsqueeze(2)
net = net + net_z
if self.c_dim != 0:
net_c = self.fc_c(c).unsqueeze(2)
net = net + net_c
net = self.block0(net)
net = self.block1(net)
net = self.block2(net)
net = self.block3(net)
net = self.block4(net)
out = self.fc_out(self.actvn(self.bn(net)))
out = out.squeeze(1)
return out
|
py | 1a4e5ccdc9d39138315da7aa1770641ba1815ec3 | def insertion_sort():
array = [44, 2, 5, 0, 15, 22]
print('Original array:', array, sep=' ')
for index in range(1, len(array)):
key = array[index]
current_index = index - 1
while (current_index >= 0 and array[current_index] > key):
array[current_index + 1] = array[current_index]
current_index -= 1
array[current_index + 1] = key
print('Sorted array:', array, sep=' ')
if __name__ == "__main__":
insertion_sort() |
py | 1a4e5d3a9b25fca3628edf3650a813e51926d210 | from setuptools import setup
from setuptools import find_packages
NAME = "torbjorn"
AUTHOR = "Ailln"
EMAIL = "[email protected]"
URL = "https://github.com/Ailln/torbjorn"
LICENSE = "MIT License"
DESCRIPTION = "Provide some practical Python decorators."
if __name__ == "__main__":
setup(
name=NAME,
version="0.0.4",
author=AUTHOR,
author_email=EMAIL,
url=URL,
license=LICENSE,
description=DESCRIPTION,
packages=find_packages(),
include_package_data=True,
install_requires=open("./requirements.txt", "r").read().splitlines(),
long_description=open("./README.md", "r").read(),
long_description_content_type='text/markdown',
entry_points={
"console_scripts": [
"torbjorn=torbjorn.shell:run"
]
},
zip_safe=True,
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
]
)
|
py | 1a4e5d8340bb9062302dfa8151034b29db113a24 | from rest_framework import serializers
from profiles_api import models
class PokemonSerializer(serializers.Serializer):
"""Serializers a name field for testing our Pokemon API view"""
pokemon_name = serializers.CharField(max_length=36) |
py | 1a4e5dba7e7b85af664f19cbe9ed00b4df0fdb83 | #!/usr/bin/env python3
import os
import sys
import yaml
import argparse
from mako.template import Template
def generate_cpp(sensor_yaml, output_dir):
with open(sensor_yaml, 'r') as f:
ifile = yaml.safe_load(f)
if not isinstance(ifile, dict):
ifile = {}
# Render the mako template
t = Template(filename=os.path.join(
script_dir,
"inventorysensor.mako.cpp"))
output_cpp = os.path.join(output_dir, "inventory-sensor-gen.cpp")
with open(output_cpp, 'w') as fd:
fd.write(t.render(sensorDict=ifile))
def main():
valid_commands = {
'generate-cpp': generate_cpp
}
parser = argparse.ArgumentParser(
description="Inventory Object to IPMI SensorID code generator")
parser.add_argument(
'-i', '--sensor_yaml', dest='sensor_yaml',
default='example.yaml', help='input sensor yaml file to parse')
parser.add_argument(
"-o", "--output-dir", dest="outputdir",
default=".",
help="output directory")
parser.add_argument(
'command', metavar='COMMAND', type=str,
choices=valid_commands.keys(),
help='Command to run.')
args = parser.parse_args()
if (not (os.path.isfile(args.sensor_yaml))):
sys.exit("Can not find input yaml file " + args.sensor_yaml)
function = valid_commands[args.command]
function(args.sensor_yaml, args.outputdir)
if __name__ == '__main__':
script_dir = os.path.dirname(os.path.realpath(__file__))
main()
|
py | 1a4e5dfd6ccb7ec5231849bbb8f3f6012f347552 | # -*- coding: utf-8-*-
from __future__ import absolute_import
import atexit
from .plugins import Email
from apscheduler.schedulers.background import BackgroundScheduler
import logging
from . import app_utils
import time
import sys
if sys.version_info < (3, 0):
import Queue as queue # Python 2
else:
import queue # Python 3
class Notifier(object):
class NotificationClient(object):
def __init__(self, gather, timestamp):
self.gather = gather
self.timestamp = timestamp
def run(self):
self.timestamp = self.gather(self.timestamp)
def __init__(self, profile, brain):
self._logger = logging.getLogger(__name__)
self.q = queue.Queue()
self.profile = profile
self.notifiers = []
self.brain = brain
if 'email' in profile and \
('enable' not in profile['email'] or profile['email']['enable']):
self.notifiers.append(self.NotificationClient(
self.handleEmailNotifications, None))
else:
self._logger.debug('email account not set ' +
'in profile, email notifier will not be used')
if 'robot' in profile and profile['robot'] == 'emotibot':
self.notifiers.append(self.NotificationClient(
self.handleRemenderNotifications, None))
sched = BackgroundScheduler(daemon=True)
sched.start()
sched.add_job(self.gather, 'interval', seconds=120)
atexit.register(lambda: sched.shutdown(wait=False))
def gather(self):
[client.run() for client in self.notifiers]
def handleEmailNotifications(self, lastDate):
"""Places new email notifications in the Notifier's queue."""
emails = Email.fetchUnreadEmails(self.profile, since=lastDate)
if emails is None:
return
if emails:
lastDate = Email.getMostRecentDate(emails)
def styleEmail(e):
subject = Email.getSubject(e, self.profile)
if Email.isEchoEmail(e, self.profile):
if Email.isNewEmail(e):
return subject.replace('[echo]', '')
else:
return ""
elif Email.isControlEmail(e, self.profile):
self.brain.query([subject.replace('[control]', '')
.strip()], None, True)
return ""
sender = Email.getSender(e)
return "您有来自 %s 的新邮件 %s" % (sender, subject)
for e in emails:
self.q.put(styleEmail(e))
return lastDate
def handleRemenderNotifications(self, lastDate):
lastDate = time.strftime('%d %b %Y %H:%M:%S')
due_reminders = app_utils.get_due_reminders()
for reminder in due_reminders:
self.q.put(reminder)
return lastDate
def getNotification(self):
"""Returns a notification. Note that this function is consuming."""
try:
notif = self.q.get(block=False)
return notif
except queue.Empty:
return None
def getAllNotifications(self):
"""
Return a list of notifications in chronological order.
Note that this function is consuming, so consecutive calls
will yield different results.
"""
notifs = []
notif = self.getNotification()
while notif:
notifs.append(notif)
notif = self.getNotification()
return notifs
|
py | 1a4e5e95ddff54d9bdc35ca8bc5f73609304aeae | import numpy as np
from gym_minigrid.minigrid import *
from gym_minigrid.register import register
class Ice(WorldObj):
def __init__(self):
super().__init__('ice', 'blue')
def can_overlap(self):
return True
def render(self, img):
c = (119, 201, 240) # Pale blue
# Background color
fill_coords(img, point_in_rect(0, 1, 0, 1), c)
# Add Ice top object index.
OBJECT_TO_IDX['ice'] = max(OBJECT_TO_IDX.values()) + 1
class IceGridEnv(MiniGridEnv):
def __init__(self, size):
super().__init__(
grid_size=size,
max_steps=4*size*size,
see_through_walls=False,
seed=None
)
def _gen_grid(self, width, height):
assert width >= 5 and height >= 5
self.grid = Grid(width, height)
# Surrounding walls.
self.grid.wall_rect(0, 0, width, height)
# Sample ice patches.
        # Choose the top-left corner.
n_patches = 1
while n_patches > 0:
patch_width = self._rand_int(2, width - 4)
patch_height = self._rand_int(2, height - 4)
# The -2 offset is to account for walls all around the grid.
patch_top_left = (
self._rand_int(1, width - patch_width - 2),
self._rand_int(1, height - patch_height - 2)
)
if patch_top_left != (0, 0):
# Accept patch.
n_patches -= 1
self.add_ice_patch(patch_width, patch_height, patch_top_left)
# Agent top left.
self.agent_pos = (1, 1)
self.agent_dir = 0
# Place goal bottom right.
self.goal_pos = np.array((width - 2, height - 2))
self.put_obj(Goal(), *self.goal_pos)
self.mission = "Get to the goal square"
def add_ice_patch(self, w, h, p):
for i in range(p[0], p[0] + w):
for j in range(p[1], p[1] + h):
self.put_obj(Ice(), i, j)
@property
def on_ice(self):
cur_tile = self.grid.get(*self.agent_pos)
return cur_tile is not None and cur_tile.type == "ice"
def step(self, action):
if not self.on_ice or action != self.actions.forward:
return super().step(action)
# Go forward until not on ice.
while self.on_ice:
fwd_cell = self.grid.get(*self.front_pos)
if fwd_cell == None or fwd_cell.can_overlap():
self.agent_pos = self.front_pos
else:
break
done = self.step_count >= self.max_steps
obs = self.gen_obs()
return obs, 0, done, {}
class IceGridS10Env(IceGridEnv):
def __init__(self):
super().__init__(size=10)
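# Minimal rollout sketch (illustrative; assumes gym-minigrid is installed and
# relies only on methods inherited from MiniGridEnv):
def _example_rollout():
    env = IceGridS10Env()
    obs = env.reset()
    # Stepping forward onto an ice tile makes the agent slide until it leaves
    # the patch or is blocked (see IceGridEnv.step above).
    obs, reward, done, info = env.step(env.actions.forward)
    return done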
register(
id='MiniGrid-IceGridS10-v0',
entry_point='ice:IceGridS10Env'
) |
py | 1a4e5f88a232af4bd5d5247504fb8ca91efc24e7 | import os
import subprocess
import sys
from typing import Optional
from briefcase.config import BaseConfig
from briefcase.exceptions import BriefcaseCommandError
from .base import BaseCommand
from .create import DependencyInstallError, write_dist_info
class DevCommand(BaseCommand):
cmd_line = 'briefcase dev'
command = 'dev'
output_format = None
description = 'Run a briefcase project in the dev environment'
@property
def platform(self):
"""The dev command always reports as the local platform."""
return {
'darwin': 'macOS',
'linux': 'linux',
'win32': 'windows',
}[sys.platform]
def bundle_path(self, app):
"A placeholder; Dev command doesn't have a bundle path"
raise NotImplementedError()
def binary_path(self, app):
"A placeholder; Dev command doesn't have a binary path"
raise NotImplementedError()
def distribution_path(self, app):
"A placeholder; Dev command doesn't have a distribution path"
raise NotImplementedError()
def add_options(self, parser):
parser.add_argument(
'-a',
'--app',
dest='appname',
help='The app to run'
)
parser.add_argument(
'-d',
'--update-dependencies',
action="store_true",
help='Update dependencies for app'
)
parser.add_argument(
'--no-run',
dest="run_app",
action="store_false",
default=True,
help='Do not run the app, just install dependencies.'
)
def install_dev_dependencies(self, app: BaseConfig, **options):
"""
        Install the dependencies for the app in the dev environment.
:param app: The config object for the app
"""
if app.requires:
try:
self.subprocess.run(
[
sys.executable, "-m",
"pip", "install",
"--upgrade",
] + app.requires,
check=True,
)
except subprocess.CalledProcessError:
raise DependencyInstallError()
else:
print("No application dependencies.")
def run_dev_app(self, app: BaseConfig, env: dict, **options):
"""
Run the app in the dev environment.
:param app: The config object for the app
:param env: environment dictionary for sub command
"""
try:
# Invoke the app.
self.subprocess.run(
[sys.executable, "-m", app.module_name],
env=env,
check=True,
)
except subprocess.CalledProcessError:
print()
raise BriefcaseCommandError(
"Unable to start application '{app.app_name}'".format(
app=app
))
def get_environment(self, app):
# Create a shell environment where PYTHONPATH points to the source
# directories described by the app config.
env = os.environ.copy()
env['PYTHONPATH'] = os.pathsep.join(app.PYTHONPATH)
return env
def __call__(
self,
appname: Optional[str] = None,
update_dependencies: Optional[bool] = False,
run_app: Optional[bool] = True,
**options
):
# Confirm all required tools are available
self.verify_tools()
# Which app should we run? If there's only one defined
# in pyproject.toml, then we can use it as a default;
# otherwise look for a -a/--app option.
if len(self.apps) == 1:
app = list(self.apps.values())[0]
elif appname:
try:
app = self.apps[appname]
except KeyError:
raise BriefcaseCommandError(
"Project doesn't define an application named '{appname}'".format(
appname=appname
))
else:
raise BriefcaseCommandError(
"Project specifies more than one application; "
"use --app to specify which one to start."
)
# Look for the existence of a dist-info file.
# If one exists, assume that the dependencies have already been
# installed. If a dependency update has been manually requested,
# do it regardless.
dist_info_path = self.app_module_path(app).parent / '{app.module_name}.dist-info'.format(app=app)
if not run_app:
# If we are not running the app, it means we should update dependencies.
update_dependencies = True
if update_dependencies or not dist_info_path.exists():
print()
print('[{app.app_name}] Installing dependencies...'.format(
app=app
))
self.install_dev_dependencies(app, **options)
write_dist_info(app, dist_info_path)
if run_app:
print()
print('[{app.app_name}] Starting in dev mode...'.format(
app=app
))
env = self.get_environment(app)
state = self.run_dev_app(app, env, **options)
return state
|
py | 1a4e5fbec6013f9de36d496cf5958ce21d670402 | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "LinearTrend", cycle_length = 30, transform = "Fisher", sigma = 0.0, exog_count = 20, ar_order = 0); |
py | 1a4e60312b243e3753295ce9a99546b958882b6f | import numpy as np
import argparse
import nibabel as nib
parser = argparse.ArgumentParser(description='Convert AFNI to RAS')
reqoptions = parser.add_argument_group('Required arguments')
reqoptions.add_argument('-i', '-in', dest="infile", required=True, help='Dir' )
reqoptions.add_argument('-o', '-out', dest="outfile", required=True, help='Dir' )
args = parser.parse_args()
inFile = args.infile #'/mnt/hgfs/ssd_tmp/ASL/056/'
outFile = args.outfile #'/mnt/hgfs/ssd_tmp/ASL/056/'
afni_vec = np.loadtxt(inFile, skiprows=1)
ras_vec = np.zeros((4,4))
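# The sign flips below amount to conjugating the AFNI affine with
# diag(-1, -1, 1, 1), i.e. flipping the x and y axes on both sides of the
# transform - consistent with an LPS/RAI-to-RAS change of convention
# (an observation about the code, not taken from accompanying documentation).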
ras_vec[0,0] = afni_vec[0]
ras_vec[0,1] = afni_vec[1]
ras_vec[0,2] = -afni_vec[2]
ras_vec[0,3] = -afni_vec[3]
ras_vec[1,0] = afni_vec[4]
ras_vec[1,1] = afni_vec[5]
ras_vec[1,2] = -afni_vec[6]
ras_vec[1,3] = -afni_vec[7]
ras_vec[2,0] = -afni_vec[8]
ras_vec[2,1] = -afni_vec[9]
ras_vec[2,2] = afni_vec[10]
ras_vec[2,3] = afni_vec[11]
ras_vec[3,0] = 0
ras_vec[3,1] = 0
ras_vec[3,2] = 0
ras_vec[3,3] = 1
np.savetxt(outFile, ras_vec, fmt='%0.10f')
|
py | 1a4e603736608f0307bb6161f9758d3f7f3c2b41 | # coding: utf-8
# Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ChangeVaultCompartmentDetails(object):
"""
ChangeVaultCompartmentDetails model.
"""
def __init__(self, **kwargs):
"""
Initializes a new ChangeVaultCompartmentDetails object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param compartment_id:
The value to assign to the compartment_id property of this ChangeVaultCompartmentDetails.
:type compartment_id: str
"""
self.swagger_types = {
'compartment_id': 'str'
}
self.attribute_map = {
'compartment_id': 'compartmentId'
}
self._compartment_id = None
@property
def compartment_id(self):
"""
**[Required]** Gets the compartment_id of this ChangeVaultCompartmentDetails.
The `OCID`__ of the compartment to move the vault to.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The compartment_id of this ChangeVaultCompartmentDetails.
:rtype: str
"""
return self._compartment_id
@compartment_id.setter
def compartment_id(self, compartment_id):
"""
Sets the compartment_id of this ChangeVaultCompartmentDetails.
The `OCID`__ of the compartment to move the vault to.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param compartment_id: The compartment_id of this ChangeVaultCompartmentDetails.
:type: str
"""
self._compartment_id = compartment_id
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
|
py | 1a4e62f0fa27d538f5958d6c78177b5683976671 | # Generated by Django 2.0.7 on 2018-08-30 22:43
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Follow', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='guildfollowermodel',
name='Id',
field=models.AutoField(primary_key=True, serialize=False),
),
]
|
py | 1a4e6352652dcda8bd2e4e137759d3bd5f6817c4 | # Copyright (c) Open-MMLab. All rights reserved.
import logging
import torch.nn as nn
import torch.utils.checkpoint as cp
from ..runner import load_checkpoint
from .weight_init import constant_init, kaiming_init
def conv3x3(in_planes, out_planes, stride=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=dilation,
dilation=dilation,
bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
style='pytorch',
with_cp=False):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride, dilation)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
self.dilation = dilation
assert not with_cp
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
style='pytorch',
with_cp=False):
"""Bottleneck block.
If style is "pytorch", the stride-two layer is the 3x3 conv layer,
if it is "caffe", the stride-two layer is the first 1x1 conv layer.
"""
super(Bottleneck, self).__init__()
assert style in ['pytorch', 'caffe']
if style == 'pytorch':
conv1_stride = 1
conv2_stride = stride
else:
conv1_stride = stride
conv2_stride = 1
self.conv1 = nn.Conv2d(
inplanes, planes, kernel_size=1, stride=conv1_stride, bias=False)
self.conv2 = nn.Conv2d(
planes,
planes,
kernel_size=3,
stride=conv2_stride,
padding=dilation,
dilation=dilation,
bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(
planes, planes * self.expansion, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.dilation = dilation
self.with_cp = with_cp
def forward(self, x):
def _inner_forward(x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.relu(out)
return out
def make_res_layer(block,
inplanes,
planes,
blocks,
stride=1,
dilation=1,
style='pytorch',
with_cp=False):
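    """Stack `blocks` residual blocks into a single ResNet stage; a 1x1 conv + BN
    downsample branch is added when the stride or channel count changes, and only
    the first block uses the requested stride."""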
downsample = None
if stride != 1 or inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(
inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(
block(
inplanes,
planes,
stride,
dilation,
downsample,
style=style,
with_cp=with_cp))
inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(
block(inplanes, planes, 1, dilation, style=style, with_cp=with_cp))
return nn.Sequential(*layers)
class ResNet(nn.Module):
"""ResNet backbone.
Args:
depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
num_stages (int): Resnet stages, normally 4.
strides (Sequence[int]): Strides of the first block of each stage.
dilations (Sequence[int]): Dilation of each stage.
out_indices (Sequence[int]): Output from which stages.
style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
layer is the 3x3 conv layer, otherwise the stride-two layer is
the first 1x1 conv layer.
frozen_stages (int): Stages to be frozen (all param fixed). -1 means
not freezing any parameters.
bn_eval (bool): Whether to set BN layers as eval mode, namely, freeze
running stats (mean and var).
bn_frozen (bool): Whether to freeze weight and bias of BN layers.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed.
"""
arch_settings = {
18: (BasicBlock, (2, 2, 2, 2)),
34: (BasicBlock, (3, 4, 6, 3)),
50: (Bottleneck, (3, 4, 6, 3)),
101: (Bottleneck, (3, 4, 23, 3)),
152: (Bottleneck, (3, 8, 36, 3))
}
def __init__(self,
depth,
num_stages=4,
strides=(1, 2, 2, 2),
dilations=(1, 1, 1, 1),
out_indices=(0, 1, 2, 3),
style='pytorch',
frozen_stages=-1,
bn_eval=True,
bn_frozen=False,
with_cp=False):
super(ResNet, self).__init__()
if depth not in self.arch_settings:
raise KeyError('invalid depth {} for resnet'.format(depth))
assert num_stages >= 1 and num_stages <= 4
block, stage_blocks = self.arch_settings[depth]
stage_blocks = stage_blocks[:num_stages]
assert len(strides) == len(dilations) == num_stages
assert max(out_indices) < num_stages
self.out_indices = out_indices
self.style = style
self.frozen_stages = frozen_stages
self.bn_eval = bn_eval
self.bn_frozen = bn_frozen
self.with_cp = with_cp
self.inplanes = 64
self.conv1 = nn.Conv2d(
3, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.res_layers = []
for i, num_blocks in enumerate(stage_blocks):
stride = strides[i]
dilation = dilations[i]
planes = 64 * 2**i
res_layer = make_res_layer(
block,
self.inplanes,
planes,
num_blocks,
stride=stride,
dilation=dilation,
style=self.style,
with_cp=with_cp)
self.inplanes = planes * block.expansion
layer_name = 'layer{}'.format(i + 1)
self.add_module(layer_name, res_layer)
self.res_layers.append(layer_name)
self.feat_dim = block.expansion * 64 * 2**(len(stage_blocks) - 1)
def init_weights(self, pretrained=None):
if isinstance(pretrained, str):
logger = logging.getLogger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, nn.BatchNorm2d):
constant_init(m, 1)
else:
raise TypeError('pretrained must be a str or None')
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
outs = []
for i, layer_name in enumerate(self.res_layers):
res_layer = getattr(self, layer_name)
x = res_layer(x)
if i in self.out_indices:
outs.append(x)
if len(outs) == 1:
return outs[0]
else:
return tuple(outs)
def train(self, mode=True):
super(ResNet, self).train(mode)
if self.bn_eval:
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
if self.bn_frozen:
for params in m.parameters():
params.requires_grad = False
if mode and self.frozen_stages >= 0:
for param in self.conv1.parameters():
param.requires_grad = False
for param in self.bn1.parameters():
param.requires_grad = False
self.bn1.eval()
self.bn1.weight.requires_grad = False
self.bn1.bias.requires_grad = False
for i in range(1, self.frozen_stages + 1):
mod = getattr(self, 'layer{}'.format(i))
mod.eval()
for param in mod.parameters():
param.requires_grad = False
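# Usage sketch (added as an illustration; the arguments and input size below are
# examples, not part of the original file):
#     model = ResNet(depth=50, out_indices=(0, 1, 2, 3))
#     model.init_weights(pretrained=None)
#     feats = model(torch.randn(1, 3, 224, 224))  # tuple of 4 feature maps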
|
py | 1a4e643f3493e027ba5a0979e40c560d6264b546 | import sys
import torch
import os
import shutil
from torch.utils.data.dataloader import DataLoader
import random
sys.path.append('.')
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
class ProgressMeter(object):
def __init__(self, num_batches, meters, prefix=""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def display(self, batch):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
print('\t'.join(entries))
def _get_batch_fmtstr(self, num_batches):
        num_digits = len(str(num_batches))
fmt = '{:' + str(num_digits) + 'd}'
return '[' + fmt + '/' + fmt.format(num_batches) + ']'
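# Example (illustrative; `train_loader` and `batch_idx` are assumed to exist):
#     meters = [AverageMeter('Loss', ':.4e'), AverageMeter('Acc@1', ':6.2f')]
#     progress = ProgressMeter(len(train_loader), meters, prefix='Epoch: [0]')
#     progress.display(batch_idx)  # e.g. "Epoch: [0][ 10/391] Loss ... Acc@1 ..."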
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
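# Example (illustrative): for logits of shape [N, num_classes] and integer targets
# of shape [N], accuracy(logits, targets, topk=(1, 5)) returns two 1-element
# tensors holding the top-1 and top-5 accuracy in percent.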
def create_exp_dir(path, scripts_to_save=None):
os.makedirs(path, exist_ok=True)
print('Experiment dir : {}'.format(path))
if scripts_to_save is not None:
script_path = os.path.join(path, 'scripts')
if os.path.exists(script_path):
shutil.rmtree(script_path)
os.mkdir(script_path)
for script in scripts_to_save:
dst_file = os.path.join(path, 'scripts', os.path.basename(script))
print(dst_file)
shutil.copytree(script, dst_file)
class ForeverDataIterator:
"""A data iterator that will never stop producing data"""
def __init__(self, data_loader: DataLoader):
self.data_loader = data_loader
self.iter = iter(self.data_loader)
def __next__(self):
try:
data = next(self.iter)
except StopIteration:
self.iter = iter(self.data_loader)
data = next(self.iter)
return data
def __len__(self):
return len(self.data_loader)
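# Typical use (sketch; assumes the loader yields (inputs, labels) batches):
#     train_iter = ForeverDataIterator(train_loader)
#     for step in range(max_steps):
#         inputs, labels = next(train_iter)  # restarts the loader automatically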
|
py | 1a4e64608b57615547c11fc4a0ba5eae8c24e683 | """Flexible method to load confounds generated by fMRIprep.
Authors: Hanad Sharmarke, Dr. Pierre Bellec, Francois Paugam
"""
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
import warnings
import os
# Global variables listing the admissible types of noise components
all_confounds = ["motion", "high_pass", "wm_csf", "global", "compcor"]
def _add_suffix(params, model):
"""
Add suffixes to a list of parameters.
    The available models are basic (no suffix), derivatives, power2 and full.
"""
params_full = params.copy()
suffix = {
"basic": {},
"derivatives": {"derivative1"},
"power2": {"power2"},
"full": {"derivative1", "power2", "derivative1_power2"},
}
for par in params:
for suff in suffix[model]:
params_full.append(f"{par}_{suff}")
return params_full
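# Example (illustrative): _add_suffix(["trans_x"], "derivatives") returns
# ["trans_x", "trans_x_derivative1"]; the "full" model also adds
# "trans_x_power2" and "trans_x_derivative1_power2".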
def _check_params(confounds_raw, params):
"""Check that specified parameters can be found in the confounds."""
for par in params:
if not par in confounds_raw.columns:
raise ValueError(
f"The parameter {par} cannot be found in the available confounds. You may want to use a different denoising strategy'"
)
return None
def _find_confounds(confounds_raw, keywords):
"""Find confounds that contain certain keywords."""
list_confounds = []
for key in keywords:
key_found = False
for col in confounds_raw.columns:
if key in col:
list_confounds.append(col)
key_found = True
if not key_found:
raise ValueError(f"could not find any confound with the key {key}")
return list_confounds
def _load_global(confounds_raw, global_signal):
"""Load the regressors derived from the global signal."""
global_params = _add_suffix(["global_signal"], global_signal)
_check_params(confounds_raw, global_params)
return confounds_raw[global_params]
def _load_wm_csf(confounds_raw, wm_csf):
"""Load the regressors derived from the white matter and CSF masks."""
wm_csf_params = _add_suffix(["csf", "white_matter"], wm_csf)
_check_params(confounds_raw, wm_csf_params)
return confounds_raw[wm_csf_params]
def _load_high_pass(confounds_raw):
"""Load the high pass filter regressors."""
high_pass_params = _find_confounds(confounds_raw, ["cosine"])
return confounds_raw[high_pass_params]
def _label_compcor(confounds_raw, compcor_suffix, n_compcor):
"""Builds list for the number of compcor components."""
compcor_cols = []
for nn in range(n_compcor + 1):
nn_str = str(nn).zfill(2)
compcor_col = compcor_suffix + "_comp_cor_" + nn_str
if compcor_col not in confounds_raw.columns:
warnings.warn(f"could not find any confound with the key {compcor_col}")
else:
compcor_cols.append(compcor_col)
return compcor_cols
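# Example (illustrative): _label_compcor(confounds_raw, "a", 2) looks for the
# columns "a_comp_cor_00", "a_comp_cor_01" and "a_comp_cor_02" and returns those
# that are present in the dataframe.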
def _load_compcor(confounds_raw, compcor, n_compcor):
"""Load compcor regressors."""
if compcor == "anat":
compcor_cols = _label_compcor(confounds_raw, "a", n_compcor)
if compcor == "temp":
compcor_cols = _label_compcor(confounds_raw, "t", n_compcor)
if compcor == "full":
compcor_cols = _label_compcor(confounds_raw, "a", n_compcor)
compcor_cols.extend(_label_compcor(confounds_raw, "t", n_compcor))
compcor_cols.sort()
_check_params(confounds_raw, compcor_cols)
return confounds_raw[compcor_cols]
def _load_motion(confounds_raw, motion, n_motion):
"""Load the motion regressors."""
motion_params = _add_suffix(
["trans_x", "trans_y", "trans_z", "rot_x", "rot_y", "rot_z"], motion
)
_check_params(confounds_raw, motion_params)
confounds_motion = confounds_raw[motion_params]
# Optionally apply PCA reduction
if n_motion > 0:
confounds_motion = _pca_motion(confounds_motion, n_components=n_motion)
return confounds_motion
def _pca_motion(confounds_motion, n_components):
"""Reduce the motion paramaters using PCA."""
n_available = confounds_motion.shape[1]
if n_components > n_available:
raise ValueError(
f"User requested n_motion={n_components} motion components, but found only {n_available}."
)
confounds_motion = confounds_motion.dropna()
confounds_motion_std = scale(
confounds_motion, axis=0, with_mean=True, with_std=True
)
pca = PCA(n_components=n_components)
motion_pca = pd.DataFrame(pca.fit_transform(confounds_motion_std))
motion_pca.columns = ["motion_pca_" + str(col + 1) for col in motion_pca.columns]
return motion_pca
def _sanitize_strategy(strategy):
"""Defines the supported denoising strategies."""
if isinstance(strategy, list):
for conf in strategy:
if not conf in all_confounds:
raise ValueError(f"{conf} is not a supported type of confounds.")
else:
raise ValueError("strategy needs to be a list of strings")
return strategy
def _confounds_to_df(confounds_raw):
"""Load raw confounds as a pandas DataFrame."""
if not isinstance(confounds_raw, pd.DataFrame):
if "nii" in confounds_raw[-6:]:
suffix = "_space-" + confounds_raw.split("space-")[1]
confounds_raw = confounds_raw.replace(
suffix, "_desc-confounds_timeseries.tsv",
)
# fmriprep has changed the file suffix between v20.1.1 and v20.2.0 with respect to BEP 012.
# cf. https://neurostars.org/t/naming-change-confounds-regressors-to-confounds-timeseries/17637
# Check file with new naming scheme exists or replace, for backward compatibility.
if not os.path.exists(confounds_raw):
confounds_raw = confounds_raw.replace(
"_desc-confounds_timeseries.tsv", "_desc-confounds_regressors.tsv",
)
confounds_raw = pd.read_csv(confounds_raw, delimiter="\t", encoding="utf-8")
return confounds_raw
def _sanitize_confounds(confounds_raw):
"""Make sure the inputs are in the correct format."""
# we want to support loading a single set of confounds, instead of a list
# so we hack it
flag_single = isinstance(confounds_raw, str) or isinstance(
confounds_raw, pd.DataFrame
)
if flag_single:
confounds_raw = [confounds_raw]
return confounds_raw, flag_single
def _confounds_to_ndarray(confounds, demean):
"""Convert confounds from a pandas dataframe to a numpy array."""
# Convert from DataFrame to numpy ndarray
labels = confounds.columns
confounds = confounds.values
# Derivatives have NaN on the first row
# Replace them by estimates at second time point,
# otherwise nilearn will crash.
mask_nan = np.isnan(confounds[0, :])
confounds[0, mask_nan] = confounds[1, mask_nan]
# Optionally demean confounds
if demean:
confounds = scale(confounds, axis=0, with_std=False)
return confounds, labels
class Confounds:
"""
Confounds from fmriprep
Parameters
----------
strategy : list of strings
The type of noise confounds to include.
"motion" head motion estimates.
"high_pass" discrete cosines covering low frequencies.
"wm_csf" confounds derived from white matter and cerebrospinal fluid.
"global" confounds derived from the global signal.
motion : string, optional
Type of confounds extracted from head motion estimates.
"basic" translation/rotation (6 parameters)
"power2" translation/rotation + quadratic terms (12 parameters)
"derivatives" translation/rotation + derivatives (12 parameters)
"full" translation/rotation + derivatives + quadratic terms + power2d derivatives (24 parameters)
    n_motion : float
        Number of PCA components to keep from head motion estimates.
        If the parameter is strictly between 0 and 1, a principal component
        analysis is applied to the motion parameters, and the number of
        components kept is the smallest number that explains at least that
        fraction of the motion parameters' variance.
        If `n_motion` is 0, no PCA is performed.
wm_csf : string, optional
Type of confounds extracted from masks of white matter and cerebrospinal fluids.
"basic" the averages in each mask (2 parameters)
"power2" averages and quadratic terms (4 parameters)
"derivatives" averages and derivatives (4 parameters)
"full" averages + derivatives + quadratic terms + power2d derivatives (8 parameters)
global_signal : string, optional
Type of confounds extracted from the global signal.
"basic" just the global signal (1 parameter)
"power2" global signal and quadratic term (2 parameters)
"derivatives" global signal and derivative (2 parameters)
"full" global signal + derivatives + quadratic terms + power2d derivatives (4 parameters)
compcor : string,optional
Type of confounds extracted from a component based noise correction method
"anat" noise components calculated using anatomical compcor
"temp" noise components calculated using temporal compcor
"full" noise components calculated using both temporal and anatomical
n_compcor : int, optional
The number of noise components to be extracted.
demean : boolean, optional
If True, the confounds are standardized to a zero mean (over time).
This step is critical if the confounds are regressed out of time series
using nilearn with no or zscore standardization, but should be turned off
with "spc" normalization.
Attributes
----------
`confounds_` : ndarray
The confounds loaded using the specified model
`columns_`: list of str
The labels of the different confounds
Notes
-----
    The predefined strategies implemented in this class are
    adapted from Ciric et al. (2017). The band-pass filter is replaced
    by a high-pass filter, as high frequencies have been shown to carry
    meaningful signal for connectivity analysis.
References
----------
Ciric et al., 2017 "Benchmarking of participant-level confound regression
strategies for the control of motion artifact in studies of functional
connectivity" Neuroimage 154: 174-87
https://doi.org/10.1016/j.neuroimage.2017.03.020
"""
def __init__(
self,
strategy=["motion", "high_pass", "wm_csf"],
motion="full",
n_motion=0,
wm_csf="basic",
global_signal="basic",
compcor="anat",
n_compcor=10,
demean=True,
):
"""Default parameters."""
self.strategy = _sanitize_strategy(strategy)
self.motion = motion
self.n_motion = n_motion
self.wm_csf = wm_csf
self.global_signal = global_signal
self.compcor = compcor
self.n_compcor = n_compcor
self.demean = demean
def load(self, confounds_raw):
"""
Load fMRIprep confounds
Parameters
----------
confounds_raw : Pandas Dataframe or path to tsv file(s), optionally as a list.
Raw confounds from fmriprep
Returns
-------
confounds : ndarray or list of ndarray
A reduced version of fMRIprep confounds based on selected strategy and flags.
An intercept is automatically added to the list of confounds.
"""
confounds_raw, flag_single = _sanitize_confounds(confounds_raw)
confounds_out = []
columns_out = []
for file in confounds_raw:
conf, col = self._load_single(file)
confounds_out.append(conf)
columns_out.append(col)
# If a single input was provided,
# send back a single output instead of a list
if flag_single:
confounds_out = confounds_out[0]
columns_out = columns_out[0]
self.confounds_ = confounds_out
self.columns_ = columns_out
return confounds_out
def _load_single(self, confounds_raw):
"""Load a single confounds file from fmriprep."""
# Convert tsv file to pandas dataframe
confounds_raw = _confounds_to_df(confounds_raw)
confounds = pd.DataFrame()
if "motion" in self.strategy:
confounds_motion = _load_motion(confounds_raw, self.motion, self.n_motion)
confounds = pd.concat([confounds, confounds_motion], axis=1)
if "high_pass" in self.strategy:
confounds_high_pass = _load_high_pass(confounds_raw)
confounds = pd.concat([confounds, confounds_high_pass], axis=1)
if "wm_csf" in self.strategy:
confounds_wm_csf = _load_wm_csf(confounds_raw, self.wm_csf)
confounds = pd.concat([confounds, confounds_wm_csf], axis=1)
if "global" in self.strategy:
confounds_global_signal = _load_global(confounds_raw, self.global_signal)
confounds = pd.concat([confounds, confounds_global_signal], axis=1)
if "compcor" in self.strategy:
confounds_compcor = _load_compcor(
confounds_raw, self.compcor, self.n_compcor
)
confounds = pd.concat([confounds, confounds_compcor], axis=1)
confounds, labels = _confounds_to_ndarray(confounds, self.demean)
return confounds, labels
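# Usage sketch (added as an illustration; the file name below is hypothetical):
#     conf = Confounds(strategy=["motion", "high_pass", "wm_csf"], motion="basic")
#     regressors = conf.load("sub-01_task-rest_desc-confounds_timeseries.tsv")
#     # `regressors` is a numpy array that can be passed to nilearn's signal
#     # cleaning / masker functions through their `confounds` argument.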
|
py | 1a4e6571e88879c0322c38a8940be54bb97f2993 | """
Logging configuration for BlueBird
"""
# TODO(rkm 2020-01-12) Refactor the episode logging code into SimProxy
import json
import logging.config
import uuid
from datetime import datetime
from pathlib import Path
from bluebird.settings import Settings
if not Settings.LOGS_ROOT.exists():
Settings.LOGS_ROOT.mkdir()
def time_for_logfile():
"""Returns the current timestamp formatted for a logfile name"""
return datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
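# Example output (illustrative): "2020-01-12-14-03-59"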
INSTANCE_ID = uuid.uuid1()
INST_LOG_DIR = Path(Settings.LOGS_ROOT, f"{time_for_logfile()}_{INSTANCE_ID}")
INST_LOG_DIR.mkdir()
with open("bluebird/logging_config.json") as f:
LOG_CONFIG = json.load(f)
LOG_CONFIG["handlers"]["console"]["level"] = Settings.CONSOLE_LOG_LEVEL
# Set filenames for logfiles (can't do this from the JSON)
LOG_CONFIG["handlers"]["debug-file"]["filename"] = INST_LOG_DIR / "debug.log"
# Set the logging config
logging.config.dictConfig(LOG_CONFIG)
_LOGGER = logging.getLogger("bluebird")
# Setup episode logging
EP_ID = EP_FILE = None
EP_LOGGER = logging.getLogger("episode")
EP_LOGGER.setLevel(logging.DEBUG)
_LOG_PREFIX = "E"
def close_episode_log(reason):
"""Closes the currently open episode log, if there is one"""
if not EP_LOGGER.hasHandlers():
return
EP_LOGGER.info(f"Episode finished ({reason})", extra={"PREFIX": _LOG_PREFIX})
EP_LOGGER.handlers[-1].close()
EP_LOGGER.handlers.pop()
def _start_episode_log(sim_seed):
"""Starts a new episode logfile"""
global EP_ID, EP_FILE
if EP_LOGGER.hasHandlers():
raise Exception(
f"Episode logger already has a handler assigned: {EP_LOGGER.handlers}"
)
EP_ID = uuid.uuid4()
EP_FILE = INST_LOG_DIR / f"{time_for_logfile()}_{EP_ID}.log"
file_handler = logging.FileHandler(EP_FILE)
file_handler.name = "episode-file"
file_handler.setLevel(logging.DEBUG)
formatter = logging.Formatter(
"%(asctime)s %(PREFIX)s %(message)s", datefmt="%Y-%m-%d %H:%M:%S"
)
file_handler.setFormatter(formatter)
EP_LOGGER.addHandler(file_handler)
EP_LOGGER.info(
f"Episode started. SIM_LOG_RATE is {Settings.SIM_LOG_RATE} Hz. "
f"Seed is {sim_seed}",
extra={"PREFIX": _LOG_PREFIX},
)
return EP_ID
def restart_episode_log(sim_seed):
"""
Closes the current episode log and starts a new one. Returns the UUID of the new
episode
"""
close_episode_log("episode logging restarted")
return _start_episode_log(sim_seed)
|
py | 1a4e6639261fbabecf3e13e784abea83a654e864 | from .correlation_threshold import correlation_threshold
from .variance_threshold import variance_threshold, calculate_frequency
from .get_na_columns import get_na_columns
from .transform import Spherize, RobustMAD
from .noise_removal import noise_removal
|
py | 1a4e667d2c8da3709886e6177e62f16686071a99 | # Write a program that takes a list of numbers (for example, a = [5, 10, 15, 20, 25]) and makes a new list of only the first and last elements of the given list.
# For practice, write this code inside a function.
def first_and_last_element_of_a_list(number_list):
if(len(number_list) <= 1):
return number_list
    return [number_list[0], number_list[-1]]
def run():
a = [5, 10, 15, 20, 25]
b = [2]
c = []
d = [2, 3]
    print(first_and_last_element_of_a_list(a))
    print(first_and_last_element_of_a_list(b))
    print(first_and_last_element_of_a_list(c))
    print(first_and_last_element_of_a_list(d))
if __name__ == '__main__':
run()
|
py | 1a4e689b456d60ebff0589cec5726db69912c1ae | import copy
import datetime
from operator import attrgetter
from django.core.exceptions import ValidationError
from django.db import models, router
from django.db.models.sql import InsertQuery
from django.test import TestCase, skipUnlessDBFeature
from django.test.utils import isolate_apps
from django.utils.timezone import get_fixed_timezone
from .models import (
Article, Department, Event, Model1, Model2, Model3, NonAutoPK, Party,
Worker,
)
class ModelTests(TestCase):
def test_model_init_too_many_args(self):
msg = "Number of args exceeds number of fields"
with self.assertRaisesMessage(IndexError, msg):
Worker(1, 2, 3, 4)
# The bug is that the following queries would raise:
# "TypeError: Related Field has invalid lookup: gte"
def test_related_gte_lookup(self):
"""
Regression test for #10153: foreign key __gte lookups.
"""
Worker.objects.filter(department__gte=0)
def test_related_lte_lookup(self):
"""
Regression test for #10153: foreign key __lte lookups.
"""
Worker.objects.filter(department__lte=0)
def test_sql_insert_compiler_return_id_attribute(self):
"""
Regression test for #14019: SQLInsertCompiler.as_sql() failure
"""
db = router.db_for_write(Party)
query = InsertQuery(Party)
query.insert_values([Party._meta.fields[0]], [], raw=False)
# this line will raise an AttributeError without the accompanying fix
query.get_compiler(using=db).as_sql()
def test_empty_choice(self):
# NOTE: Part of the regression test here is merely parsing the model
# declaration. The verbose_name, in particular, did not always work.
a = Article.objects.create(
headline="Look at me!", pub_date=datetime.datetime.now()
)
# An empty choice field should return None for the display name.
self.assertIs(a.get_status_display(), None)
# Empty strings should be returned as string
a = Article.objects.get(pk=a.pk)
self.assertEqual(a.misc_data, '')
def test_long_textfield(self):
# TextFields can hold more than 4000 characters (this was broken in
# Oracle).
a = Article.objects.create(
headline="Really, really big",
pub_date=datetime.datetime.now(),
article_text="ABCDE" * 1000
)
a = Article.objects.get(pk=a.pk)
self.assertEqual(len(a.article_text), 5000)
def test_long_unicode_textfield(self):
# TextFields can hold more than 4000 bytes also when they are
# less than 4000 characters
a = Article.objects.create(
headline="Really, really big",
pub_date=datetime.datetime.now(),
article_text='\u05d0\u05d1\u05d2' * 1000
)
a = Article.objects.get(pk=a.pk)
self.assertEqual(len(a.article_text), 3000)
def test_date_lookup(self):
# Regression test for #659
Party.objects.create(when=datetime.datetime(1999, 12, 31))
Party.objects.create(when=datetime.datetime(1998, 12, 31))
Party.objects.create(when=datetime.datetime(1999, 1, 1))
Party.objects.create(when=datetime.datetime(1, 3, 3))
self.assertQuerysetEqual(
Party.objects.filter(when__month=2), []
)
self.assertQuerysetEqual(
Party.objects.filter(when__month=1), [
datetime.date(1999, 1, 1)
],
attrgetter("when")
)
self.assertQuerysetEqual(
Party.objects.filter(when__month=12), [
datetime.date(1999, 12, 31),
datetime.date(1998, 12, 31),
],
attrgetter("when"),
ordered=False
)
self.assertQuerysetEqual(
Party.objects.filter(when__year=1998), [
datetime.date(1998, 12, 31),
],
attrgetter("when")
)
# Regression test for #8510
self.assertQuerysetEqual(
Party.objects.filter(when__day="31"), [
datetime.date(1999, 12, 31),
datetime.date(1998, 12, 31),
],
attrgetter("when"),
ordered=False
)
self.assertQuerysetEqual(
Party.objects.filter(when__month="12"), [
datetime.date(1999, 12, 31),
datetime.date(1998, 12, 31),
],
attrgetter("when"),
ordered=False
)
self.assertQuerysetEqual(
Party.objects.filter(when__year="1998"), [
datetime.date(1998, 12, 31),
],
attrgetter("when")
)
# Regression test for #18969
self.assertQuerysetEqual(
Party.objects.filter(when__year=1), [
datetime.date(1, 3, 3),
],
attrgetter("when")
)
self.assertQuerysetEqual(
Party.objects.filter(when__year='1'), [
datetime.date(1, 3, 3),
],
attrgetter("when")
)
def test_date_filter_null(self):
# Date filtering was failing with NULL date values in SQLite
# (regression test for #3501, among other things).
Party.objects.create(when=datetime.datetime(1999, 1, 1))
Party.objects.create()
p = Party.objects.filter(when__month=1)[0]
self.assertEqual(p.when, datetime.date(1999, 1, 1))
self.assertQuerysetEqual(
Party.objects.filter(pk=p.pk).dates("when", "month"), [
1
],
attrgetter("month")
)
def test_get_next_prev_by_field(self):
# get_next_by_FIELD() and get_previous_by_FIELD() don't crash when
# microseconds values are stored in the database.
Event.objects.create(when=datetime.datetime(2000, 1, 1, 16, 0, 0))
Event.objects.create(when=datetime.datetime(2000, 1, 1, 6, 1, 1))
Event.objects.create(when=datetime.datetime(2000, 1, 1, 13, 1, 1))
e = Event.objects.create(when=datetime.datetime(2000, 1, 1, 12, 0, 20, 24))
self.assertEqual(
e.get_next_by_when().when, datetime.datetime(2000, 1, 1, 13, 1, 1)
)
self.assertEqual(
e.get_previous_by_when().when, datetime.datetime(2000, 1, 1, 6, 1, 1)
)
def test_get_next_prev_by_field_unsaved(self):
msg = 'get_next/get_previous cannot be used on unsaved objects.'
with self.assertRaisesMessage(ValueError, msg):
Event().get_next_by_when()
with self.assertRaisesMessage(ValueError, msg):
Event().get_previous_by_when()
def test_primary_key_foreign_key_types(self):
# Check Department and Worker (non-default PK type)
d = Department.objects.create(id=10, name="IT")
w = Worker.objects.create(department=d, name="Full-time")
self.assertEqual(str(w), "Full-time")
@skipUnlessDBFeature("supports_timezones")
def test_timezones(self):
# Saving and updating with timezone-aware datetime Python objects.
# Regression test for #10443.
# The idea is that all these creations and saving should work without
# crashing. It's not rocket science.
dt1 = datetime.datetime(2008, 8, 31, 16, 20, tzinfo=get_fixed_timezone(600))
dt2 = datetime.datetime(2008, 8, 31, 17, 20, tzinfo=get_fixed_timezone(600))
obj = Article.objects.create(
headline="A headline", pub_date=dt1, article_text="foo"
)
obj.pub_date = dt2
obj.save()
self.assertEqual(
Article.objects.filter(headline="A headline").update(pub_date=dt1),
1
)
def test_chained_fks(self):
"""
Regression for #18432: Chained foreign keys with to_field produce incorrect query
"""
m1 = Model1.objects.create(pkey=1000)
m2 = Model2.objects.create(model1=m1)
m3 = Model3.objects.create(model2=m2)
# this is the actual test for #18432
m3 = Model3.objects.get(model2=1000)
m3.model2
@isolate_apps('model_regress')
def test_metaclass_can_access_attribute_dict(self):
"""
Model metaclasses have access to the class attribute dict in
__init__() (#30254).
"""
class HorseBase(models.base.ModelBase):
def __init__(cls, name, bases, attrs):
super().__init__(name, bases, attrs)
cls.horns = (1 if 'magic' in attrs else 0)
class Horse(models.Model, metaclass=HorseBase):
name = models.CharField(max_length=255)
magic = True
self.assertEqual(Horse.horns, 1)
class ModelValidationTest(TestCase):
def test_pk_validation(self):
NonAutoPK.objects.create(name="one")
again = NonAutoPK(name="one")
with self.assertRaises(ValidationError):
again.validate_unique()
class EvaluateMethodTest(TestCase):
"""
Regression test for #13640: cannot filter by objects with 'evaluate' attr
"""
def test_model_with_evaluate_method(self):
"""
You can filter by objects that have an 'evaluate' attr
"""
dept = Department.objects.create(pk=1, name='abc')
dept.evaluate = 'abc'
Worker.objects.filter(department=dept)
class ModelFieldsCacheTest(TestCase):
def test_fields_cache_reset_on_copy(self):
department1 = Department.objects.create(id=1, name='department1')
department2 = Department.objects.create(id=2, name='department2')
worker1 = Worker.objects.create(name='worker', department=department1)
worker2 = copy.copy(worker1)
self.assertEqual(worker2.department, department1)
# Changing related fields doesn't mutate the base object.
worker2.department = department2
self.assertEqual(worker2.department, department2)
self.assertEqual(worker1.department, department1)
|
py | 1a4e692641ce304adc4995b5b107e0c74e5b58fd | from collections import namedtuple
from datetime import date, timedelta
from decimal import Decimal, ROUND_UP
from itertools import chain
import os
import os.path
from subprocess import Popen
import sys
from time import sleep
from babel.dates import format_timedelta
import mangopay
import pando.utils
import requests
from liberapay import constants
from liberapay.billing.transactions import Money, transfer
from liberapay.exceptions import NegativeBalance
from liberapay.i18n.currencies import MoneyBasket
from liberapay.utils import group_by
from liberapay.website import website
log = print
def round_up(d):
return d.quantize(constants.D_CENT, rounding=ROUND_UP)
TakeTransfer = namedtuple('TakeTransfer', 'tipper member amount')
class NoPayday(Exception):
__str__ = lambda self: "No payday found where one was expected."
class Payday(object):
@classmethod
def start(cls, public_log=''):
"""Try to start a new Payday.
If there is a Payday that hasn't finished yet, then we work on it some
more. We use the start time of that Payday to synchronize our work.
"""
d = cls.db.one("""
INSERT INTO paydays
(id, public_log, ts_start)
VALUES ( COALESCE((
SELECT id
FROM paydays
WHERE ts_end > ts_start
AND stage IS NULL
ORDER BY id DESC LIMIT 1
), 0) + 1
, %s
, now()
)
ON CONFLICT (id) DO UPDATE
SET ts_start = COALESCE(paydays.ts_start, excluded.ts_start)
RETURNING id, (ts_start AT TIME ZONE 'UTC') AS ts_start, stage
""", (public_log,), back_as=dict)
d['ts_start'] = d['ts_start'].replace(tzinfo=pando.utils.utc)
payday = Payday()
payday.__dict__.update(d)
return payday
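    # Typical invocation (sketch, not part of the original file):
    #     payday = Payday.start()
    #     payday.run(log_dir='.', keep_log=True)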
def run(self, log_dir='.', keep_log=False, recompute_stats=10, update_cached_amounts=True):
"""This is the starting point for payday.
It is structured such that it can be run again safely (with a
newly-instantiated Payday object) if it crashes.
"""
self.db.self_check()
_start = pando.utils.utcnow()
log("Running payday #%(id)s, started at %(ts_start)s." % self.__dict__)
self.shuffle(log_dir)
self.recompute_stats(limit=recompute_stats)
if update_cached_amounts:
self.update_cached_amounts()
self.notify_participants()
_end = pando.utils.utcnow()
_delta = _end - _start
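        # The next two lines first fill {0} with the raw timedelta, then fill %s
        # with the human-readable duration, e.g. "Script ran for 1 hour (1:02:03)."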
msg = "Script ran for %s ({0})."
log(msg.format(_delta) % format_timedelta(_delta, locale='en'))
if keep_log:
output_log_name = 'payday-%i.txt' % self.id
output_log_path = log_dir+'/'+output_log_name
if website.s3:
s3_bucket = website.app_conf.s3_payday_logs_bucket
s3_key = 'paydays/'+output_log_name
website.s3.upload_file(output_log_path+'.part', s3_bucket, s3_key)
log("Uploaded log to S3.")
os.rename(output_log_path+'.part', output_log_path)
self.db.run("UPDATE paydays SET stage = NULL WHERE id = %s", (self.id,))
self.stage = None
def shuffle(self, log_dir='.'):
if self.stage > 2:
return
get_transfers = lambda: self.db.all("""
SELECT t.*
, w.remote_owner_id AS tipper_mango_id
, w2.remote_owner_id AS tippee_mango_id
, w.remote_id AS tipper_wallet_id
, w2.remote_id AS tippee_wallet_id
FROM payday_transfers t
LEFT JOIN wallets w ON w.owner = t.tipper AND
w.balance::currency = t.amount::currency AND
w.is_current IS TRUE
LEFT JOIN wallets w2 ON w2.owner = t.tippee AND
w2.balance::currency = t.amount::currency AND
w2.is_current IS TRUE
ORDER BY t.id
""")
if self.stage == 2:
transfers = get_transfers()
done = self.db.all("""
SELECT *
FROM transfers t
WHERE t.timestamp >= %(ts_start)s
AND status = 'succeeded'
""", dict(ts_start=self.ts_start))
done = set((t.tipper, t.tippee, t.context, t.team) for t in done)
transfers = [t for t in transfers if (t.tipper, t.tippee, t.context, t.team) not in done]
else:
assert self.stage == 1
with self.db.get_cursor() as cursor:
self.prepare(cursor, self.ts_start)
self.transfer_virtually(cursor, self.ts_start)
self.check_balances(cursor)
cursor.run("""
UPDATE paydays
SET nparticipants = (SELECT count(*) FROM payday_participants)
WHERE ts_end='1970-01-01T00:00:00+00'::timestamptz;
""")
self.mark_stage_done(cursor)
self.clean_up()
transfers = get_transfers()
self.transfer_for_real(transfers)
self.settle_debts(self.db)
self.db.self_check()
self.end()
self.mark_stage_done()
self.db.run("DROP TABLE payday_transfers")
@staticmethod
def prepare(cursor, ts_start):
"""Prepare the DB: we need temporary tables with indexes and triggers.
"""
cursor.run("""
-- Create the necessary temporary tables and indexes
CREATE TEMPORARY TABLE payday_participants ON COMMIT DROP AS
SELECT id
, username
, join_time
, status
, ( SELECT basket_sum(w.balance)
FROM wallets w
WHERE w.owner = p.id
AND w.is_current
AND %(use_mangopay)s
) AS balances
, goal
, kind
, main_currency
, accepted_currencies
FROM participants p
WHERE join_time < %(ts_start)s
AND is_suspended IS NOT true
AND status <> 'stub'
ORDER BY join_time;
CREATE UNIQUE INDEX ON payday_participants (id);
CREATE TEMPORARY TABLE payday_tips ON COMMIT DROP AS
SELECT t.id, t.tipper, t.tippee, t.amount, (p2.kind = 'group') AS to_team
, coalesce_currency_amount(t.paid_in_advance, t.amount::currency) AS paid_in_advance
, ( t.renewal_mode > 0 AND (p2.goal IS NULL OR p2.goal >= 0) AND
p.status = 'active' AND p2.status = 'active'
) AS process_real_transfers
FROM ( SELECT DISTINCT ON (tipper, tippee) *
FROM tips
WHERE mtime < %(ts_start)s
ORDER BY tipper, tippee, mtime DESC
) t
JOIN payday_participants p ON p.id = t.tipper
JOIN payday_participants p2 ON p2.id = t.tippee
WHERE (p2.goal IS NULL OR p2.goal >= 0 OR t.paid_in_advance > t.amount)
ORDER BY p.join_time ASC, t.ctime ASC;
CREATE INDEX ON payday_tips (tipper);
CREATE INDEX ON payday_tips (tippee);
ALTER TABLE payday_tips ADD COLUMN is_funded boolean;
CREATE TEMPORARY TABLE payday_takes ON COMMIT DROP AS
SELECT team, member, amount, paid_in_advance
FROM ( SELECT DISTINCT ON (team, member) *
FROM takes
WHERE mtime < %(ts_start)s
ORDER BY team, member, mtime DESC
) t
WHERE t.amount IS NOT NULL
AND t.amount <> 0
AND t.team IN (SELECT id FROM payday_participants)
AND t.member IN (SELECT id FROM payday_participants);
CREATE UNIQUE INDEX ON payday_takes (team, member);
DROP TABLE IF EXISTS payday_transfers;
CREATE TABLE payday_transfers
( id serial
, tipper bigint
, tippee bigint
, amount currency_amount
, in_advance currency_amount
, context transfer_context
, team bigint
, invoice int
, UNIQUE (tipper, tippee, context, team)
);
-- Prepare a statement that makes and records a transfer
CREATE OR REPLACE FUNCTION transfer(
a_tipper bigint,
a_tippee bigint,
a_amount currency_amount,
a_context transfer_context,
a_team bigint,
a_invoice int
)
RETURNS void AS $$
DECLARE
in_advance currency_amount;
tip payday_tips;
transfer_amount currency_amount;
BEGIN
IF (a_amount = 0) THEN RETURN; END IF;
in_advance := zero(a_amount);
transfer_amount := a_amount;
IF (a_context IN ('tip', 'take')) THEN
tip := (
SELECT t
FROM payday_tips t
WHERE t.tipper = a_tipper
AND t.tippee = COALESCE(a_team, a_tippee)
);
IF (tip IS NULL) THEN
RAISE 'tip not found: %%, %%, %%', a_tipper, a_tippee, a_team;
END IF;
IF (tip.paid_in_advance > 0) THEN
IF (tip.paid_in_advance >= transfer_amount) THEN
in_advance := transfer_amount;
ELSE
in_advance := tip.paid_in_advance;
END IF;
transfer_amount := transfer_amount - in_advance;
END IF;
IF (NOT tip.process_real_transfers) THEN
transfer_amount := zero(transfer_amount);
END IF;
END IF;
UPDATE payday_participants
SET balances = (balances - transfer_amount)
WHERE id = a_tipper;
IF (NOT FOUND) THEN RAISE 'tipper %% not found', a_tipper; END IF;
UPDATE payday_participants
SET balances = (balances + transfer_amount)
WHERE id = a_tippee;
IF (NOT FOUND) THEN RAISE 'tippee %% not found', a_tippee; END IF;
INSERT INTO payday_transfers
(tipper, tippee, amount, in_advance, context, team, invoice)
VALUES (a_tipper, a_tippee, transfer_amount, in_advance, a_context, a_team, a_invoice);
END;
$$ LANGUAGE plpgsql;
-- Create a function to check whether a tip is "funded" or not
CREATE OR REPLACE FUNCTION check_tip_funding(payday_tips) RETURNS boolean AS $$
DECLARE
tipper_balances currency_basket;
okay boolean = false;
BEGIN
IF ($1.is_funded IS true) THEN RETURN NULL; END IF;
okay := $1.paid_in_advance >= $1.amount;
IF (okay IS NOT true) THEN
IF (NOT $1.process_real_transfers) THEN
RETURN false;
END IF;
tipper_balances := (
SELECT balances
FROM payday_participants p
WHERE id = $1.tipper
);
okay := tipper_balances >= ($1.amount - $1.paid_in_advance);
END IF;
RETURN okay;
END;
$$ LANGUAGE plpgsql;
-- Create a function to settle one-to-one donations
CREATE OR REPLACE FUNCTION settle_tip_graph() RETURNS void AS $$
DECLARE
count integer NOT NULL DEFAULT 0;
i integer := 0;
BEGIN
LOOP
i := i + 1;
WITH updated_rows AS (
UPDATE payday_tips AS t
SET is_funded = true
WHERE is_funded IS NOT true
AND to_team IS NOT true
AND check_tip_funding(t) IS true
RETURNING id, transfer(tipper, tippee, amount, 'tip', NULL, NULL)
)
SELECT COUNT(*) FROM updated_rows INTO count;
IF (count = 0) THEN
EXIT;
END IF;
IF (i > 50) THEN
RAISE 'Reached the maximum number of iterations';
END IF;
END LOOP;
END;
$$ LANGUAGE plpgsql;
""", dict(ts_start=ts_start, use_mangopay=mangopay.sandbox))
log("Prepared the DB.")
@staticmethod
def transfer_virtually(cursor, ts_start):
cursor.run("SELECT settle_tip_graph();")
teams = cursor.all("""
SELECT id, main_currency FROM payday_participants WHERE kind = 'group';
""")
for team_id, currency in teams:
Payday.transfer_takes(cursor, team_id, currency)
cursor.run("""
SELECT settle_tip_graph();
UPDATE payday_tips SET is_funded = false WHERE is_funded IS NULL;
""")
Payday.pay_invoices(cursor, ts_start)
@staticmethod
def transfer_takes(cursor, team_id, currency):
"""Resolve and transfer takes for the specified team
"""
args = dict(team_id=team_id)
tips = cursor.all("""
UPDATE payday_tips AS t
SET is_funded = true
WHERE tippee = %(team_id)s
AND check_tip_funding(t) IS true
RETURNING t.id, t.tipper, t.amount AS full_amount, t.paid_in_advance
, ( SELECT p.balances
FROM payday_participants p
WHERE p.id = t.tipper
) AS balances
, coalesce_currency_amount((
SELECT sum(tr.amount, t.amount::currency)
FROM transfers tr
WHERE tr.tipper = t.tipper
AND tr.team = %(team_id)s
AND tr.context = 'take'
AND tr.status = 'succeeded'
), t.amount::currency) AS past_transfers_sum
""", args)
takes = cursor.all("""
SELECT t.member, t.amount, t.paid_in_advance
, p.main_currency, p.accepted_currencies
FROM payday_takes t
JOIN payday_participants p ON p.id = t.member
WHERE t.team = %(team_id)s;
""", args)
transfers, leftover = Payday.resolve_takes(tips, takes, currency)
for t in transfers:
cursor.run("SELECT transfer(%s, %s, %s, 'take', %s, NULL)",
(t.tipper, t.member, t.amount, team_id))
@staticmethod
def resolve_takes(tips, takes, ref_currency):
"""Resolve many-to-many donations (team takes)
"""
total_income = MoneyBasket(t.full_amount for t in tips)
if total_income == 0:
return (), total_income
if mangopay.sandbox:
takes = [t for t in takes if t.amount != 0]
else:
takes = [t for t in takes if t.amount != 0 and t.paid_in_advance]
if not takes:
return (), total_income
fuzzy_income_sum = total_income.fuzzy_sum(ref_currency)
manual_takes = [t for t in takes if t.amount > 0]
if manual_takes:
manual_takes_sum = MoneyBasket(t.amount for t in manual_takes)
auto_take = fuzzy_income_sum - manual_takes_sum.fuzzy_sum(ref_currency)
if auto_take < 0:
auto_take = auto_take.zero()
else:
auto_take = fuzzy_income_sum
for take in takes:
if take.amount < 0:
take.amount = auto_take.convert(take.amount.currency)
assert take.amount >= 0
total_takes = MoneyBasket(t.amount for t in takes)
fuzzy_takes_sum = total_takes.fuzzy_sum(ref_currency)
tips_by_currency = group_by(tips, lambda t: t.full_amount.currency)
takes_by_preferred_currency = group_by(takes, lambda t: t.main_currency)
takes_by_secondary_currency = {c: [] for c in tips_by_currency}
takes_ratio = min(fuzzy_income_sum / fuzzy_takes_sum, 1)
for take in takes:
take.amount = (take.amount * takes_ratio).round_up()
if take.paid_in_advance is None:
take.paid_in_advance = take.amount.zero()
if take.accepted_currencies is None:
take.accepted_currencies = constants.CURRENCIES
else:
take.accepted_currencies = take.accepted_currencies.split(',')
for accepted in take.accepted_currencies:
skip = (
accepted == take.main_currency or
accepted not in takes_by_secondary_currency
)
if skip:
continue
takes_by_secondary_currency[accepted].append(take)
tips_ratio = min(fuzzy_takes_sum / fuzzy_income_sum, 1)
adjust_tips = tips_ratio != 1
if adjust_tips:
# The team has a leftover, so donation amounts can be adjusted.
# In the following loop we compute the "weeks" count of each tip.
# For example the `weeks` value is 2.5 for a donation currently at
# 10€/week which has distributed 25€ in the past.
for tip in tips:
tip.weeks = round_up(tip.past_transfers_sum / tip.full_amount)
max_weeks = max(tip.weeks for tip in tips)
min_weeks = min(tip.weeks for tip in tips)
adjust_tips = max_weeks != min_weeks
if adjust_tips:
# Some donors have given fewer weeks worth of money than others,
# we want to adjust the amounts so that the weeks count will
# eventually be the same for every donation.
min_tip_ratio = tips_ratio * Decimal('0.1')
# Loop: compute how many "weeks" each tip is behind the "oldest"
# tip, as well as a naive ratio and amount based on that number
# of weeks
for tip in tips:
tip.weeks_to_catch_up = max_weeks - tip.weeks
tip.ratio = min(min_tip_ratio + tip.weeks_to_catch_up, 1)
tip.amount = (tip.full_amount * tip.ratio).round_up()
naive_amounts_sum = MoneyBasket(tip.amount for tip in tips).fuzzy_sum(ref_currency)
total_to_transfer = min(fuzzy_takes_sum, fuzzy_income_sum)
delta = total_to_transfer - naive_amounts_sum
if delta == 0:
# The sum of the naive amounts computed in the previous loop
# matches the end target, we got very lucky and no further
# adjustments are required
adjust_tips = False
else:
# Loop: compute the "leeway" of each tip, i.e. how much it
# can be increased or decreased to fill the `delta` gap
if delta < 0:
# The naive amounts are too high: we want to lower the
# amounts of the tips that have a "high" ratio, leaving
# untouched the ones that are already low
for tip in tips:
if tip.ratio > min_tip_ratio:
min_tip_amount = (tip.full_amount * min_tip_ratio).round_up()
tip.leeway = min_tip_amount - tip.amount
else:
tip.leeway = tip.amount.zero()
else:
# The naive amounts are too low: we can raise all the
# tips that aren't already at their maximum
for tip in tips:
tip.leeway = tip.full_amount - tip.amount
leeway = MoneyBasket(tip.leeway for tip in tips).fuzzy_sum(ref_currency)
if leeway == 0:
# We don't actually have any leeway, give up
adjust_tips = False
else:
leeway_ratio = min(delta / leeway, 1)
tips = sorted(tips, key=lambda tip: (-tip.weeks_to_catch_up, tip.id))
# Loop: compute the adjusted donation amounts, and do the transfers
transfers = []
for tip in tips:
if tip.paid_in_advance is None:
tip.paid_in_advance = tip.full_amount.zero()
if adjust_tips:
tip_amount = (tip.amount + tip.leeway * leeway_ratio).round_up()
if tip_amount == 0:
continue
assert tip_amount > 0
assert tip_amount <= tip.full_amount
tip.amount = tip_amount
else:
tip.amount = (tip.full_amount * tips_ratio).round_up()
tip_currency = tip.amount.currency
sorted_takes = chain(
takes_by_preferred_currency.get(tip_currency, ()),
takes_by_secondary_currency.get(tip_currency, ()),
takes
)
for take in sorted_takes:
if take.amount == 0 or tip.tipper == take.member:
continue
fuzzy_take_amount = take.amount.convert(tip_currency)
in_advance_amount = min(
tip.amount,
fuzzy_take_amount,
max(tip.paid_in_advance, 0),
max(take.paid_in_advance.convert(tip_currency), 0),
)
on_time_amount = min(
max(tip.amount - in_advance_amount, 0),
max(fuzzy_take_amount - in_advance_amount, 0),
tip.balances[tip_currency],
)
transfer_amount = in_advance_amount + on_time_amount
if transfer_amount == 0:
continue
transfers.append(TakeTransfer(tip.tipper, take.member, transfer_amount))
if transfer_amount == fuzzy_take_amount:
take.amount = take.amount.zero()
else:
take.amount -= transfer_amount.convert(take.amount.currency)
if in_advance_amount:
tip.paid_in_advance -= in_advance_amount
take.paid_in_advance -= in_advance_amount.convert(take.amount.currency)
if on_time_amount:
tip.balances -= on_time_amount
tip.amount -= transfer_amount
if tip.amount == 0:
break
leftover = total_income - MoneyBasket(t.amount for t in transfers)
return transfers, leftover
@staticmethod
def pay_invoices(cursor, ts_start):
"""Settle pending invoices
"""
invoices = cursor.all("""
SELECT i.*
FROM invoices i
WHERE i.status = 'accepted'
AND ( SELECT ie.ts
FROM invoice_events ie
WHERE ie.invoice = i.id
ORDER BY ts DESC
LIMIT 1
) < %(ts_start)s;
""", dict(ts_start=ts_start))
for i in invoices:
can_pay = cursor.one("""
SELECT p.balances >= %s AS can_pay
FROM payday_participants p
WHERE id = %s
""", (i.amount, i.addressee))
if not can_pay:
continue
cursor.run("""
SELECT transfer(%(addressee)s, %(sender)s, %(amount)s,
%(nature)s::transfer_context, NULL, %(id)s);
UPDATE invoices
SET status = 'paid'
WHERE id = %(id)s;
INSERT INTO invoice_events
(invoice, participant, status)
VALUES (%(id)s, %(addressee)s, 'paid');
""", i._asdict())
@staticmethod
def check_balances(cursor):
"""Check that balances aren't becoming (more) negative
"""
oops = cursor.one("""
SELECT p.id
, p.username
, p2.balances
FROM payday_participants p2
JOIN participants p ON p.id = p2.id
WHERE p2.balances->'EUR' < 0 OR p2.balances->'USD' < 0
LIMIT 1
""")
if oops:
log(oops)
raise NegativeBalance()
log("Checked the balances.")
def transfer_for_real(self, transfers):
db = self.db
print("Starting transfers (n=%i)" % len(transfers))
msg = "%s transfer #%i (amount=%s in_advance=%s context=%s team=%s)%s"
for t in transfers:
if t.amount:
delay = getattr(self, 'transfer_delay', 0)
action = 'Executing'
when = ' in %.2f seconds' % delay if delay else ' now'
else:
delay = 0
action = 'Recording'
when = ''
log(msg % (action, t.id, t.amount, t.in_advance, t.context, t.team, when))
if delay:
sleep(delay)
if t.in_advance:
db.run("""
INSERT INTO transfers
(tipper, tippee, amount, context,
team, invoice, status,
wallet_from, wallet_to, virtual)
VALUES (%(tipper)s, %(tippee)s, %(in_advance)s, %(context)s,
%(team)s, %(invoice)s, 'succeeded',
'x', 'y', true);
WITH latest_tip AS (
SELECT *
FROM tips
WHERE tipper = %(tipper)s
AND tippee = coalesce(%(team)s, %(tippee)s)
ORDER BY mtime DESC
LIMIT 1
)
UPDATE tips t
SET paid_in_advance = (t.paid_in_advance - %(in_advance)s)
FROM latest_tip lt
WHERE t.tipper = lt.tipper
AND t.tippee = lt.tippee
AND t.mtime >= lt.mtime;
""", t.__dict__)
if t.team:
db.run("""
WITH latest_take AS (
SELECT t.*
FROM takes t
WHERE t.team = %(team)s
AND t.member = %(tippee)s
AND t.amount IS NOT NULL
ORDER BY t.mtime DESC
LIMIT 1
)
UPDATE takes t
SET paid_in_advance = (
coalesce_currency_amount(lt.paid_in_advance, lt.amount::currency) -
convert(%(in_advance)s, lt.amount::currency)
)
FROM latest_take lt
WHERE t.team = lt.team
AND t.member = lt.member
AND t.mtime >= lt.mtime;
""", t.__dict__)
if t.amount:
transfer(db, **t.__dict__)
@classmethod
def clean_up(cls):
cls.db.run("""
DROP FUNCTION settle_tip_graph();
DROP FUNCTION transfer(bigint, bigint, currency_amount, transfer_context, bigint, int);
""")
@staticmethod
def settle_debts(db):
while True:
with db.get_cursor() as cursor:
debt = cursor.one("""
SELECT d.id, d.debtor AS tipper, d.creditor AS tippee, d.amount
, 'debt' AS context
, w_debtor.remote_owner_id AS tipper_mango_id
, w_debtor.remote_id AS tipper_wallet_id
, w_creditor.remote_owner_id AS tippee_mango_id
, w_creditor.remote_id AS tippee_wallet_id
FROM debts d
JOIN wallets w_debtor ON w_debtor.owner = d.debtor AND
w_debtor.balance::currency = d.amount::currency AND
w_debtor.is_current IS TRUE
LEFT JOIN wallets w_creditor ON w_creditor.owner = d.creditor AND
w_creditor.balance::currency = d.amount::currency AND
w_creditor.is_current IS TRUE
JOIN participants p_creditor ON p_creditor.id = d.creditor
WHERE d.status = 'due'
AND w_debtor.balance >= d.amount
AND p_creditor.status = 'active'
LIMIT 1
FOR UPDATE OF d
""")
if not debt:
break
try:
t_id = transfer(db, **debt._asdict())[1]
except NegativeBalance:
continue
cursor.run("""
UPDATE debts
SET status = 'paid'
, settlement = %s
WHERE id = %s
""", (t_id, debt.id))
@classmethod
def update_stats(cls, payday_id):
ts_start, ts_end = cls.db.one("""
SELECT ts_start, ts_end FROM paydays WHERE id = %s
""", (payday_id,))
if payday_id > 1:
previous_ts_start = cls.db.one("""
SELECT ts_start
FROM paydays
WHERE id = %s
""", (payday_id - 1,))
else:
previous_ts_start = constants.EPOCH
assert previous_ts_start
cls.db.run("""\
WITH our_transfers AS (
SELECT *
FROM transfers
WHERE "timestamp" >= %(ts_start)s
AND "timestamp" <= %(ts_end)s
AND status = 'succeeded'
AND context IN ('tip', 'take')
)
, our_tips AS (
SELECT *
FROM our_transfers
WHERE context = 'tip'
)
, our_takes AS (
SELECT *
FROM our_transfers
WHERE context = 'take'
)
, week_exchanges AS (
SELECT e.*
, ( EXISTS (
SELECT e2.id
FROM exchanges e2
WHERE e2.refund_ref = e.id
)) AS refunded
FROM exchanges e
WHERE e.timestamp < %(ts_start)s
AND e.timestamp >= %(previous_ts_start)s
AND status <> 'failed'
)
UPDATE paydays
SET nactive = (
SELECT DISTINCT count(*) FROM (
SELECT tipper FROM our_transfers
UNION
SELECT tippee FROM our_transfers
) AS foo
)
, ntippers = (SELECT count(DISTINCT tipper) FROM our_transfers)
, ntippees = (SELECT count(DISTINCT tippee) FROM our_transfers)
, ntips = (SELECT count(*) FROM our_tips)
, ntakes = (SELECT count(*) FROM our_takes)
, take_volume = (SELECT basket_sum(amount) FROM our_takes)
, ntransfers = (SELECT count(*) FROM our_transfers)
, transfer_volume = (SELECT basket_sum(amount) FROM our_transfers)
, transfer_volume_refunded = (
SELECT basket_sum(amount)
FROM our_transfers
WHERE refund_ref IS NOT NULL
)
, nusers = (
SELECT count(*)
FROM participants p
WHERE p.kind IN ('individual', 'organization')
AND p.join_time < %(ts_start)s
AND COALESCE((
SELECT payload::text
FROM events e
WHERE e.participant = p.id
AND e.type = 'set_status'
AND e.ts < %(ts_start)s
ORDER BY ts DESC
LIMIT 1
), '') <> '"closed"'
)
, week_deposits = (
SELECT basket_sum(amount)
FROM week_exchanges
WHERE amount > 0
AND refund_ref IS NULL
AND status = 'succeeded'
)
, week_deposits_refunded = (
SELECT basket_sum(amount)
FROM week_exchanges
WHERE amount > 0
AND refunded
)
, week_withdrawals = (
SELECT basket_sum(-amount)
FROM week_exchanges
WHERE amount < 0
AND refund_ref IS NULL
)
, week_withdrawals_refunded = (
SELECT basket_sum(amount)
FROM week_exchanges
WHERE amount < 0
AND refunded
)
, week_payins = (
SELECT basket_sum(pi.amount)
FROM payins pi
WHERE pi.ctime < %(ts_start)s
AND pi.ctime >= %(previous_ts_start)s
AND pi.status = 'succeeded'
)
WHERE id = %(payday_id)s
""", locals())
log("Updated stats of payday #%i." % payday_id)
@classmethod
def recompute_stats(cls, limit=None):
ids = cls.db.all("""
SELECT id
FROM paydays
WHERE ts_end > ts_start
ORDER BY id DESC
LIMIT %s
""", (limit,))
for payday_id in ids:
cls.update_stats(payday_id)
@classmethod
def update_cached_amounts(cls):
now = pando.utils.utcnow()
with cls.db.get_cursor() as cursor:
cursor.run("LOCK TABLE takes IN EXCLUSIVE MODE")
cls.prepare(cursor, now)
cls.transfer_virtually(cursor, now)
cursor.run("""
UPDATE tips t
SET is_funded = t2.is_funded
FROM payday_tips t2
WHERE t.id = t2.id
AND t.is_funded <> t2.is_funded;
""")
cursor.run("""
WITH active_donors AS (
SELECT DISTINCT tr.tipper AS id
FROM transfers tr
WHERE tr.context IN ('tip', 'take')
AND tr.timestamp > (current_timestamp - interval '30 days')
AND tr.status = 'succeeded'
UNION
SELECT DISTINCT pi.payer AS id
FROM payins pi
WHERE pi.ctime > (current_timestamp - interval '30 days')
AND pi.status = 'succeeded'
)
UPDATE tips t
SET is_funded = t2.is_funded
FROM ( SELECT t2.id, (t2.tipper IN (SELECT ad.id FROM active_donors ad)) AS is_funded
FROM current_tips t2
JOIN participants tippee_p ON tippee_p.id = t2.tippee
WHERE tippee_p.status = 'stub'
) t2
WHERE t2.id = t.id
AND t.is_funded <> t2.is_funded;
""")
cursor.run("""
UPDATE participants p
SET receiving = p2.receiving
, npatrons = p2.npatrons
FROM ( SELECT p2.id
, count(*) AS npatrons
, coalesce_currency_amount(
sum(t.amount, p2.main_currency),
p2.main_currency
) AS receiving
FROM current_tips t
JOIN participants p2 ON p2.id = t.tippee
WHERE p2.status = 'stub'
AND t.is_funded
GROUP BY p2.id
) p2
WHERE p.id = p2.id
               AND (p.receiving <> p2.receiving OR p.npatrons <> p2.npatrons)
AND p.status = 'stub';
""")
cursor.run("""
UPDATE takes t
SET actual_amount = t2.actual_amount
FROM ( SELECT t2.id
, (
SELECT basket_sum(tr.amount + tr.in_advance)
FROM payday_transfers tr
WHERE tr.team = t2.team
AND tr.tippee = t2.member
AND tr.context = 'take'
) AS actual_amount
FROM current_takes t2
) t2
WHERE t.id = t2.id
AND t.actual_amount <> t2.actual_amount;
""")
cursor.run("""
UPDATE participants p
SET giving = p2.giving
FROM ( SELECT p2.id
, coalesce_currency_amount((
SELECT sum(amount, p2.main_currency)
FROM payday_tips t
WHERE t.tipper = p2.id
AND t.is_funded
), p2.main_currency) AS giving
FROM participants p2
) p2
WHERE p.id = p2.id
AND p.giving <> p2.giving;
""")
cursor.run("""
UPDATE participants p
SET taking = p2.taking
FROM ( SELECT p2.id
, coalesce_currency_amount((
SELECT sum(t.amount + t.in_advance, p2.main_currency)
FROM payday_transfers t
WHERE t.tippee = p2.id
AND context = 'take'
), p2.main_currency) AS taking
FROM participants p2
) p2
WHERE p.id = p2.id
AND p.taking <> p2.taking;
""")
cursor.run("""
UPDATE participants p
SET receiving = p2.receiving
FROM ( SELECT p2.id
, p2.taking + coalesce_currency_amount((
SELECT sum(amount, p2.main_currency)
FROM payday_tips t
WHERE t.tippee = p2.id
AND t.is_funded
), p2.main_currency) AS receiving
FROM participants p2
) p2
WHERE p.id = p2.id
AND p.receiving <> p2.receiving
AND p.status <> 'stub';
""")
cursor.run("""
UPDATE participants p
SET leftover = p2.leftover
FROM ( SELECT p2.id
, (
SELECT basket_sum(t.amount)
FROM payday_tips t
WHERE t.tippee = p2.id
AND t.is_funded
) - (
SELECT basket_sum(t.amount + t.in_advance)
FROM payday_transfers t
WHERE t.tippee = p2.id
OR t.team = p2.id
) AS leftover
FROM participants p2
) p2
WHERE p.id = p2.id
AND p.leftover <> p2.leftover;
""")
cursor.run("""
UPDATE participants p
SET nteampatrons = p2.nteampatrons
FROM ( SELECT p2.id
, ( SELECT count(DISTINCT t.tipper)
FROM payday_transfers t
WHERE t.tippee = p2.id
AND t.context = 'take'
) AS nteampatrons
FROM participants p2
WHERE p2.status <> 'stub'
AND p2.kind IN ('individual', 'organization')
) p2
WHERE p.id = p2.id
AND p.nteampatrons <> p2.nteampatrons;
""")
cursor.run("""
UPDATE participants p
SET npatrons = p2.npatrons
FROM ( SELECT p2.id
, ( SELECT count(*)
FROM payday_tips t
WHERE t.tippee = p2.id
AND t.is_funded
) AS npatrons
FROM participants p2
WHERE p2.status <> 'stub'
) p2
WHERE p.id = p2.id
AND p.npatrons <> p2.npatrons;
""")
cls.clean_up()
log("Updated receiving amounts.")
def mark_stage_done(self, cursor=None):
self.stage = (cursor or self.db).one("""
UPDATE paydays
SET stage = stage + 1
WHERE id = %s
RETURNING stage
""", (self.id,), default=NoPayday)
def end(self):
self.ts_end = self.db.one("""
UPDATE paydays
SET ts_end=now()
WHERE ts_end='1970-01-01T00:00:00+00'::timestamptz
RETURNING ts_end AT TIME ZONE 'UTC'
""", default=NoPayday).replace(tzinfo=pando.utils.utc)
def notify_participants(self):
if self.stage == 3:
self.generate_income_notifications()
self.mark_stage_done()
if self.stage == 4:
from liberapay.payin.cron import send_donation_reminder_notifications
send_donation_reminder_notifications()
self.mark_stage_done()
if self.stage == 5:
self.generate_payment_account_required_notifications()
self.mark_stage_done()
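    # Notify each active recipient of the income received since the previous
    # payday, grouped by team, skipping those who were already notified.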
def generate_income_notifications(self):
previous_ts_end = self.db.one("""
SELECT ts_end
FROM paydays
WHERE ts_start < %s
ORDER BY ts_end DESC
LIMIT 1
""", (self.ts_start,), default=constants.BIRTHDAY)
n = 0
r = self.db.all("""
SELECT tippee, json_agg(t) AS transfers
FROM transfers t
WHERE "timestamp" > %(previous_ts_end)s
AND "timestamp" <= %(ts_end)s
AND context IN ('tip', 'take', 'final-gift')
AND status = 'succeeded'
AND NOT EXISTS (
SELECT 1
FROM notifications n
WHERE n.participant = tippee
AND n.event LIKE 'income~%%'
AND n.ts > %(ts_end)s
)
GROUP BY tippee
ORDER BY tippee
""", dict(previous_ts_end=previous_ts_end, ts_end=self.ts_end))
for tippee_id, transfers in r:
p = self.db.Participant.from_id(tippee_id)
if p.status != 'active' or not p.accepts_tips:
continue
for t in transfers:
t['amount'] = Money(**t['amount'])
by_team = {
k: (
MoneyBasket(t['amount'] for t in v).fuzzy_sum(p.main_currency),
len(set(t['tipper'] for t in v))
) for k, v in group_by(transfers, 'team').items()
}
total = sum((t[0] for t in by_team.values()), MoneyBasket())
nothing = (MoneyBasket(), 0)
personal, personal_npatrons = by_team.pop(None, nothing)
teams = p.get_teams()
team_ids = set(t.id for t in teams) | set(by_team.keys())
team_names = {t.id: t.name for t in teams}
get_username = lambda i: team_names.get(i) or self.db.one(
"SELECT username FROM participants WHERE id = %s", (i,)
)
by_team = {get_username(t_id): by_team.get(t_id, nothing) for t_id in team_ids}
p.notify(
'income~v2',
total=total.fuzzy_sum(p.main_currency),
personal=personal,
personal_npatrons=personal_npatrons,
by_team=by_team,
mangopay_balance=p.get_balances(),
)
n += 1
log("Sent %i income notifications." % n)
def generate_payment_account_required_notifications(self):
n = 0
participants = self.db.all("""
SELECT p
FROM participants p
WHERE p.payment_providers = 0
AND p.status = 'active'
AND p.kind IN ('individual', 'organization')
AND (p.goal IS NULL OR p.goal >= 0)
AND p.is_suspended IS NOT true
AND ( EXISTS (
SELECT 1
FROM current_tips t
JOIN participants tipper ON tipper.id = t.tipper
WHERE t.tippee = p.id
AND t.amount > 0
AND t.renewal_mode > 0
AND (t.paid_in_advance IS NULL OR t.paid_in_advance < t.amount)
AND tipper.email IS NOT NULL
AND tipper.is_suspended IS NOT true
AND tipper.status = 'active'
) OR EXISTS (
SELECT 1
FROM current_takes take
JOIN participants team ON team.id = take.team
WHERE take.member = p.id
AND take.amount <> 0
AND team.receiving > 0
) )
AND NOT EXISTS (
SELECT 1
FROM notifications n
WHERE n.participant = p.id
AND n.event = 'payment_account_required'
AND n.ts > (current_timestamp - interval '6 months')
)
""")
for p in participants:
p.notify('payment_account_required', force_email=True)
n += 1
log("Sent %i payment_account_required notifications." % n)
def compute_next_payday_date():
today = pando.utils.utcnow().date()
days_till_wednesday = (3 - today.isoweekday()) % 7
if days_till_wednesday == 0:
payday_is_already_done = website.db.one("""
SELECT count(*) > 0
FROM paydays
WHERE ts_start::date = %s
AND ts_end > ts_start
""", (today,))
if payday_is_already_done:
days_till_wednesday = 7
return today + timedelta(days=days_till_wednesday)
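# Create the GitHub issue that serves as the public log of the next payday and
# record it in the paydays table.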
def create_payday_issue():
# Make sure today is payday
today = date.today()
today_weekday = today.isoweekday()
today_is_wednesday = today_weekday == 3
assert today_is_wednesday, today_weekday
# Fetch last payday from DB
last_payday = website.db.one("SELECT * FROM paydays ORDER BY id DESC LIMIT 1")
if last_payday:
last_start = last_payday.ts_start
if last_start is None or last_start.date() == today:
return
next_payday_id = str(last_payday.id + 1 if last_payday else 1)
# Prepare to make API requests
app_conf = website.app_conf
sess = requests.Session()
sess.auth = (app_conf.bot_github_username, app_conf.bot_github_token)
github = website.platforms.github
label, repo = app_conf.payday_label, app_conf.payday_repo
# Fetch the previous payday issue
path = '/repos/%s/issues' % repo
params = {'state': 'all', 'labels': label, 'per_page': 1}
r = github.api_get('', path, params=params, sess=sess).json()
last_issue = r[0] if r else None
# Create the next payday issue
next_issue = {'body': '', 'labels': [label]}
if last_issue:
last_issue_payday_id = str(int(last_issue['title'].split()[-1].lstrip('#')))
if last_issue_payday_id == next_payday_id:
return # already created
assert last_issue_payday_id == str(last_payday.id)
next_issue['title'] = last_issue['title'].replace(last_issue_payday_id, next_payday_id)
next_issue['body'] = last_issue['body']
else:
next_issue['title'] = "Payday #%s" % next_payday_id
next_issue = github.api_request('POST', '', '/repos/%s/issues' % repo, json=next_issue, sess=sess).json()
website.db.run("""
INSERT INTO paydays
(id, public_log, ts_start)
VALUES (%s, %s, NULL)
""", (next_payday_id, next_issue['html_url']))
def payday_preexec(): # pragma: no cover
# Tweak env
from os import environ
environ['CACHE_STATIC'] = 'no'
environ['CLEAN_ASSETS'] = 'no'
environ['RUN_CRON_JOBS'] = 'no'
environ['PYTHONPATH'] = website.project_root
# Write PID file
    with open(website.env.log_dir+'/payday.pid', 'w') as pid_file:
        pid_file.write(str(os.getpid()))
def exec_payday(log_file): # pragma: no cover
# Fork twice, like a traditional unix daemon
if os.fork():
return
if os.fork():
os.execlp('true', 'true')
    # Spawn the payday subprocess, then replace this intermediate process
devnull = open(os.devnull)
Popen(
[sys.executable, '-u', 'liberapay/billing/payday.py'],
stdin=devnull, stdout=log_file, stderr=log_file,
close_fds=True, cwd=website.project_root, preexec_fn=payday_preexec,
)
os.execlp('true', 'true')
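# Entry point: sync pending payments, then run the weekly payday under a
# database lock to prevent concurrent runs.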
def main(override_payday_checks=False):
from liberapay.billing.transactions import sync_with_mangopay
from liberapay.main import website
from liberapay.payin import paypal
# https://github.com/liberapay/salon/issues/19#issuecomment-191230689
from liberapay.billing.payday import Payday
if not website.env.override_payday_checks and not override_payday_checks:
# Check that payday hasn't already been run this week
r = website.db.one("""
SELECT id
FROM paydays
WHERE ts_start >= now() - INTERVAL '6 days'
AND ts_end >= ts_start
AND stage IS NULL
""")
assert not r, "payday has already been run this week"
# Prevent a race condition, by acquiring a DB lock
with website.db.lock('payday', blocking=False):
try:
sync_with_mangopay(website.db)
paypal.sync_all_pending_payments(website.db)
Payday.start().run(website.env.log_dir, website.env.keep_payday_logs)
except KeyboardInterrupt: # pragma: no cover
pass
except Exception as e: # pragma: no cover
website.tell_sentry(e, {}, allow_reraise=False)
raise
if __name__ == '__main__': # pragma: no cover
main()
|
py | 1a4e699bf0280417ab011f2c55913e3cdc5a2dfd | # Generated by Django 3.0.3 on 2020-03-17 23:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('LibreBadge', '0015_auto_20200317_2335'),
]
operations = [
migrations.AlterField(
model_name='badgetemplate',
name='template',
field=models.FileField(upload_to='badgeTemplates/'),
),
]
|
py | 1a4e6a77642261737a839f340d02fd1e65052ec4 | from __future__ import annotations
import inspect
from pathlib import Path
import pytest
from _pytest.monkeypatch import MonkeyPatch
import platformdirs
from platformdirs.android import Android
def test_package_metadata() -> None:
assert hasattr(platformdirs, "__version__")
assert hasattr(platformdirs, "__version_info__")
def test_method_result_is_str(func: str) -> None:
method = getattr(platformdirs, func)
result = method()
assert isinstance(result, str)
def test_property_result_is_str(func: str) -> None:
dirs = platformdirs.PlatformDirs("MyApp", "MyCompany", version="1.0")
result = getattr(dirs, func)
assert isinstance(result, str)
def test_method_result_is_path(func_path: str) -> None:
method = getattr(platformdirs, func_path)
result = method()
assert isinstance(result, Path)
def test_property_result_is_path(func_path: str) -> None:
dirs = platformdirs.PlatformDirs("MyApp", "MyCompany", version="1.0")
result = getattr(dirs, func_path)
assert isinstance(result, Path)
def test_function_interface_is_in_sync(func: str) -> None:
function_dir = getattr(platformdirs, func)
function_path = getattr(platformdirs, func.replace("_dir", "_path"))
assert inspect.isfunction(function_dir)
assert inspect.isfunction(function_path)
function_dir_signature = inspect.Signature.from_callable(function_dir)
function_path_signature = inspect.Signature.from_callable(function_path)
assert function_dir_signature.parameters == function_path_signature.parameters
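# The Android backend should only be selected when both ANDROID_ROOT and
# ANDROID_DATA point at the standard system paths.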
@pytest.mark.parametrize("root", ["A", "/system", None])
@pytest.mark.parametrize("data", ["D", "/data", None])
def test_android_active(monkeypatch: MonkeyPatch, root: str | None, data: str | None) -> None:
for env_var, value in {"ANDROID_DATA": data, "ANDROID_ROOT": root}.items():
if value is None:
monkeypatch.delenv(env_var, raising=False)
else:
monkeypatch.setenv(env_var, value)
expected = root == "/system" and data == "/data"
if expected:
assert platformdirs._set_platform_dir_class() is Android
else:
assert platformdirs._set_platform_dir_class() is not Android
|
py | 1a4e6c4fa8217bddd145a995f4359b6d7d30735a | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import IntegrityError, models, transaction
class Migration(DataMigration):
def forwards(self, orm):
db.commit_transaction()
try:
self._forwards(orm)
except Exception:
# Explicitly resume the transaction because
# South is going to try and roll it back, but when
# it can't find one, it'll error itself, masking
# the actual exception being raised
#
# See https://github.com/getsentry/sentry/issues/5035
db.start_transaction()
raise
db.start_transaction()
def _forwards(self, orm):
"Write your forwards methods here."
dupe_envs = orm.Environment.objects.values_list('name', 'organization_id')\
.annotate(ecount=models.Count('id'))\
.filter(ecount__gt=1)
for name, organization_id in dupe_envs:
envs = list(
orm.Environment.objects.filter(
name=name,
organization_id=organization_id,
).order_by('date_added')
)
to_env = envs[0]
from_envs = envs[1:]
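            # Repoint the duplicates' EnvironmentProject rows at the surviving
            # environment, falling back to row-by-row updates (and deletions)
            # when a unique constraint is violated.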
try:
with transaction.atomic():
orm.EnvironmentProject.objects.filter(
environment__in=from_envs,
).update(environment=to_env)
except IntegrityError:
for ep in orm.EnvironmentProject.objects.filter(environment__in=from_envs):
try:
with transaction.atomic():
orm.EnvironmentProject.objects.filter(
id=ep.id,
).update(environment=to_env)
except IntegrityError:
ep.delete()
from_env_ids = [e.id for e in from_envs]
try:
with transaction.atomic():
orm.ReleaseEnvironment.objects.filter(
environment_id__in=from_env_ids,
).update(environment_id=to_env.id)
except IntegrityError:
for re in orm.ReleaseEnvironment.objects.filter(environment_id__in=from_env_ids):
try:
with transaction.atomic():
orm.ReleaseEnvironment.objects.filter(
id=re.id,
).update(environment_id=to_env.id)
except IntegrityError:
re.delete()
orm.Environment.objects.filter(id__in=from_env_ids).delete()
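        # Merge duplicate ReleaseEnvironment rows, keeping the oldest one and
        # carrying over the most recent last_seen timestamp.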
dupe_release_envs = orm.ReleaseEnvironment.objects.values(
'release_id', 'organization_id', 'environment_id'
).annotate(recount=models.Count('id')).filter(recount__gt=1)
for renv in dupe_release_envs:
release_id = renv['release_id']
organization_id = renv['organization_id']
environment_id = renv['environment_id']
renvs = list(
orm.ReleaseEnvironment.objects.filter(
release_id=release_id,
organization_id=organization_id,
environment_id=environment_id,
).order_by('first_seen')
)
to_renv = renvs[0]
from_renvs = renvs[1:]
last_seen = max([re.last_seen for re in renvs])
orm.ReleaseEnvironment.objects.filter(
id=to_renv.id,
).update(last_seen=last_seen)
orm.ReleaseEnvironment.objects.filter(
id__in=[re.id for re in from_renvs],
).delete()
def backwards(self, orm):
"Write your backwards methods here."
models = {
'sentry.activity': {
'Meta': {
'object_name': 'Activity'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True'
}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
)
},
'sentry.apikey': {
'Meta': {
'object_name': 'ApiKey'
},
'allowed_origins':
('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '32'
}),
'label': (
'django.db.models.fields.CharField', [], {
'default': "'Default'",
'max_length': '64',
'blank': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Organization']"
}
),
'scopes': ('django.db.models.fields.BigIntegerField', [], {
'default': 'None'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.apitoken': {
'Meta': {
'object_name': 'ApiToken'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.ApiKey']",
'null': 'True'
}
),
'scopes': ('django.db.models.fields.BigIntegerField', [], {
'default': 'None'
}),
'token':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '64'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.auditlogentry': {
'Meta': {
'object_name': 'AuditLogEntry'
},
'actor': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'audit_actors'",
'null': 'True',
'to': "orm['sentry.User']"
}
),
'actor_key': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.ApiKey']",
'null': 'True',
'blank': 'True'
}
),
'actor_label': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ip_address': (
'django.db.models.fields.GenericIPAddressField', [], {
'max_length': '39',
'null': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'target_object':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'target_user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'audit_targets'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.authenticator': {
'Meta': {
'unique_together': "(('user', 'type'),)",
'object_name': 'Authenticator',
'db_table': "'auth_authenticator'"
},
'config': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {}),
'created_at':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'last_used_at': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.authidentity': {
'Meta': {
'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))",
'object_name': 'AuthIdentity'
},
'auth_provider': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.AuthProvider']"
}
),
'data': ('jsonfield.fields.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'last_synced':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'last_verified':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.authprovider': {
'Meta': {
'object_name': 'AuthProvider'
},
'config': ('jsonfield.fields.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'default_global_access':
('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'default_role':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50'
}),
'default_teams': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Team']",
'symmetrical': 'False',
'blank': 'True'
}
),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '0'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']",
'unique': 'True'
}
),
'provider': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'sync_time':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
})
},
'sentry.broadcast': {
'Meta': {
'object_name': 'Broadcast'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'date_expires': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime(2017, 3, 2, 0, 0)',
'null': 'True',
'blank': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_active':
('django.db.models.fields.BooleanField', [], {
'default': 'True',
'db_index': 'True'
}),
'link': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.CharField', [], {
'max_length': '256'
}),
'title': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'upstream_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'blank': 'True'
}
)
},
'sentry.broadcastseen': {
'Meta': {
'unique_together': "(('broadcast', 'user'),)",
'object_name': 'BroadcastSeen'
},
'broadcast': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Broadcast']"
}
),
'date_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.commit': {
'Meta': {
'unique_together': "(('repository_id', 'key'),)",
'object_name': 'Commit',
'index_together': "(('repository_id', 'date_added'),)"
},
'author': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.CommitAuthor']",
'null': 'True'
}
),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'message': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'repository_id':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.commitauthor': {
'Meta': {
'unique_together': "(('organization_id', 'email'),)",
'object_name': 'CommitAuthor'
},
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
)
},
'sentry.commitfilechange': {
'Meta': {
'unique_together': "(('commit', 'filename'),)",
'object_name': 'CommitFileChange'
},
'commit': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Commit']"
}
),
'filename': ('django.db.models.fields.CharField', [], {
'max_length': '255'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'type': ('django.db.models.fields.CharField', [], {
'max_length': '1'
})
},
'sentry.counter': {
'Meta': {
'object_name': 'Counter',
'db_table': "'sentry_projectcounter'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'unique': 'True'
}
),
'value': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.dsymbundle': {
'Meta': {
'object_name': 'DSymBundle'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.DSymObject']"
}
),
'sdk': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.DSymSDK']"
}
)
},
'sentry.dsymobject': {
'Meta': {
'object_name': 'DSymObject'
},
'cpu_name': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object_path': ('django.db.models.fields.TextField', [], {
'db_index': 'True'
}),
'uuid':
('django.db.models.fields.CharField', [], {
'max_length': '36',
'db_index': 'True'
}),
'vmaddr':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
}),
'vmsize':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
})
},
'sentry.dsymsdk': {
'Meta': {
'object_name':
'DSymSDK',
'index_together':
"[('version_major', 'version_minor', 'version_patchlevel', 'version_build')]"
},
'dsym_type':
('django.db.models.fields.CharField', [], {
'max_length': '20',
'db_index': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'sdk_name': ('django.db.models.fields.CharField', [], {
'max_length': '20'
}),
'version_build': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'version_major': ('django.db.models.fields.IntegerField', [], {}),
'version_minor': ('django.db.models.fields.IntegerField', [], {}),
'version_patchlevel': ('django.db.models.fields.IntegerField', [], {})
},
'sentry.dsymsymbol': {
'Meta': {
'unique_together': "[('object', 'address')]",
'object_name': 'DSymSymbol'
},
'address':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'db_index': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.DSymObject']"
}
),
'symbol': ('django.db.models.fields.TextField', [], {})
},
'sentry.environment': {
'Meta': {
'unique_together': "(('project_id', 'name'),)",
'object_name': 'Environment'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization_id':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'project_id':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'projects': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Project']",
'through': "orm['sentry.EnvironmentProject']",
'symmetrical': 'False'
}
)
},
'sentry.environmentproject': {
'Meta': {
'unique_together': "(('project', 'environment'),)",
'object_name': 'EnvironmentProject'
},
'environment': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Environment']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
},
'sentry.event': {
'Meta': {
'unique_together': "(('project_id', 'event_id'),)",
'object_name': 'Event',
'db_table': "'sentry_message'",
'index_together': "(('group_id', 'datetime'),)"
},
'data':
('sentry.db.models.fields.node.NodeField', [], {
'null': 'True',
'blank': 'True'
}),
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'event_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'db_column': "'message_id'"
}
),
'group_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'time_spent':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'null': 'True'
})
},
'sentry.eventmapping': {
'Meta': {
'unique_together': "(('project_id', 'event_id'),)",
'object_name': 'EventMapping'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event_id': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventprocessingissue': {
'Meta': {
'unique_together': "(('raw_event', 'processing_issue'),)",
'object_name': 'EventProcessingIssue'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'processing_issue': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.ProcessingIssue']"
}
),
'raw_event': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.RawEvent']"
}
)
},
'sentry.eventtag': {
'Meta': {
'unique_together':
"(('event_id', 'key_id', 'value_id'),)",
'object_name':
'EventTag',
'index_together':
"(('project_id', 'key_id', 'value_id'), ('group_id', 'key_id', 'value_id'))"
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'group_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'value_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventuser': {
'Meta': {
'unique_together':
"(('project', 'ident'), ('project', 'hash'))",
'object_name':
'EventUser',
'index_together':
"(('project', 'email'), ('project', 'username'), ('project', 'ip_address'))"
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'null': 'True'
}),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
}),
'ip_address': (
'django.db.models.fields.GenericIPAddressField', [], {
'max_length': '39',
'null': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'username':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
})
},
'sentry.file': {
'Meta': {
'object_name': 'File'
},
'blob': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'legacy_blob'",
'null': 'True',
'to': "orm['sentry.FileBlob']"
}
),
'blobs': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.FileBlob']",
'through': "orm['sentry.FileBlobIndex']",
'symmetrical': 'False'
}
),
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '40',
'null': 'True'
}),
'headers': ('jsonfield.fields.JSONField', [], {
'default': '{}'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'path': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'size':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'timestamp': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'type': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.fileblob': {
'Meta': {
'object_name': 'FileBlob'
},
'checksum':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '40'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'path': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'size':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'timestamp': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
)
},
'sentry.fileblobindex': {
'Meta': {
'unique_together': "(('file', 'blob', 'offset'),)",
'object_name': 'FileBlobIndex'
},
'blob': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.FileBlob']"
}
),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'offset': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.globaldsymfile': {
'Meta': {
'object_name': 'GlobalDSymFile'
},
'cpu_name': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'uuid':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '36'
})
},
'sentry.group': {
'Meta': {
'unique_together': "(('project', 'short_id'),)",
'object_name': 'Group',
'db_table': "'sentry_groupedmessage'",
'index_together': "(('project', 'first_release'),)"
},
'active_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'culprit': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True',
'db_column': "'view'",
'blank': 'True'
}
),
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'first_release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']",
'null': 'True',
'on_delete': 'models.PROTECT'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_public': (
'django.db.models.fields.NullBooleanField', [], {
'default': 'False',
'null': 'True',
'blank': 'True'
}
),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'level': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '40',
'db_index': 'True',
'blank': 'True'
}
),
'logger': (
'django.db.models.fields.CharField', [], {
'default': "''",
'max_length': '64',
'db_index': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'null': 'True'
}
),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'resolved_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'short_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'time_spent_count':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'time_spent_total':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'times_seen': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '1',
'db_index': 'True'
}
)
},
'sentry.groupassignee': {
'Meta': {
'object_name': 'GroupAssignee',
'db_table': "'sentry_groupasignee'"
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'unique': 'True',
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'sentry_assignee_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupbookmark': {
'Meta': {
'unique_together': "(('project', 'user', 'group'),)",
'object_name': 'GroupBookmark'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'sentry_bookmark_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupcommitresolution': {
'Meta': {
'unique_together': "(('group_id', 'commit_id'),)",
'object_name': 'GroupCommitResolution'
},
'commit_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
})
},
'sentry.groupemailthread': {
'Meta': {
'unique_together': "(('email', 'group'), ('email', 'msgid'))",
'object_name': 'GroupEmailThread'
},
'date': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'groupemail_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'msgid': ('django.db.models.fields.CharField', [], {
'max_length': '100'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'groupemail_set'",
'to': "orm['sentry.Project']"
}
)
},
'sentry.grouphash': {
'Meta': {
'unique_together': "(('project', 'hash'),)",
'object_name': 'GroupHash'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
)
},
'sentry.groupmeta': {
'Meta': {
'unique_together': "(('group', 'key'),)",
'object_name': 'GroupMeta'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupredirect': {
'Meta': {
'object_name': 'GroupRedirect'
},
'group_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'db_index': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'previous_group_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'unique': 'True'
})
},
'sentry.grouprelease': {
'Meta': {
'unique_together': "(('group_id', 'release_id', 'environment'),)",
'object_name': 'GroupRelease'
},
'environment':
('django.db.models.fields.CharField', [], {
'default': "''",
'max_length': '64'
}),
'first_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'project_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'release_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
)
},
'sentry.groupresolution': {
'Meta': {
'object_name': 'GroupResolution'
},
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'unique': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.grouprulestatus': {
'Meta': {
'unique_together': "(('rule', 'group'),)",
'object_name': 'GroupRuleStatus'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_active': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'rule': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Rule']"
}
),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {
'default': '0'
})
},
'sentry.groupseen': {
'Meta': {
'unique_together': "(('user', 'group'),)",
'object_name': 'GroupSeen'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'db_index': 'False'
}
)
},
'sentry.groupsnooze': {
'Meta': {
'object_name': 'GroupSnooze'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'unique': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'until': ('django.db.models.fields.DateTimeField', [], {})
},
'sentry.groupsubscription': {
'Meta': {
'unique_together': "(('group', 'user'),)",
'object_name': 'GroupSubscription'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'subscription_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'subscription_set'",
'to': "orm['sentry.Project']"
}
),
'reason':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.grouptagkey': {
'Meta': {
'unique_together': "(('project', 'group', 'key'),)",
'object_name': 'GroupTagKey'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'values_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.grouptagvalue': {
'Meta': {
'unique_together': "(('group', 'key', 'value'),)",
'object_name': 'GroupTagValue',
'db_table': "'sentry_messagefiltervalue'",
'index_together': "(('project', 'key', 'value', 'last_seen'),)"
},
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'null': 'True',
'to': "orm['sentry.Project']"
}
),
'times_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.lostpasswordhash': {
'Meta': {
'object_name': 'LostPasswordHash'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'unique': 'True'
}
)
},
'sentry.option': {
'Meta': {
'object_name': 'Option'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '64'
}),
'last_updated':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {
'object_name': 'Organization'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'default_role':
('django.db.models.fields.CharField', [], {
'default': "'member'",
'max_length': '32'
}),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '1'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'members': (
'django.db.models.fields.related.ManyToManyField', [], {
'related_name': "'org_memberships'",
'symmetrical': 'False',
'through': "orm['sentry.OrganizationMember']",
'to': "orm['sentry.User']"
}
),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'slug':
('django.db.models.fields.SlugField', [], {
'unique': 'True',
'max_length': '50'
}),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.organizationaccessrequest': {
'Meta': {
'unique_together': "(('team', 'member'),)",
'object_name': 'OrganizationAccessRequest'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'member': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.OrganizationMember']"
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.organizationavatar': {
'Meta': {
'object_name': 'OrganizationAvatar'
},
'avatar_type':
('django.db.models.fields.PositiveSmallIntegerField', [], {
'default': '0'
}),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']",
'unique': 'True',
'null': 'True',
'on_delete': 'models.SET_NULL'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': (
'django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '32',
'db_index': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'avatar'",
'unique': 'True',
'to': "orm['sentry.Organization']"
}
)
},
'sentry.organizationmember': {
'Meta': {
'unique_together': "(('organization', 'user'), ('organization', 'email'))",
'object_name': 'OrganizationMember'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': (
'django.db.models.fields.EmailField', [], {
'max_length': '75',
'null': 'True',
'blank': 'True'
}
),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '0'
}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'member_set'",
'to': "orm['sentry.Organization']"
}
),
'role':
('django.db.models.fields.CharField', [], {
'default': "'member'",
'max_length': '32'
}),
'teams': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Team']",
'symmetrical': 'False',
'through': "orm['sentry.OrganizationMemberTeam']",
'blank': 'True'
}
),
'token': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'unique': 'True',
'null': 'True',
'blank': 'True'
}
),
'type': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50',
'blank': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'sentry_orgmember_set'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.organizationmemberteam': {
'Meta': {
'unique_together': "(('team', 'organizationmember'),)",
'object_name': 'OrganizationMemberTeam',
'db_table': "'sentry_organizationmember_teams'"
},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'organizationmember': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.OrganizationMember']"
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.organizationonboardingtask': {
'Meta': {
'unique_together': "(('organization', 'task'),)",
'object_name': 'OrganizationOnboardingTask'
},
'data': ('jsonfield.fields.JSONField', [], {
'default': '{}'
}),
'date_completed':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'project_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'task': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
)
},
'sentry.organizationoption': {
'Meta': {
'unique_together': "(('organization', 'key'),)",
'object_name': 'OrganizationOption',
'db_table': "'sentry_organizationoptions'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.processingissue': {
'Meta': {
'unique_together': "(('project', 'checksum', 'type'),)",
'object_name': 'ProcessingIssue'
},
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '40',
'db_index': 'True'
}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'type': ('django.db.models.fields.CharField', [], {
'max_length': '30'
})
},
'sentry.project': {
'Meta': {
'unique_together': "(('team', 'slug'), ('organization', 'slug'))",
'object_name': 'Project'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'first_event': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'flags':
('django.db.models.fields.BigIntegerField', [], {
'default': '0',
'null': 'True'
}),
'forced_color': (
'django.db.models.fields.CharField', [], {
'max_length': '6',
'null': 'True',
'blank': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '200'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'public': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50',
'null': 'True'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.projectbookmark': {
'Meta': {
'unique_together': "(('project_id', 'user'),)",
'object_name': 'ProjectBookmark'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.projectdsymfile': {
'Meta': {
'unique_together': "(('project', 'uuid'),)",
'object_name': 'ProjectDSymFile'
},
'cpu_name': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'uuid': ('django.db.models.fields.CharField', [], {
'max_length': '36'
})
},
'sentry.projectkey': {
'Meta': {
'object_name': 'ProjectKey'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Project']"
}
),
'public_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'roles': ('django.db.models.fields.BigIntegerField', [], {
'default': '1'
}),
'secret_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.projectoption': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'ProjectOption',
'db_table': "'sentry_projectoptions'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.projectplatform': {
'Meta': {
'unique_together': "(('project_id', 'platform'),)",
'object_name': 'ProjectPlatform'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'platform': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.rawevent': {
'Meta': {
'unique_together': "(('project', 'event_id'),)",
'object_name': 'RawEvent'
},
'data':
('sentry.db.models.fields.node.NodeField', [], {
'null': 'True',
'blank': 'True'
}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event_id':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
},
'sentry.release': {
'Meta': {
'unique_together': "(('organization', 'version'),)",
'object_name': 'Release'
},
'data': ('jsonfield.fields.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'date_released':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'blank': 'True'
}),
'date_started':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'blank': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'new_groups':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'owner': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True',
'blank': 'True'
}
),
'project_id':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'projects': (
'django.db.models.fields.related.ManyToManyField', [], {
'related_name': "'releases'",
'symmetrical': 'False',
'through': "orm['sentry.ReleaseProject']",
'to': "orm['sentry.Project']"
}
),
'ref': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'url': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'version': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.releasecommit': {
'Meta': {
'unique_together': "(('release', 'commit'), ('release', 'order'))",
'object_name': 'ReleaseCommit'
},
'commit': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Commit']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'order': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'project_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True',
'db_index': 'True'
}
),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
)
},
'sentry.releaseenvironment': {
'Meta': {
'unique_together': "(('project_id', 'release_id', 'environment_id'),)",
'object_name': 'ReleaseEnvironment',
'db_table': "'sentry_environmentrelease'"
},
'environment_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'first_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'project_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True',
'db_index': 'True'
}
),
'release_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
)
},
'sentry.releasefile': {
'Meta': {
'unique_together': "(('release', 'ident'),)",
'object_name': 'ReleaseFile'
},
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'name': ('django.db.models.fields.TextField', [], {}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'project_id':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
)
},
'sentry.releaseproject': {
'Meta': {
'unique_together': "(('project', 'release'),)",
'object_name': 'ReleaseProject',
'db_table': "'sentry_release_project'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'new_groups': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'null': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
)
},
'sentry.repository': {
'Meta': {
'unique_together':
"(('organization_id', 'name'), ('organization_id', 'provider', 'external_id'))",
'object_name':
'Repository'
},
'config': ('jsonfield.fields.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'external_id':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '200'
}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'provider':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'url': ('django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True'
})
},
'sentry.reprocessingreport': {
'Meta': {
'unique_together': "(('project', 'event_id'),)",
'object_name': 'ReprocessingReport'
},
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event_id':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
},
'sentry.rule': {
'Meta': {
'object_name': 'Rule'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.savedsearch': {
'Meta': {
'unique_together': "(('project', 'name'),)",
'object_name': 'SavedSearch'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_default': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.savedsearchuserdefault': {
'Meta': {
'unique_together': "(('project', 'user'),)",
'object_name': 'SavedSearchUserDefault',
'db_table': "'sentry_savedsearch_userdefault'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'savedsearch': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.SavedSearch']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.tagkey': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'TagKey',
'db_table': "'sentry_filterkey'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'label':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'values_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.tagvalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value'),)",
'object_name': 'TagValue',
'db_table': "'sentry_filtervalue'"
},
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'times_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.team': {
'Meta': {
'unique_together': "(('organization', 'slug'),)",
'object_name': 'Team'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50'
}),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.user': {
'Meta': {
'object_name': 'User',
'db_table': "'auth_user'"
},
'date_joined':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'blank': 'True'
}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'is_managed': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_password_expired':
('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_staff': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'last_login':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'last_password_change': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'name': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'db_column': "'first_name'",
'blank': 'True'
}
),
'password': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'session_nonce':
('django.db.models.fields.CharField', [], {
'max_length': '12',
'null': 'True'
}),
'username':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '128'
})
},
'sentry.useravatar': {
'Meta': {
'object_name': 'UserAvatar'
},
'avatar_type':
('django.db.models.fields.PositiveSmallIntegerField', [], {
'default': '0'
}),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']",
'unique': 'True',
'null': 'True',
'on_delete': 'models.SET_NULL'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': (
'django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '32',
'db_index': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'avatar'",
'unique': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.useremail': {
'Meta': {
'unique_together': "(('user', 'email'),)",
'object_name': 'UserEmail'
},
'date_hash_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_verified': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'emails'",
'to': "orm['sentry.User']"
}
),
'validation_hash': (
'django.db.models.fields.CharField', [], {
'default': "u'xSM70zG7MyRUVIUcaNBY2CyvizXoGfhQ'",
'max_length': '32'
}
)
},
'sentry.useroption': {
'Meta': {
'unique_together': "(('user', 'project', 'key'),)",
'object_name': 'UserOption'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.userreport': {
'Meta': {
'unique_together': "(('project', 'event_id'),)",
'object_name': 'UserReport',
'index_together': "(('project', 'event_id'), ('project', 'date_added'))"
},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'event_id': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
}
}
complete_apps = ['sentry']
symmetrical = True
|
py | 1a4e6c648f7edea76388d86e6d981a5a84b3f6d6 | # -*- coding: utf-8 -*-
# Copyright 2019 Cohesity Inc.
import cohesity_management_sdk.models.filtering_policy_proto
class O365BackupEnvParams(object):
"""Implementation of the 'O365BackupEnvParams' model.
TODO: type model description here.
Attributes:
filtering_policy (FilteringPolicyProto): Proto to encapsulate the
filtering policy for backup objects like files or directories. If
an object is not matched by any of the 'allow_filters', it will be
excluded in the backup. If an object is matched by one of the
'deny_filters', it will always be excluded in the backup.
Basically 'deny_filters' overwrite 'allow_filters' if they both
match the same object. Currently we only support two kinds of
filter: prefix which always starts with '/', or postfix which
always starts with '*' (cannot be "*" only). We don't support
regular expression right now. A concrete example is: Allow
filters: "/" Deny filters: "/tmp", "*.mp4" Using such a policy
will include everything under the root directory except the /tmp
directory and all the mp4 files.
"""
# Create a mapping from Model property names to API property names
_names = {
"filtering_policy":'filteringPolicy'
}
def __init__(self,
filtering_policy=None):
"""Constructor for the O365BackupEnvParams class"""
# Initialize members of the class
self.filtering_policy = filtering_policy
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
filtering_policy = cohesity_management_sdk.models.filtering_policy_proto.FilteringPolicyProto.from_dictionary(dictionary.get('filteringPolicy')) if dictionary.get('filteringPolicy') else None
# Return an object of this model
return cls(filtering_policy)
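# Usage sketch (illustrative only): round-trip a serialized payload through
# from_dictionary. 'filteringPolicy' is the real API property name mapped in
# _names above; the nested keys are assumptions, not taken from the SDK.
#
#   params = O365BackupEnvParams.from_dictionary(
#       {'filteringPolicy': {'allowFilters': ['/'], 'denyFilters': ['/tmp', '*.mp4']}})
#   print(params.filtering_policy)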
|
py | 1a4e6ce5efc802a7279d7bad7db4944bd8510f5f | def string_to_array(s):
return [i for i in s.split()] if len(s) >= 1 else [""] |
 py | 1a4e6d9d0768e713fd8c07f1c6ef22887033f7ae | '''The vtk_anim module defines a set of classes to generate 3d
animations with vtk in the form of a series of png images.
'''
import vtk
import os
import numpy as np
from pymicro.view.vtk_utils import set_opacity
class vtkAnimationScene:
def __init__(self, ren, ren_size=(600, 600)):
self.timer_count = 0
self.timer_incr = 1
self.timer_end = -1 # run until 'q' is pressed
self.save_image = False
self.prefix = 'prefix'
self.verbose = False
self.anims = []
# Create a window for the renderer
self.renWin = vtk.vtkRenderWindow()
self.renWin.AddRenderer(ren)
self.renWin.SetSize(ren_size)
# Start the initialization and rendering
self.iren = vtk.vtkRenderWindowInteractor()
self.iren.SetRenderWindow(self.renWin)
self.renWin.Render()
self.iren.Initialize()
def add_animation(self, anim):
anim.scene = self
anim.save_image = self.save_image
anim.prefix = self.prefix
anim.verbose = self.verbose
self.anims.append(anim)
self.iren.AddObserver('TimerEvent', anim.execute)
def write_image(self):
# capture the display and write a png image
w2i = vtk.vtkWindowToImageFilter()
w2i.SetInput(self.iren.GetRenderWindow())
# the next two lines fix some opacity problems but slow things down...
# self.renWin.Render()
# self.iren.Render()
writer = vtk.vtkPNGWriter()
writer.SetInputConnection(w2i.GetOutputPort())
file_name = os.path.join(self.prefix, '%s_%03d.png' % (self.prefix, self.timer_count))
writer.SetFileName(file_name)
writer.Write()
def execute(self, iren, event):
self.timer_count += self.timer_incr
if self.verbose:
print('animation scene timer_count=', self.timer_count)
if self.timer_end > 0 and self.timer_count > self.timer_end:
print('end of animation loop, exiting...')
self.iren.ExitCallback()
else:
self.iren.Render()
if self.save_image:
self.write_image()
def render(self):
if self.save_image and not os.path.exists(self.prefix):
os.mkdir(self.prefix) # create a folder to store the images
timerId = self.iren.CreateRepeatingTimer(100) # time in ms
self.iren.AddObserver('TimerEvent', self.execute)
self.iren.Start()
def render_at(self, time=0.):
if self.save_image and not os.path.exists(self.prefix):
os.mkdir(self.prefix) # create a folder to store the images
self.timer_count = time
self.iren.CreateOneShotTimer(100) # time in ms
self.iren.AddObserver('TimerEvent', self.execute)
self.iren.Start()
class vtkAnimation:
    '''
    Abstract class for all vtk animation stuff.
    '''
def __init__(self, t, duration=10):
self.scene = None
self.time_anim_starts = t
self.time_anim_ends = t + duration
self.verbose = False
def pre_execute(self):
if self.verbose:
print(self.__repr__())
if self.scene.timer_count < self.time_anim_starts or self.scene.timer_count > self.time_anim_ends:
return 0
else:
return 1
def post_execute(self, iren, event):
pass
def __repr__(self):
out = [self.__class__.__name__,
' timer: ' + str(self.scene.timer_count),
' anim starts at: ' + str(self.time_anim_starts),
' anim ends at: ' + str(self.time_anim_ends)]
return '\n'.join(out)
class vtkAnimCameraAroundZ(vtkAnimation):
'''
Animate the camera around the vertical axis.
This class can be used to generate a series of images (default 36)
    while the camera rotates around the vertical axis (defined by the
camera SetViewUp method).
'''
def __init__(self, t, cam, turn=360):
'''Initialize the animation.
        The animation performs a full turn in 36 frames by default.
'''
print('init vtkAnimCameraAroundZ')
vtkAnimation.__init__(self, t)
self.turn = turn
self.time_anim_ends = t + abs(self.turn) / 10
print('time_anim_starts', self.time_anim_starts)
print('time_anim_ends', self.time_anim_ends)
print('turn', self.turn)
self.camera = cam
def execute(self, iren, event):
'''Execute method called to rotate the camera.'''
do = vtkAnimation.pre_execute(self)
if not do: return
t1 = self.time_anim_starts
t2 = self.time_anim_ends
r = self.turn / (t2 - t1)
if self.scene.verbose:
print('moving azimuth by', r)
self.camera.Azimuth(r)
vtkAnimation.post_execute(self, iren, event)
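# Usage sketch (not part of the module API): assuming an existing vtkRenderer
# `ren` already populated with actors, a full 360-degree turntable that writes
# one PNG per timer tick could be wired up like this:
#
#   scene = vtkAnimationScene(ren, ren_size=(600, 600))
#   scene.save_image = True
#   scene.prefix = 'turntable'
#   scene.add_animation(vtkAnimCameraAroundZ(0, ren.GetActiveCamera(), turn=360))
#   scene.render()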
class vtkRotateActorAroundAxis(vtkAnimation):
def __init__(self, t=0, duration=10, axis=(0., 0., 1.), angle=360):
vtkAnimation.__init__(self, t, duration)
self.actor = None
self.axis = axis
self.angle = angle
def set_actor(self, actor):
self.actor = actor
# keep track of the initial user transform matrix
transform = actor.GetUserTransform()
if not transform:
transform = vtk.vtkTransform()
transform.Identity()
actor.SetUserTransform(transform)
#self.user_transform_matrix = actor.GetUserTransform().GetMatrix()
def execute(self, iren, event):
'''instruction block executed when a TimerEvent is captured by the vtkRotateActorAroundAxis.
If the time is not in [start, end] nothing is done. Otherwise the
transform matrix corresponding to the 3D rotation is applied to the actor.
'''
do = vtkAnimation.pre_execute(self)
if not do:
return
t1 = self.time_anim_starts
t2 = self.time_anim_ends
angle = (self.scene.timer_count - t1) / float(t2 - t1) * self.angle
from pymicro.crystal.microstructure import Orientation
om = Orientation.Axis2OrientationMatrix(self.axis, angle)
m = vtk.vtkMatrix4x4() # row major order, 16 elements matrix
m.Identity()
for j in range(3):
for i in range(3):
m.SetElement(j, i, om[i, j])
t = vtk.vtkTransform()
#t.SetMatrix(self.user_transform_matrix)
t.SetMatrix(self.actor.GetUserTransform().GetMatrix())
t.Concatenate(m)
self.actor.SetUserTransform(t)
vtkAnimation.post_execute(self, iren, event)
class vtkRotateActorAroundZAxis(vtkRotateActorAroundAxis):
def __init__(self, t=0):
vtkRotateActorAroundAxis.__init__(self, t, duration=360, axis=(0., 0., 1.), angle=360)
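# Usage sketch: rotate an existing actor one full turn about Z over the course
# of the animation (the `actor` and the surrounding scene setup are assumed to
# exist elsewhere):
#
#   rot = vtkRotateActorAroundZAxis(t=0)
#   rot.set_actor(actor)
#   scene.add_animation(rot)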
class vtkAnimCameraToZ(vtkAnimation):
def __init__(self, t, cam):
vtkAnimation.__init__(self, t)
self.camera = cam
def execute(self, iren, event):
do = vtkAnimation.pre_execute(self)
if not do: return
t1 = self.time_anim_starts
t2 = self.time_anim_ends
angle = 90 - (t2 - self.scene.timer_count) / float(t2 - t1) * (90 - 15)
if self.verbose:
print(self.scene.timer_count, self.camera.GetPosition(), angle)
self.camera.SetPosition(0, -2 * np.cos(angle * np.pi / 180.), 2 * np.sin(angle * np.pi / 180.))
vtkAnimation.post_execute(self, iren, event)
class vtkZoom(vtkAnimation):
def __init__(self, t, cam, zoom):
vtkAnimation.__init__(self, t)
self.camera = cam
self.zoom = zoom
self.timer_end = t + 10
def execute(self, iren, event):
do = vtkAnimation.pre_execute(self)
if not do: return
t1 = self.time_anim_starts
t2 = self.time_anim_ends
z = 1 + (self.zoom - 1) * (self.scene.timer_count - t1) / float(t2 - t1)
if self.verbose:
print('zooming to', z)
self.camera.Zoom(z)
vtkAnimation.post_execute(self, iren, event)
class vtkSetVisibility(vtkAnimation):
def __init__(self, t, actor, visible=1, max_opacity=1, gradually=False):
vtkAnimation.__init__(self, t)
self.actor = actor
self.visible = visible
self.gradually = gradually
self.max_opacity = max_opacity
def execute(self, iren, event):
do = vtkAnimation.pre_execute(self)
if not do: return
if not self.gradually:
self.actor.SetVisibility(self.visible)
set_opacity(self.actor, 1)
else:
t1 = self.time_anim_starts
t2 = self.time_anim_ends
if self.scene.timer_count >= t1 and self.scene.timer_count <= t2: # useless to test this (do == 1 here)
if self.actor.GetVisibility() == 0:
self.actor.SetVisibility(1) # make the actor visible
if self.visible:
opacity = self.max_opacity * (1 - (t2 - self.scene.timer_count) / float(t2 - t1))
else:
opacity = self.max_opacity * (t2 - self.scene.timer_count) / float(t2 - t1)
if self.verbose:
print('opacity=', opacity)
# change the opacity for each actor in the assembly
set_opacity(self.actor, opacity)
vtkAnimation.post_execute(self, iren, event)
class vtkMoveActor(vtkAnimation):
def __init__(self, t, actor, motion):
vtkAnimation.__init__(self, t)
self.actor = actor
        if self.actor.GetUserTransform() is None:
if self.verbose:
print('setting initial 4x4 matrix')
t = vtk.vtkTransform()
t.Identity()
self.actor.SetUserTransform(t)
self.motion = np.array(motion).astype(float)
def execute(self, iren, event):
do = vtkAnimation.pre_execute(self)
if not do: return
t1 = self.time_anim_starts
t2 = self.time_anim_ends
d = self.motion / (t2 - t1)
if self.verbose:
print('will move actor by', d)
self.actor.GetUserTransform().Translate(d)
vtkAnimation.post_execute(self, iren, event)
class vtkAnimLine(vtkAnimation):
def __init__(self, points, t1, t2):
vtkAnimation.__init__(self, t1)
self.time_anim_line_end = t2
self.line_points = points
self.p0 = np.array(self.line_points.GetPoint(0))
self.p1 = np.array(self.line_points.GetPoint(1))
self.grid = None
self.actor = None
self.pole = None
def execute(self, iren, event):
do = vtkAnimation.pre_execute(self)
if not do: return
t1 = self.time_anim_starts
t2 = self.time_anim_ends
# if self.scene.timer_count >= t1 and self.scene.timer_count <= t2:
self.actor.SetVisibility(1)
point = self.p1 + (t2 - self.scene.timer_count) / float(t2 - t1) * (self.p0 - self.p1)
self.line_points.SetPoint(1, point)
        if point[2] <= 0 and self.pole is not None:
self.pole.SetVisibility(1)
self.grid.Modified()
vtkAnimation.post_execute(self, iren, event)
class vtkUpdateText(vtkAnimation):
def __init__(self, text_actor, str_method, t=0, duration=10):
vtkAnimation.__init__(self, t, duration)
self.actor = text_actor
self.str_method = str_method
def execute(self, iren, event):
do = vtkAnimation.pre_execute(self)
if not do:
return
t1 = self.time_anim_starts
t2 = self.time_anim_ends
updated_text = self.str_method() #self.scene.timer_count, t1, t2)
self.actor.GetMapper().SetInput(updated_text)
vtkAnimation.post_execute(self, iren, event)
if __name__ == '__main__':
cam = vtk.vtkCamera()
    anim = vtkAnimCameraAroundZ(10, cam)
anim.verbose = True
|
py | 1a4e6e772baae00519efe34dbbbc4312df79e797 | #!/usr/bin/python3
try:
import os, sys, requests
import argparse, json
import datetime as dt
import configparser
from elasticsearch import Elasticsearch
from github import Github
from string import ascii_letters
print("All libraries/modules loaded as expected !!!!! ")
except Exception as err:
print("Missing Modules =====> %s" %err)
print("Kindly installed using pip3 install <pip-package-name>")
sys.exit(1)
parser=argparse.ArgumentParser(prog='Github_commit_indexer',
    epilog=''' NOTE: This script pulls commits from a public GitHub repo,
    then indexes each commit before storing it in an Elasticsearch cloud deployment
''')
parser.add_argument('--GithubUser', nargs='?', default='RockstarLang', help= 'Github user account')
parser.add_argument('--repo', nargs='?', default='rockstar', help= 'Github repo')
if len(sys.argv)==1:
parser.print_help(sys.stderr)
#sys.exit(1)
args=parser.parse_args()
def to_stderr(msg):
print(msg, file=sys.stderr, flush=True)
def error(msg):
to_stderr('ERROR: ' + msg)
sys.exit(1)
def datetime_formater(unformated_datetime):
'''
    This function converts a GitHub API datetime string to a human-readable format
'''
date_time = unformated_datetime.split("T")
date = date_time[0].split("-")
time = date_time[1].rstrip(ascii_letters).split(":")
formated_datetime = dt.datetime(int(date[0]), int(date[1]), int(date[2]), int(time[0]), int(time[1]), int(time[2]))
return formated_datetime.strftime("%d-%b-%Y %H:%M:%S")
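# Example: datetime_formater("2021-05-03T14:07:09Z") returns "03-May-2021 14:07:09"
# (the month abbreviation produced by %b depends on the active locale).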
def Elastic_Search(elk_object, commit_document,indx):
try:
'''
        Ingest the commit document into the Elasticsearch deployment
'''
#elk_object.indices.create(index = indx, ignore=400)
ingest_status = elk_object.index(index=indx, body=commit_document)
if ingest_status["result"] != "created" and int(ingest_status["_shards"]["failed"]) == 1:
print(json.dumps(commit_document, indent = 2))
error("Ingesting to ElasticServer deployment failed for last committed indexed document \n ")
elk_object.indices.refresh(index = indx)
except Exception as err:
error(str(err))
def commit_info(api_commit_url, ElasticSearch):
'''
    This function pulls out the needed info to be ingested as index documents for cloud Elasticsearch
'''
global document
global count
try:
commit_data = requests.get(api_commit_url).json()
document.setdefault("Date", datetime_formater(commit_data["commit"]["author"]["date"]))
document.setdefault("Username", commit_data["author"]["login"])
document.setdefault("Message", commit_data["commit"]["message"].replace("\n\n", " "))
Elastic_Search(ElasticSearch, document, document["Username"])
print(json.dumps(document, indent = 2))
print("indexed document ingested into clould deployment successfully !!!!!")
print("\n\n")
document = {}
except Exception as err:
print("\n\n")
error(str("%s: %s" %(err,commit_data["message"])))
if __name__ == '__main__':
try:
document = {}
'''
        Parse login credentials for GitHub and Elasticsearch
'''
login_config_parse = configparser.ConfigParser()
login_config_parse.read('login_credential.ini')
        # verify that the login_credential.ini file exists
if not os.path.isfile("login_credential.ini"):
print('\n\n### Kindly create a basic authentication file named "login_credential.ini"')
print("[ELASTIC]")
print('cloud_id = "DEPLOYMENT_NAME:CLOUD_ID_DETAILS" ')
print('user = Username' )
print('Password = Password \n\n\n')
print('[GITHUB]')
            print('login_or_token = GitHub Personal Access Token')
sys.exit(1)
'''
Connect to Github repo
kindly note that unauthenticated API calls are rate limited to 60 requests/hour
'''
GH = Github(login_or_token=login_config_parse['GITHUB']['login_or_token'])
github_object = GH.get_user(args.GithubUser)
GH_repo = github_object.get_repo(args.repo)
'''
Connect to elastic search cloud deployment using cloud_id & http_auth method
'''
ES = Elasticsearch(
cloud_id = login_config_parse['ELASTIC']['cloud_id'],
http_auth = (login_config_parse['ELASTIC']['user'], login_config_parse['ELASTIC']['password'])
)
#print(json.dumps(ES.info(), indent = 2))
'''
        Verify successful communication with the Elasticsearch deployment
'''
if ES.ping() is not True:
print("Kindly verify your deployment status/login credential, refers to the below official ElasticSearch documentation on basic authentication")
print("https://www.elastic.co/guide/en/cloud/current/ec-getting-started-python.html")
'''
        Note: won't scale nicely because of GitHub API rate limiting (limited number of requests/hour)
'''
commit = GH_repo.get_commits()
count = 0
'''
        This loops over the paginated list of commit SHAs, then builds the repo's commit API URL for each commit SHA
'''
for commit_hash in commit:
commit_sha = str(commit_hash).split('"')[1]
commit_url = GH_repo.commits_url.split("{/sha}")[0]+"/{}".format(commit_sha)
commit_info(commit_url, ES)
count+=1
print("Process now completed!!!!!!")
except Exception as err:
error(str(err))
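# Example invocation (argument defaults shown; the file name used here is an assumption):
#   python3 Github_commit_indexer.py --GithubUser RockstarLang --repo rockstar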
|
py | 1a4e6ee938c3a5a76020aa892f2a83053f557738 | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from torch.autograd import Variable
class VGG_enc(nn.Module):
def __init__(self, input_channels=6):
super(VGG_enc, self).__init__()
in_channels = input_channels
self.c11 = nn.Conv2d(in_channels, 64, kernel_size=3, padding=1)
self.bn11 = nn.BatchNorm2d(64)
self.c12 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
self.bn12 = nn.BatchNorm2d(64)
self.p1 = nn.MaxPool2d(kernel_size=2, stride=2)
self.c21 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
self.bn21 = nn.BatchNorm2d(128)
self.c22 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
self.bn22 = nn.BatchNorm2d(128)
self.p2 = nn.MaxPool2d(kernel_size=2, stride=2)
self.c31 = nn.Conv2d(128, 256, kernel_size=3, padding=1)
self.bn31 = nn.BatchNorm2d(256)
self.c32 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
self.bn32 = nn.BatchNorm2d(256)
self.c33 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
self.bn33 = nn.BatchNorm2d(256)
self.p3 = nn.MaxPool2d(kernel_size=2, stride=2)
self.c41 = nn.Conv2d(256, 512, kernel_size=3, padding=1)
self.bn41 = nn.BatchNorm2d(512)
self.c42 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.bn42 = nn.BatchNorm2d(512)
self.c43 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.bn43 = nn.BatchNorm2d(512)
self.p4 = nn.MaxPool2d(kernel_size=2, stride=2)
self.c51 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.bn51 = nn.BatchNorm2d(512)
self.c52 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.bn52 = nn.BatchNorm2d(512)
self.c53 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.bn53 = nn.BatchNorm2d(512)
def forward(self, x):
o11 = F.relu(self.bn11(self.c11(x)), inplace=True)
o12 = F.relu(self.bn12(self.c12(o11)), inplace=True)
o1p = self.p1(o12)
o21 = F.relu(self.bn21(self.c21(o1p)), inplace=True)
o22 = F.relu(self.bn22(self.c22(o21)), inplace=True)
o2p = self.p2(o22)
o31 = F.relu(self.bn31(self.c31(o2p)), inplace=True)
o32 = F.relu(self.bn32(self.c32(o31)), inplace=True)
o33 = F.relu(self.bn33(self.c33(o32)), inplace=True)
o3p = self.p3(o33)
o41 = F.relu(self.bn41(self.c41(o3p)), inplace=True)
o42 = F.relu(self.bn42(self.c42(o41)), inplace=True)
o43 = F.relu(self.bn43(self.c43(o42)), inplace=True)
o4p = self.p4(o43)
o51 = F.relu(self.bn51(self.c51(o4p)), inplace=True)
o52 = F.relu(self.bn52(self.c52(o51)), inplace=True)
o53 = F.relu(self.bn53(self.c53(o52)), inplace=True)
return o53, o43, o33
class VGG_dec(nn.Module):
def __init__(self):
super(VGG_dec, self).__init__()
out_channels = 6
self.c53 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.bn53 = nn.BatchNorm2d(512)
self.c52 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.bn52 = nn.BatchNorm2d(512)
self.c51 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.bn51 = nn.BatchNorm2d(512)
self.u5 = nn.Upsample(scale_factor=2, mode='nearest')
self.c43 = nn.Conv2d(1024, 512, kernel_size=3, padding=1)
self.bn43 = nn.BatchNorm2d(512)
self.c42 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.bn42 = nn.BatchNorm2d(512)
self.c41 = nn.Conv2d(512, 256, kernel_size=3, padding=1)
self.bn41 = nn.BatchNorm2d(256)
self.u4 = nn.Upsample(scale_factor=2, mode='nearest')
self.c33 = nn.Conv2d(512, 256, kernel_size=3, padding=1)
self.bn33 = nn.BatchNorm2d(256)
self.c32 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
self.bn32 = nn.BatchNorm2d(256)
self.c31 = nn.Conv2d(256, 128, kernel_size=3, padding=1)
self.bn31 = nn.BatchNorm2d(128)
self.u3 = nn.Upsample(scale_factor=2, mode='nearest')
self.c22 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
self.bn22 = nn.BatchNorm2d(128)
self.c21 = nn.Conv2d(128, 64, kernel_size=3, padding=1)
self.bn21 = nn.BatchNorm2d(64)
self.u2 = nn.Upsample(scale_factor=2, mode='nearest')
self.c12 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
self.bn12 = nn.BatchNorm2d(64)
#self.c11 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
#self.bn11 = nn.BatchNorm2d(64)
def forward(self, i53, i43, i33):
o53 = F.relu(self.bn53(self.c53(i53)), inplace=True)
o52 = F.relu(self.bn52(self.c52(o53)), inplace=True)
o51 = F.relu(self.bn51(self.c51(o52)), inplace=True)
o5u = self.u5(o51)
o5c = torch.cat((o5u, i43), 1)
o43 = F.relu(self.bn43(self.c43(o5c)), inplace=True)
o42 = F.relu(self.bn42(self.c42(o43)), inplace=True)
o41 = F.relu(self.bn41(self.c41(o42)), inplace=True)
o4u = self.u4(o41)
o4c = torch.cat((o4u, i33), 1)
o33 = F.relu(self.bn33(self.c33(o4c)), inplace=True)
o32 = F.relu(self.bn32(self.c32(o33)), inplace=True)
o31 = F.relu(self.bn31(self.c31(o32)), inplace=True)
o3u = self.u3(o31)
o22 = F.relu(self.bn22(self.c22(o3u)), inplace=True)
o21 = F.relu(self.bn21(self.c21(o22)), inplace=True)
o2u = self.u2(o21)
o12 = F.relu(self.bn12(self.c12(o2u)), inplace=True)
#o11 = F.relu(self.bn11(self.c11(o12)), inplace=True)
return o12
class VGG_net(nn.Module):
cfg = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512]
def __init__(self, input_channels):
super(VGG_net, self).__init__()
self.enc_net = VGG_enc(input_channels)
self.dec_net = VGG_dec()
self.conv_warp = nn.Conv2d(self.cfg[0], 2, kernel_size=3, padding=1)
self.conv_mask = nn.Conv2d(self.cfg[0], 1, kernel_size=3, padding=1)
self.conv_comp = nn.Conv2d(self.cfg[0], 3, kernel_size=3, padding=1)
self._initialize_weights()
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
# input: Nx3x3x256x320
def forward(self, x):
dec_feat = self.dec_net(*self.enc_net(x))
flow = self.conv_warp(dec_feat)
mask = self.conv_mask(dec_feat)
comp = self.conv_comp(dec_feat)
return flow, mask, comp
def VGG_Warper(input_channels = 6):
return VGG_net(input_channels)
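# Minimal usage sketch (the input size is an assumption; any height/width
# divisible by 16 works, since the encoder pools four times and the decoder
# upsamples back to full resolution):
#
#   warper = VGG_Warper(input_channels=6)
#   x = torch.randn(1, 6, 256, 320)   # e.g. two RGB frames stacked channel-wise
#   flow, mask, comp = warper(x)      # shapes: (1,2,256,320), (1,1,256,320), (1,3,256,320)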
|
py | 1a4e6f28ef2896ca97a5f8591425a344d142507a |
import numpy as np
class Note(object):
def __init__(self,name,ratio):
self.__name = name
self.__ratio = ratio
self.__cent = 1200*np.log(self.__ratio)/np.log(2)
def setName(self,name):
self.__name = name
def getName(self):
return self.__name
def setRatio(self,ratio):
        self.__ratio = ratio
self.__cent = 1200*np.log(self.__ratio)/np.log(2)
def getRatio(self):
return self.__ratio
def getCent(self):
return self.__cent
name = property (getName,setName)
ratio = property (getRatio,setRatio)
cent = property (getCent)
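# Example: a just perfect fifth has frequency ratio 3/2, i.e. about 702 cents.
#
#   fifth = Note('P5', 1.5)
#   round(fifth.cent, 2)   # 701.96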
|
py | 1a4e6fb4c8dd8caf598b67f199f0a1c40665f3a5 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for training routines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
import numpy as np
from tensorflow.contrib.keras.python import keras
from tensorflow.python.platform import test
from tensorflow.python.training import training as training_module
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
class TestModelSaving(test.TestCase):
def test_sequential_model_saving(self):
if h5py is None:
return # Skip test if models cannot be saved.
with self.test_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.RepeatVector(3))
model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
model.compile(loss=keras.losses.MSE,
optimizer=keras.optimizers.RMSprop(lr=0.0001),
metrics=[keras.metrics.categorical_accuracy],
sample_weight_mode='temporal')
x = np.random.random((1, 3))
y = np.random.random((1, 3, 3))
model.train_on_batch(x, y)
out = model.predict(x)
_, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
new_model = keras.models.load_model(fname)
os.remove(fname)
out2 = new_model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
# test that new updates are the same with both models
x = np.random.random((1, 3))
y = np.random.random((1, 3, 3))
model.train_on_batch(x, y)
new_model.train_on_batch(x, y)
out = model.predict(x)
out2 = new_model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
def test_sequential_model_saving_2(self):
if h5py is None:
return # Skip test if models cannot be saved.
with self.test_session():
# test with custom optimizer, loss
class CustomOp(keras.optimizers.RMSprop):
pass
def custom_loss(y_true, y_pred):
return keras.losses.mse(y_true, y_pred)
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(loss=custom_loss, optimizer=CustomOp(), metrics=['acc'])
x = np.random.random((1, 3))
y = np.random.random((1, 3))
model.train_on_batch(x, y)
out = model.predict(x)
_, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(
fname,
custom_objects={'CustomOp': CustomOp,
'custom_loss': custom_loss})
os.remove(fname)
out2 = model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
def test_functional_model_saving(self):
if h5py is None:
return # Skip test if models cannot be saved.
with self.test_session():
inputs = keras.layers.Input(shape=(3,))
x = keras.layers.Dense(2)(inputs)
output = keras.layers.Dense(3)(x)
model = keras.models.Model(inputs, output)
model.compile(loss=keras.losses.MSE,
optimizer=keras.optimizers.RMSprop(lr=0.0001),
metrics=[keras.metrics.categorical_accuracy])
x = np.random.random((1, 3))
y = np.random.random((1, 3))
model.train_on_batch(x, y)
out = model.predict(x)
_, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(fname)
os.remove(fname)
out2 = model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
def test_saving_without_compilation(self):
if h5py is None:
return # Skip test if models cannot be saved.
with self.test_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(loss='mse', optimizer='sgd', metrics=['acc'])
_, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(fname)
os.remove(fname)
def test_saving_with_tf_optimizer(self):
if h5py is None:
return # Skip test if models cannot be saved.
with self.test_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(loss='mse',
optimizer=training_module.AdadeltaOptimizer(0.1),
metrics=['acc'])
_, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(fname)
os.remove(fname)
def test_saving_right_after_compilation(self):
if h5py is None:
return # Skip test if models cannot be saved.
with self.test_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(loss='mse', optimizer='sgd', metrics=['acc'])
model.model._make_train_function()
_, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(fname)
os.remove(fname)
def test_saving_lambda_numpy_array_arguments(self):
if h5py is None:
return # Skip test if models cannot be saved.
mean = np.random.random((4, 2, 3))
std = np.abs(np.random.random((4, 2, 3))) + 1e-5
inputs = keras.layers.Input(shape=(4, 2, 3))
output = keras.layers.Lambda(lambda image, mu, std: (image - mu) / std,
arguments={'mu': mean, 'std': std})(inputs)
model = keras.models.Model(inputs, output)
model.compile(loss='mse', optimizer='sgd', metrics=['acc'])
_, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(fname)
os.remove(fname)
self.assertAllClose(mean, model.layers[1].arguments['mu'])
self.assertAllClose(std, model.layers[1].arguments['std'])
class TestSequential(test.TestCase):
"""Most Sequential model API tests are covered in `training_test.py`.
"""
def test_basic_methods(self):
model = keras.models.Sequential()
model.add(keras.layers.Dense(1, input_dim=2))
model.add(keras.layers.Dropout(0.3, name='dp'))
model.add(keras.layers.Dense(2, kernel_regularizer='l2',
kernel_constraint='max_norm'))
model.build()
self.assertEqual(model.state_updates, model.model.state_updates)
self.assertEqual(model.get_layer(name='dp').name, 'dp')
def test_sequential_pop(self):
num_hidden = 5
input_dim = 3
batch_size = 5
num_classes = 2
with self.test_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(num_hidden, input_dim=input_dim))
model.add(keras.layers.Dense(num_classes))
model.compile(loss='mse', optimizer='sgd')
x = np.random.random((batch_size, input_dim))
y = np.random.random((batch_size, num_classes))
model.fit(x, y, epochs=1)
model.pop()
self.assertEqual(len(model.layers), 1)
self.assertEqual(model.output_shape, (None, num_hidden))
model.compile(loss='mse', optimizer='sgd')
y = np.random.random((batch_size, num_hidden))
model.fit(x, y, epochs=1)
# Test popping single-layer model
model = keras.models.Sequential()
model.add(keras.layers.Dense(num_hidden, input_dim=input_dim))
model.pop()
self.assertEqual(len(model.layers), 0)
self.assertEqual(len(model.outputs), 0)
# Invalid use case
model = keras.models.Sequential()
with self.assertRaises(TypeError):
model.pop()
def test_sequential_weight_loading(self):
if h5py is None:
return
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
h5_path = os.path.join(temp_dir, 'test.h5')
num_hidden = 5
input_dim = 3
batch_size = 5
num_classes = 2
with self.test_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(num_hidden, input_dim=input_dim))
model.add(keras.layers.Dense(num_classes))
x = np.random.random((batch_size, input_dim))
ref_y = model.predict(x)
model.save_weights(h5_path)
model = keras.models.Sequential()
model.add(keras.layers.Dense(num_hidden, input_dim=input_dim))
model.add(keras.layers.Dense(num_classes))
model.load_weights(h5_path)
y = model.predict(x)
self.assertAllClose(y, ref_y)
def test_invalid_use_cases(self):
with self.test_session():
# Added objects must be layer instances
with self.assertRaises(TypeError):
model = keras.models.Sequential()
model.add(None)
# Added layers must have an inputs shape
with self.assertRaises(ValueError):
model = keras.models.Sequential()
model.add(keras.layers.Dense(1))
# Added layers cannot have multiple outputs
class MyLayer(keras.layers.Layer):
def call(self, inputs):
return [3 * inputs, 2 * inputs]
def _compute_output_shape(self, input_shape):
return [input_shape, input_shape]
with self.assertRaises(ValueError):
model = keras.models.Sequential()
model.add(MyLayer(input_shape=(3,)))
with self.assertRaises(TypeError):
model = keras.models.Sequential()
model.add(keras.layers.Dense(1, input_dim=1))
model.add(MyLayer())
# Building empty model
model = keras.models.Sequential()
with self.assertRaises(TypeError):
model.build()
class TestModelCloning(test.TestCase):
def test_clone_sequential_model(self):
with self.test_session():
val_a = np.random.random((10, 4))
val_out = np.random.random((10, 4))
model = keras.models.Sequential()
model.add(keras.layers.Dense(4, input_shape=(4,)))
model.add(keras.layers.Dropout(0.5))
model.add(keras.layers.Dense(4))
# Everything should work in a new session.
keras.backend.clear_session()
with self.test_session():
# With placeholder creation
new_model = keras.models.clone_model(model)
new_model.compile('rmsprop', 'mse')
new_model.train_on_batch(val_a, val_out)
# On top of new tensor
input_a = keras.Input(shape=(4,))
new_model = keras.models.clone_model(
model, input_tensors=input_a)
new_model.compile('rmsprop', 'mse')
new_model.train_on_batch(val_a, val_out)
# On top of new, non-Keras tensor
input_a = keras.backend.variable(val_a)
new_model = keras.models.clone_model(
model, input_tensors=input_a)
new_model.compile('rmsprop', 'mse')
new_model.train_on_batch(None, val_out)
def test_clone_functional_model(self):
with self.test_session():
val_a = np.random.random((10, 4))
val_b = np.random.random((10, 4))
val_out = np.random.random((10, 4))
input_a = keras.Input(shape=(4,))
input_b = keras.Input(shape=(4,))
dense_1 = keras.layers.Dense(4,)
dense_2 = keras.layers.Dense(4,)
x_a = dense_1(input_a)
x_a = keras.layers.Dropout(0.5)(x_a)
x_b = dense_1(input_b)
x_a = dense_2(x_a)
outputs = keras.layers.add([x_a, x_b])
model = keras.models.Model([input_a, input_b], outputs)
# Everything should work in a new session.
keras.backend.clear_session()
with self.test_session():
# With placeholder creation
new_model = keras.models.clone_model(model)
new_model.compile('rmsprop', 'mse')
new_model.train_on_batch([val_a, val_b], val_out)
# On top of new tensors
input_a = keras.Input(shape=(4,), name='a')
input_b = keras.Input(shape=(4,), name='b')
new_model = keras.models.clone_model(
model, input_tensors=[input_a, input_b])
new_model.compile('rmsprop', 'mse')
new_model.train_on_batch([val_a, val_b], val_out)
# On top of new, non-Keras tensors
input_a = keras.backend.variable(val_a)
input_b = keras.backend.variable(val_b)
new_model = keras.models.clone_model(
model, input_tensors=[input_a, input_b])
new_model.compile('rmsprop', 'mse')
new_model.train_on_batch(None, val_out)
def test_model_cloning_invalid_use_cases(self):
seq_model = keras.models.Sequential()
seq_model.add(keras.layers.Dense(4, input_shape=(4,)))
x = keras.Input((4,))
y = keras.layers.Dense(4)(x)
fn_model = keras.models.Model(x, y)
with self.assertRaises(ValueError):
keras.models._clone_functional_model(seq_model)
with self.assertRaises(ValueError):
keras.models._clone_functional_model(None)
with self.assertRaises(ValueError):
keras.models._clone_sequential_model(fn_model)
with self.assertRaises(ValueError):
keras.models._clone_sequential_model(seq_model, input_tensors=[x, x])
with self.assertRaises(ValueError):
keras.models._clone_sequential_model(seq_model, input_tensors=y)
if __name__ == '__main__':
test.main()
|
py | 1a4e6fe80931a5b3f1c32cdc077388c23e06e5a7 | class TimeoutException(Exception):
"""
    Raised when the max timeout is reached while waiting for a lock to be acquired
"""
|
py | 1a4e70edd872fb96534e81968f6f2b16139e67f4 | import boto3
import copy
import hashlib
import logging
import json
import time
import typing
import uuid
from bert import \
encoders as bert_encoders, \
datasource as bert_datasource, \
constants as bert_constants
from datetime import datetime, timedelta
logger = logging.getLogger(__name__)
PWN = typing.TypeVar('PWN')
DELAY: int = 15
class QueueItem:
__slots__ = ('_payload', '_identity')
_payload: typing.Dict[str, typing.Any]
_identity: str
def __init__(self: PWN, payload: typing.Dict[str, typing.Any], identity: str = None) -> None:
self._payload = payload
self._identity = identity
def calc_identity(self: PWN) -> str:
if self._identity:
return self._identity
combined: str = ''.join(bert_encoders.encode_identity_object(self._payload))
combined: str = f'{combined}-{uuid.uuid4()}'
return hashlib.sha256(combined.encode(bert_constants.ENCODING)).hexdigest()
def keys(self: PWN) -> typing.Any:
        return self._payload.keys()
def get(self: PWN, name: str, default: typing.Any = None) -> typing.Any:
return self._payload.get(name, default)
def clone(self: PWN) -> typing.Any:
return self.__class__(copy.deepcopy(self._payload))
def __getitem__(self: PWN, name: str) -> typing.Any:
try:
return self._payload[name]
except KeyError:
raise KeyError(f'key-name[{name}] not found')
def __setitem__(self: PWN, name: str, value: typing.Any) -> None:
self._payload[name] = value
def __delitem__(self: PWN, name: str) -> None:
try:
del self._payload[name]
except KeyError:
raise KeyError(f'key-name[{name}] not found')
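# Usage sketch: identities are content-derived but salted with uuid4, so two
# QueueItems built from identical payloads still hash to distinct identities.
#
#   item = QueueItem({'job': 'resize', 'width': 128})
#   item.calc_identity()   # 64-character sha256 hex digest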
class BaseQueue:
_table_name: str
_value: QueueItem
def __init__(self: PWN, table_name: str) -> None:
self._table_name = table_name
self._value = None
def __next__(self) -> typing.Any:
        if self._value is not None:
logger.debug('Destroying Value')
self._destroy(self._value)
self._value = None
self._value = self.get()
if self._value is None or self._value == 'STOP':
raise StopIteration
return self._value
def get(self: PWN) -> QueueItem:
raise NotImplementedError
def put(self: PWN, value: typing.Union[typing.Dict[str, typing.Any], QueueItem]) -> None:
raise NotImplementedError
def __iter__(self) -> PWN:
return self
def _destroy(self: PWN, queue_item: QueueItem) -> None:
raise NotImplementedError
def size(self: PWN) -> str:
raise NotImplementedError
class DynamodbQueue(BaseQueue):
_dynamodb_client: 'boto3.client("dynamodb")'
def __init__(self: PWN, table_name: str) -> None:
super(DynamodbQueue, self).__init__(table_name)
self._dynamodb_client = boto3.client('dynamodb')
def _destroy(self: PWN, queue_item: QueueItem, confirm_delete: bool = False) -> None:
if confirm_delete:
self._dynamodb_client.delete_item(
TableName=self._table_name,
Key={'identity': {'S': queue_item.calc_identity()}},
Expected={'identity': {'Exists': True, 'Value': {'S': queue_item.calc_identity()}}})
else:
self._dynamodb_client.delete_item(
TableName=self._table_name,
Key={'identity': {'S': queue_item.calc_identity()}})
def put(self: PWN, value: typing.Union[typing.Dict[str, typing.Any], QueueItem]) -> None:
if isinstance(value, dict):
queue_item = QueueItem(value)
elif isinstance(value, QueueItem):
queue_item = value
else:
raise NotImplementedError
encoded_value = bert_encoders.encode_object({
'identity': queue_item.calc_identity(),
'datum': queue_item.clone(),
})
self._dynamodb_client.put_item(TableName=self._table_name, Item=encoded_value)
def get(self: PWN) -> typing.Dict[str, typing.Any]:
try:
value: typing.Any = self._dynamodb_client.scan(TableName=self._table_name, Select='ALL_ATTRIBUTES', Limit=1)['Items'][0]
except IndexError:
return None
else:
queue_item = QueueItem(bert_encoders.decode_object(value['datum']), value['identity']['S'])
if value['identity']['S'] in ['sns-entry', 'invoke-arg', 'api-gateway', 'cognito']:
return queue_item
# The order of data when coming out of the database may not be preserved, resulting in a different identity
# assert queue_item.calc_identity() == value['identity']['S'], f'{queue_item.calc_identity()} != {value["identity"]["S"]}'
return queue_item
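# Sketch (illustrative only; assumes a configured AWS region/credentials and an existing
# DynamoDB table named 'bert-work-queue', which is a hypothetical name): the intended
# round trip is put() -> get() -> _destroy(). get() scans one arbitrary item rather than
# popping in FIFO order, so ordering is not guaranteed.
def _dynamodb_queue_usage_sketch() -> None:
    queue = DynamodbQueue('bert-work-queue')
    queue.put({'job': 'resize-images', 'batch': 3})  # stored under a content+UUID identity
    item = queue.get()                               # QueueItem, or None when the table is empty
    if item is not None:
        queue._destroy(item)                         # deletes by the identity read back from the table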
class RedisQueue(BaseQueue):
_table_name: str
_redis_client: 'redis-client'
def __init__(self, table_name: str) -> None:
super(RedisQueue, self).__init__(table_name)
self._redis_client = bert_datasource.RedisConnection.ParseURL(bert_constants.REDIS_URL).client()
self._redis_client_async = None
def flushdb(self) -> None:
self._redis_client.flushdb()
def _destroy(self: PWN, queue_item: QueueItem) -> None:
pass
def size(self: PWN) -> int:
return int(self._redis_client.llen(self._table_name))
async def _resolve_connection(self: PWN) -> None:
if self._redis_client_async is None:
self._redis_client_async = await bert_datasource.RedisConnection.ParseURL(bert_constants.REDIS_URL).client_async()
return self._redis_client_async
async def size_async(self: PWN) -> int:
await self._resolve_connection()
return int(await self._redis_client_async.execute('llen', self._table_name))
def get(self) -> QueueItem:
try:
value: str = self._redis_client.lpop(self._table_name).decode(bert_constants.ENCODING)
except AttributeError:
return 'STOP'
else:
# if self._cache_backend.has(value):
# return self._cache_backend.obtain(value)
return bert_encoders.decode_object(json.loads(value)['datum'])
async def get_async(self: PWN, prefetch: int = 1) -> typing.List[QueueItem]:
await self._resolve_connection()
list_len = await self._redis_client_async.execute('llen', self._table_name)
batch = await self._redis_client_async.execute('lrange', self._table_name, 0, prefetch - 1)
if batch:
await self._redis_client_async.execute('ltrim', self._table_name, len(batch), list_len)
return [bert_encoders.decode_object(json.loads(value.decode(bert_constants.ENCODING))['datum']) for value in batch]
return []
def put(self: PWN, value: typing.Dict[str, typing.Any]) -> None:
encoded_value = json.dumps(bert_encoders.encode_object({
'identity': 'local-queue',
'datum': value
})).encode(bert_constants.ENCODING)
# self._cache_backend.store(encoded_value)
self._redis_client.rpush(self._table_name, encoded_value)
async def put_async(self: PWN, values: typing.List[typing.Dict[str, typing.Any]]) -> None:
await self._resolve_connection()
encoded_values = [json.dumps(bert_encoders.encode_object({
'identity': 'local-queue',
'datum': value,
})).encode(bert_constants.ENCODING) for value in values]
await self._redis_client_async.execute('rpush', self._table_name, *encoded_values)
class StreamingQueue(DynamodbQueue):
"""
When deploying functions to AWS Lambda, auto-invocation is available as an option to run the functions. With StreamingQueue, we want to push local objects into
the API that is already in use. We also want to keep the existing `put` function so that the `done_queue` API still pushes contents into the next `work_queue`.
We also augment the local `get` function API to pull only from records local to the stream, never from DynamoDB.
"""
# Share the memory across invocations, within the same process/thread. This allows for
# comm_binders to be called multiple times and still pull from the same queue
_queue: typing.List[typing.Dict[str, typing.Any]] = []
def local_put(self: PWN, record: typing.Union[typing.Dict[str, typing.Any], QueueItem]) -> None:
if isinstance(record, dict):
queue_item = QueueItem(bert_encoders.decode_object(record['datum']), record['identity']['S'])
elif isinstance(record, QueueItem):
queue_item = record
self._queue.append(queue_item)
def get(self: PWN) -> QueueItem:
try:
value: QueueItem = self._queue.pop(0)
except IndexError:
# return super(StreamingQueue, self).get()
return None
else:
return value
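# Sketch (illustrative only): StreamingQueue is fed via local_put(), typically from records
# delivered to the Lambda invocation, and get() drains the shared in-process list instead of
# scanning DynamoDB (that fallback is commented out above). Constructing it still creates a
# boto3 DynamoDB client, so an AWS region must be configured; the table name is hypothetical.
def _streaming_queue_usage_sketch() -> None:
    queue = StreamingQueue('bert-work-queue')
    queue.local_put(QueueItem({'job': 'resize-images'}))  # QueueItem instances are accepted directly
    item = queue.get()                                    # pops from the class-level queue
    assert item is not None and item['job'] == 'resize-images'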
class LocalQueue(DynamodbQueue):
"""
When testing, it's convenient to use only a LocalQueue.
"""
_key: str = None
# Share the memory across invocations, within the same process/thread. This allows for
# comm_binders to be called multiple times and still pull from the same queue
_queue: typing.List[typing.Dict[str, typing.Any]] = []
def __init__(self: PWN, key: str) -> None:
self._key = key
self._value = None
def local_put(self: PWN, record: typing.Dict[str, typing.Any]) -> None:
self._queue.append(copy.deepcopy(record))
def put(self: PWN, record: typing.Dict[str, typing.Any]) -> None:
logger.info(f'LocalQueue Put[{record}]')
def get(self: PWN) -> typing.Dict[str, typing.Any]:
try:
# may need to unpack because local queues are used for debugging in AWS Lambda
value: typing.Any = self._queue.pop(0)
except IndexError:
return None
else:
return value
|
py | 1a4e7134a9ae788d62efdba3c239ba7ad3b15d50 | import numpy as np
from astropy.wcs.wcsapi import BaseHighLevelWCS
from glue.core import BaseData
from glue_jupyter.bqplot.image import BqplotImageView
from jdaviz.core.registries import viewer_registry
__all__ = ['ImvizImageView']
@viewer_registry("imviz-image-viewer", label="Image 2D (Imviz)")
class ImvizImageView(BqplotImageView):
tools = ['bqplot:panzoom', 'bqplot:rectangle', 'bqplot:circle', 'bqplot:matchwcs']
default_class = None
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.label_mouseover = None
self.add_event_callback(self.on_mouse_or_key_event, events=['mousemove', 'mouseenter',
'mouseleave', 'keydown'])
self.state.show_axes = False
def on_mouse_or_key_event(self, data):
# Find visible layers
visible_layers = [layer for layer in self.state.layers if layer.visible]
if len(visible_layers) == 0:
return
if self.label_mouseover is None:
if 'g-coords-info' in self.session.application._tools:
self.label_mouseover = self.session.application._tools['g-coords-info']
else:
return
if data['event'] == 'mousemove':
# Display the current cursor coordinates (both pixel and world) as
# well as data values. For now we use the first dataset in the
# viewer for the data values.
# Extract first dataset from visible layers and use this for coordinates - the choice
# of dataset shouldn't matter if the datasets are linked correctly
image = visible_layers[0].layer
# Extract data coordinates - these are pixels in the image
x = data['domain']['x']
y = data['domain']['y']
maxsize = int(np.ceil(np.log10(np.max(image.shape)))) + 3
fmt = 'x={0:0' + str(maxsize) + '.1f} y={1:0' + str(maxsize) + '.1f}'
self.label_mouseover.pixel = (fmt.format(x, y))
if isinstance(image.coords, BaseHighLevelWCS):
# Convert these to a SkyCoord via WCS - note that for other datasets
# we aren't actually guaranteed to get a SkyCoord out, just for images
# with valid celestial WCS
try:
celestial_coordinates = (image.coords.pixel_to_world(x, y).icrs
.to_string('hmsdms', precision=4, pad=True))
except Exception:
self.label_mouseover.world = ''
else:
self.label_mouseover.world = f'{celestial_coordinates:32s} (ICRS)'
else:
self.label_mouseover.world = ''
# Extract data values at this position.
# TODO: for now we just use the first visible layer but we should think
# of how to display values when multiple datasets are present.
if x > -0.5 and y > -0.5 and x < image.shape[1] - 0.5 and y < image.shape[0] - 0.5:
attribute = visible_layers[0].attribute
value = image.get_data(attribute)[int(round(y)), int(round(x))]
unit = image.get_component(attribute).units
self.label_mouseover.value = f'{value:+10.5e} {unit}'
else:
self.label_mouseover.value = ''
elif data['event'] == 'mouseleave' or data['event'] == 'mouseenter':
self.label_mouseover.pixel = ""
self.label_mouseover.world = ""
self.label_mouseover.value = ""
if data['event'] == 'keydown' and data['key'] == 'b':
# Simple blinking of images - this will make it so that only one
# layer is visible at a time and cycles through the layers.
if len(self.state.layers) > 1:
# If only one layer is visible, pick the next one to be visible,
# otherwise start from the last visible one.
visible = [ilayer for ilayer, layer in
enumerate(self.state.layers) if layer.visible]
if len(visible) > 0:
next_layer = (visible[-1] + 1) % len(self.state.layers)
self.state.layers[next_layer].visible = True
for ilayer in visible:
if ilayer != next_layer:
self.state.layers[ilayer].visible = False
def set_plot_axes(self):
self.figure.axes[1].tick_format = None
self.figure.axes[0].tick_format = None
self.figure.axes[1].label = "y: pixels"
self.figure.axes[0].label = "x: pixels"
# Make it so y axis label is not covering tick numbers.
self.figure.axes[1].label_offset = "-50"
def data(self, cls=None):
return [layer_state.layer # .get_object(cls=cls or self.default_class)
for layer_state in self.state.layers
if hasattr(layer_state, 'layer') and
isinstance(layer_state.layer, BaseData)]
|
py | 1a4e717068e0a94447add52f455b1633983e9106 | """Auto-generated file, do not edit by hand. GW metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_GW = PhoneMetadata(id='GW', country_code=245, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='(?:4(?:0\\d{5}|4\\d{7})|9\\d{8})', possible_number_pattern='\\d{7,9}', possible_length=(7, 9)),
fixed_line=PhoneNumberDesc(national_number_pattern='443\\d{6}', possible_number_pattern='\\d{9}', example_number='443201234', possible_length=(9,)),
mobile=PhoneNumberDesc(national_number_pattern='9(?:5(?:5\\d|6[0-2])|6(?:5[0-2]|6\\d|9[012])|77\\d)\\d{5}', possible_number_pattern='\\d{9}', example_number='955012345', possible_length=(9,)),
toll_free=PhoneNumberDesc(),
premium_rate=PhoneNumberDesc(),
shared_cost=PhoneNumberDesc(),
personal_number=PhoneNumberDesc(),
voip=PhoneNumberDesc(national_number_pattern='40\\d{5}', possible_number_pattern='\\d{7}', example_number='4012345', possible_length=(7,)),
pager=PhoneNumberDesc(),
uan=PhoneNumberDesc(),
voicemail=PhoneNumberDesc(),
no_international_dialling=PhoneNumberDesc(),
number_format=[NumberFormat(pattern='(\\d{3})(\\d{3})(\\d{3})', format='\\1 \\2 \\3', leading_digits_pattern=['44|9[567]']),
NumberFormat(pattern='(\\d{3})(\\d{4})', format='\\1 \\2', leading_digits_pattern=['40'])])
|
py | 1a4e71ef1cd3f11d4c5f25efd03d610376414a9f | """
Django settings for itunes project.
Generated by 'django-admin startproject' using Django 2.2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
environ.Env.read_env(env_file=(environ.Path(__file__) - 2)(".env"))
env = environ.Env(
# set casting, default value
DEBUG=(bool, False),
ALLOWED_HOSTS=(lambda v: [s.strip() for s in v.split(",")], "*"),
LANGUAGE_CODE=(str, "en-us"),
TIME_ZONE=(str, "UTC"),
STATIC_URL=(str, "/static/"),
CELERY_BROKER_URL=(str, "redis://localhost:6379/0"),
)
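# Illustrative .env sketch (placeholder values, not real secrets) matching the casts above;
# the database URL shown is a placeholder:
#
#   SECRET_KEY=change-me
#   DEBUG=True
#   ALLOWED_HOSTS=localhost,127.0.0.1
#   DEFAULT_DATABASE=postgres://user:password@localhost:5432/itunes
#   CELERY_BROKER_URL=redis://localhost:6379/0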
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG")
ALLOWED_HOSTS = env.list("ALLOWED_HOSTS")
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"rest_framework",
"rest_framework.authtoken",
"drf_yasg",
"django_filters",
"corsheaders",
"core.authentication",
"core.common",
"core.api",
"django_celery_results",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"whitenoise.middleware.WhiteNoiseMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"corsheaders.middleware.CorsMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "core.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
]
},
}
]
WSGI_APPLICATION = "core.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {"default": env.db(var="DEFAULT_DATABASE", default="sqlite:///db.sqlite3")}
# Auth
AUTH_USER_MODEL = "authentication.User"
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = env.str("LANGUAGE_CODE")
TIME_ZONE = env.str("TIME_ZONE")
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
STATIC_URL = env.str("STATIC_URL")
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# rest framework
REST_FRAMEWORK = {
"DEFAULT_AUTHENTICATION_CLASSES": [
"rest_framework.authentication.TokenAuthentication"
],
"DEFAULT_PAGINATION_CLASS": "rest_framework.pagination." + "LimitOffsetPagination",
"PAGE_SIZE": 20,
"DEFAULT_FILTER_BACKENDS": ["django_filters.rest_framework.DjangoFilterBackend"],
"DEFAULT_METADATA_CLASS": "core.api.v1.metadata.Metadata",
}
# Celery
CELERY_RESULT_BACKEND = "django-db"
CELERY_BROKER_URL = env.str("CELERY_BROKER_URL")
CELERY_ACCEPT_CONTENT = ["application/json"]
CELERY_RESULT_SERIALIZER = "json"
CELERY_TASK_SERIALIZER = "json"
# cors headers
CORS_ORIGIN_ALLOW_ALL = True
# CSRF
CSRF_COOKIE_DOMAIN = env.str("CSRF_COOKIE_DOMAIN", None)
CSRF_COOKIE_SECURE = env.bool("CSRF_COOKIE_SECURE", False)
# Swagger
SWAGGER_SETTINGS = {
"USE_SESSION_AUTH": False,
"DOC_EXPANSION": "list",
"APIS_SORTER": "alpha",
"SECURITY_DEFINITIONS": {
"api_key": {"type": "apiKey", "name": "Authorization", "in": "header"}
},
}
|
py | 1a4e73a698dcb03b897aa15f01c30091c87ac242 | import cv2
import numpy as np
from argparse import ArgumentParser
def parse_args():
parser = ArgumentParser()
parser.add_argument('--normal_path', type=str)
parser.add_argument('--depth_path', type=str)
parser.add_argument('--silhou_path', type=str)
parser.add_argument('--output_path', type=str)
parser.add_argument('--mode', type=int) # 0-combine, 1-rescale depth, 2-resize
args = parser.parse_args()
return args.normal_path, args.depth_path, args.silhou_path, args.output_path, args.mode
if __name__ == '__main__':
f_normal, f_depth, f_silhou, f_output, mode = parse_args()
img_normal = cv2.imread(f_normal)
img_depth = cv2.imread(f_depth)
img_silhou = cv2.imread(f_silhou)
if mode == 0:
pass
elif mode == 1:
img_depth = 255.*(img_depth/255. + 0.2)/1.2
img_depth *= (img_silhou>100)
img_normal *= (img_silhou>100)
# elif mode == 2:
img_combine_depth = np.concatenate([img_depth, img_depth], 1)
img_combine_normal = np.concatenate([img_normal, img_normal], 1)
img_combine_sym = np.concatenate([img_silhou, img_silhou], 1)
img_combine = np.concatenate([img_combine_normal, img_combine_depth, img_combine_sym], 1)
cv2.imwrite(f_output, img_combine)
|
py | 1a4e74ff0c5b784a93485ff1c3111bc541ac752a | # coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" The Text REtrieval Conference (TREC) Question Classification dataset."""
from __future__ import absolute_import, division, print_function
import datasets
_CITATION = """\
@inproceedings{li-roth-2002-learning,
title = "Learning Question Classifiers",
author = "Li, Xin and
Roth, Dan",
booktitle = "{COLING} 2002: The 19th International Conference on Computational Linguistics",
year = "2002",
url = "https://www.aclweb.org/anthology/C02-1150",
}
@inproceedings{hovy-etal-2001-toward,
title = "Toward Semantics-Based Answer Pinpointing",
author = "Hovy, Eduard and
Gerber, Laurie and
Hermjakob, Ulf and
Lin, Chin-Yew and
Ravichandran, Deepak",
booktitle = "Proceedings of the First International Conference on Human Language Technology Research",
year = "2001",
url = "https://www.aclweb.org/anthology/H01-1069",
}
"""
_DESCRIPTION = """\
The Text REtrieval Conference (TREC) Question Classification dataset contains 5500 labeled questions in training set and another 500 for test set. The dataset has 6 labels, 47 level-2 labels. Average length of each sentence is 10, vocabulary size of 8700.
Data are collected from four sources: 4,500 English questions published by USC (Hovy et al., 2001), about 500 manually constructed questions for a few rare classes, 894 TREC 8 and TREC 9 questions, and also 500 questions from TREC 10 which serves as the test set.
"""
_URLs = {
"train": "http://cogcomp.org/Data/QA/QC/train_5500.label",
"test": "http://cogcomp.org/Data/QA/QC/TREC_10.label",
}
_COARSE_LABELS = ["DESC", "ENTY", "ABBR", "HUM", "NUM", "LOC"]
_FINE_LABELS = [
"manner",
"cremat",
"animal",
"exp",
"ind",
"gr",
"title",
"def",
"date",
"reason",
"event",
"state",
"desc",
"count",
"other",
"letter",
"religion",
"food",
"country",
"color",
"termeq",
"city",
"body",
"dismed",
"mount",
"money",
"product",
"period",
"substance",
"sport",
"plant",
"techmeth",
"volsize",
"instru",
"abb",
"speed",
"word",
"lang",
"perc",
"code",
"dist",
"temp",
"symbol",
"ord",
"veh",
"weight",
"currency",
]
class Trec(datasets.GeneratorBasedBuilder):
"""TODO: Short description of my dataset."""
VERSION = datasets.Version("1.1.0")
def _info(self):
# TODO: Specifies the datasets.DatasetInfo object
return datasets.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# datasets.features.FeatureConnectors
features=datasets.Features(
{
"label-coarse": datasets.ClassLabel(names=_COARSE_LABELS),
"label-fine": datasets.ClassLabel(names=_FINE_LABELS),
"text": datasets.Value("string"),
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://cogcomp.seas.upenn.edu/Data/QA/QC/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO: Downloads the data and defines the splits
# dl_manager is a datasets.download.DownloadManager that can be used to
# download and extract URLs
dl_files = dl_manager.download_and_extract(_URLs)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": dl_files["train"],
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": dl_files["test"],
},
),
]
def _generate_examples(self, filepath):
""" Yields examples. """
# TODO: Yields (key, example) tuples from the dataset
with open(filepath, "rb") as f:
for id_, row in enumerate(f):
# One non-ASCII byte: sisterBADBYTEcity. We replace it with a space
label, _, text = row.replace(b"\xf0", b" ").strip().decode().partition(" ")
coarse_label, _, fine_label = label.partition(":")
yield id_, {
"label-coarse": coarse_label,
"label-fine": fine_label,
"text": text,
}
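# Worked example (illustrative): a raw line such as
#   b"DESC:manner How did serfdom develop in and then leave Russia ?\n"
# yields label "DESC:manner" plus the question text after the first space, and the label is
# further partitioned on ":" into label-coarse "DESC" and label-fine "manner".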
|
py | 1a4e762765da334d23b738359aca29c2bfd6eaa3 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyUritemplate(PythonPackage):
"""Simple python library to deal with URI Templates."""
homepage = "https://uritemplate.readthedocs.org/"
pypi = "uritemplate/uritemplate-3.0.0.tar.gz"
version('3.0.0', sha256='c02643cebe23fc8adb5e6becffe201185bf06c40bda5c0b4028a93f1527d011d')
depends_on('py-setuptools', type='build')
|
py | 1a4e77948422c6f6c6bf466d78fc98ae27930060 | def scrape():
from bs4 import BeautifulSoup
from selenium import webdriver
import pandas as pd
import urllib
import time
URL_mars_news = "https://mars.nasa.gov/news/"
URL_mars_image = "https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars"
URL_mars_weather = "https://twitter.com/marswxreport?lang=en"
URL_mars_facts = "http://space-facts.com/mars/"
URL_mars_hemispheres = "https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
# navigate through the URLs and save the htmls
driver = webdriver.Firefox()
driver.get(URL_mars_news)
time.sleep(1)
html_mars_news = driver.page_source
driver.get(URL_mars_image)
time.sleep(1)
html_mars_image = driver.page_source
# driver = webdriver.Firefox()
driver.get(URL_mars_weather)
time.sleep(1)
html_mars_weather = driver.page_source
driver.get(URL_mars_hemispheres)
time.sleep(1)
html_mars_hemispheres = driver.page_source
# Grab Mars News
soup = BeautifulSoup(html_mars_news, "html.parser")
mars_latest_news = soup.find("div", class_="list_text")
mars_latest_news_dict = {
"date": mars_latest_news.contents[0].text,
"headline": mars_latest_news.contents[1].text,
"teaser": mars_latest_news.contents[2].text
}
# Grab latest JPL Mars Image
soup = BeautifulSoup(html_mars_image, "html.parser")
mars_image = soup.find_all("a", class_="fancybox")
mars_image_URL = urllib.parse.urljoin("https://www.jpl.nasa.gov", mars_image[1]["data-fancybox-href"])
# Get Mars Weather
soup = BeautifulSoup(html_mars_weather, "html.parser")
mars_weather = soup.find("div", class_="css-901oao r-jwli3a r-1qd0xha r-a023e6 r-16dba41 r-ad9z0x r-bcqeeo r-bnwqim r-qvutc0").text
# Scrape Mars Facts Table
dfs = pd.read_html(URL_mars_facts)
mars_facts = dfs[0]
# Grab Mars Hemispheres Images
soup = BeautifulSoup(html_mars_hemispheres, "html.parser")
mars_hemispheres = soup.find_all("div", class_="item")
mars_hemisphere_URLs = []
for item in mars_hemispheres:
# cycle through each hemisphere link and grab the download link for the enhance tif file
mars_hemisphere_link = urllib.parse.urljoin("https://astrogeology.usgs.gov", item.a["href"])
driver.get(mars_hemisphere_link)
html_mars_hemisphere = driver.page_source
soup = BeautifulSoup(html_mars_hemisphere, "html.parser")
mars_hemisphere_download_link = soup.find("div", class_="downloads")
# append URL to list
mars_hemisphere_URLs.append(
{
"title": item.div.a.text,
"img_url": mars_hemisphere_download_link.ul.li.a["href"]
}
)
driver.close()
return {
"news": mars_latest_news_dict,
"image": mars_image_URL,
"weather": mars_weather,
"facts": mars_facts,
"hemispheres": mars_hemisphere_URLs
}
# test code
if __name__ == "__main__":
result = scrape()
print(result) |
py | 1a4e79e0bd961c77cdef6cb19b76ecef59363d5f | import seaborn as sns
from datetime import date
from datetime import timedelta
import matplotlib.pyplot as plt
from student_reader import stu_num
direct = f"./code/orgs-{date.today() - timedelta(days = 2)}/"  # edit date difference or type in full path
plt.pie(
list(stu_num(direct).values()),
labels=list(stu_num(direct).keys()),
colors=sns.color_palette("pastel")[0:5],
autopct="%.0f%%",
)
plt.title(label="Percent organisations by number of students", fontweight=10, pad="2.0")
plt.savefig("stupi.svg")
|
py | 1a4e7ac6d149fba89c10ef3c67c29ecbded74f01 | """Amber Generator."""
|
py | 1a4e7ae9685174a6559d1729fd180fa4b10c807a | import sys
n, m, *ab = map(int, sys.stdin.read().split())
ab = list(zip(*[iter(ab)] * 2))
root = list(range(n+1)); root[0] = None
height = [0] * (n + 1); height[0] = None
size = [1] * (n + 1); size[0] = None
sys.setrecursionlimit(10 ** 9)
def find_root(v):
u = root[v]
if u == v:
return u
w = find_root(u)
root[v] = w
return w
def unite(v, u):
rv = find_root(v)
ru = find_root(u)
if rv == ru:
return 0
sv = size[rv]
su = size[ru]
if height[v] >= height[u]:
root[ru] = rv
height[rv] = max(height[rv], height[ru] + 1)
size[rv] += size[ru]
else:
root[rv] = ru
size[ru] += size[rv]
return sv * su
def main():
res = [0] * m
for i in range(1, m):
res[i] = res[i-1] + unite(*ab[m-i])
all_pairs = n * (n - 1) // 2
for i in res[::-1]:
yield all_pairs - i
if __name__ == '__main__':
ans = main()
print(*ans, sep='\n')
|
py | 1a4e7b9f1df31cd9d159cedc200a0d197dae32ed | # Copyright 2020 Google
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import recirq.quantum_chess.enums as enums
_ORD_A = ord('a')
def to_rank(x: int) -> str:
"""Returns the algebraic notation rank from the x coordinate."""
return chr(_ORD_A + x)
def to_square(x: int, y: int) -> str:
"""Returns the algebraic notation of a square."""
return chr(_ORD_A + x) + str(y + 1)
def x_of(square: str) -> int:
"""Returns x coordinate of an algebraic notation square (e.g. 'f4')."""
return ord(square[0]) - _ORD_A
def y_of(square: str) -> int:
"""Returns y coordinate of an algebraic notation square (e.g. 'f4')."""
return int(square[1]) - 1
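# Illustrative sketch: the helpers above round-trip between algebraic squares and 0-based
# (x, y) coordinates, e.g. 'a1' <-> (0, 0) and 'f4' <-> (5, 3).
def _square_helpers_sketch() -> None:
    assert to_square(5, 3) == 'f4'
    assert (x_of('f4'), y_of('f4')) == (5, 3)
    assert to_rank(0) == 'a'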
class Move:
"""Container class that has the source and target of a quantum chess move.
If the move is a split move, it will have a target2. If a merge move,
it will have a source2 attribute.
For moves that are input from the quantum chess board API, this will
have a move type and variant that determines what kind of move this is
(capture, exclusion, etc).
"""
def __init__(self,
source: str,
target: str,
*,
source2: str = None,
target2: str = None,
move_type: enums.MoveType = None,
move_variant: enums.MoveVariant = None):
self.source = source
self.source2 = source2
self.target = target
self.target2 = target2
self.move_type = move_type
self.move_variant = move_variant
def __eq__(self, other):
if isinstance(other, Move):
return (self.source == other.source and
self.target == other.target and
self.target2 == other.target2)
return False
@classmethod
def from_string(cls, str_to_parse: str):
"""Creates a move from a string shorthand for tests.
Format=source,target,target2,source2:type:variant
with commas omitted.
if target2 is specified, then source2 should
be '--'
Examples:
'a1a2:JUMP:BASIC'
'b1a3c3:SPLIT_JUMP:BASIC'
'a3b1--c3:MERGE_JUMP:BASIC'
"""
fields = str_to_parse.split(':')
if len(fields) != 3:
raise ValueError(f'Invalid move string {str_to_parse}')
source = fields[0][0:2]
target = fields[0][2:4]
move_type = enums.MoveType[fields[1]]
move_variant = enums.MoveVariant[fields[2]]
if len(fields[0]) <= 4:
return cls(source,
target,
move_type=move_type,
move_variant=move_variant)
if len(fields[0]) <= 6:
return cls(source,
target,
target2=fields[0][4:6],
move_type=move_type,
move_variant=move_variant)
return cls(source,
target,
source2=fields[0][6:8],
move_type=move_type,
move_variant=move_variant)
def is_split_move(self) -> bool:
return self.target2 is not None
def __str__(self):
if self.is_split_move():
return self.source + '^' + self.target + self.target2
return self.source + self.target
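# Illustrative sketch: Move.from_string() parses the shorthand documented in its docstring.
# The move strings below are hypothetical test inputs, not moves from a real game.
def _move_parsing_sketch() -> None:
    jump = Move.from_string('a1a2:JUMP:BASIC')
    split = Move.from_string('b1a3c3:SPLIT_JUMP:BASIC')
    assert not jump.is_split_move() and str(jump) == 'a1a2'
    assert split.is_split_move() and str(split) == 'b1^a3c3'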
|
py | 1a4e7c4eab5fe134a1bf751da7a872dbda72124b | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file.
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied.
# See the License for the specific language governing permissions and limitations under the License.
# This file has a special meaning for pytest. See https://docs.pytest.org/en/2.7.3/plugins.html for
# additional details.
import json
import logging
import os
import random
import re
from pathlib import Path
from shutil import copyfile
from traceback import format_tb
import boto3
import pkg_resources
import pytest
import yaml
from cfn_stacks_factory import CfnStack, CfnStacksFactory
from clusters_factory import Cluster, ClustersFactory
from conftest_markers import (
DIMENSIONS_MARKER_ARGS,
add_default_markers,
check_marker_dimensions,
check_marker_list,
check_marker_skip_dimensions,
check_marker_skip_list,
)
from conftest_tests_config import apply_cli_dimensions_filtering, parametrize_from_config, remove_disabled_tests
from constants import SCHEDULERS_SUPPORTING_IMDS_SECURED
from filelock import FileLock
from framework.credential_providers import aws_credential_provider, register_cli_credentials_for_region
from framework.tests_configuration.config_renderer import read_config_file
from framework.tests_configuration.config_utils import get_all_regions
from images_factory import Image, ImagesFactory
from jinja2 import Environment, FileSystemLoader
from network_template_builder import Gateways, NetworkTemplateBuilder, SubnetConfig, VPCConfig
from retrying import retry
from utils import (
InstanceTypesData,
create_s3_bucket,
delete_s3_bucket,
dict_add_nested_key,
dict_has_nested_key,
generate_stack_name,
get_architecture_supported_by_instance_type,
get_arn_partition,
get_instance_info,
get_network_interfaces_count,
get_vpc_snakecase_value,
random_alphanumeric,
set_logger_formatter,
)
from tests.common.utils import (
get_installed_parallelcluster_version,
get_sts_endpoint,
retrieve_pcluster_ami_without_standard_naming,
)
def pytest_addoption(parser):
"""Register argparse-style options and ini-style config values, called once at the beginning of a test run."""
parser.addoption("--tests-config-file", help="config file to specify tests and dimensions")
parser.addoption("--regions", help="aws region where tests are executed", nargs="*")
parser.addoption("--instances", help="aws instances under test", nargs="*")
parser.addoption("--oss", help="OSs under test", nargs="*")
parser.addoption("--schedulers", help="schedulers under test", nargs="*")
parser.addoption("--tests-log-file", help="file used to write test logs", default="pytest.log")
parser.addoption("--output-dir", help="output dir for tests artifacts")
# Can't mark fields as required due to: https://github.com/pytest-dev/pytest/issues/2026
parser.addoption("--key-name", help="key to use for EC2 instances", type=str)
parser.addoption("--key-path", help="key path to use for SSH connections", type=str)
parser.addoption("--custom-chef-cookbook", help="url to a custom cookbook package")
parser.addoption(
"--createami-custom-chef-cookbook", help="url to a custom cookbook package for the createami command"
)
parser.addoption("--pcluster-git-ref", help="Git ref of the custom cli package used to build the AMI.")
parser.addoption("--cookbook-git-ref", help="Git ref of the custom cookbook package used to build the AMI.")
parser.addoption("--node-git-ref", help="Git ref of the custom node package used to build the AMI.")
parser.addoption(
"--ami-owner",
help="Override the owner value when fetching AMIs to use with cluster. By default pcluster uses amazon.",
)
parser.addoption("--createami-custom-node-package", help="url to a custom node package for the createami command")
parser.addoption("--custom-awsbatch-template-url", help="url to a custom awsbatch template")
parser.addoption("--cw-dashboard-template-url", help="url to a custom Dashboard cfn template")
parser.addoption("--custom-awsbatchcli-package", help="url to a custom awsbatch cli package")
parser.addoption("--custom-node-package", help="url to a custom node package")
parser.addoption("--custom-ami", help="custom AMI to use in the tests")
parser.addoption("--pre-install", help="url to pre install script")
parser.addoption("--post-install", help="url to post install script")
parser.addoption("--vpc-stack", help="Name of an existing vpc stack.")
parser.addoption("--cluster", help="Use an existing cluster instead of creating one.")
parser.addoption("--public-ecr-image-uri", help="S3 URI of the ParallelCluster API spec")
parser.addoption(
"--api-definition-s3-uri", help="URI of the Docker image for the Lambda of the ParallelCluster API"
)
parser.addoption(
"--api-infrastructure-s3-uri", help="URI of the CloudFormation template for the ParallelCluster API"
)
parser.addoption("--api-uri", help="URI of an existing ParallelCluster API")
parser.addoption("--instance-types-data-file", help="JSON file with additional instance types data")
parser.addoption(
"--credential", help="STS credential endpoint, in the format <region>,<endpoint>,<ARN>,<externalId>.", nargs="+"
)
parser.addoption(
"--no-delete", action="store_true", default=False, help="Don't delete stacks after tests are complete."
)
parser.addoption("--benchmarks-target-capacity", help="set the target capacity for benchmarks tests", type=int)
parser.addoption("--benchmarks-max-time", help="set the max waiting time in minutes for benchmarks tests", type=int)
parser.addoption("--stackname-suffix", help="set a suffix in the integration tests stack names")
parser.addoption(
"--delete-logs-on-success", help="delete CloudWatch logs when a test succeeds", action="store_true"
)
parser.addoption(
"--use-default-iam-credentials",
help="use default IAM creds when running pcluster commands",
action="store_true",
)
def pytest_generate_tests(metafunc):
"""Generate (multiple) parametrized calls to a test function."""
if metafunc.config.getoption("tests_config", None):
parametrize_from_config(metafunc)
else:
_parametrize_from_option(metafunc, "region", "regions")
_parametrize_from_option(metafunc, "instance", "instances")
_parametrize_from_option(metafunc, "os", "oss")
_parametrize_from_option(metafunc, "scheduler", "schedulers")
def pytest_configure(config):
"""This hook is called for every plugin and initial conftest file after command line options have been parsed."""
# read tests config file if used
if config.getoption("tests_config_file", None):
config.option.tests_config = read_config_file(config.getoption("tests_config_file"))
# Read instance types data file if used
if config.getoption("instance_types_data_file", None):
# Load additional instance types data
InstanceTypesData.load_additional_instance_types_data(config.getoption("instance_types_data_file"))
config.option.instance_types_data = InstanceTypesData.additional_instance_types_data
# register additional markers
config.addinivalue_line("markers", "instances(instances_list): run test only against the listed instances.")
config.addinivalue_line("markers", "regions(regions_list): run test only against the listed regions")
config.addinivalue_line("markers", "oss(os_list): run test only against the listed oss")
config.addinivalue_line("markers", "schedulers(schedulers_list): run test only against the listed schedulers")
config.addinivalue_line(
"markers", "dimensions(region, instance, os, scheduler): run test only against the listed dimensions"
)
config.addinivalue_line("markers", "skip_instances(instances_list): skip test for the listed instances")
config.addinivalue_line("markers", "skip_regions(regions_list): skip test for the listed regions")
config.addinivalue_line("markers", "skip_oss(os_list): skip test for the listed oss")
config.addinivalue_line("markers", "skip_schedulers(schedulers_list): skip test for the listed schedulers")
config.addinivalue_line(
"markers", "skip_dimensions(region, instance, os, scheduler): skip test for the listed dimensions"
)
_setup_custom_logger(config.getoption("tests_log_file"))
def pytest_sessionstart(session):
# The number of seconds before a connection to the instance metadata service should time out.
# When attempting to retrieve credentials on an Amazon EC2 instance that is configured with an IAM role,
# a connection to the instance metadata service will time out after 1 second by default. If you know you're
# running on an EC2 instance with an IAM role configured, you can increase this value if needed.
os.environ["AWS_METADATA_SERVICE_TIMEOUT"] = "5"
# When attempting to retrieve credentials on an Amazon EC2 instance that has been configured with an IAM role,
# Boto3 will make only one attempt to retrieve credentials from the instance metadata service before giving up.
# If you know your code will be running on an EC2 instance, you can increase this value to make Boto3 retry
# multiple times before giving up.
os.environ["AWS_METADATA_SERVICE_NUM_ATTEMPTS"] = "5"
# Increasing default max attempts retry
os.environ["AWS_MAX_ATTEMPTS"] = "10"
def pytest_runtest_call(item):
"""Called to execute the test item."""
set_logger_formatter(
logging.Formatter(fmt=f"%(asctime)s - %(levelname)s - %(process)d - {item.name} - %(module)s - %(message)s")
)
logging.info("Running test " + item.name)
def pytest_runtest_logfinish(nodeid, location):
set_logger_formatter(logging.Formatter(fmt="%(asctime)s - %(levelname)s - %(process)d - %(module)s - %(message)s"))
def pytest_collection_modifyitems(session, config, items):
"""Called after collection has been performed, may filter or re-order the items in-place."""
if config.getoption("tests_config", None):
# Remove tests not declared in config file from the collected ones
remove_disabled_tests(session, config, items)
# Apply filtering based on dimensions passed as CLI options
# ("--regions", "--instances", "--oss", "--schedulers")
apply_cli_dimensions_filtering(config, items)
else:
add_default_markers(items)
check_marker_list(items, "instances", "instance")
check_marker_list(items, "regions", "region")
check_marker_list(items, "oss", "os")
check_marker_list(items, "schedulers", "scheduler")
check_marker_skip_list(items, "skip_instances", "instance")
check_marker_skip_list(items, "skip_regions", "region")
check_marker_skip_list(items, "skip_oss", "os")
check_marker_skip_list(items, "skip_schedulers", "scheduler")
check_marker_dimensions(items)
check_marker_skip_dimensions(items)
_add_filename_markers(items, config)
def pytest_collection_finish(session):
_log_collected_tests(session)
def _log_collected_tests(session):
from xdist import get_xdist_worker_id
# Write collected tests in a single worker
# get_xdist_worker_id returns the id of the current worker ('gw0', 'gw1', etc) or 'master'
if get_xdist_worker_id(session) in ["master", "gw0"]:
collected_tests = list(map(lambda item: item.nodeid, session.items))
logging.info(
"Collected tests in regions %s (total=%d):\n%s",
session.config.getoption("regions") or get_all_regions(session.config.getoption("tests_config")),
len(session.items),
json.dumps(collected_tests, indent=2),
)
out_dir = session.config.getoption("output_dir")
with open(f"{out_dir}/collected_tests.txt", "a", encoding="utf-8") as out_f:
out_f.write("\n".join(collected_tests))
out_f.write("\n")
def pytest_exception_interact(node, call, report):
"""Called when an exception was raised which can potentially be interactively handled.."""
logging.error(
"Exception raised while executing %s: %s\n%s",
node.name,
call.excinfo.value,
"".join(format_tb(call.excinfo.tb)),
)
def _extract_tested_component_from_filename(item):
"""Extract portion of test item's filename identifying the component it tests."""
test_location = os.path.splitext(os.path.basename(item.location[0]))[0]
return re.sub(r"test_|_test", "", test_location)
def _add_filename_markers(items, config):
"""Add a marker based on the name of the file where the test case is defined."""
for item in items:
marker = _extract_tested_component_from_filename(item)
# This dynamically registers markers in pytest so that warnings about the usage of undefined
# markers are not displayed
config.addinivalue_line("markers", marker)
item.add_marker(marker)
def _parametrize_from_option(metafunc, test_arg_name, option_name):
if test_arg_name in metafunc.fixturenames:
metafunc.parametrize(test_arg_name, metafunc.config.getoption(option_name), scope="class")
def _setup_custom_logger(log_file):
formatter = logging.Formatter(fmt="%(asctime)s - %(levelname)s - %(process)d - %(module)s - %(message)s")
logger = logging.getLogger()
logger.handlers = []
console_handler = logging.StreamHandler()
console_handler.setFormatter(formatter)
logger.setLevel(logging.INFO)
logger.addHandler(console_handler)
file_handler = logging.FileHandler(log_file)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
def _add_properties_to_report(item):
props = []
# Add properties for test dimensions, obtained from fixtures passed to tests
for dimension in DIMENSIONS_MARKER_ARGS:
value = item.funcargs.get(dimension)
if value:
props.append((dimension, value))
# Add property for feature tested, obtained from filename containing the test
props.append(("feature", _extract_tested_component_from_filename(item)))
for dimension_value_pair in props:
if dimension_value_pair not in item.user_properties:
item.user_properties.append(dimension_value_pair)
@pytest.fixture(scope="class")
@pytest.mark.usefixtures("setup_credentials")
def clusters_factory(request, region):
"""
Define a fixture to manage the creation and destruction of clusters.
The configs used to create clusters are dumped to output_dir/clusters_configs/{test_name}.config
"""
factory = ClustersFactory(delete_logs_on_success=request.config.getoption("delete_logs_on_success"))
def _cluster_factory(cluster_config, upper_case_cluster_name=False, **kwargs):
cluster_config = _write_config_to_outdir(request, cluster_config, "clusters_configs")
cluster = Cluster(
name=request.config.getoption("cluster")
if request.config.getoption("cluster")
else "integ-tests-{0}{1}{2}".format(
random_alphanumeric().upper() if upper_case_cluster_name else random_alphanumeric(),
"-" if request.config.getoption("stackname_suffix") else "",
request.config.getoption("stackname_suffix"),
),
config_file=cluster_config,
ssh_key=request.config.getoption("key_path"),
region=region,
)
if not request.config.getoption("cluster"):
cluster.creation_response = factory.create_cluster(cluster, **kwargs)
return cluster
yield _cluster_factory
if not request.config.getoption("no_delete"):
try:
test_passed = request.node.rep_call.passed
except AttributeError:
test_passed = False
factory.destroy_all_clusters(test_passed=test_passed)
@pytest.fixture(scope="session")
def api_server_factory(
cfn_stacks_factory, request, public_ecr_image_uri, api_definition_s3_uri, api_infrastructure_s3_uri
):
"""Creates a factory for deploying API servers on-demand to each region."""
api_servers = {}
def _api_server_factory(server_region):
api_stack_name = generate_stack_name("integ-tests-api", request.config.getoption("stackname_suffix"))
params = [
{"ParameterKey": "EnableIamAdminAccess", "ParameterValue": "true"},
{"ParameterKey": "CreateApiUserRole", "ParameterValue": "false"},
]
if api_definition_s3_uri:
params.append({"ParameterKey": "ApiDefinitionS3Uri", "ParameterValue": api_definition_s3_uri})
if public_ecr_image_uri:
params.append({"ParameterKey": "PublicEcrImageUri", "ParameterValue": public_ecr_image_uri})
template = (
api_infrastructure_s3_uri
or f"https://{server_region}-aws-parallelcluster.s3.{server_region}.amazonaws.com"
f"{'.cn' if server_region.startswith('cn') else ''}"
f"/parallelcluster/{get_installed_parallelcluster_version()}/api/parallelcluster-api.yaml"
)
if server_region not in api_servers:
logging.info(f"Creating API Server stack: {api_stack_name} in {server_region} with template {template}")
stack = CfnStack(
name=api_stack_name,
region=server_region,
parameters=params,
capabilities=["CAPABILITY_NAMED_IAM", "CAPABILITY_AUTO_EXPAND"],
template=template,
)
cfn_stacks_factory.create_stack(stack)
api_servers[server_region] = stack
else:
logging.info(f"Found cached API Server stack: {api_stack_name} in {server_region}")
return api_servers[server_region]
yield _api_server_factory
@pytest.fixture(scope="class")
def api_client(region, api_server_factory, api_uri):
"""Define a fixture for an API client that interacts with the pcluster api."""
from pcluster_client import ApiClient, Configuration
if api_uri:
host = api_uri
else:
stack = api_server_factory(region)
host = stack.cfn_outputs["ParallelClusterApiInvokeUrl"]
api_configuration = Configuration(host=host)
api_configuration.retries = 3
with ApiClient(api_configuration) as api_client_instance:
yield api_client_instance
@pytest.fixture(scope="class")
@pytest.mark.usefixtures("setup_credentials")
def images_factory(request):
"""
Define a fixture to manage the creation and destruction of images.
The configs used to create clusters are dumped to output_dir/images_configs/{test_name}.config
"""
factory = ImagesFactory()
def _image_factory(image_id, image_config, region, **kwargs):
image_config_file = _write_config_to_outdir(request, image_config, "image_configs")
image = Image(
image_id="-".join([image_id, request.config.getoption("stackname_suffix")])
if request.config.getoption("stackname_suffix")
else image_id,
config_file=image_config_file,
region=region,
)
factory.create_image(image, **kwargs)
if image.image_status != "BUILD_IN_PROGRESS" and kwargs.get("log_error", False):
logging.error("image %s creation failed", image_id)
return image
yield _image_factory
factory.destroy_all_images()
def _write_config_to_outdir(request, config, config_dir):
out_dir = request.config.getoption("output_dir")
# Sanitize config file name to make it Windows compatible
# request.node.nodeid example:
# 'dcv/test_dcv.py::test_dcv_configuration[eu-west-1-c5.xlarge-centos7-slurm-8443-0.0.0.0/0-/shared]'
test_file, test_name = request.node.nodeid.split("::", 1)
config_file_name = "{0}-{1}".format(test_file, test_name.replace("/", "_"))
os.makedirs(
"{out_dir}/{config_dir}/{test_dir}".format(
out_dir=out_dir, config_dir=config_dir, test_dir=os.path.dirname(test_file)
),
exist_ok=True,
)
config_dst = "{out_dir}/{config_dir}/{config_file_name}.config".format(
out_dir=out_dir, config_dir=config_dir, config_file_name=config_file_name
)
copyfile(config, config_dst)
return config_dst
@pytest.fixture()
def test_datadir(request, datadir):
"""
Inject the datadir with resources for the specific test function.
If the test function is declared in a class then datadir is ClassName/FunctionName
otherwise it is only FunctionName.
"""
function_name = request.function.__name__
if not request.cls:
return datadir / function_name
class_name = request.cls.__name__
return datadir / "{0}/{1}".format(class_name, function_name)
@pytest.fixture()
def pcluster_config_reader(test_datadir, vpc_stack, request, region):
"""
Define a fixture to render pcluster config templates associated to the running test.
The config for a given test is a pcluster.config.yaml file stored in the configs_datadir folder.
The config can be written by using Jinja2 template engine.
The current renderer already replaces placeholders for current keys:
{{ region }}, {{ os }}, {{ instance }}, {{ scheduler}}, {{ key_name }},
{{ vpc_id }}, {{ public_subnet_id }}, {{ private_subnet_id }}
The current renderer injects options for custom templates and packages in case these
are passed to the cli and not present already in the cluster config.
Also sanity_check is set to true by default unless explicitly set in config.
:return: a _config_renderer(**kwargs) function which gets as input a dictionary of values to replace in the template
"""
def _config_renderer(config_file="pcluster.config.yaml", **kwargs):
config_file_path = test_datadir / config_file
if not os.path.isfile(config_file_path):
raise FileNotFoundError(f"Cluster config file not found in the expected dir {config_file_path}")
default_values = _get_default_template_values(vpc_stack, request)
file_loader = FileSystemLoader(str(test_datadir))
env = Environment(loader=file_loader)
rendered_template = env.get_template(config_file).render(**{**default_values, **kwargs})
config_file_path.write_text(rendered_template)
if not config_file.endswith("image.config.yaml"):
inject_additional_config_settings(config_file_path, request, region)
else:
inject_additional_image_configs_settings(config_file_path, request)
return config_file_path
return _config_renderer
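# Illustrative sketch (hypothetical test, shown as a comment so pytest does not collect it):
# a test renders its templated config and passes extra placeholder values as keyword arguments.
#
#   def test_example(pcluster_config_reader, clusters_factory):
#       cluster_config = pcluster_config_reader(max_queue_size=5)  # fills a {{ max_queue_size }} placeholder
#       cluster = clusters_factory(cluster_config)
#
# Region, instance, os, scheduler, key_name and the VPC/subnet ids are injected automatically
# from the default template values built above.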
def inject_additional_image_configs_settings(image_config, request):
with open(image_config, encoding="utf-8") as conf_file:
config_content = yaml.load(conf_file, Loader=yaml.SafeLoader)
if request.config.getoption("createami_custom_chef_cookbook") and not dict_has_nested_key(
config_content, ("DevSettings", "Cookbook", "ChefCookbook")
):
dict_add_nested_key(
config_content,
request.config.getoption("createami_custom_chef_cookbook"),
("DevSettings", "Cookbook", "ChefCookbook"),
)
for option, config_param in [
("custom_awsbatchcli_package", "AwsBatchCliPackage"),
("createami_custom_node_package", "NodePackage"),
]:
if request.config.getoption(option) and not dict_has_nested_key(config_content, ("DevSettings", config_param)):
dict_add_nested_key(config_content, request.config.getoption(option), ("DevSettings", config_param))
with open(image_config, "w", encoding="utf-8") as conf_file:
yaml.dump(config_content, conf_file)
def inject_additional_config_settings(cluster_config, request, region): # noqa C901
with open(cluster_config, encoding="utf-8") as conf_file:
config_content = yaml.safe_load(conf_file)
if request.config.getoption("custom_chef_cookbook") and not dict_has_nested_key(
config_content, ("DevSettings", "Cookbook", "ChefCookbook")
):
dict_add_nested_key(
config_content,
request.config.getoption("custom_chef_cookbook"),
("DevSettings", "Cookbook", "ChefCookbook"),
)
if request.config.getoption("custom_ami") and not dict_has_nested_key(config_content, ("Image", "CustomAmi")):
dict_add_nested_key(config_content, request.config.getoption("custom_ami"), ("Image", "CustomAmi"))
if not dict_has_nested_key(config_content, ("DevSettings", "AmiSearchFilters")):
if (
request.config.getoption("pcluster_git_ref")
or request.config.getoption("cookbook_git_ref")
or request.config.getoption("node_git_ref")
):
tags = []
if request.config.getoption("pcluster_git_ref"):
tags.append(
{"Key": "build:parallelcluster:cli_ref", "Value": request.config.getoption("pcluster_git_ref")}
)
if request.config.getoption("cookbook_git_ref"):
tags.append(
{"Key": "build:parallelcluster:cookbook_ref", "Value": request.config.getoption("cookbook_git_ref")}
)
if request.config.getoption("node_git_ref"):
tags.append(
{"Key": "build:parallelcluster:node_ref", "Value": request.config.getoption("node_git_ref")}
)
tags.append({"Key": "parallelcluster:build_status", "Value": "available"})
dict_add_nested_key(config_content, tags, ("DevSettings", "AmiSearchFilters", "Tags"))
if request.config.getoption("ami_owner"):
dict_add_nested_key(
config_content, request.config.getoption("ami_owner"), ("DevSettings", "AmiSearchFilters", "Owner")
)
# Additional instance types data is copied into config files to make it available at cluster creation
instance_types_data = request.config.getoption("instance_types_data", None)
if instance_types_data:
dict_add_nested_key(config_content, json.dumps(instance_types_data), ("DevSettings", "InstanceTypesData"))
for option, config_param in [("pre_install", "OnNodeStart"), ("post_install", "OnNodeConfigured")]:
if request.config.getoption(option):
if not dict_has_nested_key(config_content, ("HeadNode", "CustomActions", config_param)):
dict_add_nested_key(
config_content,
request.config.getoption(option),
("HeadNode", "CustomActions", config_param, "Script"),
)
_add_policy_for_pre_post_install(config_content["HeadNode"], option, request, region)
scheduler = config_content["Scheduling"]["Scheduler"]
if scheduler != "awsbatch":
for queue in config_content["Scheduling"][f"{scheduler.capitalize()}Queues"]:
if not dict_has_nested_key(queue, ("CustomActions", config_param)):
dict_add_nested_key(
queue, request.config.getoption(option), ("CustomActions", config_param, "Script")
)
_add_policy_for_pre_post_install(queue, option, request, region)
for option, config_param in [
("custom_awsbatchcli_package", "AwsBatchCliPackage"),
("custom_node_package", "NodePackage"),
]:
if request.config.getoption(option) and not dict_has_nested_key(config_content, ("DevSettings", config_param)):
dict_add_nested_key(config_content, request.config.getoption(option), ("DevSettings", config_param))
with open(cluster_config, "w", encoding="utf-8") as conf_file:
yaml.dump(config_content, conf_file)
def _add_policy_for_pre_post_install(node_config, custom_option, request, region):
match = re.match(r"s3://(.*?)/(.*)", request.config.getoption(custom_option))
if not match or len(match.groups()) < 2:
logging.info("{0} script is not an S3 URL".format(custom_option))
else:
additional_iam_policies = {"Policy": f"arn:{get_arn_partition(region)}:iam::aws:policy/AmazonS3ReadOnlyAccess"}
if dict_has_nested_key(node_config, ("Iam", "InstanceRole")) or dict_has_nested_key(
node_config, ("Iam", "InstanceProfile")
):
# AdditionalIamPolicies, InstanceRole or InstanceProfile can not co-exist
logging.info(
"InstanceRole/InstanceProfile is specified, "
f"skipping insertion of AdditionalIamPolicies: {additional_iam_policies}"
)
else:
logging.info(
f"{custom_option} script is an S3 URL, adding AdditionalIamPolicies: {additional_iam_policies}"
)
if dict_has_nested_key(node_config, ("Iam", "AdditionalIamPolicies")):
if additional_iam_policies not in node_config["Iam"]["AdditionalIamPolicies"]:
node_config["Iam"]["AdditionalIamPolicies"].append(additional_iam_policies)
else:
dict_add_nested_key(node_config, [additional_iam_policies], ("Iam", "AdditionalIamPolicies"))
def _get_default_template_values(vpc_stack, request):
"""Build a dictionary of default values to inject in the jinja templated cluster configs."""
default_values = get_vpc_snakecase_value(vpc_stack)
default_values.update({dimension: request.node.funcargs.get(dimension) for dimension in DIMENSIONS_MARKER_ARGS})
default_values["key_name"] = request.config.getoption("key_name")
scheduler = request.node.funcargs.get("scheduler")
default_values["imds_secured"] = scheduler in SCHEDULERS_SUPPORTING_IMDS_SECURED
return default_values
@pytest.fixture(scope="session")
def cfn_stacks_factory(request):
"""Define a fixture to manage the creation and destruction of CloudFormation stacks."""
factory = CfnStacksFactory(request.config.getoption("credential"))
yield factory
if not request.config.getoption("no_delete"):
factory.delete_all_stacks()
else:
logging.warning("Skipping deletion of CFN stacks because --no-delete option is set")
@pytest.fixture()
@pytest.mark.usefixtures("setup_credentials")
def parameterized_cfn_stacks_factory(request):
"""Define a fixture that returns a parameterized stack factory and manages the stack creation and deletion."""
factory = CfnStacksFactory(request.config.getoption("credential"))
def _create_stack(region, template_path, stack_prefix="", parameters=None, capabilities=None):
file_content = extract_template(template_path)
stack = CfnStack(
name=generate_stack_name(stack_prefix, request.config.getoption("stackname_suffix")),
region=region,
template=file_content,
parameters=parameters or [],
capabilities=capabilities or [],
)
factory.create_stack(stack)
return stack
def extract_template(template_path):
with open(template_path, encoding="utf-8") as cfn_file:
file_content = cfn_file.read()
return file_content
yield _create_stack
factory.delete_all_stacks()
AVAILABILITY_ZONE_OVERRIDES = {
# c5.xlarge is not supported in use1-az3
# FSx Lustre file system creation is currently not supported for use1-az3
# m6g.xlarge is not supported in use1-az2 or use1-az3
# p4d.24xlarge is only available on use1-az2
"us-east-1": ["use1-az2"],
    # some instance types are only supported in use2-az2
"us-east-2": ["use2-az2"],
# c4.xlarge is not supported in usw2-az4
    # p4d.24xlarge is only available on usw2-az2
"us-west-2": ["usw2-az2"],
# c5.xlarge is not supported in apse2-az3
"ap-southeast-2": ["apse2-az1", "apse2-az2"],
# m6g.xlarge is not supported in apne1-az2
"ap-northeast-1": ["apne1-az4", "apne1-az1"],
# c4.xlarge is not supported in apne2-az2
"ap-northeast-2": ["apne2-az1", "apne2-az3"],
# c5.xlarge is not supported in apse1-az3
"ap-southeast-1": ["apse1-az2", "apse1-az1"],
# c4.xlarge is not supported in aps1-az2
"ap-south-1": ["aps1-az1", "aps1-az3"],
    # NAT Gateway not available in sae1-az2, c5n.18xlarge is not supported in sae1-az3
"sa-east-1": ["sae1-az1"],
# m6g.xlarge instances not available in euw1-az3
"eu-west-1": ["euw1-az1", "euw1-az2"],
# io2 EBS volumes not available in cac1-az4
"ca-central-1": ["cac1-az1", "cac1-az2"],
    # instances can only be launched in a placement group in eun1-az2
"eu-north-1": ["eun1-az2"],
# g3.8xlarge is not supported in euc1-az1
"eu-central-1": ["euc1-az2", "euc1-az3"],
# FSx not available in cnn1-az4
"cn-north-1": ["cnn1-az1", "cnn1-az2"],
}
@pytest.fixture(scope="function")
def random_az_selector(request):
"""Select random AZs for a given region."""
def _get_random_availability_zones(region, num_azs=1, default_value=None):
"""Return num_azs random AZs (in the form of AZ names, e.g. 'us-east-1a') for the given region."""
az_ids = AVAILABILITY_ZONE_OVERRIDES.get(region, [])
if az_ids:
az_id_to_az_name_map = get_az_id_to_az_name_map(region, request.config.getoption("credential"))
sample = random.sample([az_id_to_az_name_map.get(az_id, default_value) for az_id in az_ids], k=num_azs)
else:
sample = [default_value] * num_azs
return sample[0] if num_azs == 1 else sample
return _get_random_availability_zones
@pytest.fixture(scope="class", autouse=True)
def setup_credentials(region, request):
"""Setup environment for the integ tests"""
with aws_credential_provider(region, request.config.getoption("credential")):
yield
# FixMe: double check whether this fixture introduces unnecessary implications.
# The alternative way is to use --region for all cluster operations.
@pytest.fixture(scope="class", autouse=True)
def setup_env_variable(region):
"""Setup environment for the integ tests"""
os.environ["AWS_DEFAULT_REGION"] = region
yield
del os.environ["AWS_DEFAULT_REGION"]
def get_az_id_to_az_name_map(region, credential):
"""Return a dict mapping AZ IDs (e.g, 'use1-az2') to AZ names (e.g., 'us-east-1c')."""
# credentials are managed manually rather than via setup_sts_credentials because this function
# is called by a session-scoped fixture, which cannot make use of a class-scoped fixture.
with aws_credential_provider(region, credential):
ec2_client = boto3.client("ec2", region_name=region)
return {
entry.get("ZoneId"): entry.get("ZoneName")
for entry in ec2_client.describe_availability_zones().get("AvailabilityZones")
}
def get_availability_zones(region, credential):
"""
Return a list of availability zones for the given region.
    Note that this function is called by the vpc_stacks fixture. Because vpc_stacks is session-scoped,
it cannot utilize setup_sts_credentials, which is required in opt-in regions in order to call
describe_availability_zones.
"""
az_list = []
with aws_credential_provider(region, credential):
client = boto3.client("ec2", region_name=region)
response_az = client.describe_availability_zones(
Filters=[
{"Name": "region-name", "Values": [str(region)]},
{"Name": "zone-type", "Values": ["availability-zone"]},
]
)
for az in response_az.get("AvailabilityZones"):
az_list.append(az.get("ZoneName"))
return az_list
@pytest.fixture(scope="session", autouse=True)
def initialize_cli_creds(cfn_stacks_factory, request):
if request.config.getoption("use_default_iam_credentials"):
logging.info("Using default IAM credentials to run pcluster commands")
return
regions = request.config.getoption("regions") or get_all_regions(request.config.getoption("tests_config"))
for region in regions:
logging.info("Creating IAM roles for pcluster CLI")
stack_name = generate_stack_name("integ-tests-iam-user-role", request.config.getoption("stackname_suffix"))
stack_template_path = os.path.join("..", "iam_policies", "user-role.cfn.yaml")
with open(stack_template_path, encoding="utf-8") as stack_template_file:
stack_template_data = stack_template_file.read()
stack = CfnStack(name=stack_name, region=region, capabilities=["CAPABILITY_IAM"], template=stack_template_data)
cfn_stacks_factory.create_stack(stack)
# register providers
register_cli_credentials_for_region(region, stack.cfn_outputs["ParallelClusterUserRole"])
@pytest.fixture(scope="session", autouse=True)
def vpc_stacks(cfn_stacks_factory, request):
"""Create VPC used by integ tests in all configured regions."""
regions = request.config.getoption("regions") or get_all_regions(request.config.getoption("tests_config"))
vpc_stacks = {}
for region in regions:
# Creating private_subnet_different_cidr in a different AZ for test_efs
        # To-do: isolate this logic and create a compute subnet in a different AZ than the head node in test_efs
# if region has a non-empty list in AVAILABILITY_ZONE_OVERRIDES, select a subset of those AZs
credential = request.config.getoption("credential")
az_ids_for_region = AVAILABILITY_ZONE_OVERRIDES.get(region, [])
if az_ids_for_region:
az_id_to_az_name = get_az_id_to_az_name_map(region, credential)
az_names = [az_id_to_az_name.get(az_id) for az_id in az_ids_for_region]
# if only one AZ can be used for the given region, use it multiple times
if len(az_names) == 1:
az_names *= 2
availability_zones = random.sample(az_names, k=2)
# otherwise, select a subset of all AZs in the region
else:
az_list = get_availability_zones(region, credential)
# if number of available zones is smaller than 2, available zones should be [None, None]
if len(az_list) < 2:
availability_zones = [None, None]
else:
availability_zones = random.sample(az_list, k=2)
# Subnets visual representation:
# http://www.davidc.net/sites/default/subnets/subnets.html?network=192.168.0.0&mask=16&division=7.70
public_subnet = SubnetConfig(
name="Public",
cidr="192.168.32.0/19", # 8190 IPs
map_public_ip_on_launch=True,
has_nat_gateway=True,
availability_zone=availability_zones[0],
default_gateway=Gateways.INTERNET_GATEWAY,
)
private_subnet = SubnetConfig(
name="Private",
cidr="192.168.64.0/18", # 16382 IPs
map_public_ip_on_launch=False,
has_nat_gateway=False,
availability_zone=availability_zones[0],
default_gateway=Gateways.NAT_GATEWAY,
)
private_subnet_different_cidr = SubnetConfig(
name="PrivateAdditionalCidr",
cidr="192.168.128.0/17", # 32766 IPs
map_public_ip_on_launch=False,
has_nat_gateway=False,
availability_zone=availability_zones[1],
default_gateway=Gateways.NAT_GATEWAY,
)
no_internet_subnet = SubnetConfig(
name="NoInternet",
cidr="192.168.16.0/20", # 4094 IPs
map_public_ip_on_launch=False,
has_nat_gateway=False,
availability_zone=availability_zones[0],
default_gateway=Gateways.NONE,
)
vpc_config = VPCConfig(
cidr="192.168.0.0/17",
additional_cidr_blocks=["192.168.128.0/17"],
subnets=[public_subnet, private_subnet, private_subnet_different_cidr, no_internet_subnet],
)
template = NetworkTemplateBuilder(vpc_configuration=vpc_config, availability_zone=availability_zones[0]).build()
vpc_stacks[region] = _create_vpc_stack(request, template, region, cfn_stacks_factory)
return vpc_stacks
@pytest.fixture(scope="class")
@pytest.mark.usefixtures("clusters_factory", "images_factory")
def create_roles_stack(request, region):
"""Define a fixture that returns a stack factory for IAM roles."""
logging.info("Creating IAM roles stack")
factory = CfnStacksFactory(request.config.getoption("credential"))
def _create_stack(stack_prefix, roles_file):
stack_template_path = os.path.join("..", "iam_policies", roles_file)
template_data = read_template(stack_template_path)
stack = CfnStack(
name=generate_stack_name(stack_prefix, request.config.getoption("stackname_suffix")),
region=region,
template=template_data,
capabilities=["CAPABILITY_IAM"],
)
factory.create_stack(stack)
return stack
def read_template(template_path):
with open(template_path, encoding="utf-8") as cfn_file:
file_content = cfn_file.read()
return file_content
yield _create_stack
if not request.config.getoption("no_delete"):
factory.delete_all_stacks()
else:
logging.warning("Skipping deletion of IAM roles stack because --no-delete option is set")
def _create_iam_policies(iam_policy_name, region, policy_filename):
logging.info("Creating iam policy {0}...".format(iam_policy_name))
file_loader = FileSystemLoader(pkg_resources.resource_filename(__name__, "/resources"))
env = Environment(loader=file_loader, trim_blocks=True, lstrip_blocks=True)
partition = get_arn_partition(region)
account_id = (
boto3.client("sts", region_name=region, endpoint_url=get_sts_endpoint(region))
.get_caller_identity()
.get("Account")
)
parallel_cluster_instance_policy = env.get_template(policy_filename).render(
partition=partition, region=region, account_id=account_id, cluster_bucket_name="parallelcluster-*"
)
return boto3.client("iam", region_name=region).create_policy(
PolicyName=iam_policy_name, PolicyDocument=parallel_cluster_instance_policy
)["Policy"]["Arn"]
@pytest.fixture(scope="class")
def vpc_stack(vpc_stacks, region):
return vpc_stacks[region]
@pytest.fixture(scope="session")
def public_ecr_image_uri(request):
return request.config.getoption("public_ecr_image_uri")
@pytest.fixture(scope="session")
def api_uri(request):
return request.config.getoption("api_uri")
@pytest.fixture(scope="session")
def api_definition_s3_uri(request):
return request.config.getoption("api_definition_s3_uri")
@pytest.fixture(scope="session")
def api_infrastructure_s3_uri(request):
return request.config.getoption("api_infrastructure_s3_uri")
# If stack creation fails it'll retry once more. This is done to mitigate failures due to resources
# not available in randomly picked AZs.
@retry(
stop_max_attempt_number=2,
wait_fixed=5000,
retry_on_exception=lambda exception: not isinstance(exception, KeyboardInterrupt),
)
def _create_vpc_stack(request, template, region, cfn_stacks_factory):
if request.config.getoption("vpc_stack"):
logging.info("Using stack {0} in region {1}".format(request.config.getoption("vpc_stack"), region))
stack = CfnStack(name=request.config.getoption("vpc_stack"), region=region, template=template.to_json())
else:
stack = CfnStack(
name=generate_stack_name("integ-tests-vpc", request.config.getoption("stackname_suffix")),
region=region,
template=template.to_json(),
)
cfn_stacks_factory.create_stack(stack)
return stack
@pytest.fixture(scope="class")
def s3_bucket_factory(region):
"""
Define a fixture to create S3 buckets.
:param region: region where the test is running
:return: a function to create buckets.
"""
created_buckets = []
def _create_bucket():
bucket_name = "integ-tests-" + random_alphanumeric()
logging.info("Creating S3 bucket {0}".format(bucket_name))
create_s3_bucket(bucket_name, region)
created_buckets.append((bucket_name, region))
return bucket_name
yield _create_bucket
for bucket in created_buckets:
logging.info("Deleting S3 bucket {0}".format(bucket[0]))
try:
delete_s3_bucket(bucket_name=bucket[0], region=bucket[1])
except Exception as e:
logging.error("Failed deleting bucket {0} with exception: {1}".format(bucket[0], e))
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
"""Making test result information available in fixtures"""
# add dimension properties to report
_add_properties_to_report(item)
# execute all other hooks to obtain the report object
outcome = yield
rep = outcome.get_result()
# set a report attribute for each phase of a call, which can
# be "setup", "call", "teardown"
setattr(item, "rep_" + rep.when, rep)
if rep.when in ["setup", "call"] and rep.failed:
try:
update_failed_tests_config(item)
except Exception as e:
logging.error("Failed when generating config for failed tests: %s", e, exc_info=True)
def update_failed_tests_config(item):
out_dir = Path(item.config.getoption("output_dir"))
if not str(out_dir).endswith(".out"):
# Navigate to the parent dir in case of parallel run so that we can access the shared parent dir
out_dir = out_dir.parent
out_file = out_dir / "failed_tests_config.yaml"
logging.info("Updating failed tests config file %s", out_file)
# We need to acquire a lock first to prevent concurrent edits to this file
with FileLock(str(out_file) + ".lock"):
failed_tests = {"test-suites": {}}
if out_file.is_file():
with open(str(out_file), encoding="utf-8") as f:
failed_tests = yaml.safe_load(f)
# item.node.nodeid example:
# 'dcv/test_dcv.py::test_dcv_configuration[eu-west-1-c5.xlarge-centos7-slurm-8443-0.0.0.0/0-/shared]'
feature, test_id = item.nodeid.split("/", 1)
test_id = test_id.split("[", 1)[0]
dimensions = {}
for dimension in DIMENSIONS_MARKER_ARGS:
value = item.callspec.params.get(dimension)
if value:
dimensions[dimension + "s"] = [value]
if not dict_has_nested_key(failed_tests, ("test-suites", feature, test_id)):
dict_add_nested_key(failed_tests, [], ("test-suites", feature, test_id, "dimensions"))
if dimensions not in failed_tests["test-suites"][feature][test_id]["dimensions"]:
failed_tests["test-suites"][feature][test_id]["dimensions"].append(dimensions)
with open(out_file, "w", encoding="utf-8") as f:
yaml.dump(failed_tests, f)
@pytest.fixture()
def architecture(request, instance, region):
"""Return a string describing the architecture supported by the given instance type."""
supported_architecture = request.config.cache.get(f"{instance}/architecture", None)
if supported_architecture is None:
logging.info(f"Getting supported architecture for instance type {instance}")
supported_architecture = get_architecture_supported_by_instance_type(instance, region)
request.config.cache.set(f"{instance}/architecture", supported_architecture)
return supported_architecture
@pytest.fixture()
def network_interfaces_count(request, instance, region):
"""Return the number of network interfaces for the given instance type."""
network_interfaces_count = request.config.cache.get(f"{instance}/network_interfaces_count", None)
if network_interfaces_count is None:
logging.info(f"Getting number of network interfaces for instance type {instance}")
network_interfaces_count = get_network_interfaces_count(instance, region)
request.config.cache.set(f"{instance}/network_interfaces_count", network_interfaces_count)
return network_interfaces_count
@pytest.fixture()
def default_threads_per_core(request, instance, region):
"""Return the default threads per core for the given instance type."""
# NOTE: currently, .metal instances do not contain the DefaultThreadsPerCore
# attribute in their VCpuInfo section. This is a known limitation with the
# ec2 DescribeInstanceTypes API. For these instance types an assumption
# is made that if the instance's supported architectures list includes
# x86_64 then the default is 2, otherwise it's 1.
logging.info(f"Getting defaul threads per core for instance type {instance}")
instance_type_data = get_instance_info(instance, region)
threads_per_core = instance_type_data.get("VCpuInfo", {}).get("DefaultThreadsPerCore")
if threads_per_core is None:
supported_architectures = instance_type_data.get("ProcessorInfo", {}).get("SupportedArchitectures", [])
threads_per_core = 2 if "x86_64" in supported_architectures else 1
logging.info(f"Defaul threads per core for instance type {instance} : {threads_per_core}")
return threads_per_core
@pytest.fixture(scope="session")
def key_name(request):
"""Return the EC2 key pair name to be used."""
return request.config.getoption("key_name")
@pytest.fixture()
def pcluster_ami_without_standard_naming(region, os, architecture):
"""
Define a fixture to manage the creation and deletion of AMI without standard naming.
This AMI is used to test the validation of pcluster version in Cookbook
"""
ami_id = None
def _pcluster_ami_without_standard_naming(version):
nonlocal ami_id
ami_id = retrieve_pcluster_ami_without_standard_naming(region, os, version, architecture)
return ami_id
yield _pcluster_ami_without_standard_naming
if ami_id:
client = boto3.client("ec2", region_name=region)
client.deregister_image(ImageId=ami_id)
@pytest.fixture(scope="class")
def ami_copy(region):
"""
Define a fixture to manage the copy and deletion of AMI.
This AMI is used to test head node and compute node AMI update
"""
copy_ami_id = None
client = boto3.client("ec2", region_name=region)
def _copy_image(image_id, test_name):
nonlocal copy_ami_id
copy_ami_id = client.copy_image(
Name=f"aws-parallelcluster-copied-image-{test_name}", SourceImageId=image_id, SourceRegion=region
).get("ImageId")
        # Tag the copied image so it can be picked up by the AMI cleanup pipeline
client.create_tags(
Resources=[
f"{copy_ami_id}",
],
Tags=[
{
"Key": "parallelcluster:image_id",
"Value": f"aws-parallelcluster-copied-image-{test_name}",
},
{
"Key": "parallelcluster:build_status",
"Value": "available",
},
],
)
return copy_ami_id
yield _copy_image
if copy_ami_id:
client = boto3.client("ec2", region_name=region)
copied_image_info = client.describe_images(ImageIds=[copy_ami_id])
logging.info("Deregister copied AMI.")
client.deregister_image(ImageId=copy_ami_id)
try:
for block_device_mapping in copied_image_info.get("Images")[0].get("BlockDeviceMappings"):
if block_device_mapping.get("Ebs"):
client.delete_snapshot(SnapshotId=block_device_mapping.get("Ebs").get("SnapshotId"))
except IndexError as e:
logging.error("Delete copied AMI snapshot failed due to %s", e)
@pytest.fixture()
def mpi_variants(architecture):
variants = ["openmpi"]
if architecture == "x86_64":
variants.append("intelmpi")
return variants
|
py | 1a4e7df467fd53ff83784f5059d8548d4eed87b0 | # -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urlparse
from resources.lib.modules import client
from resources.lib.modules import directstream
class source:
def __init__(self):
self.domains = ['rainierland.com']
self.base_link = 'http://rainierland.com'
self.movie_link = '/movie/%s-%s.html'
def movie(self, imdb, title, year):
try:
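            # Build a URL slug from the title (e.g. "Some Movie!" -> "some-movie") and fill it into /movie/<slug>-<year>.html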
url = re.sub('([^\s\-\w])+', '', title.lower()).replace(' ', '-')
url = self.movie_link % (url, year)
url = urlparse.urljoin(self.base_link, url)
url = client.request(url, output='geturl')
if url == None: raise Exception()
url = urlparse.urljoin(self.base_link, url)
url = urlparse.urlparse(url).path
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
url = urlparse.urljoin(self.base_link, url)
r = client.request(url)
r = client.parseDOM(r, 'div', attrs = {'class': 'screen fluid-width-video-wrapper'})[0]
r = re.findall('src\s*=\s*"(.*?)"', r)[0]
r = urlparse.urljoin(self.base_link, r)
r = client.request(r, referer=url)
links = []
url = re.findall('src\s*=\s*"(.*?)"', r)
url = [i for i in url if 'http' in i]
for i in url:
try: links += [{'source': 'gvideo', 'url': i, 'quality': directstream.googletag(i)[0]['quality'], 'direct': True}]
except: pass
url = re.findall('(openload\.(?:io|co)/(?:embed|f)/[0-9a-zA-Z-_]+)', r)
url = ['http://' + i for i in url]
for i in url:
try: links += [{'source': 'openload.co', 'url': i, 'quality': 'HD', 'direct': False}]
except: pass
for i in links: sources.append({'source': i['source'], 'quality': i['quality'], 'provider': 'Rainierland', 'url': i['url'], 'direct': i['direct'], 'debridonly': False})
return sources
except:
return sources
def resolve(self, url):
return url
|
py | 1a4e7f5a3a4bcb8a124fc11af00e62aecfbb0f57 | from __future__ import annotations
import os
from prettyqt import core
from prettyqt.qt import QtCore
QtCore.QFileSelector.__bases__ = (core.Object,)
class FileSelector(QtCore.QFileSelector):
def serialize_fields(self):
return dict(extra_selectors=self.extraSelectors())
def select_path(self, path: str | os.PathLike) -> str:
return self.select(os.fspath(path))
def select_url(self, url: str | QtCore.QUrl) -> core.Url:
if isinstance(url, str):
url = QtCore.QUrl(url)
return core.Url(self.select(url))
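# Usage note (illustrative): select_path accepts any str or os.PathLike and defers
# the actual variant selection to QFileSelector.select, e.g.
# FileSelector().select_path(pathlib.Path("data") / "app.conf").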
|
py | 1a4e8090d4f30bee68d881fed340fbfc87ba4cc8 | from faker import Faker
import os
import random
import pandas as pd
CurrentDir = os.path.dirname(os.path.realpath(__file__))
def CreateFakeInformation(fake,AccountsCSV):
AccountData = pd.read_csv(os.path.join(CurrentDir,AccountsCSV),encoding='latin-1')
AccountDF = pd.DataFrame(AccountData)
DataColumns = ['Phone','Email','Bank Account Number','Federal Tax ID#','Social Security Number']
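    # Overwrite every sensitive column in each row with freshly generated fake values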
for i in AccountDF.index:
FakeInfo = [fake.phone_number(),fake.email(),random.randint(100000,9999999),random.randint(100000,9999999),random.randint(100000,9999999)]
for j in range(0,len(DataColumns)):
AccountDF[DataColumns[j]][i] = FakeInfo[j]
FakeAccountsFinalFile = "FakeInfoAccounts.csv"
while(os.path.exists(os.path.join(CurrentDir,FakeAccountsFinalFile))):
FakeAccountsFinalFile = input("%s already exists! New Name: " %FakeAccountsFinalFile) + '.csv'
AccountDF.to_csv(os.path.join(CurrentDir,FakeAccountsFinalFile), index = False)
print("Success")
AccountsCSV = input("Accounts Data file : ")
fake = Faker('en_US')
CreateFakeInformation(fake,AccountsCSV) |
py | 1a4e80fc4f69068a29d5cd3413b6b65744ec6425 |
from dlqyoutubedl import DLQYouTubeDl
from dlqmaildirstore import DLQMaildirStore
from dlqtransaction import DLQTransaction
import json
dlq_store = DLQMaildirStore('./queue')
dl = DLQYouTubeDl()
(queue_item_path, queue_item) = dlq_store.new_item()
print(f"queue_item: {queue_item}")
if queue_item:
trans = DLQTransaction()
trans["queue_item"] = queue_item
print(f"queue_item_path: {queue_item_path}")
tmp_item_path = dlq_store.lock_file(queue_item_path)
print(f"tmp_item_path: {tmp_item_path}")
if tmp_item_path:
trans["tmp_item_path"] = str(tmp_item_path)
trans["state"] = 10
trans.write()
trans["dlresult"] = dl.download(queue_item["url"])
if trans["dlresult"]["returncode"] == 0:
trans["state"] = 20
trans.write()
cur_item_path = dlq_store.file_done(tmp_item_path)
if cur_item_path:
trans["state"] = 30
trans["cur_item_path"] = str(cur_item_path)
trans.write()
else:
trans["state"] = 40
trans.write()
err_item_path = dlq_store.file_had_errors(tmp_item_path)
if err_item_path:
trans["state"] = 50
trans["err_item_path"] = str(err_item_path)
trans.write()
|
py | 1a4e81483dcecd61eb6aac9f52cba28e5a37d64d | # Home Assistant mqtt integration
# (C) Copyright Renaud Guillon 202.
# Released under the MIT licence.
import json
try:
import asyncio
except ImportError:
import uasyncio as asyncio
import ha_mqtt
# default root topic for home assistant discovery
HOME_ASSISTANT_PREFIX = "homeassistant"
class HaMqttEntity(object):
'''
    Base class for Home Assistant Mqtt Entities. Implementations are expected to populate discover_conf with
    the parameters specific to the device type, and input_topics/output_topics, which are dictionaries mapping
    mqtt topics to callbacks.
    The on_connect service subscribes to every input topic and sends the mqtt discovery message to Home Assistant.
    A task is created to monitor is_updated; when it is true, the output_topics are published to inform Home Assistant
    of the new state of the entity.
    TODO: json payload management here is not the best idea as many kinds of devices use other formats by default
'''
def __init__(self, model, name):
self.base_topic = "{}/{}/{}".format(HOME_ASSISTANT_PREFIX, model, name)
self.discover_topic = bytes("{}/config".format(self.base_topic), 'utf-8')
self.discover_conf = {"name": name,
"" "unique_id": bytes("{}_{}".format(model, name), 'utf-8')}
self.input_topics = {}
self.output_topics = {}
self.current_state = {}
self.is_updated = False
self.mqtt_client = ha_mqtt.add_entity(self)
asyncio.get_event_loop().create_task(self.task())
async def task(self):
'''
Never ending task that will send the updated state to home assistant when needed.
'''
while True:
if self.is_updated:
self.is_updated = False
await self.update_state()
await asyncio.sleep(0.1)
async def update_state(self):
for output_topic, callback in self.output_topics.items():
await self.mqtt_client.publish(output_topic, json.dumps(callback()))
async def on_connect(self):
'''
        Subscribes to every input topic and sends the mqtt discovery message
'''
for input_topic in self.input_topics:
await self.mqtt_client.subscribe(input_topic)
await self.mqtt_client.publish(self.discover_topic, json.dumps(self.discover_conf))
def receive(self, topic, message):
'''
Sends the message to the callback if the topic matches
:param topic:
:param message:
'''
try:
payload = json.loads(message.decode('utf-8'))
self.input_topics[topic.decode('utf-8')](payload)
self.is_updated = True
except KeyError:
pass
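# Illustrative sketch (not part of the original module): a minimal switch-style
# entity built on HaMqttEntity. The "switch" model string and topic names are
# assumptions for the example; only attributes defined above are used.
class ExampleSwitch(HaMqttEntity):
    def __init__(self, name):
        super().__init__("switch", name)
        self.current_state = {"state": "OFF"}
        command_topic = "{}/set".format(self.base_topic)
        state_topic = "{}/state".format(self.base_topic)
        self.discover_conf["command_topic"] = command_topic
        self.discover_conf["state_topic"] = state_topic
        # Incoming commands update the state; the base class task republishes it.
        self.input_topics[command_topic] = self._on_command
        self.output_topics[state_topic] = lambda: self.current_state
    def _on_command(self, payload):
        self.current_state["state"] = payload.get("state", "OFF")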
|
py | 1a4e81f1dc9aae36201504c24d06a221f6e57f85 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class AvailablePrivateEndpointTypesOperations(object):
"""AvailablePrivateEndpointTypesOperations operations.
You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2020-07-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2020-07-01"
self.config = config
def list(
self, location, custom_headers=None, raw=False, **operation_config):
"""Returns all of the resource types that can be linked to a Private
Endpoint in this subscription in this region.
:param location: The location of the domain name.
:type location: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of AvailablePrivateEndpointType
:rtype:
~azure.mgmt.network.v2020_07_01.models.AvailablePrivateEndpointTypePaged[~azure.mgmt.network.v2020_07_01.models.AvailablePrivateEndpointType]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def internal_paging(next_link=None):
request = prepare_request(next_link)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
header_dict = None
if raw:
header_dict = {}
deserialized = models.AvailablePrivateEndpointTypePaged(internal_paging, self._deserialize.dependencies, header_dict)
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/availablePrivateEndpointTypes'}
def list_by_resource_group(
self, location, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Returns all of the resource types that can be linked to a Private
Endpoint in this subscription in this region.
:param location: The location of the domain name.
:type location: str
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of AvailablePrivateEndpointType
:rtype:
~azure.mgmt.network.v2020_07_01.models.AvailablePrivateEndpointTypePaged[~azure.mgmt.network.v2020_07_01.models.AvailablePrivateEndpointType]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url']
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def internal_paging(next_link=None):
request = prepare_request(next_link)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
header_dict = None
if raw:
header_dict = {}
deserialized = models.AvailablePrivateEndpointTypePaged(internal_paging, self._deserialize.dependencies, header_dict)
return deserialized
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/locations/{location}/availablePrivateEndpointTypes'}
|
py | 1a4e83c64d535e438958a2a85bcc84a2aca737d3 | from __future__ import annotations
from spark_auto_mapper_fhir.fhir_types.uri import FhirUri
from spark_auto_mapper_fhir.value_sets.generic_type import GenericTypeCode
from spark_auto_mapper.type_definitions.defined_types import AutoMapperTextInputType
# This file is auto-generated by generate_classes so do not edit manually
# noinspection PyPep8Naming
class RelationalOperator(GenericTypeCode):
"""
v3.RelationalOperator
From: http://terminology.hl7.org/ValueSet/v3-RelationalOperator in v3-codesystems.xml
**** MISSING DEFINITIONS ****
"""
def __init__(self, value: AutoMapperTextInputType):
super().__init__(value=value)
"""
http://terminology.hl7.org/CodeSystem/v3-RelationalOperator
"""
codeset: FhirUri = "http://terminology.hl7.org/CodeSystem/v3-RelationalOperator"
class RelationalOperatorValues:
"""
Specified set of things includes value being evaluated.
From: http://terminology.hl7.org/CodeSystem/v3-RelationalOperator in v3-codesystems.xml
"""
Contains = RelationalOperator("CT")
"""
Equal condition applied to comparisons.
From: http://terminology.hl7.org/CodeSystem/v3-RelationalOperator in v3-codesystems.xml
"""
Equal = RelationalOperator("EQ")
"""
Greater than or equal condition applied to comparisons.
From: http://terminology.hl7.org/CodeSystem/v3-RelationalOperator in v3-codesystems.xml
"""
GreaterThanOrEqual = RelationalOperator("GE")
"""
A generic comparison selects a record for inclusion in the response if the
beginning of the designated element value matches the select string.
From: http://terminology.hl7.org/CodeSystem/v3-RelationalOperator in v3-codesystems.xml
"""
Generic = RelationalOperator("GN")
"""
Greater than condition applied to comparisons.
From: http://terminology.hl7.org/CodeSystem/v3-RelationalOperator in v3-codesystems.xml
"""
GreaterThan = RelationalOperator("GT")
"""
Less than or equal condition applied to comparisons.
From: http://terminology.hl7.org/CodeSystem/v3-RelationalOperator in v3-codesystems.xml
"""
LessThanOrEqual = RelationalOperator("LE")
"""
Less than condition applied to comparisons.
From: http://terminology.hl7.org/CodeSystem/v3-RelationalOperator in v3-codesystems.xml
"""
LessThan = RelationalOperator("LT")
"""
Not equal condition applied to comparisons.
From: http://terminology.hl7.org/CodeSystem/v3-RelationalOperator in v3-codesystems.xml
"""
NotEqual = RelationalOperator("NE")
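# Illustrative usage note (not from the source): RelationalOperatorValues.GreaterThan
# is equivalent to constructing RelationalOperator("GT") directly.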
|
py | 1a4e853fffeef5dfab91e47b6a91be5e6c31ed1b | # -*- encoding: utf-8 -*-
'''
The menubar module.
'''
# ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ #
# #
# MenuBar #
# #
# ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ #
class MenuBar(object):
'''
The MenuBar class. The application's menubar.
'''
    def __init__(self, gtk_builder):
        ''' Initialization method. '''
# File tab
self.__menu_file = gtk_builder.get_object("menubar-file")
self.__new_button = gtk_builder.get_object("menubar-new")
self.__open_button = gtk_builder.get_object("menubar-open")
self.__save_button = gtk_builder.get_object("menubar-save")
self.__save_as_button = gtk_builder.get_object("menubar-save-as")
self.__exit_button = gtk_builder.get_object("menubar-exit")
# Preferences tab
self.__menu_preferences = gtk_builder.get_object(
"menubar-preferences")
self.__menu_language = gtk_builder.get_object(
"menubar-preferences-language")
self.__spanish_language_button = gtk_builder.get_object(
"spanish-language")
self.__english_language_button = gtk_builder.get_object(
"english-language")
self.__menu_theme = gtk_builder.get_object(
"menubar-preferences-theme")
self.__theme_clear_button = gtk_builder.get_object("theme-clear")
self.__theme_dark_button = gtk_builder.get_object("theme-dark")
# Help tab
self.__menu_help = gtk_builder.get_object("menubar-help")
self.__about_button = gtk_builder.get_object("menubar-about")
# ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ #
# Getters & Setters #
# ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ #
def get_menu_file(self):
return self.__menu_file
def set_menu_file(self, menu_file):
self.__menu_file = menu_file
def get_new_button(self):
return self.__new_button
def set_new_button(self, new_button):
self.__new_button = new_button
def get_open_button(self):
return self.__open_button
def set_open_button(self, open_button):
self.__open_button = open_button
def get_save_button(self):
return self.__save_button
def set_save_button(self, save_button):
self.__save_button = save_button
def get_save_as_button(self):
return self.__save_as_button
def set_save_as_button(self, save_as_button):
self.__save_as_button = save_as_button
def get_exit_button(self):
return self.__exit_button
def set_exit_button(self, exit_button):
self.__exit_button = exit_button
def get_menu_preferences(self):
return self.__menu_preferences
def set_menu_preferences(self, menu_preferences):
self.__menu_preferences = menu_preferences
def get_menu_language(self):
return self.__menu_language
def set_menu_language(self, menu_language):
self.__menu_language = menu_language
def get_spanish_language_button(self):
return self.__spanish_language_button
def set_spanish_language_button(self, spanish_language_button):
self.__spanish_language_button = spanish_language_button
def get_english_language_button(self):
return self.__english_language_button
def set_english_language_button(self, english_language_button):
self.__english_language_button = english_language_button
def get_menu_theme(self):
return self.__menu_theme
def set_menu_theme(self, menu_theme):
self.__menu_theme = menu_theme
def get_theme_clear_button(self):
return self.__theme_clear_button
def set_theme_clear_button(self, theme_clear_button):
self.__theme_clear_button = theme_clear_button
def get_theme_dark_button(self):
return self.__theme_dark_button
def set_theme_dark_button(self, theme_dark_button):
self.__theme_dark_button = theme_dark_button
def get_menu_help(self):
return self.__menu_help
def set_menu_help(self, menu_help):
self.__menu_help = menu_help
def get_about_button(self):
return self.__about_button
def set_about_button(self, about_button):
self.__about_button = about_button
|
py | 1a4e8700f8a91bee5eb64bdcc0c111df51b989de |
# 1. Write a function:
# 1) Compute the sum of all arguments multiplied by a base factor (default base=3)
def mysum(*numbers):
    res = 0
    for n in numbers:
        res += n
    return res
def bei(total, base=3):
    return mysum(total) * base
if __name__ == "__main__":
    # mysum(1, 3, 5) == 9, multiplied by the default base 3 -> 27
    print(bei(mysum(1, 3, 5)))
|
py | 1a4e88144b4567b8dcbb6da0e0cb53ad8bae94a2 | import google
def websites(query, start=0, stop=None, per_page=10):
return google.search(query, start=start, stop=stop, num=per_page)
|
py | 1a4e888fd8de9b60d86f3c468084f7fbe219c085 | import pytest
import typing as t
from uzi.providers import Resource as Provider
from ..abc import _T_NewPro, ProviderTestCase
xfail = pytest.mark.xfail
parametrize = pytest.mark.parametrize
_T_NewPro = _T_NewPro[Provider]
class ResourceProviderTests(ProviderTestCase[Provider]):
class ContextManager:
def __init__(self, func):
self.func = func
self.enters = 0
self.exits = 0
def __enter__(self):
assert self.enters == 0
self.enters += 1
return self.func()
def __exit__(self, *err):
assert self.enters == 1
assert self.exits == 0
self.exits += 1
# @pytest.fixture
# def provider(self, cm):
# return Provider(lambda: cm)
@pytest.fixture
def cm(self, cm_class, value_setter):
return cm_class(value_setter)
@pytest.fixture
def cm_class(self):
return self.ContextManager
# def test_exit(self, cm: ContextManager, provider: Provider, graph, injector, ctx_manager):
# bound = provider.resolve(graph, self.provides)
# with ctx_manager:
# fn = bound.resolver(injector)
# assert cm.enters == 0 == cm.exits
# assert fn() is fn() is fn() is fn()
# assert cm.enters == 1
# assert cm.exits == 0
# assert cm.enters == 1 == cm.exits
|
py | 1a4e88a87d4b66c3d21b5c8a8d03d016bfe698a1 | # Amara, universalsubtitles.org
#
# Copyright (C) 2012 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along
# with this program. If not, see http://www.gnu.org/licenses/agpl-3.0.html.
"""babelsubs.loader -- create subtitle sets."""
import os.path
import lxml
from babelsubs import parsers
from babelsubs import storage
from babelsubs.generators.dfxp import DFXPGenerator
from babelsubs.xmlconst import *
class SubtitleLoader(object):
"""SubtitleLoader -- Create SubtitleSets
SubtitleLoader provides a way to creating SubtitleSet objects with custom
layout/styling sections. It supports both creating new subtitles and
parsing them from other formats.
"""
def __init__(self):
self.styles = []
self.regions = []
def add_style(self, xml_id, **attrib):
"""Add a custom style to the created SubtitleSets.
Each add_style() call will create a new style element in the TTML. At
least one style must be added before creating subtitles.
:param xml_id: xml:id attribute to use
:param attribs: extra attributes to set. Each attribute name will
have the TTS namespace prefixed to it.
"""
self.styles.append((xml_id, attrib))
def add_region(self, xml_id, style_id, **attrib):
"""Add a custom region to the created SubtitleSets.
Each add_region() call will create a new region element in the TTML. At
least one region must be added before creating subtitles.
The first region added will be the default region for the body.
:param xml_id: xml:id attribute to use
:param style_id: style to use for this region
:param attribs: extra attributes to set. Each attribute name will
have the TTS namespace prefixed to it.
"""
self.regions.append((xml_id, style_id, attrib))
def _empty_ttml(self, language_code, title, description, frame_rate=None,
frame_rate_multiplier=None, drop_mode=None):
if not self.styles:
raise ValueError("no styles added")
if not self.regions:
raise ValueError("no regions added")
attrib = {}
if language_code:
attrib[XML + 'lang'] = language_code
if frame_rate:
attrib[TTP + 'frameRate'] = frame_rate
if frame_rate_multiplier:
attrib[TTP + 'frameRateMultiplier'] = frame_rate_multiplier
if drop_mode == 'dropNTSC':
attrib[TTP + 'timeBase'] = 'smpte'
attrib[TTP + 'dropMode'] = 'dropNTSC'
tt = lxml.etree.Element(TTML + 'tt', attrib=attrib, nsmap={
None: TTML_NAMESPACE_URI,
'tts': TTS_NAMESPACE_URI,
'ttm': TTM_NAMESPACE_URI,
'ttp': TTP_NAMESPACE_URI,
})
head = lxml.etree.SubElement(tt, TTML + 'head')
head.append(self._create_metadata(title, description))
head.append(self._create_styling())
head.append(self._create_layout())
tt.append(self._create_empty_body())
return tt
def _create_metadata(self, title, description):
metadata = lxml.etree.Element(TTML + 'metadata')
lxml.etree.SubElement(metadata, TTM + 'title').text = title
lxml.etree.SubElement(metadata, TTM + 'description').text = description
lxml.etree.SubElement(metadata, TTM + 'copyright')
return metadata
def _create_styling(self):
styling = lxml.etree.Element(TTML + 'styling')
for (xml_id, attrib) in self.styles:
style = lxml.etree.SubElement(styling, TTML + 'style')
style.set(XML + 'id', xml_id)
for name, value in attrib.items():
style.set(TTS + name, value)
return styling
def _create_layout(self):
layout = lxml.etree.Element(TTML + 'layout')
for (xml_id, style_id, attrib) in self.regions:
region = lxml.etree.SubElement(layout, TTML + 'region')
region.set(XML + 'id', xml_id)
region.set(TTML + 'style', style_id)
for name, value in attrib.items():
region.set(TTS + name, value)
return layout
def _create_empty_body(self):
body = lxml.etree.Element(TTML + 'body', attrib={
TTML + 'region': self.regions[0][0],
})
return body
def create_new(self, language_code, title='', description='',
frame_rate=None, frame_rate_multiplier=None, drop_mode=None):
"""Create a new SubtitleSet. """
ttml = self._empty_ttml(language_code, title, description, frame_rate,
frame_rate_multiplier, drop_mode)
# add an empty div to start the subtitles
lxml.etree.SubElement(ttml.find(TTML + 'body'), TTML + 'div')
return storage.SubtitleSet.create_with_raw_ttml(ttml)
def dfxp_merge(self, subtitle_sets):
"""Create a merged DFXP file from a list of subtitle sets."""
initial_ttml = self._empty_ttml('', '', '')
return DFXPGenerator.merge_subtitles(subtitle_sets, initial_ttml)
def load(self, language_code, path):
"""Create a SubtitleSet with existing subtitles.
If path is a DFXP file, then we will simply load it and return. If
it has any other format, we will create a DFXP template using our
styles/regions and load the subtitles into that. The reason for this
        is that if we are reading DFXP we don't want to overwrite the styles
inside the file with our own.
"""
basename, ext = os.path.splitext(path)
with open(path) as f:
content = f.read()
return self.loads(language_code, content, ext[1:].lower())
def loads(self, language_code, content, file_type):
try:
parser = parsers.discover(file_type)
except KeyError:
raise TypeError("No parser for %s" % file_type)
parsed_subs = parser.parse(content,
language=language_code).to_internal()
if parser is parsers.DFXPParser:
# return the subtitles as-is
return parsed_subs
ttml = self._empty_ttml(language_code, '', '')
self._move_elements(parsed_subs._ttml.find(TTML + 'body'),
ttml.find(TTML + 'body'))
return storage.SubtitleSet.create_with_raw_ttml(ttml)
def _remove_intial_div(self, subtitle_set):
body = subtitle_set._ttml.find(TTML + 'body')
body.remove(body[0])
def _move_elements(self, source, dest):
"""Move children from one etree element to another."""
children = list(source)
source.clear()
for child in children:
dest.append(child)
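# Illustrative usage sketch (not part of the original module); the style/region
# attribute values below are example TTS attributes, not values required by this class.
if __name__ == "__main__":
    loader = SubtitleLoader()
    loader.add_style("defaultStyle", color="white", fontSize="18px")
    loader.add_region("bottom", "defaultStyle", extent="100% 20%", origin="0% 80%")
    subs = loader.create_new("en", title="Example subtitles")
    print(subs)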
|
py | 1a4e89ba8558abd1473d5b6b0126166c9939b3e1 | import tests.periodicities.period_test as per
per.buildModel((7 , 'T' , 200));
|
py | 1a4e89d82b1d660c83279a333b6e791a57b6012c | #!/usr/bin/env python3
"""
USAGE:
yb_mass_column_update.py [options]
PURPOSE:
Update the value of multiple columns.
OPTIONS:
See the command line help message for all options.
(yb_mass_column_update.py --help)
Output:
The update statements for the requested set of columns.
"""
import sys
from yb_common import StoredProc, Util
class mass_column_update(Util):
"""Issue the ybsql command used to list the column names comprising an
object.
"""
config = {
'description': (
'Update the value of multiple columns.'
'\n'
'\nnote:'
'\n Mass column updates may cause performance issues due to the change '
'\n of how the data is ordered in storage.')
, 'optional_args_single': []
, 'optional_args_multi': ['owner', 'schema', 'table', 'column', 'datatype']
, 'usage_example': {
'cmd_line_args': "@$HOME/conn.args --datatype_like 'CHAR%' --update_where_clause \"<column> = 'NULL'\" --set_clause NULL --"
, 'file_args': [Util.conn_args_file] }
, 'db_filter_args': {'owner':'tableowner', 'schema':'schemaname', 'table':'tablename', 'column':'columnname', 'datatype':'datatype'} }
def execute(self):
self.cmd_results = StoredProc('yb_mass_column_update_p', self.db_conn).call_proc_as_anonymous_block(
args = {
'a_update_where_clause' : self.args_handler.args.update_where_clause
, 'a_set_clause' : self.args_handler.args.set_clause
, 'a_column_filter_clause' : self.db_filter_sql()
, 'a_exec_updates' : ('TRUE' if self.args_handler.args.exec_updates else 'FALSE')}
, pre_sql = self.args_handler.args.pre_sql
, post_sql = self.args_handler.args.post_sql)
def additional_args(self):
args_mass_r_grp = self.args_handler.args_parser.add_argument_group('required mass update arguments')
args_mass_r_grp.add_argument(
"--update_where_clause", required=True
, help=("update column only if this boolean clause is satisfied, like: "
"'LENGTH(<column>)<>LENGTH(RTRIM(<column>))', "
"Note: the special use of the string '<column>' ")
)
args_mass_r_grp.add_argument(
"--set_clause", required=True
, help=("Set the column to this value, Like; "
"'RTRIM(<column>)', "
"Note: the special use of the string '<column>' ")
)
args_mass_o_grp = self.args_handler.args_parser.add_argument_group('optional mass update arguments')
args_mass_o_grp.add_argument(
"--exec_updates"
, action='store_true'
, help=("defaults to False and only prints the update statements. When set "
"to True, execute the update statements.")
)
args_mass_o_grp.add_argument("--pre_sql", default=''
, help="SQL to run before the chunking DML, only runs if execute_chunk_dml is set")
args_mass_o_grp.add_argument("--post_sql", default=''
, help="SQL to run after the chunking DML, only runs if execute_chunk_dml is set")
def additional_args_process(self):
if '<column>' not in self.args_handler.args.update_where_clause:
self.args_handler.args_parser.error("UPDATE_WHERE_CLAUSE must contain the string '<column>'")
if not self.args_handler.args.exec_updates:
self.args_handler.args.pre_sql = ''
self.args_handler.args.post_sql = ''
self.args_handler.db_filter_args.schema_set_all_if_none()
def main():
mcu = mass_column_update()
sys.stdout.write('-- Running mass column update.\n')
mcu.execute()
mcu.cmd_results.write(tail='-- Completed mass column update.\n')
exit(mcu.cmd_results.exit_code)
if __name__ == "__main__":
main() |
py | 1a4e89db126d2cfd5da608d0c30391ba4f62e1bd | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
__dir__ = os.path.dirname(os.path.abspath(__file__))
sys.path.append(__dir__)
sys.path.append(os.path.abspath(os.path.join(__dir__, '../..')))
os.environ["FLAGS_allocator_strategy"] = 'auto_growth'
import cv2
import copy
import numpy as np
import math
import time
import traceback
import pyxlpr.ppocr.tools.infer.utility as utility
from pyxlpr.ppocr.postprocess import build_post_process
from pyxlpr.ppocr.utils.logging import get_logger
from pyxlpr.ppocr.utils.utility import get_image_file_list, check_and_read_gif
logger = get_logger()
class TextClassifier(object):
def __init__(self, args):
self.cls_image_shape = [int(v) for v in args.cls_image_shape.split(",")]
self.cls_batch_num = args.cls_batch_num
self.cls_thresh = args.cls_thresh
postprocess_params = {
'name': 'ClsPostProcess',
"label_list": args.label_list,
}
self.postprocess_op = build_post_process(postprocess_params)
self.predictor, self.input_tensor, self.output_tensors, _ = \
utility.create_predictor(args, 'cls', logger)
self.use_onnx = args.use_onnx
def resize_norm_img(self, img):
imgC, imgH, imgW = self.cls_image_shape
h = img.shape[0]
w = img.shape[1]
ratio = w / float(h)
if math.ceil(imgH * ratio) > imgW:
resized_w = imgW
else:
resized_w = int(math.ceil(imgH * ratio))
resized_image = cv2.resize(img, (resized_w, imgH))
resized_image = resized_image.astype('float32')
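        # Scale pixels to [0, 1] (converting HWC to CHW for 3-channel input), then shift/scale to [-1, 1] below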
if self.cls_image_shape[0] == 1:
resized_image = resized_image / 255
resized_image = resized_image[np.newaxis, :]
else:
resized_image = resized_image.transpose((2, 0, 1)) / 255
resized_image -= 0.5
resized_image /= 0.5
padding_im = np.zeros((imgC, imgH, imgW), dtype=np.float32)
padding_im[:, :, 0:resized_w] = resized_image
return padding_im
def __call__(self, img_list):
img_list = copy.deepcopy(img_list)
img_num = len(img_list)
# Calculate the aspect ratio of all text bars
width_list = []
for img in img_list:
width_list.append(img.shape[1] / float(img.shape[0]))
# Sorting can speed up the cls process
indices = np.argsort(np.array(width_list))
cls_res = [['', 0.0]] * img_num
batch_num = self.cls_batch_num
elapse = 0
for beg_img_no in range(0, img_num, batch_num):
end_img_no = min(img_num, beg_img_no + batch_num)
norm_img_batch = []
max_wh_ratio = 0
starttime = time.time()
for ino in range(beg_img_no, end_img_no):
h, w = img_list[indices[ino]].shape[0:2]
wh_ratio = w * 1.0 / h
max_wh_ratio = max(max_wh_ratio, wh_ratio)
for ino in range(beg_img_no, end_img_no):
norm_img = self.resize_norm_img(img_list[indices[ino]])
norm_img = norm_img[np.newaxis, :]
norm_img_batch.append(norm_img)
norm_img_batch = np.concatenate(norm_img_batch)
norm_img_batch = norm_img_batch.copy()
if self.use_onnx:
input_dict = {}
input_dict[self.input_tensor.name] = norm_img_batch
outputs = self.predictor.run(self.output_tensors, input_dict)
prob_out = outputs[0]
else:
self.input_tensor.copy_from_cpu(norm_img_batch)
self.predictor.run()
prob_out = self.output_tensors[0].copy_to_cpu()
self.predictor.try_shrink_memory()
cls_result = self.postprocess_op(prob_out)
elapse += time.time() - starttime
for rno in range(len(cls_result)):
label, score = cls_result[rno]
cls_res[indices[beg_img_no + rno]] = [label, score]
if '180' in label and score > self.cls_thresh:
img_list[indices[beg_img_no + rno]] = cv2.rotate(
img_list[indices[beg_img_no + rno]], 1)
return img_list, cls_res, elapse
def main(args):
image_file_list = get_image_file_list(args.image_dir)
text_classifier = TextClassifier(args)
valid_image_file_list = []
img_list = []
for image_file in image_file_list:
img, flag = check_and_read_gif(image_file)
if not flag:
img = cv2.imread(image_file)
if img is None:
logger.info("error in loading image:{}".format(image_file))
continue
valid_image_file_list.append(image_file)
img_list.append(img)
try:
img_list, cls_res, predict_time = text_classifier(img_list)
except Exception as E:
logger.info(traceback.format_exc())
logger.info(E)
exit()
for ino in range(len(img_list)):
logger.info("Predicts of {}:{}".format(valid_image_file_list[ino],
cls_res[ino]))
if __name__ == "__main__":
main(utility.parse_args())
|
py | 1a4e8a7c609fe8449e454a83ca6b34b7cb7cb1a6 | """Implemented support for Common Workflow Language (CWL) for Toil."""
# Copyright (C) 2015 Curoverse, Inc
# Copyright (C) 2015-2021 Regents of the University of California
# Copyright (C) 2019-2020 Seven Bridges
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# For an overview of how this all works, see discussion in
# docs/architecture.rst
import argparse
import copy
import datetime
import functools
import json
import logging
import os
import stat
import sys
import tempfile
import textwrap
import urllib
import uuid
from typing import (
Any,
Dict,
Iterator,
List,
Mapping,
MutableMapping,
MutableSequence,
Optional,
Text,
TextIO,
Tuple,
TypeVar,
Union,
cast,
)
from urllib import parse as urlparse
import cwltool.builder
import cwltool.command_line_tool
import cwltool.errors
import cwltool.expression
import cwltool.load_tool
import cwltool.main
import cwltool.provenance
import cwltool.resolver
import cwltool.stdfsaccess
import schema_salad.ref_resolver
from cwltool.loghandler import _logger as cwllogger
from cwltool.loghandler import defaultStreamHandler
from cwltool.mutation import MutationManager
from cwltool.pathmapper import MapperEnt, PathMapper, downloadHttpFile
from cwltool.process import (
Process,
add_sizes,
compute_checksums,
fill_in_defaults,
shortname,
)
from cwltool.secrets import SecretStore
from cwltool.software_requirements import (
DependenciesConfiguration,
get_container_from_software_requirements,
)
from cwltool.utils import (
CWLObjectType,
adjustDirObjs,
adjustFileObjs,
aslist,
convert_pathsep_to_unix,
get_listing,
normalizeFilesDirs,
visit_class,
)
from ruamel.yaml.comments import CommentedMap
from schema_salad import validate
from schema_salad.schema import Names
from schema_salad.sourceline import SourceLine
from toil.common import Config, Toil, addOptions
from toil.fileStores import FileID
from toil.fileStores.abstractFileStore import AbstractFileStore
from toil.job import Job
from toil.jobStores.abstractJobStore import NoSuchFileException, NoSuchJobStoreException
from toil.version import baseVersion
logger = logging.getLogger(__name__)
# Define internal jobs we should avoid submitting to batch systems and logging
CWL_INTERNAL_JOBS = (
"CWLJobWrapper",
"CWLWorkflow",
"CWLScatter",
"CWLGather",
"ResolveIndirect",
)
def cwltoil_was_removed():
"""Complain about deprecated entrypoint."""
raise RuntimeError(
'Please run with "toil-cwl-runner" instead of "cwltoil" (which has been removed).'
)
# The job object passed into CWLJob and CWLWorkflow
# is a dict mapping to tuple of (key, dict)
# the final dict is derived by evaluating each
# tuple looking up the key in the supplied dict.
#
# This is necessary because Toil jobs return a single value (a dict)
# but CWL permits steps to have multiple output parameters that may
# feed into multiple other steps. This transformation maps the key in the
# output object to the correct key of the input object.
class UnresolvedDict(dict):
"""Tag to indicate a dict contains promises that must be resolved."""
class SkipNull:
"""
Internal sentinel object.
Indicates a null value produced by each port of a skipped conditional step.
    The CWL 1.2 specification calls for treating this exactly the same as a
null value.
"""
def filter_skip_null(name: str, value: Any) -> Any:
"""
Recursively filter out SkipNull objects from 'value'.
:param name: Name of port producing this value.
Only used when we find an unhandled null from a conditional step
and we print out a warning. The name allows the user to better
localize which step/port was responsible for the unhandled null.
:param value: port output value object
"""
err_flag = [False]
value = _filter_skip_null(value, err_flag)
if err_flag[0]:
logger.warning(
"In %s, SkipNull result found and cast to None. \n"
"You had a conditional step that did not run, "
"but you did not use pickValue to handle the skipped input." % name
)
return value
def _filter_skip_null(value: Any, err_flag: List[bool]) -> Any:
"""
Private implementation for recursively filtering out SkipNull objects from 'value'.
:param value: port output value object
:param err_flag: A pass by reference boolean (passed by enclosing in a list) that
allows us to flag, at any level of recursion, that we have encountered
a SkipNull.
"""
if isinstance(value, SkipNull):
err_flag[0] = True
value = None
elif isinstance(value, list):
return [_filter_skip_null(v, err_flag) for v in value]
elif isinstance(value, dict):
return {k: _filter_skip_null(v, err_flag) for k, v in value.items()}
return value
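# Editor's illustrative sketch (hypothetical helper, not part of the original
# module): how SkipNull filtering behaves on a nested value.
def _example_filter_skip_null() -> Any:
    mixed = [SkipNull(), {"a": SkipNull(), "b": 1}]
    # Each SkipNull sentinel is cast to None and a warning names the port.
    return filter_skip_null("example_step/out", mixed)  # [None, {"a": None, "b": 1}]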
class Conditional:
"""
Object holding conditional expression until we are ready to evaluate it.
    Evaluation occurs at the moment the enclosing step is ready to run.
"""
def __init__(
self,
expression: Union[str, None] = None,
outputs: Union[dict, None] = None,
requirements: List[CWLObjectType] = [],
):
"""
Instantiate a conditional expression.
:param expression: A string with the expression from the 'when' field of the step
:param outputs: The output dictionary for the step. This is needed because if the
step is skipped, all the outputs need to be populated with SkipNull
values
:param requirements: The requirements object that is needed for the context the
expression will evaluate in.
"""
self.expression = expression
self.outputs = outputs
self.requirements = requirements
def is_false(self, job: Union[dict, None]) -> bool:
"""
Determine if expression evaluates to False given completed step inputs.
:param job: job output object
:return: bool
"""
if self.expression is None:
return False
expr_is_true = cwltool.expression.do_eval(
self.expression,
{shortname(k): v for k, v in resolve_dict_w_promises(job).items()},
self.requirements,
None,
None,
{},
)
if isinstance(expr_is_true, bool):
return not expr_is_true
raise cwltool.errors.WorkflowException(
"'%s' evaluated to a non-boolean value" % self.expression
)
def skipped_outputs(self) -> dict:
"""
Generate a dict of SkipNull objects corresponding to the output structure of the step.
:return: dict
"""
outobj = {}
def sn(n):
if isinstance(n, Mapping):
return shortname(n["id"])
if isinstance(n, str):
return shortname(n)
for k in [sn(o) for o in self.outputs]:
outobj[k] = SkipNull()
return outobj
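# Editor's illustrative sketch (hypothetical ids, not part of the original
# module): a step guarded by `when: $(inputs.flag)` with one output port.
def _example_skipped_conditional() -> dict:
    cond = Conditional(
        expression="$(inputs.flag)",
        outputs=[{"id": "example.cwl#step/out"}],
        requirements=[],
    )
    # When the step is skipped, every output port maps to a SkipNull sentinel.
    return cond.skipped_outputs()  # {"out": SkipNull()}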
class ResolveSource:
"""Apply linkMerge and pickValue operators to values coming into a port."""
def __init__(self, name: str, input: dict, source_key: str, promises: dict):
"""
Construct a container object.
It will carry what information it can about the input sources and the
current promises, ready for evaluation when the time comes.
:param name: human readable name of step/port that this value refers to
:param input: CWL input object complete with linkMerge and pickValue fields
:param source_key: "source" or "outputSource" depending on what it is
:param promises: incident values packed as promises
"""
self.name, self.input, self.source_key = name, input, source_key
source_names = aslist(self.input[self.source_key])
# Rule is that source: [foo] is just foo unless it also has linkMerge: merge_nested
if input.get("linkMerge") or len(source_names) > 1:
self.promise_tuples = [
(shortname(s), promises[s].rv()) for s in source_names
]
else:
# KG: Cargo culting this logic and the reason given from original Toil code:
# It seems that an input source with a
# '#' in the name will be returned as a
# CommentedSeq list by the yaml parser.
s = str(source_names[0])
self.promise_tuples = (shortname(s), promises[s].rv()) # type: ignore
def resolve(self) -> Any:
"""
First apply linkMerge then pickValue if either present.
:return: dict
"""
if isinstance(self.promise_tuples, list):
result = self.link_merge([v[1][v[0]] for v in self.promise_tuples]) # type: ignore
else:
value = self.promise_tuples
result = value[1].get(value[0]) # type: ignore
result = self.pick_value(result)
result = filter_skip_null(self.name, result)
return result
def link_merge(self, values: dict) -> Union[list, dict]:
"""
Apply linkMerge operator to `values` object.
:param values: dict: result of step
"""
link_merge_type = self.input.get("linkMerge", "merge_nested")
if link_merge_type == "merge_nested":
return values
elif link_merge_type == "merge_flattened":
result = [] # type: ignore
for v in values:
if isinstance(v, MutableSequence):
result.extend(v)
else:
result.append(v)
return result
else:
raise validate.ValidationException(
"Unsupported linkMerge '%s' on %s." % (link_merge_type, self.name)
)
def pick_value(self, values: Union[List, Any]) -> Any:
"""
Apply pickValue operator to `values` object.
:param values: Intended to be a list, but other types will be returned
without modification.
:return:
"""
pick_value_type = self.input.get("pickValue")
if pick_value_type is None:
return values
if not isinstance(values, list):
logger.warning("pickValue used but input %s is not a list." % self.name)
return values
result = [v for v in values if not isinstance(v, SkipNull) and v is not None]
if pick_value_type == "first_non_null":
if len(result) < 1:
raise cwltool.errors.WorkflowException(
"%s: first_non_null operator found no non-null values" % self.name
)
else:
return result[0]
elif pick_value_type == "the_only_non_null":
if len(result) == 0:
raise cwltool.errors.WorkflowException(
"%s: the_only_non_null operator found no non-null values"
% self.name
)
elif len(result) > 1:
raise cwltool.errors.WorkflowException(
"%s: the_only_non_null operator found more than one non-null values"
% self.name
)
else:
return result[0]
elif pick_value_type == "all_non_null":
return result
else:
raise cwltool.errors.WorkflowException(
"Unsupported pickValue '%s' on %s" % (pick_value_type, self.name)
)
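# Editor's illustrative note: given incoming values [SkipNull(), None, "a", "b"],
# the filtered non-null list is ["a", "b"], so
#   pickValue: first_non_null    -> "a"
#   pickValue: all_non_null      -> ["a", "b"]
#   pickValue: the_only_non_null -> WorkflowException (more than one non-null value)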
class StepValueFrom:
"""
A workflow step input which has a valueFrom expression attached to it.
The valueFrom expression will be evaluated to produce the actual input
object for the step.
"""
def __init__(self, expr: str, source: Any, req: List[CWLObjectType]):
"""
        Instantiate an object to carry everything we know about this valueFrom expression.
:param expr: str: expression as a string
:param source: the source promise of this step
:param req: requirements object that is consumed by CWLtool expression evaluator
"""
self.expr = expr
self.source = source
self.context = None
self.req = req
def eval_prep(self, step_inputs: dict, file_store: AbstractFileStore):
"""
Resolve the contents of any file in a set of inputs.
The inputs must be associated with the StepValueFrom object's self.source.
Called when loadContents is specified.
:param step_inputs: Workflow step inputs.
:param file_store: A toil file store, needed to resolve toil fs:// paths.
"""
for v in step_inputs.values():
val = cast(CWLObjectType, v)
source_input = getattr(self.source, "input", {})
if isinstance(val, dict) and isinstance(source_input, dict):
if (
val.get("contents") is None
and source_input.get("loadContents") is True
):
fs_access = functools.partial(ToilFsAccess, file_store=file_store)
with fs_access("").open(cast(str, val["location"]), "rb") as f:
val["contents"] = cwltool.builder.content_limit_respected_read(
f
)
def resolve(self) -> Any:
"""
Resolve the promise in the valueFrom expression's context.
:return: object that will serve as expression context
"""
self.context = self.source.resolve()
return self.context
def do_eval(self, inputs: CWLObjectType) -> Any:
"""
Evaluate the valueFrom expression with the given input object.
:param inputs:
:return: object
"""
return cwltool.expression.do_eval(
self.expr, inputs, self.req, None, None, {}, context=self.context
)
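# Editor's illustrative note: for a step input declared with
#   valueFrom: $(self.path)
# resolve() first materialises the source promise into self.context, and
# do_eval() then evaluates the expression with that resolved value bound to
# `self` while the other step inputs are available as `inputs`.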
class DefaultWithSource:
"""A workflow step input that has both a source and a default value."""
def __init__(self, default: Any, source: Any):
"""
Instantiate an object to handle a source that has a default value.
:param default: the default value
:param source: the source object
"""
self.default = default
self.source = source
def resolve(self) -> Any:
"""
Determine the final input value when the time is right.
(when the source can be resolved)
:return: dict
"""
if self.source:
result = self.source.resolve()
if result is not None:
return result
return self.default
class JustAValue:
"""A simple value masquerading as a 'resolve'-able object."""
def __init__(self, val: Any):
"""Store the value."""
self.val = val
def resolve(self) -> Any:
"""Return the value."""
return self.val
def resolve_dict_w_promises(
dict_w_promises: dict, file_store: AbstractFileStore = None
) -> dict:
"""
    Resolve a dictionary of promises and evaluate expressions to produce the actual values.
:param dict_w_promises: input dict for these values
:return: dictionary of actual values
"""
if isinstance(dict_w_promises, UnresolvedDict):
first_pass_results = {k: v.resolve() for k, v in dict_w_promises.items()}
else:
first_pass_results = {k: v for k, v in dict_w_promises.items()}
result = {}
for k, v in dict_w_promises.items():
if isinstance(v, StepValueFrom):
if file_store:
v.eval_prep(first_pass_results, file_store)
result[k] = v.do_eval(inputs=first_pass_results)
else:
result[k] = first_pass_results[k]
# '_:' prefixed file paths are a signal to cwltool to create folders in place
# rather than copying them, so we make them here
for entry in result:
if isinstance(result[entry], dict):
location = result[entry].get("location")
if location:
if location.startswith("_:file://"):
local_dir_path = location[len("_:file://") :]
os.makedirs(local_dir_path, exist_ok=True)
result[entry]["location"] = local_dir_path
return result
def simplify_list(maybe_list: Any) -> Any:
"""
    Turn a length-one list loaded by cwltool into a scalar.
Anything else is passed as-is, by reference.
"""
if isinstance(maybe_list, MutableSequence):
is_list = aslist(maybe_list)
if len(is_list) == 1:
return is_list[0]
return maybe_list
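# Editor's illustrative sketch (hypothetical helper, not part of the original
# module): single-element lists collapse, everything else passes through.
def _example_simplify_list() -> tuple:
    return simplify_list(["only"]), simplify_list(["a", "b"]), simplify_list(42)
    # -> ("only", ["a", "b"], 42)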
class ToilPathMapper(PathMapper):
"""
Keeps track of files in a Toil way.
Maps the symbolic identifier of a file (the Toil FileID), its local path on
    the host (the value returned by readGlobalFile) and the location of the
file inside the software container.
"""
def __init__(
self,
referenced_files: list,
basedir: str,
stagedir: str,
separateDirs: bool = True,
get_file: Union[Any, None] = None,
stage_listing: bool = False,
):
"""Initialize this ToilPathMapper."""
self.get_file = get_file
self.stage_listing = stage_listing
super(ToilPathMapper, self).__init__(
referenced_files, basedir, stagedir, separateDirs=separateDirs
)
def visit(
self,
obj: CWLObjectType,
stagedir: str,
basedir: str,
copy: bool = False,
staged: bool = False,
) -> None:
"""Iterate over a CWL object, resolving File and Directory path references."""
stagedir = cast(Optional[str], obj.get("dirname")) or stagedir
tgt = convert_pathsep_to_unix(
os.path.join(
stagedir,
cast(str, obj["basename"]),
)
)
if obj["location"] in self._pathmap:
return
if obj["class"] == "Directory":
location = cast(str, obj["location"])
if location.startswith("file://"):
resolved = schema_salad.ref_resolver.uri_file_path(location)
else:
resolved = location
self._pathmap[location] = MapperEnt(
resolved, tgt, "WritableDirectory" if copy else "Directory", staged
)
if location.startswith("file://"):
staged = False
self.visitlisting(
cast(List, obj.get("listing", [])),
tgt,
basedir,
copy=copy,
staged=staged,
)
elif obj["class"] == "File":
path = cast(str, obj["location"])
ab = cwltool.stdfsaccess.abspath(path, basedir)
if "contents" in obj and path.startswith("_:"):
self._pathmap[path] = MapperEnt(
cast(str, obj["contents"]),
tgt,
"CreateWritableFile" if copy else "CreateFile",
staged,
)
else:
with SourceLine(
obj,
"location",
validate.ValidationException,
logger.isEnabledFor(logging.DEBUG),
):
deref = self.get_file(path) if self.get_file else ab
if deref.startswith("file:"):
deref = schema_salad.ref_resolver.uri_file_path(deref)
if urllib.parse.urlsplit(deref).scheme in ["http", "https"]:
deref = downloadHttpFile(path)
elif urllib.parse.urlsplit(deref).scheme != "toilfs":
# Dereference symbolic links
st = os.lstat(deref)
while stat.S_ISLNK(st.st_mode):
rl = os.readlink(deref)
deref = (
rl
if os.path.isabs(rl)
else os.path.join(os.path.dirname(deref), rl)
)
st = os.lstat(deref)
self._pathmap[path] = MapperEnt(
deref, tgt, "WritableFile" if copy else "File", staged
)
self.visitlisting(
cast(List[CWLObjectType], obj.get("secondaryFiles", [])),
stagedir,
basedir,
copy=copy,
staged=staged,
)
class ToilCommandLineTool(cwltool.command_line_tool.CommandLineTool):
"""Subclass the cwltool command line tool to provide the custom Toil.PathMapper."""
def make_path_mapper(
self,
reffiles: List[Any],
stagedir: str,
runtimeContext: cwltool.context.RuntimeContext,
separateDirs: bool,
) -> cwltool.pathmapper.PathMapper:
"""Create the appropriate ToilPathMapper for the situation."""
return ToilPathMapper(
reffiles,
runtimeContext.basedir,
stagedir,
separateDirs,
runtimeContext.toil_get_file, # type: ignore
)
def toil_make_tool(
toolpath_object: CommentedMap,
loadingContext: cwltool.context.LoadingContext,
) -> Process:
"""
Emit custom ToilCommandLineTools.
    This factory function is meant to be passed to cwltool.load_tool().
"""
if (
isinstance(toolpath_object, Mapping)
and toolpath_object.get("class") == "CommandLineTool"
):
return ToilCommandLineTool(toolpath_object, loadingContext)
return cwltool.workflow.default_make_tool(toolpath_object, loadingContext)
class ToilFsAccess(cwltool.stdfsaccess.StdFsAccess):
"""Custom filesystem access class which handles toil filestore references."""
def __init__(self, basedir: str, file_store: AbstractFileStore = None):
"""Create a FsAccess object for the given Toil Filestore and basedir."""
self.file_store = file_store
super(ToilFsAccess, self).__init__(basedir)
def exists(self, path: str) -> bool:
"""Test for file existance."""
# toil's _abs() throws errors when files are not found and cwltool's _abs() does not
try:
return os.path.exists(self._abs(path))
except NoSuchFileException:
return False
def realpath(self, path: str) -> str:
if path.startswith("toilfs:"):
# import the file and make it available locally if it exists
path = self._abs(path)
elif path.startswith("_:"):
return path
return os.path.realpath(path)
def listdir(self, fn: str) -> List[str]:
directory = self._abs(fn)
if fn.startswith("_:file://"):
directory = fn[len("_:file://") :]
if os.path.isdir(directory):
return [
cwltool.stdfsaccess.abspath(urllib.parse.quote(entry), fn)
for entry in os.listdir(directory)
]
else:
return []
else:
return [
cwltool.stdfsaccess.abspath(urllib.parse.quote(entry), fn)
for entry in os.listdir(self._abs(directory))
]
def _abs(self, path: str) -> str:
"""
        Return a local absolute path for a file (no scheme).
        Overrides cwltool.stdfsaccess.StdFsAccess._abs() to account for the Toil-specific scheme.
"""
# Used to fetch a path to determine if a file exists in the inherited
# cwltool.stdfsaccess.StdFsAccess, (among other things) so this should
# not error on missing files.
# See: https://github.com/common-workflow-language/cwltool/blob/beab66d649dd3ee82a013322a5e830875e8556ba/cwltool/stdfsaccess.py#L43 # noqa B950
if path.startswith("toilfs:"):
logger.debug("Need to download file to get a local absolute path.")
destination = self.file_store.readGlobalFile(FileID.unpack(path[7:]))
logger.debug("Downloaded %s to %s", path, destination)
if not os.path.exists(destination):
raise RuntimeError(
f"{destination} does not exist after filestore import."
)
elif path.startswith("_:file://"):
destination = path
else:
destination = super(ToilFsAccess, self)._abs(path)
return destination
def toil_get_file(
file_store: AbstractFileStore, index: dict, existing: dict, file_store_id: str
) -> str:
"""Get path to input file from Toil jobstore."""
if not file_store_id.startswith("toilfs:"):
return file_store.jobStore.getPublicUrl(
file_store.jobStore.importFile(file_store_id)
)
src_path = file_store.readGlobalFile(FileID.unpack(file_store_id[7:]))
index[src_path] = file_store_id
existing[file_store_id] = src_path
return schema_salad.ref_resolver.file_uri(src_path)
def write_file(writeFunc: Any, index: dict, existing: dict, file_uri: str) -> str:
"""
Write a file into the Toil jobstore.
'existing' is a set of files retrieved as inputs from toil_get_file. This
ensures they are mapped back as the same name if passed through.
Returns a toil uri path to the object.
"""
# Toil fileStore reference
if file_uri.startswith("toilfs:"):
return file_uri
    # File literal outputs with no path: we don't write these, and they will fail
    # with UnsupportedRequirement when retrieved later with getFile
elif file_uri.startswith("_:"):
return file_uri
else:
file_uri = existing.get(file_uri, file_uri)
if file_uri not in index:
if not urlparse.urlparse(file_uri).scheme:
rp = os.path.realpath(file_uri)
else:
rp = file_uri
try:
index[file_uri] = "toilfs:" + writeFunc(rp).pack()
existing[index[file_uri]] = file_uri
except Exception as e:
logger.error("Got exception '%s' while copying '%s'", e, file_uri)
raise
return index[file_uri]
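# Editor's illustrative note: a freshly written file gets a location of the form
# "toilfs:" + FileID(...).pack(); toil_get_file() reverses this by unpacking the
# FileID (everything after the "toilfs:" prefix) and reading it back out of the
# file store with readGlobalFile().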
def prepareDirectoryForUpload(
directory_metadata: dict, skip_broken: bool = False
) -> None:
"""
Prepare a Directory object to be uploaded.
Assumes listings are already filled in.
Makes sure the directory actually exists, and rewrites its location to be
something we can use on another machine.
Since Files and sub-Directories are already tracked by the directory's
listing, we just need some sentinel path to represent the existence of a
directory coming from Toil and not the local filesystem.
"""
if directory_metadata["location"].startswith("toilfs:") or directory_metadata[
"location"
].startswith("_:"):
# Already in Toil; nothing to do
return
if not directory_metadata["location"] and directory_metadata["path"]:
directory_metadata["location"] = schema_salad.ref_resolver.file_uri(
directory_metadata["path"]
)
if directory_metadata["location"].startswith("file://") and not os.path.isdir(
directory_metadata["location"][7:]
):
if skip_broken:
return
else:
raise cwltool.errors.WorkflowException(
"Directory is missing: %s" % directory_metadata["location"]
)
# The metadata for a directory is all we need to keep around for it. It
# doesn't have a real location. But each directory needs a unique location
# or cwltool won't ship the metadata along. cwltool takes "_:" as a signal
# to make directories instead of copying from somewhere. So we give every
# directory a unique _: location and cwltool's machinery Just Works.
directory_metadata["location"] = "_:" + directory_metadata["location"]
logger.debug("Sending directory at %s", directory_metadata["location"])
def uploadFile(
uploadfunc: Any,
fileindex: dict,
existing: dict,
file_metadata: dict,
skip_broken: bool = False,
) -> None:
"""
Update a file object so that the location is a reference to the toil file store.
Write the file object to the file store if necessary.
"""
if file_metadata["location"].startswith("toilfs:") or file_metadata[
"location"
].startswith("_:"):
return
if file_metadata["location"] in fileindex:
file_metadata["location"] = fileindex[file_metadata["location"]]
return
if not file_metadata["location"] and file_metadata["path"]:
file_metadata["location"] = schema_salad.ref_resolver.file_uri(
file_metadata["path"]
)
if file_metadata["location"].startswith("file://") and not os.path.isfile(
file_metadata["location"][7:]
):
if skip_broken:
return
else:
raise cwltool.errors.WorkflowException(
"File is missing: %s" % file_metadata["location"]
)
file_metadata["location"] = write_file(
uploadfunc, fileindex, existing, file_metadata["location"]
)
logger.debug("Sending file at: %s", file_metadata["location"])
def writeGlobalFileWrapper(file_store: AbstractFileStore, fileuri: str) -> str:
"""Wrap writeGlobalFile to accept file:// URIs."""
fileuri = fileuri if ":/" in fileuri else f"file://{fileuri}"
return file_store.writeGlobalFile(schema_salad.ref_resolver.uri_file_path(fileuri))
def remove_empty_listings(rec: CWLObjectType) -> None:
if rec.get("class") != "Directory":
finddirs = [] # type: List[CWLObjectType]
visit_class(rec, ("Directory",), finddirs.append)
for f in finddirs:
remove_empty_listings(f)
return
if "listing" in rec and rec["listing"] == []:
del rec["listing"]
return
class ResolveIndirect(Job):
"""
Helper Job.
Accepts an unresolved dict (containing promises) and produces a dictionary
of actual values.
"""
def __init__(self, cwljob: dict):
"""Store the dictionary of promises for later resolution."""
        super(ResolveIndirect, self).__init__(cores=1, memory=1024**2, disk=0)
self.cwljob = cwljob
def run(self, file_store: AbstractFileStore) -> dict:
"""Evaluate the promises and return their values."""
return resolve_dict_w_promises(self.cwljob)
def toilStageFiles(
file_store: AbstractFileStore,
cwljob: Union[Dict[Text, Any], List[Dict[Text, Any]]],
outdir: str,
destBucket: Union[str, None] = None,
) -> None:
"""Copy input files out of the global file store and update location and path."""
def _collectDirEntries(
obj: Union[Dict[Text, Any], List[Dict[Text, Any]]]
) -> Iterator[Dict[Text, Any]]:
if isinstance(obj, dict):
if obj.get("class") in ("File", "Directory"):
yield obj
for dir_entry in _collectDirEntries(obj.get("secondaryFiles", [])):
yield dir_entry
else:
for sub_obj in obj.values():
for dir_entry in _collectDirEntries(sub_obj):
yield dir_entry
elif isinstance(obj, list):
for sub_obj in obj:
for dir_entry in _collectDirEntries(sub_obj):
yield dir_entry
jobfiles = list(_collectDirEntries(cwljob))
pm = ToilPathMapper(jobfiles, "", outdir, separateDirs=False, stage_listing=True)
for _, p in pm.items():
if p.staged:
if destBucket and p.type in ["File", "CreateFile"]:
# Directories don't need to be created if we're exporting to a bucket
baseName = p.target[len(outdir) :]
local_file_path = p.resolved[len("file://") :]
if (
p.type == "CreateFile"
): # TODO: CreateFile for buckets is not under testing
local_file_path = os.path.join(
file_store.getLocalTempDir(), baseName
)
with open(local_file_path, "wb") as n:
n.write(p.resolved.encode("utf-8"))
destUrl = "/".join(s.strip("/") for s in [destBucket, baseName])
file_store.exportFile(FileID.unpack(local_file_path), destUrl)
else:
if not os.path.exists(p.target) and p.type == "Directory":
os.makedirs(p.target)
if not os.path.exists(p.target) and p.type == "File":
os.makedirs(os.path.dirname(p.target), exist_ok=True)
file_store.exportFile(
FileID.unpack(p.resolved[7:]), "file://" + p.target
)
if not os.path.exists(p.target) and p.type == "CreateFile":
os.makedirs(os.path.dirname(p.target), exist_ok=True)
with open(p.target, "wb") as n:
n.write(p.resolved.encode("utf-8"))
def _check_adjust(f: dict) -> dict:
f["location"] = schema_salad.ref_resolver.file_uri(pm.mapper(f["location"])[1])
if "contents" in f:
del f["contents"]
return f
visit_class(cwljob, ("File", "Directory"), _check_adjust)
class CWLJobWrapper(Job):
"""
    Wrap a CWL job that uses dynamic resource requirements.
When executed, this creates a new child job which has the correct resource
requirement set.
"""
def __init__(
self,
tool: ToilCommandLineTool,
cwljob: dict,
runtime_context: cwltool.context.RuntimeContext,
conditional: Union[Conditional, None] = None,
):
"""Store our context for later evaluation."""
super(CWLJobWrapper, self).__init__(cores=1, memory=1024 * 1024, disk=8 * 1024)
self.cwltool = remove_pickle_problems(tool)
self.cwljob = cwljob
self.runtime_context = runtime_context
self.conditional = conditional
def run(self, file_store: AbstractFileStore) -> Any:
"""Create a child job with the correct resource requirements set."""
cwljob = resolve_dict_w_promises(self.cwljob, file_store)
fill_in_defaults(
self.cwltool.tool["inputs"],
cwljob,
self.runtime_context.make_fs_access(self.runtime_context.basedir or ""),
)
realjob = CWLJob(
tool=self.cwltool,
cwljob=cwljob,
runtime_context=self.runtime_context,
conditional=self.conditional,
)
self.addChild(realjob)
return realjob.rv()
class CWLJob(Job):
"""Execute a CWL tool using cwltool.executors.SingleJobExecutor."""
def __init__(
self,
tool: ToilCommandLineTool,
cwljob: dict,
runtime_context: cwltool.context.RuntimeContext,
conditional: Union[Conditional, None] = None,
):
"""Store the context for later execution."""
self.cwltool = remove_pickle_problems(tool)
self.conditional = conditional or Conditional()
if runtime_context.builder:
self.builder = runtime_context.builder
else:
self.builder = cwltool.builder.Builder(
job=cwljob,
files=[],
bindings=[],
schemaDefs={},
names=Names(),
requirements=self.cwltool.requirements,
hints=[],
resources={},
mutation_manager=None,
formatgraph=None,
make_fs_access=runtime_context.make_fs_access, # type: ignore
fs_access=runtime_context.make_fs_access(""),
job_script_provider=None,
timeout=runtime_context.eval_timeout,
debug=False,
js_console=False,
force_docker_pull=False,
loadListing=determine_load_listing(tool),
outdir="",
tmpdir="/tmp", # TODO: use actual defaults here
stagedir="/var/lib/cwl", # TODO: use actual defaults here
cwlVersion=cast(str, self.cwltool.metadata["cwlVersion"]),
)
req = tool.evalResources(self.builder, runtime_context)
        # pass the default of None if baseCommand is empty
unitName = self.cwltool.tool.get("baseCommand", None)
if isinstance(unitName, (MutableSequence, tuple)):
unitName = " ".join(unitName)
try:
displayName = str(self.cwltool.tool["id"])
except KeyError:
displayName = None
super(CWLJob, self).__init__(
cores=req["cores"],
memory=int(req["ram"] * (2 ** 20)),
disk=int(
(cast(int, req["tmpdirSize"]) * (2 ** 20))
+ (cast(int, req["outdirSize"]) * (2 ** 20))
),
unitName=unitName,
displayName=displayName,
)
self.cwljob = cwljob
try:
self.jobName = str(self.cwltool.tool["id"])
except KeyError:
# fall back to the Toil defined class name if the tool doesn't have
# an identifier
pass
self.runtime_context = runtime_context
self.step_inputs = self.cwltool.tool["inputs"]
self.workdir = runtime_context.workdir # type: ignore
def required_env_vars(self, cwljob: Any) -> Iterator[Tuple[str, str]]:
"""Yield environment variables from EnvVarRequirement."""
if isinstance(cwljob, dict):
if cwljob.get("class") == "EnvVarRequirement":
for t in cwljob.get("envDef", {}):
yield t["envName"], cast(str, self.builder.do_eval(t["envValue"]))
for v in cwljob.values():
for env_name, env_value in self.required_env_vars(v):
yield env_name, env_value
if isinstance(cwljob, list):
for env_var in cwljob:
for env_name, env_value in self.required_env_vars(env_var):
yield env_name, env_value
def populate_env_vars(self, cwljob: dict) -> dict:
"""
Prepare environment variables necessary at runtime for the job.
Env vars specified in the CWL "requirements" section should already be
loaded in self.cwltool.requirements, however those specified with
"EnvVarRequirement" take precedence and are only populated here. Therefore,
this not only returns a dictionary with all evaluated "EnvVarRequirement"
env vars, but checks self.cwltool.requirements for any env vars with the
same name and replaces their value with that found in the
"EnvVarRequirement" env var if it exists.
"""
self.builder.job = cwljob
required_env_vars = {}
# iterate over EnvVarRequirement env vars, if any
for k, v in self.required_env_vars(cwljob):
required_env_vars[
k
] = v # will tell cwltool which env vars to take from the environment
os.environ[k] = v
# needs to actually be populated in the environment as well or
# they're not used
# EnvVarRequirement env vars take priority over those specified with
# "requirements" so cwltool.requirements need to be overwritten if an
# env var with the same name is found
for req in self.cwltool.requirements:
for env_def in cast(Dict, req.get("envDef", {})):
env_name = env_def.get("envName", "")
if env_name in required_env_vars:
env_def["envValue"] = required_env_vars[env_name]
return required_env_vars
def run(self, file_store: AbstractFileStore) -> Any:
"""Execute the CWL document."""
# Adjust cwltool's logging to conform to Toil's settings.
# We need to make sure this happens in every worker process before we
# do CWL things.
cwllogger.removeHandler(defaultStreamHandler)
cwllogger.setLevel(logger.getEffectiveLevel())
cwljob = resolve_dict_w_promises(self.cwljob, file_store)
if self.conditional.is_false(cwljob):
return self.conditional.skipped_outputs()
fill_in_defaults(
self.step_inputs, cwljob, self.runtime_context.make_fs_access("")
)
required_env_vars = self.populate_env_vars(cwljob)
immobile_cwljob_dict = copy.deepcopy(cwljob)
for inp_id in immobile_cwljob_dict.keys():
found = False
for field in cast(
List[Dict[str, str]], self.cwltool.inputs_record_schema["fields"]
):
if field["name"] == inp_id:
found = True
if not found:
cwljob.pop(inp_id)
adjustDirObjs(
cwljob,
functools.partial(remove_empty_listings),
)
# Exports temporary directory for batch systems that reset TMPDIR
os.environ["TMPDIR"] = os.path.realpath(file_store.getLocalTempDir())
outdir = os.path.join(file_store.getLocalTempDir(), "out")
os.mkdir(outdir)
# Just keep the temporary output prefix under the job's local temp dir,
# next to the outdir.
#
# If we maintain our own system of nested temp directories, we won't
# know when all the jobs using a higher-level directory are ready for
# it to be deleted. The local temp dir, under Toil's workDir, will be
# cleaned up by Toil.
tmp_outdir_prefix = os.path.join(file_store.getLocalTempDir(), "tmp-out")
index = {} # type: ignore
existing = {} # type: ignore
# Prepare the run instructions for cwltool
runtime_context = self.runtime_context.copy()
runtime_context.basedir = os.getcwd()
runtime_context.outdir = outdir
runtime_context.tmp_outdir_prefix = tmp_outdir_prefix
runtime_context.tmpdir_prefix = file_store.getLocalTempDir()
runtime_context.make_fs_access = functools.partial(
ToilFsAccess, file_store=file_store
)
runtime_context.preserve_environment = required_env_vars
runtime_context.toil_get_file = functools.partial( # type: ignore
toil_get_file, file_store, index, existing
)
# TODO: Pass in a real builder here so that cwltool's builder is built with Toil's fs_access?
# see: https://github.com/common-workflow-language/cwltool/blob/78fe9d41ee5a44f8725dfbd7028e4a5ee42949cf/cwltool/builder.py#L474
# self.builder.outdir = outdir
# runtime_context.builder = self.builder
process_uuid = uuid.uuid4() # noqa F841
started_at = datetime.datetime.now() # noqa F841
logger.debug("Running CWL job: %s", cwljob)
output, status = cwltool.executors.SingleJobExecutor().execute(
process=self.cwltool,
job_order_object=cwljob,
runtime_context=runtime_context,
logger=cwllogger,
)
ended_at = datetime.datetime.now() # noqa F841
if status != "success":
raise cwltool.errors.WorkflowException(status)
adjustDirObjs(
output,
functools.partial(
get_listing, cwltool.stdfsaccess.StdFsAccess(outdir), recursive=True
),
)
adjustDirObjs(output, prepareDirectoryForUpload)
# write the outputs into the jobstore
adjustFileObjs(
output,
functools.partial(
uploadFile,
functools.partial(writeGlobalFileWrapper, file_store),
index,
existing,
),
)
# metadata[process_uuid] = {
# 'started_at': started_at,
# 'ended_at': ended_at,
# 'job_order': cwljob,
# 'outputs': output,
# 'internal_name': self.jobName
# }
return output
def makeJob(
tool: Process,
jobobj: dict,
runtime_context: cwltool.context.RuntimeContext,
conditional: Union[Conditional, None],
) -> tuple:
"""
Create the correct Toil Job object for the CWL tool.
Types: workflow, job, or job wrapper for dynamic resource requirements.
:return: "wfjob, followOn" if the input tool is a workflow, and "job, job" otherwise
"""
if tool.tool["class"] == "Workflow":
wfjob = CWLWorkflow(
cast(cwltool.workflow.Workflow, tool),
jobobj,
runtime_context,
conditional=conditional,
)
followOn = ResolveIndirect(wfjob.rv())
wfjob.addFollowOn(followOn)
return wfjob, followOn
else:
resourceReq, _ = tool.get_requirement("ResourceRequirement")
if resourceReq:
for req in (
"coresMin",
"coresMax",
"ramMin",
"ramMax",
"tmpdirMin",
"tmpdirMax",
"outdirMin",
"outdirMax",
):
r = resourceReq.get(req)
if isinstance(r, str) and ("$(" in r or "${" in r):
# Found a dynamic resource requirement so use a job wrapper
job = CWLJobWrapper(
cast(ToilCommandLineTool, tool),
jobobj,
runtime_context,
conditional=conditional,
)
return job, job
job = CWLJob(tool, jobobj, runtime_context, conditional=conditional) # type: ignore
return job, job
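# Editor's illustrative note: a ResourceRequirement such as
#   coresMin: $(inputs.threads)
# still contains "$(", so makeJob() returns a CWLJobWrapper that defers resource
# evaluation until the promised inputs resolve; purely static requirements become
# a CWLJob directly, and Workflow processes become a CWLWorkflow plus a
# ResolveIndirect follow-on.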
class CWLScatter(Job):
"""
Implement workflow scatter step.
When run, this creates a child job for each parameterization of the scatter.
"""
def __init__(
self,
step: cwltool.workflow.WorkflowStep,
cwljob: dict,
runtime_context: cwltool.context.RuntimeContext,
conditional: Union[Conditional, None],
):
"""Store our context for later execution."""
        super(CWLScatter, self).__init__(cores=1, memory=100 * 1024**2, disk=0)
self.step = step
self.cwljob = cwljob
self.runtime_context = runtime_context
self.conditional = conditional
def flat_crossproduct_scatter(
self, joborder: dict, scatter_keys: list, outputs: list, postScatterEval: Any
) -> None:
"""Cartesian product of the inputs, then flattened."""
scatter_key = shortname(scatter_keys[0])
for n in range(0, len(joborder[scatter_key])):
updated_joborder = copy.copy(joborder)
updated_joborder[scatter_key] = joborder[scatter_key][n]
if len(scatter_keys) == 1:
updated_joborder = postScatterEval(updated_joborder)
subjob, followOn = makeJob(
tool=self.step.embedded_tool,
jobobj=updated_joborder,
runtime_context=self.runtime_context,
conditional=self.conditional,
)
self.addChild(subjob)
outputs.append(followOn.rv())
else:
self.flat_crossproduct_scatter(
updated_joborder, scatter_keys[1:], outputs, postScatterEval
)
def nested_crossproduct_scatter(
self, joborder: dict, scatter_keys: list, postScatterEval: Any
) -> list:
"""Cartesian product of the inputs."""
scatter_key = shortname(scatter_keys[0])
outputs = []
for n in range(0, len(joborder[scatter_key])):
updated_joborder = copy.copy(joborder)
updated_joborder[scatter_key] = joborder[scatter_key][n]
if len(scatter_keys) == 1:
updated_joborder = postScatterEval(updated_joborder)
subjob, followOn = makeJob(
tool=self.step.embedded_tool,
jobobj=updated_joborder,
runtime_context=self.runtime_context,
conditional=self.conditional,
)
self.addChild(subjob)
outputs.append(followOn.rv())
else:
outputs.append(
self.nested_crossproduct_scatter(
updated_joborder, scatter_keys[1:], postScatterEval
)
)
return outputs
def run(self, file_store: AbstractFileStore) -> list:
"""Generate the follow on scatter jobs."""
cwljob = resolve_dict_w_promises(self.cwljob, file_store)
if isinstance(self.step.tool["scatter"], str):
scatter = [self.step.tool["scatter"]]
else:
scatter = self.step.tool["scatter"]
scatterMethod = self.step.tool.get("scatterMethod", None)
if len(scatter) == 1:
scatterMethod = "dotproduct"
outputs = []
valueFrom = {
shortname(i["id"]): i["valueFrom"]
for i in self.step.tool["inputs"]
if "valueFrom" in i
}
def postScatterEval(job_dict: dict) -> Any:
shortio = {shortname(k): v for k, v in job_dict.items()}
for k in valueFrom:
job_dict.setdefault(k, None)
def valueFromFunc(k: str, v: Any) -> Any:
if k in valueFrom:
return cwltool.expression.do_eval(
valueFrom[k],
shortio,
self.step.requirements,
None,
None,
{},
context=v,
)
else:
return v
return {k: valueFromFunc(k, v) for k, v in list(job_dict.items())}
if scatterMethod == "dotproduct":
for i in range(0, len(cwljob[shortname(scatter[0])])):
copyjob = copy.copy(cwljob)
for sc in [shortname(x) for x in scatter]:
copyjob[sc] = cwljob[sc][i]
copyjob = postScatterEval(copyjob)
subjob, follow_on = makeJob(
tool=self.step.embedded_tool,
jobobj=copyjob,
runtime_context=self.runtime_context,
conditional=self.conditional,
)
self.addChild(subjob)
outputs.append(follow_on.rv())
elif scatterMethod == "nested_crossproduct":
outputs = self.nested_crossproduct_scatter(cwljob, scatter, postScatterEval)
elif scatterMethod == "flat_crossproduct":
self.flat_crossproduct_scatter(cwljob, scatter, outputs, postScatterEval)
else:
if scatterMethod:
raise validate.ValidationException(
"Unsupported complex scatter type '%s'" % scatterMethod
)
else:
raise validate.ValidationException(
"Must provide scatterMethod to scatter over multiple" " inputs."
)
return outputs
class CWLGather(Job):
"""
Follows on to a scatter Job.
This gathers the outputs of each job in the scatter into an array for each
output parameter.
"""
def __init__(
self,
step: cwltool.workflow.WorkflowStep,
outputs: Union[Mapping, MutableSequence],
):
"""Collect our context for later gathering."""
        super(CWLGather, self).__init__(cores=1, memory=10 * 1024**2, disk=0)
self.step = step
self.outputs = outputs
@staticmethod
def extract(obj: Union[Mapping, MutableSequence], k: str) -> list:
"""
Extract the given key from the obj.
If the object is a list, extract it from all members of the list.
"""
if isinstance(obj, Mapping):
return obj.get(k)
elif isinstance(obj, MutableSequence):
cp = []
for item in obj:
cp.append(CWLGather.extract(item, k))
return cp
else:
return []
def run(self, file_store: AbstractFileStore) -> Dict[str, Any]:
"""Gather all the outputs of the scatter."""
outobj = {}
def sn(n):
if isinstance(n, Mapping):
return shortname(n["id"])
if isinstance(n, str):
return shortname(n)
for k in [sn(i) for i in self.step.tool["out"]]:
outobj[k] = self.extract(self.outputs, k)
return outobj
class SelfJob(Job):
"""Fake job object to facilitate implementation of CWLWorkflow.run()."""
def __init__(self, j: "CWLWorkflow", v: dict):
"""Record the workflow and dictionary."""
        super(SelfJob, self).__init__(cores=1, memory=1024**2, disk=0)
self.j = j
self.v = v
def rv(self, *path) -> Any:
"""Return our properties dictionary."""
return self.v
def addChild(self, c: str) -> Any:
"""Add a child to our workflow."""
return self.j.addChild(c)
def hasChild(self, c: str) -> Any:
"""Check if the given child is in our workflow."""
return self.j.hasChild(c)
ProcessType = TypeVar(
"ProcessType",
ToilCommandLineTool,
cwltool.workflow.WorkflowStep,
cwltool.workflow.Workflow,
cwltool.command_line_tool.CommandLineTool,
cwltool.command_line_tool.ExpressionTool,
Process,
)
def remove_pickle_problems(obj: ProcessType) -> ProcessType:
"""Doc_loader does not pickle correctly, causing Toil errors, remove from objects."""
if hasattr(obj, "doc_loader"):
obj.doc_loader = None
if isinstance(obj, cwltool.workflow.WorkflowStep):
obj.embedded_tool = remove_pickle_problems(obj.embedded_tool)
elif isinstance(obj, cwltool.workflow.Workflow):
obj.steps = [remove_pickle_problems(s) for s in obj.steps]
return obj
class CWLWorkflow(Job):
"""
Toil Job to convert a CWL workflow graph into a Toil job graph.
The Toil job graph will include the appropriate dependencies.
"""
def __init__(
self,
cwlwf: cwltool.workflow.Workflow,
cwljob: dict,
runtime_context: cwltool.context.RuntimeContext,
conditional: Union[Conditional, None] = None,
):
"""Gather our context for later execution."""
        super(CWLWorkflow, self).__init__(cores=1, memory=100 * 1024**2, disk=0)
self.cwlwf = cwlwf
self.cwljob = cwljob
self.runtime_context = runtime_context
self.cwlwf = remove_pickle_problems(self.cwlwf)
self.conditional = conditional or Conditional()
def run(self, file_store: AbstractFileStore):
"""Convert a CWL Workflow graph into a Toil job graph."""
cwljob = resolve_dict_w_promises(self.cwljob, file_store)
if self.conditional.is_false(cwljob):
return self.conditional.skipped_outputs()
# `promises` dict
# from: each parameter (workflow input or step output)
# that may be used as a "source" for a step input workflow output
# parameter
# to: the job that will produce that value.
promises: Dict[str, Job] = {}
# `jobs` dict from step id to job that implements that step.
jobs = {}
for inp in self.cwlwf.tool["inputs"]:
promises[inp["id"]] = SelfJob(self, cwljob)
all_outputs_fulfilled = False
while not all_outputs_fulfilled:
# Iteratively go over the workflow steps, scheduling jobs as their
# dependencies can be fulfilled by upstream workflow inputs or
# step outputs. Loop exits when the workflow outputs
# are satisfied.
all_outputs_fulfilled = True
for step in self.cwlwf.steps:
if step.tool["id"] not in jobs:
stepinputs_fufilled = True
for inp in step.tool["inputs"]:
for s in aslist(inp.get("source", [])):
if s not in promises:
stepinputs_fufilled = False
if stepinputs_fufilled:
jobobj = {}
for inp in step.tool["inputs"]:
key = shortname(inp["id"])
if "source" in inp:
jobobj[key] = ResolveSource(
name=f'{step.tool["id"]}/{key}',
input=inp,
source_key="source",
promises=promises,
)
if "default" in inp:
jobobj[key] = DefaultWithSource( # type: ignore
copy.copy(inp["default"]), jobobj.get(key)
)
if "valueFrom" in inp and "scatter" not in step.tool:
jobobj[key] = StepValueFrom( # type: ignore
inp["valueFrom"],
jobobj.get(key, JustAValue(None)),
self.cwlwf.requirements,
)
conditional = Conditional(
expression=step.tool.get("when"),
outputs=step.tool["out"],
requirements=self.cwlwf.requirements,
)
if "scatter" in step.tool:
wfjob = CWLScatter(
step,
UnresolvedDict(jobobj),
self.runtime_context,
conditional=conditional,
)
followOn = CWLGather(step, wfjob.rv())
wfjob.addFollowOn(followOn)
else:
wfjob, followOn = makeJob(
tool=step.embedded_tool,
jobobj=UnresolvedDict(jobobj),
runtime_context=self.runtime_context,
conditional=conditional,
)
jobs[step.tool["id"]] = followOn
connected = False
for inp in step.tool["inputs"]:
for s in aslist(inp.get("source", [])):
if (
isinstance(promises[s], (CWLJobWrapper, CWLGather))
and not promises[s].hasFollowOn(wfjob)
# promises[s] job has already added wfjob as a followOn prior
and not wfjob.hasPredecessor(promises[s])
):
promises[s].addFollowOn(wfjob)
connected = True
if not isinstance(
promises[s], (CWLJobWrapper, CWLGather)
) and not promises[s].hasChild(wfjob):
promises[s].addChild(wfjob)
connected = True
if not connected:
# Workflow step is default inputs only & isn't connected
# to other jobs, so add it as child of this workflow.
self.addChild(wfjob)
for out in step.tool["outputs"]:
promises[out["id"]] = followOn
for inp in step.tool["inputs"]:
for source in aslist(inp.get("source", [])):
if source not in promises:
all_outputs_fulfilled = False
# may need a test
for out in self.cwlwf.tool["outputs"]:
if "source" in out:
if out["source"] not in promises:
all_outputs_fulfilled = False
outobj = {}
for out in self.cwlwf.tool["outputs"]:
key = shortname(out["id"])
outobj[key] = ResolveSource(
name="Workflow output '%s'" % key,
input=out,
source_key="outputSource",
promises=promises,
)
return UnresolvedDict(outobj)
def visitSteps(
cmdline_tool: Process,
op: Any,
) -> None:
"""Iterate over a CWL Process object, running the op on each WorkflowStep."""
if isinstance(cmdline_tool, cwltool.workflow.Workflow):
for step in cmdline_tool.steps:
op(step.tool)
visitSteps(step.embedded_tool, op)
def rm_unprocessed_secondary_files(job_params: Any) -> None:
if isinstance(job_params, list):
for j in job_params:
rm_unprocessed_secondary_files(j)
if isinstance(job_params, dict) and "secondaryFiles" in job_params:
job_params["secondaryFiles"] = filtered_secondary_files(job_params)
def filtered_secondary_files(unfiltered_secondary_files: dict) -> list:
"""
Remove unprocessed secondary files.
Interpolated strings and optional inputs in secondary files were added to
CWL in version 1.1.
The CWL libraries we call do successfully resolve the interpolated strings,
but add the resolved fields to the list of unresolved fields so we remove
them here after the fact.
    We also remove any secondary files here that do not contain 'toilfs:', which
    means they were not successfully imported into the Toil jobstore. The
'required' logic seems to be handled deeper in cwltool.builder.Builder(),
and correctly determines which files should be imported. Therefore we
remove the files here and if this file is SUPPOSED to exist, it will still
give the appropriate file does not exist error, but just a bit further down
the track.
"""
intermediate_secondary_files = []
final_secondary_files = []
# remove secondary files still containing interpolated strings
for sf in unfiltered_secondary_files["secondaryFiles"]:
sf_bn = sf.get("basename", "")
sf_loc = sf.get("location", "")
if ("$(" not in sf_bn) and ("${" not in sf_bn):
if ("$(" not in sf_loc) and ("${" not in sf_loc):
intermediate_secondary_files.append(sf)
# remove secondary files that are not present in the filestore
# i.e. 'file://' only gets converted to 'toilfs:' upon a successful import
for sf in intermediate_secondary_files:
sf_loc = sf.get("location", "")
# directories aren't imported, so don't worry about them
if sf_loc.startswith("toilfs:") or sf.get("class", "") == "Directory":
final_secondary_files.append(sf)
return final_secondary_files
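# Editor's illustrative note: an entry such as
#   {"basename": "$(inputs.bam.basename).bai", "location": "file:///tmp/x.bai"}
# still carries an uninterpolated pattern and is dropped, while entries whose
# location starts with "toilfs:" (or Directory entries) survive the filter.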
def determine_load_listing(tool: ToilCommandLineTool):
"""
Determine the directory.listing feature in CWL.
In CWL, any input directory can have a DIRECTORY_NAME.listing (where
DIRECTORY_NAME is any variable name) set to one of the following three
options:
no_listing: DIRECTORY_NAME.listing will be undefined.
e.g. inputs.DIRECTORY_NAME.listing == unspecified
shallow_listing: DIRECTORY_NAME.listing will return a list one level
deep of DIRECTORY_NAME's contents.
e.g. inputs.DIRECTORY_NAME.listing == [items in directory]
inputs.DIRECTORY_NAME.listing[0].listing == undefined
inputs.DIRECTORY_NAME.listing.length == # of items in directory
deep_listing: DIRECTORY_NAME.listing will return a list of the entire
contents of DIRECTORY_NAME.
e.g. inputs.DIRECTORY_NAME.listing == [items in directory]
inputs.DIRECTORY_NAME.listing[0].listing == [items
in subdirectory if it exists and is the first item listed]
inputs.DIRECTORY_NAME.listing.length == # of items in directory
See: https://www.commonwl.org/v1.1/CommandLineTool.html#LoadListingRequirement
https://www.commonwl.org/v1.1/CommandLineTool.html#LoadListingEnum
DIRECTORY_NAME.listing should be determined first from loadListing.
If that's not specified, from LoadListingRequirement.
Else, default to "no_listing" if unspecified.
:param tool: ToilCommandLineTool
:return str: One of 'no_listing', 'shallow_listing', or 'deep_listing'.
"""
load_listing_req, _ = tool.get_requirement("LoadListingRequirement")
load_listing_tool_req = (
load_listing_req.get("loadListing", "no_listing")
if load_listing_req
else "no_listing"
)
load_listing = tool.tool.get("loadListing", None) or load_listing_tool_req
listing_choices = ("no_listing", "shallow_listing", "deep_listing")
if load_listing not in listing_choices:
raise ValueError(
f'Unknown loadListing specified: "{load_listing}". Valid choices: {listing_choices}'
)
return load_listing
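# Editor's illustrative note: a tool carrying
#   requirements:
#     LoadListingRequirement:
#       loadListing: shallow_listing
# resolves to "shallow_listing" here; with neither the requirement nor a
# per-tool loadListing field, the default is "no_listing".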
usage_message = "\n\n" + textwrap.dedent(
f"""
* All positional arguments [cwl, yml_or_json] must always be specified last for toil-cwl-runner.
Note: If you're trying to specify a jobstore, please use --jobStore.
Usage: toil-cwl-runner [options] example.cwl example-job.yaml
Example: toil-cwl-runner \\
--jobStore aws:us-west-2:jobstore \\
--realTimeLogging \\
--logInfo \\
example.cwl \\
example-job.yaml
"""[
1:
]
)
def main(args: Optional[List[str]] = None, stdout: TextIO = sys.stdout) -> int:
"""Run the main loop for toil-cwl-runner."""
# Remove cwltool logger's stream handler so it uses Toil's
cwllogger.removeHandler(defaultStreamHandler)
if args is None:
args = sys.argv[1:]
config = Config()
config.disableChaining = True
config.cwl = True
parser = argparse.ArgumentParser()
addOptions(parser, config)
parser.add_argument("cwltool", type=str)
parser.add_argument("cwljob", nargs=argparse.REMAINDER)
# Will override the "jobStore" positional argument, enables
# user to select jobStore or get a default from logic one below.
parser.add_argument("--jobStore", "--jobstore", dest="jobStore", type=str)
parser.add_argument("--not-strict", action="store_true")
parser.add_argument(
"--enable-dev",
action="store_true",
help="Enable loading and running development versions of CWL",
)
parser.add_argument("--quiet", dest="logLevel", action="store_const", const="ERROR")
parser.add_argument("--basedir", type=str) # TODO: Might be hard-coded?
parser.add_argument("--outdir", type=str, default=os.getcwd())
parser.add_argument("--version", action="version", version=baseVersion)
dockergroup = parser.add_mutually_exclusive_group()
dockergroup.add_argument(
"--user-space-docker-cmd",
help="(Linux/OS X only) Specify a user space docker command (like "
"udocker or dx-docker) that will be used to call 'pull' and 'run'",
)
dockergroup.add_argument(
"--singularity",
action="store_true",
default=False,
help="[experimental] Use Singularity runtime for running containers. "
"Requires Singularity v2.6.1+ and Linux with kernel version v3.18+ or "
"with overlayfs support backported.",
)
dockergroup.add_argument(
"--no-container",
action="store_true",
help="Do not execute jobs in a "
"Docker container, even when `DockerRequirement` "
"is specified under `hints`.",
)
dockergroup.add_argument(
"--leave-container",
action="store_false",
default=True,
help="Do not delete Docker container used by jobs after they exit",
dest="rm_container",
)
parser.add_argument(
"--preserve-environment",
type=str,
nargs="+",
help="Preserve specified environment variables when running"
" CommandLineTools",
metavar=("VAR1 VAR2"),
default=("PATH",),
dest="preserve_environment",
)
parser.add_argument(
"--preserve-entire-environment",
action="store_true",
help="Preserve all environment variable when running " "CommandLineTools.",
default=False,
dest="preserve_entire_environment",
)
parser.add_argument(
"--destBucket",
type=str,
help="Specify a cloud bucket endpoint for output files.",
)
parser.add_argument("--beta-dependency-resolvers-configuration", default=None)
parser.add_argument("--beta-dependencies-directory", default=None)
parser.add_argument("--beta-use-biocontainers", default=None, action="store_true")
parser.add_argument("--beta-conda-dependencies", default=None, action="store_true")
parser.add_argument(
"--tmpdir-prefix",
type=Text,
help="Path prefix for temporary directories",
default="tmp",
)
parser.add_argument(
"--tmp-outdir-prefix",
type=Text,
help="Path prefix for intermediate output directories",
default="tmp",
)
parser.add_argument(
"--force-docker-pull",
action="store_true",
default=False,
dest="force_docker_pull",
help="Pull latest docker image even if it is locally present",
)
parser.add_argument(
"--no-match-user",
action="store_true",
default=False,
help="Disable passing the current uid to `docker run --user`",
)
parser.add_argument(
"--no-read-only",
action="store_true",
default=False,
help="Do not set root directory in the container as read-only",
)
parser.add_argument(
"--strict-memory-limit",
action="store_true",
help="When running with "
"software containers and the Docker engine, pass either the "
"calculated memory allocation from ResourceRequirements or the "
"default of 1 gigabyte to Docker's --memory option.",
)
parser.add_argument(
"--relax-path-checks",
action="store_true",
default=False,
help="Relax requirements on path names to permit "
"spaces and hash characters.",
dest="relax_path_checks",
)
parser.add_argument(
"--default-container",
help="Specify a default docker container that will be "
"used if the workflow fails to specify one.",
)
provgroup = parser.add_argument_group(
"Options for recording provenance " "information of the execution"
)
provgroup.add_argument(
"--provenance",
help="Save provenance to specified folder as a "
"Research Object that captures and aggregates "
"workflow execution and data products.",
type=Text,
)
provgroup.add_argument(
"--enable-user-provenance",
default=False,
action="store_true",
help="Record user account info as part of provenance.",
dest="user_provenance",
)
provgroup.add_argument(
"--disable-user-provenance",
default=False,
action="store_false",
help="Do not record user account info in provenance.",
dest="user_provenance",
)
provgroup.add_argument(
"--enable-host-provenance",
default=False,
action="store_true",
help="Record host info as part of provenance.",
dest="host_provenance",
)
provgroup.add_argument(
"--disable-host-provenance",
default=False,
action="store_false",
help="Do not record host info in provenance.",
dest="host_provenance",
)
provgroup.add_argument(
"--orcid",
help="Record user ORCID identifier as part of "
"provenance, e.g. https://orcid.org/0000-0002-1825-0097 "
"or 0000-0002-1825-0097. Alternatively the environment variable "
"ORCID may be set.",
dest="orcid",
default=os.environ.get("ORCID", ""),
type=Text,
)
provgroup.add_argument(
"--full-name",
help="Record full name of user as part of provenance, "
"e.g. Josiah Carberry. You may need to use shell quotes to preserve "
"spaces. Alternatively the environment variable CWL_FULL_NAME may "
"be set.",
dest="cwl_full_name",
default=os.environ.get("CWL_FULL_NAME", ""),
type=Text,
)
# Problem: we want to keep our job store somewhere auto-generated based on
# our options, unless overridden by... an option. So we will need to parse
# options twice, because we need to feed the parser the job store.
# Propose a local workdir, probably under /tmp.
# mkdtemp actually creates the directory, but
# toil requires that the directory not exist,
# since it is going to be our jobstore,
# so make it and delete it and allow
# toil to create it again (!)
workdir = tempfile.mkdtemp()
os.rmdir(workdir)
# we use the workdir as the default jobStore:
options = parser.parse_args([workdir] + args)
# if tmpdir_prefix is not the default value, set workDir if unset, and move
# workdir and the job store under it
if options.tmpdir_prefix != "tmp":
workdir = cwltool.utils.create_tmp_dir(options.tmpdir_prefix)
os.rmdir(workdir)
# Re-parse arguments with the new default jobstore under the temp dir.
# It still might be overridden by a --jobStore option
options = parser.parse_args([workdir] + args)
if options.workDir is None:
# We need to override workDir because by default Toil will pick
# somewhere under the system temp directory if unset, ignoring
# --tmpdir-prefix.
#
# If set, workDir needs to exist, so we directly use the prefix
options.workDir = cwltool.utils.create_tmp_dir(options.tmpdir_prefix)
if options.provisioner and not options.jobStore:
raise NoSuchJobStoreException(
"Please specify a jobstore with the --jobStore option when "
"specifying a provisioner."
)
if options.batchSystem == "kubernetes":
options.singularity = True
use_container = not options.no_container
if options.logLevel:
# Make sure cwltool uses Toil's log level.
# Applies only on the leader.
cwllogger.setLevel(options.logLevel.upper())
outdir = os.path.abspath(options.outdir)
tmp_outdir_prefix = os.path.abspath(options.tmp_outdir_prefix)
fileindex = dict() # type: ignore
existing = dict() # type: ignore
conf_file = getattr(options, "beta_dependency_resolvers_configuration", None)
use_conda_dependencies = getattr(options, "beta_conda_dependencies", None)
job_script_provider = None
if conf_file or use_conda_dependencies:
dependencies_configuration = DependenciesConfiguration(options)
job_script_provider = dependencies_configuration
options.default_container = None
runtime_context = cwltool.context.RuntimeContext(vars(options))
runtime_context.find_default_container = functools.partial(
find_default_container, options
)
runtime_context.workdir = workdir # type: ignore
runtime_context.move_outputs = "leave"
runtime_context.rm_tmpdir = False
loading_context = cwltool.context.LoadingContext(vars(options))
if options.provenance:
research_obj = cwltool.provenance.ResearchObject(
temp_prefix_ro=options.tmp_outdir_prefix,
orcid=options.orcid,
full_name=options.cwl_full_name,
fsaccess=runtime_context.make_fs_access(""),
)
runtime_context.research_obj = research_obj
with Toil(options) as toil:
if options.restart:
outobj = toil.restart()
else:
loading_context.hints = [
{
"class": "ResourceRequirement",
"coresMin": toil.config.defaultCores,
"ramMin": toil.config.defaultMemory / (2 ** 20),
"outdirMin": toil.config.defaultDisk / (2 ** 20),
"tmpdirMin": 0,
}
]
loading_context.construct_tool_object = toil_make_tool
loading_context.resolver = cwltool.resolver.tool_resolver
loading_context.strict = not options.not_strict
options.workflow = options.cwltool
options.job_order = options.cwljob
try:
uri, tool_file_uri = cwltool.load_tool.resolve_tool_uri(
options.cwltool,
loading_context.resolver,
loading_context.fetcher_constructor,
)
except schema_salad.exceptions.ValidationException:
print(
"\nYou may be getting this error because your arguments are incorrect or out of order."
+ usage_message,
file=sys.stderr,
)
raise
options.tool_help = None
options.debug = options.logLevel == "DEBUG"
job_order_object, options.basedir, jobloader = cwltool.main.load_job_order(
options,
sys.stdin,
loading_context.fetcher_constructor,
loading_context.overrides_list,
tool_file_uri,
)
loading_context, workflowobj, uri = cwltool.load_tool.fetch_document(
uri, loading_context
)
loading_context, uri = cwltool.load_tool.resolve_and_validate_document(
loading_context, workflowobj, uri
)
loading_context.overrides_list.extend(
cast(
List[CWLObjectType],
loading_context.metadata.get("cwltool:overrides", []),
)
)
document_loader = loading_context.loader
metadata = loading_context.metadata
processobj = document_loader.idx
if options.provenance and runtime_context.research_obj:
runtime_context.research_obj.packed_workflow(
cwltool.main.print_pack(loading_context, uri)
)
try:
tool = cwltool.load_tool.make_tool(uri, loading_context)
except cwltool.process.UnsupportedRequirement as err:
logging.error(err)
return 33
runtime_context.secret_store = SecretStore()
try:
initialized_job_order = cwltool.main.init_job_order(
job_order_object,
options,
tool,
jobloader,
sys.stdout,
secret_store=runtime_context.secret_store,
)
except SystemExit as e:
if e.code == 2: # raised by argparse's parse_args() function
print(
"\nIf both a CWL file and an input object (YAML/JSON) file were "
"provided, this may be the argument order." + usage_message,
file=sys.stderr,
)
raise
fs_access = cwltool.stdfsaccess.StdFsAccess(options.basedir)
fill_in_defaults(tool.tool["inputs"], initialized_job_order, fs_access)
for inp in tool.tool["inputs"]:
def set_secondary(fileobj):
if isinstance(fileobj, Mapping) and fileobj.get("class") == "File":
if "secondaryFiles" not in fileobj:
# inits all secondary files with 'file://' schema
# later changed to 'toilfs:' when imported into the jobstore
fileobj["secondaryFiles"] = [
{
"location": cwltool.builder.substitute(
fileobj["location"], sf["pattern"]
),
"class": "File",
}
for sf in inp["secondaryFiles"]
]
if isinstance(fileobj, MutableSequence):
for entry in fileobj:
set_secondary(entry)
if shortname(inp["id"]) in initialized_job_order and inp.get(
"secondaryFiles"
):
set_secondary(initialized_job_order[shortname(inp["id"])])
runtime_context.use_container = use_container
runtime_context.tmp_outdir_prefix = os.path.realpath(tmp_outdir_prefix)
runtime_context.job_script_provider = job_script_provider
runtime_context.force_docker_pull = options.force_docker_pull
runtime_context.no_match_user = options.no_match_user
runtime_context.no_read_only = options.no_read_only
runtime_context.basedir = options.basedir
runtime_context.move_outputs = "move"
# We instantiate an early builder object here to populate indirect
# secondaryFile references using cwltool's library because we need
# to resolve them before toil imports them into the filestore.
# A second builder will be built in the job's run method when toil
# actually starts the cwl job.
builder = tool._init_job(initialized_job_order, runtime_context)
# make sure this doesn't add listing items; if shallow_listing is
# selected, it will discover dirs one deep and then again later on
# (producing 2+ deep listings instead of only 1)
builder.loadListing = "no_listing"
builder.bind_input(
tool.inputs_record_schema,
initialized_job_order,
discover_secondaryFiles=True,
)
def path_to_loc(obj):
if "location" not in obj and "path" in obj:
obj["location"] = obj["path"]
del obj["path"]
def import_files(inner_tool):
visit_class(inner_tool, ("File", "Directory"), path_to_loc)
visit_class(
inner_tool, ("File",), functools.partial(add_sizes, fs_access)
)
normalizeFilesDirs(inner_tool)
adjustFileObjs(
inner_tool,
functools.partial(
uploadFile,
toil.importFile,
fileindex,
existing,
skip_broken=True,
),
)
# files with the 'file://' uri are imported into the jobstore and
# changed to 'toilfs:'
import_files(initialized_job_order)
visitSteps(tool, import_files)
for job_name, job_params in initialized_job_order.items():
rm_unprocessed_secondary_files(job_params)
try:
wf1, _ = makeJob(
tool=tool,
jobobj={},
runtime_context=runtime_context,
conditional=None,
)
except cwltool.process.UnsupportedRequirement as err:
logging.error(err)
return 33
wf1.cwljob = initialized_job_order
outobj = toil.start(wf1)
outobj = resolve_dict_w_promises(outobj)
# Stage files. Specify destination bucket if specified in CLI
# options. If destination bucket not passed in,
# options.destBucket's value will be None.
toilStageFiles(toil, outobj, outdir, destBucket=options.destBucket)
if runtime_context.research_obj is not None:
runtime_context.research_obj.create_job(outobj, True)
def remove_at_id(doc):
if isinstance(doc, MutableMapping):
for key in list(doc.keys()):
if key == "@id":
del doc[key]
else:
value = doc[key]
if isinstance(value, MutableMapping):
remove_at_id(value)
                            if isinstance(value, MutableSequence):
                                for entry in value:
                                    if isinstance(entry, MutableMapping):
                                        remove_at_id(entry)
remove_at_id(outobj)
visit_class(
outobj,
("File",),
functools.partial(add_sizes, runtime_context.make_fs_access("")),
)
prov_dependencies = cwltool.main.prov_deps(
workflowobj, document_loader, uri
)
runtime_context.research_obj.generate_snapshot(prov_dependencies)
runtime_context.research_obj.close(options.provenance)
if not options.destBucket:
visit_class(
outobj,
("File",),
functools.partial(
compute_checksums, cwltool.stdfsaccess.StdFsAccess("")
),
)
visit_class(outobj, ("File",), MutationManager().unset_generation)
stdout.write(json.dumps(outobj, indent=4))
return 0
def find_default_container(
args: argparse.Namespace, builder: cwltool.builder.Builder
) -> str:
"""Find the default constuctor by consulting a Toil.options object."""
if args.default_container:
return args.default_container
if args.beta_use_biocontainers:
return get_container_from_software_requirements(True, builder)
return None
|
py | 1a4e8b3f9e1ca5732f7adccfca1cfb066edd7cc4 | # coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import (
DUMMY_UNKWOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
require_scatter,
require_torch,
slow,
)
if is_torch_available():
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTableQuestionAnswering,
AutoModelForTokenClassification,
AutoModelWithLMHead,
BertConfig,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertModel,
FunnelBaseModel,
FunnelModel,
GPT2Config,
GPT2LMHeadModel,
RobertaForMaskedLM,
T5Config,
T5ForConditionalGeneration,
TapasConfig,
TapasForQuestionAnswering,
)
from transformers.models.auto.modeling_auto import (
MODEL_FOR_CAUSAL_LM_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
MODEL_FOR_PRETRAINING_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
MODEL_WITH_HEADS_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
)
from transformers.models.bert.modeling_bert import BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpt2.modeling_gpt2 import GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.t5.modeling_t5 import T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tapas import TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
@require_torch
class AutoModelTest(unittest.TestCase):
@slow
def test_model_from_pretrained(self):
for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = AutoModel.from_pretrained(model_name)
model, loading_info = AutoModel.from_pretrained(model_name, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, BertModel)
for value in loading_info.values():
self.assertEqual(len(value), 0)
@slow
def test_model_for_pretraining_from_pretrained(self):
for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = AutoModelForPreTraining.from_pretrained(model_name)
model, loading_info = AutoModelForPreTraining.from_pretrained(model_name, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, BertForPreTraining)
# Only one value should not be initialized and in the missing keys.
missing_keys = loading_info.pop("missing_keys")
self.assertListEqual(["cls.predictions.decoder.bias"], missing_keys)
for key, value in loading_info.items():
self.assertEqual(len(value), 0)
@slow
def test_lmhead_model_from_pretrained(self):
for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = AutoModelWithLMHead.from_pretrained(model_name)
model, loading_info = AutoModelWithLMHead.from_pretrained(model_name, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, BertForMaskedLM)
@slow
def test_model_for_causal_lm(self):
for model_name in GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, GPT2Config)
model = AutoModelForCausalLM.from_pretrained(model_name)
model, loading_info = AutoModelForCausalLM.from_pretrained(model_name, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, GPT2LMHeadModel)
@slow
def test_model_for_masked_lm(self):
for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = AutoModelForMaskedLM.from_pretrained(model_name)
model, loading_info = AutoModelForMaskedLM.from_pretrained(model_name, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, BertForMaskedLM)
@slow
def test_model_for_encoder_decoder_lm(self):
for model_name in T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, T5Config)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
model, loading_info = AutoModelForSeq2SeqLM.from_pretrained(model_name, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, T5ForConditionalGeneration)
@slow
def test_sequence_classification_model_from_pretrained(self):
for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
model, loading_info = AutoModelForSequenceClassification.from_pretrained(
model_name, output_loading_info=True
)
self.assertIsNotNone(model)
self.assertIsInstance(model, BertForSequenceClassification)
@slow
def test_question_answering_model_from_pretrained(self):
for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = AutoModelForQuestionAnswering.from_pretrained(model_name)
model, loading_info = AutoModelForQuestionAnswering.from_pretrained(model_name, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, BertForQuestionAnswering)
@slow
@require_scatter
def test_table_question_answering_model_from_pretrained(self):
for model_name in TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, TapasConfig)
model = AutoModelForTableQuestionAnswering.from_pretrained(model_name)
model, loading_info = AutoModelForTableQuestionAnswering.from_pretrained(
model_name, output_loading_info=True
)
self.assertIsNotNone(model)
self.assertIsInstance(model, TapasForQuestionAnswering)
@slow
def test_token_classification_model_from_pretrained(self):
for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = AutoModelForTokenClassification.from_pretrained(model_name)
model, loading_info = AutoModelForTokenClassification.from_pretrained(model_name, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, BertForTokenClassification)
def test_from_pretrained_identifier(self):
model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
self.assertIsInstance(model, BertForMaskedLM)
self.assertEqual(model.num_parameters(), 14410)
self.assertEqual(model.num_parameters(only_trainable=True), 14410)
def test_from_identifier_from_model_type(self):
model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKWOWN_IDENTIFIER)
self.assertIsInstance(model, RobertaForMaskedLM)
self.assertEqual(model.num_parameters(), 14410)
self.assertEqual(model.num_parameters(only_trainable=True), 14410)
def test_from_pretrained_with_tuple_values(self):
# For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
model = AutoModel.from_pretrained("sgugger/funnel-random-tiny")
self.assertIsInstance(model, FunnelModel)
config = copy.deepcopy(model.config)
config.architectures = ["FunnelBaseModel"]
model = AutoModel.from_config(config)
self.assertIsInstance(model, FunnelBaseModel)
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(tmp_dir)
model = AutoModel.from_pretrained(tmp_dir)
self.assertIsInstance(model, FunnelBaseModel)
def test_parents_and_children_in_mappings(self):
        # Test that the children are placed before the parents in the mappings, as the `isinstance` check will be
        # triggered by the parents and will return the wrong configuration type when using auto models
mappings = (
MODEL_MAPPING,
MODEL_WITH_HEADS_MAPPING,
MODEL_FOR_PRETRAINING_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
MODEL_FOR_CAUSAL_LM_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
)
for mapping in mappings:
mapping = tuple(mapping.items())
for index, (child_config, child_model) in enumerate(mapping[1:]):
for parent_config, parent_model in mapping[: index + 1]:
assert not issubclass(
child_config, parent_config
), f"{child_config.__name__} is child of {parent_config.__name__}"
# Tuplify child_model and parent_model since some of them could be tuples.
if not isinstance(child_model, (list, tuple)):
child_model = (child_model,)
if not isinstance(parent_model, (list, tuple)):
parent_model = (parent_model,)
for child, parent in [(a, b) for a in child_model for b in parent_model]:
assert not issubclass(child, parent), f"{child.__name__} is child of {parent.__name__}"
|
py | 1a4e8ddcf589b435e9a8fe1b9d1129591d36bdc9 | """
University of Minnesota
Aerospace Engineering and Mechanics - UAV Lab
Copyright 2019 Regents of the University of Minnesota
See: LICENSE.md for complete license details
Author: Chris Regan
Analysis for Thor RTSM
"""
#%%
# Import Libraries
import numpy as np
import matplotlib.pyplot as plt
# Hack to allow loading the Core package
if __name__ == "__main__" and __package__ is None:
from sys import path, argv
from os.path import dirname, abspath, join
path.insert(0, abspath(join(dirname(argv[0]), "..")))
path.insert(0, abspath(join(dirname(argv[0]), "..", 'Core')))
del path, argv, dirname, abspath, join
from Core import Loader
from Core import OpenData
# plt.rcParams.update({
# "text.usetex": True,
# "font.family": "serif",
# "font.serif": ["Palatino"],
# "font.size": 10
# })
# Constants
hz2rps = 2 * np.pi
rps2hz = 1 / hz2rps
#%% File Lists
import os.path as path
# pathBase = path.join('/home', 'rega0051', 'FlightArchive', 'Thor')
pathBase = path.join('G:', 'Shared drives', 'UAVLab', 'Flight Data', 'Thor')
fileList = {}
flt = 'FLT126'
fileList[flt] = {}
fileList[flt]['log'] = path.join(pathBase, 'Thor' + flt, 'Thor' + flt + '.h5')
fileList[flt]['config'] = path.join(pathBase, 'Thor' + flt, 'thor.json')
fileList[flt]['def'] = path.join(pathBase, 'Thor' + flt, 'thor_def.json')
flt = 'FLT127'
fileList[flt] = {}
fileList[flt]['log'] = path.join(pathBase, 'Thor' + flt, 'Thor' + flt + '.h5')
fileList[flt]['config'] = path.join(pathBase, 'Thor' + flt, 'thor.json')
fileList[flt]['def'] = path.join(pathBase, 'Thor' + flt, 'thor_def.json')
flt = 'FLT128'
fileList[flt] = {}
fileList[flt]['log'] = path.join(pathBase, 'Thor' + flt, 'Thor' + flt + '.h5')
fileList[flt]['config'] = path.join(pathBase, 'Thor' + flt, 'thor.json')
fileList[flt]['def'] = path.join(pathBase, 'Thor' + flt, 'thor_def.json')
#%%
from Core import FreqTrans
rtsmSegList = [
# {'flt': 'FLT126', 'seg': ('time_us', [875171956 , 887171956], 'FLT126 - RTSM - Nominal Gain, 4 deg amp'), 'color': 'k'},
# {'flt': 'FLT126', 'seg': ('time_us', [829130591 , 841130591], 'FLT126 - RTSM Route - Nominal Gain, 4 deg amp'), 'color': 'k'},
# {'flt': 'FLT127', 'seg': ('time_us', [641655909 , 653655909], 'FLT127 - RTSM Route - Nominal Gain, 4 deg amp'), 'color': 'k'}, # Yaw controller in-op??
# {'flt': 'FLT128', 'seg': ('time_us', [700263746 , 712263746 ], 'FLT128 - RTSM Route - Nominal Gain, 4 deg amp'), 'color': 'k'}, # Interesting Roll Margin vs. Uncertainty
# {'flt': 'FLT128', 'seg': ('time_us', [831753831 , 843753831 ], 'FLT128 - RTSM Route - Nominal Gain, 4 deg amp'), 'color': 'k'},
# {'flt': 'FLT128', 'seg': ('time_us', [ 959859721 , 971859721 ], 'FLT128 - RTSM Route - Nominal Gain, 4 deg amp'), 'color': 'k'}, # Not good
# {'flt': 'FLT126', 'seg': ('time_us', [928833763 , 940833763], 'FLT126 - RTSM Large - Nominal Gain, 8 deg amp'), 'color': 'r'},
# {'flt': 'FLT127', 'seg': ('time_us', [698755386 , 707255278], 'FLT127 - RTSM Large Route - Nominal Gain, 8 deg amp'), 'color': 'r'}, # Yaw controller in-op??
# {'flt': 'FLT128', 'seg': ('time_us', [779830919 , 791830919 ], 'FLT128 - RTSM Large Route - Nominal Gain, 8 deg amp'), 'color': 'r'},
# {'flt': 'FLT128', 'seg': ('time_us', [900237086 , 912237086 ], 'FLT128 - RTSM Large Route - Nominal Gain, 8 deg amp'), 'color': 'r'},
#
# {'flt': 'FLT126', 'seg': ('time_us', [902952886 , 924952886], 'FLT126 - RTSM Long - Nominal Gain, 4 deg amp'), 'color': 'b'},
# {'flt': 'FLT127', 'seg': ('time_us', [657015836 , 689015836], 'FLT127 - RTSM Long Route - Nominal Gain, 4 deg amp'), 'color': 'b'}, # Yaw controller in-op??
# {'flt': 'FLT128', 'seg': ('time_us', [714385469 , 746385469 ], 'FLT128 - RTSM Long Route - Nominal Gain, 4 deg amp'), 'color': 'b'},
{'flt': 'FLT128', 'seg': ('time_us', [847254621 , 879254621 ], 'FLT128 - RTSM Long Route - Nominal Gain, 4 deg amp'), 'color': 'b'}, # Best
# {'flt': 'FLT127', 'seg': ('time_us', [1209355236 , 1221535868], 'FLT127 - RTSM LongLarge Route - Nominal Gain, 8 deg amp'), 'color': 'm'}, # Yaw controller in-op??
# {'flt': 'FLT128', 'seg': ('time_us', [794251787 , 826251787 ], 'FLT128 - RTSM LongLarge Route - Nominal Gain, 8 deg amp'), 'color': 'm'},
# {'flt': 'FLT128', 'seg': ('time_us', [921438015 , 953438015 ], 'FLT128 - RTSM LongLarge Route - Nominal Gain, 8 deg amp'), 'color': 'm'},
# {'flt': 'FLT126', 'seg': ('time_us', [981115495 , 993115495], 'FLT126 - RTSM - High Gain, 4 deg amp')},
# {'flt': 'FLT126', 'seg': ('time_us', [689907125 , 711907125], 'FLT126 - RTSM Long - High Gain, 4 deg amp')},
# {'flt': 'FLT126', 'seg': ('time_us', [728048050 , 740048050], 'FLT126 - RTSM Large - High Gain, 8 deg amp')},
#
]
oDataSegs = []
for rtsmSeg in rtsmSegList:
fltNum = rtsmSeg['flt']
fileLog = fileList[fltNum]['log']
fileConfig = fileList[fltNum]['config']
# Load
h5Data = Loader.Load_h5(fileLog) # RAPTRS log data as hdf5
sysConfig = Loader.JsonRead(fileConfig)
oData = Loader.OpenData_RAPTRS(h5Data, sysConfig)
oData['cmdRoll_FF'] = h5Data['Control']['cmdRoll_pidFF']
oData['cmdRoll_FB'] = h5Data['Control']['cmdRoll_pidFB']
oData['cmdPitch_FF'] = h5Data['Control']['cmdPitch_pidFF']
oData['cmdPitch_FB'] = h5Data['Control']['cmdPitch_pidFB']
oData['cmdYaw_FF'] = h5Data['Control']['refPsi_rad']
oData['cmdYaw_FB'] = h5Data['Control']['cmdYaw_damp_rps']
# Segments
rtsmSeg['seg'][1][0] += 1e6
rtsmSeg['seg'][1][1] += -1e6 + 50e3
oDataSegs.append(OpenData.Segment(oData, rtsmSeg['seg']))
#%%
sigExcList = ['cmdRoll_rps', 'cmdPitch_rps', 'cmdYaw_rps']
sigFbList = ['cmdRoll_FB', 'cmdPitch_FB', 'cmdYaw_FB']
sigFfList = ['cmdRoll_FF', 'cmdPitch_FF', 'cmdYaw_FF']
#sigSensList = ['wB_I_rps', 'cmdPitch_FF', 'cmdYaw_FF']
freqExc_rps = []
freqExc_rps.append( np.array(sysConfig['Excitation']['OMS_RTSM_1']['Frequency']))
freqExc_rps.append( np.array(sysConfig['Excitation']['OMS_RTSM_2']['Frequency']))
freqExc_rps.append( np.array(sysConfig['Excitation']['OMS_RTSM_3']['Frequency']))
vCmdList = []
vExcList = []
vFbList = []
vFfList = []
ySensList = []
for iSeg, seg in enumerate(oDataSegs):
vCmd = np.zeros((len(sigExcList), len(seg['time_s'])))
vExc = np.zeros((len(sigExcList), len(seg['time_s'])))
vFb = np.zeros((len(sigExcList), len(seg['time_s'])))
vFf = np.zeros((len(sigExcList), len(seg['time_s'])))
ySens = np.zeros((len(sigExcList), len(seg['time_s'])))
for iSig, sigExc in enumerate(sigExcList):
sigFb = sigFbList[iSig]
sigFf = sigFfList[iSig]
vCmd[iSig] = seg['Control'][sigExc]
vExc[iSig] = seg['Excitation'][sigExc]
# vFb[iSig] = seg[sigFb]
vFb[iSig][1:-1] = seg[sigFb][0:-2] # Shift the time of the output into next frame
vFf[iSig] = seg[sigFf]
ySens[iSig] = seg['wB_I_rps'][iSig]
vCmdList.append(vCmd)
vExcList.append(vExc)
vFbList.append(vFb)
vFfList.append(vFf)
ySensList.append(ySens)
plt.plot(oDataSegs[iSeg]['time_s'], oDataSegs[iSeg]['vIas_mps'])
plt.plot(oDataSegs[iSeg]['time_s'], vExcList[iSeg][0])
plt.plot(oDataSegs[iSeg]['time_s'], vExcList[iSeg][1])
plt.plot(oDataSegs[iSeg]['time_s'], vExcList[iSeg][2])
plt.plot(oDataSegs[iSeg]['time_s'], vFbList[iSeg][0])
plt.plot(oDataSegs[iSeg]['time_s'], vFbList[iSeg][1])
plt.plot(oDataSegs[iSeg]['time_s'], vFbList[iSeg][2])
#%% Estimate the frequency response function
# Define the excitation frequencies
freqRate_hz = 50
freqRate_rps = freqRate_hz * hz2rps
optSpec = FreqTrans.OptSpect(dftType = 'czt', freqRate = freqRate_rps, smooth = ('box', 3), winType = ('tukey', 0.2), detrendType = 'Linear')
# Excited Frequencies per input channel
optSpec.freq = np.asarray(freqExc_rps)
# FRF Estimate
LiEstNomList = []
LiEstCohList = []
svLiEstNomList = []
for iSeg, seg in enumerate(oDataSegs):
freq_rps, Teb, Ceb, Pee, Pbb, Peb = FreqTrans.FreqRespFuncEst(vExcList[iSeg], vExcList[iSeg] + vFbList[iSeg], optSpec)
# _ , Tev, Cev, _ , Pvv, Pev = FreqTrans.FreqRespFuncEst(vExcList[iSeg], vCmdList[iSeg], optSpec)
freq_hz = freq_rps * rps2hz
I3 = np.repeat([np.eye(3)], Teb.shape[-1], axis=0).T
SaEstNom = Teb # Sa = I + Teb
SaEstCoh = Ceb # Cxy = np.abs(Sxy)**2 / (Sxx * Syy) = (np.abs(Sxy) / Sxx) * (np.abs(Sxy) / Syy)
# T = TNom = (uCtrl + uExc) / uExc - uNull / uExc
# Li = inv(TNom + TUnc) - I = LiEstNom + LiEstUnc
# LiEstNom = -I + TNom^-1
# LiEstUnc = -(I + TNom^-1 * TUnc)^-1 * TNom^-1 * TUnc * TNom^-1
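    # Scalar sanity check (illustrative, not taken from the flight data): if an
    # estimated element were SaEstNom = 0.5 at some frequency, the loop below
    # would give LiEstNom = -1 + 1/0.5 = 1.0.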
LiEstNom = np.zeros_like(SaEstNom, dtype = complex)
LiEstCoh = np.zeros_like(SaEstCoh)
inv = np.linalg.inv
for i in range(SaEstNom.shape[-1]):
SaEstNomElem = SaEstNom[...,i]
SaEstNomInvElem = inv(SaEstNomElem)
LiEstNom[...,i] = -np.eye(3) + SaEstNomInvElem
# LiEstCoh[...,i] = -np.eye(3) + inv(SaEstCoh[...,i])
LiEstCoh[...,i] = SaEstCoh[...,i]
LiEstNomList.append( LiEstNom )
LiEstCohList.append( LiEstCoh )
svLiEstNomList_seg = FreqTrans.Sigma( LiEstNom ) # Singular Value Decomp
svLiEstNomList.append(svLiEstNomList_seg)
T_InputNames = sigExcList
T_OutputNames = sigFbList
# Compute Gain, Phase, Crit Distance
gainLiEstNomList_mag = []
phaseLiEstNomList_deg = []
rCritLiEstNomList_mag = []
for iSeg in range(0, len(oDataSegs)):
gain_mag, phase_deg = FreqTrans.GainPhase(LiEstNomList[iSeg], magUnit = 'mag', phaseUnit = 'deg', unwrap = True)
gainLiEstNomList_mag.append(gain_mag)
phaseLiEstNomList_deg.append(phase_deg)
# rCritLiEstNom_mag, _, _ = FreqTrans.DistCrit(LiEstNomList[iSeg], typeUnc = 'ellipse')
rCritLiEstNom_mag, _, _ = FreqTrans.DistCritCirc(LiEstNomList[iSeg])
rCritLiEstNomList_mag.append(rCritLiEstNom_mag)
#%% Sigma Plot
fig = None
for iSeg in range(0, len(oDataSegs)):
Cmin = np.min(np.min(LiEstCohList[iSeg], axis = 0), axis = 0)
sNomMin = np.min(svLiEstNomList[iSeg], axis=0)
fig = FreqTrans.PlotSigma(freq_hz[0], svLiEstNomList[iSeg], coher_nd = Cmin, fig = fig, color = rtsmSegList[iSeg]['color'], linestyle = '-', label = oDataSegs[iSeg]['Desc'])
fig = FreqTrans.PlotSigma(freq_hz[0], 0.4 * np.ones_like(freq_hz[0]), color = 'r', linestyle = '--', fig = fig)
ax = fig.get_axes()
ax[0].set_xlim(0, 10)
# ax[0].set_ylim(0, 1)
#%% Disk Margin Plots
inPlot = sigExcList # Elements of sigExcList
outPlot = sigFbList # Elements of sigFbList
if False:
for iOut, outName in enumerate(outPlot):
for iIn, inName in enumerate(inPlot):
fig = None
for iSeg in range(0, len(oDataSegs)):
fig = FreqTrans.PlotSigma(freq_hz[0], rCritLiEstNomList_mag[iSeg][iOut, iIn], coher_nd = LiEstCohList[iSeg][iOut, iIn], fig = fig, color = rtsmSegList[iSeg]['color'], linestyle = '-', label = oDataSegs[iSeg]['Desc'])
fig = FreqTrans.PlotSigma(freq_hz[0], 0.4 * np.ones_like(freq_hz[0]), fig = fig, color = 'r', linestyle = '--')
fig.suptitle(inName + ' to ' + outName, size=20)
ax = fig.get_axes()
# ax[0].set_ylim(0, 2)
#%% Nyquist Plots
if False:
for iOut, outName in enumerate(outPlot):
for iIn, inName in enumerate(inPlot):
fig = None
for iSeg in range(0, len(oDataSegs)):
fig = FreqTrans.PlotNyquist(LiEstNomList[iSeg][iOut, iIn], fig = fig, color = rtsmSegList[iSeg]['color'], label = oDataSegs[iSeg]['Desc'])
fig = FreqTrans.PlotNyquist(np.asarray([-1+ 0j]), TUnc = np.asarray([0.4 + 0.4j]), fig = fig, fmt = '*r', label = 'Critical Region')
fig.suptitle(inName + ' to ' + outName, size=20)
ax = fig.get_axes()
ax[0].set_xlim(-3, 1)
ax[0].set_ylim(-2, 2)
#%% Bode Plots
if False:
for iOut, outName in enumerate(outPlot):
for iIn, inName in enumerate(inPlot):
fig = None
for iSeg in range(0, len(oDataSegs)):
fig = FreqTrans.PlotBode(freq_hz[0], gainLiEstNomList_mag[iSeg][iOut, iIn], phaseLiEstNomList_deg[iSeg][iOut, iIn], LiEstCohList[iSeg][iOut, iIn], fig = fig, color = rtsmSegList[iSeg]['color'], linestyle = '-', label = oDataSegs[iSeg]['Desc'])
fig.suptitle(inName + ' to ' + outName, size=20)
|
py | 1a4e8df3734d222800e247423960b702e25f8a98 | # -*- coding: utf-8 -*-
'''
Created on 10 oct. 2017
@author: Cesar Martinez Izquierdo
'''
from rest_framework.renderers import BaseRenderer
from pyexcel_io import save_data
from collections import OrderedDict
from io import BytesIO
from six import itervalues
from datetime import datetime
import json
class NotImplemented(Exception):
pass
class PyExcelBaseRenderer(BaseRenderer):
def _get_features(self, data):
if data.get('features'):
return data.get('features')
if data.get('results'):
return data.get('results').get('features')
return [data]
def _get_coords(self, feature):
if feature.get("geometry"):
return feature['geometry']['coordinates']
else:
return [None, None]
def _get_row(self, feature):
row = list(self._get_coords(feature))
props = self._get_properties(feature)
for value in itervalues(props):
if isinstance(value, datetime):
# fix mishandling of timezones on pyexcel_xlsxw
row.append(value.replace(tzinfo=None))
elif isinstance(value, list):
row.append(json.dumps(value))
else:
row.append(value)
return row
def _get_header(self, features):
if len(features)>0:
props = self._get_properties(features[0])
header = [ fname for fname in iter(props) ]
return ['lon', 'lat'] + header
return []
def _get_properties(self, feature):
if feature.get('properties'):
return feature.get('properties')
else:
return feature
def render(self, data, accepted_media_type=None, renderer_context=None):
raise NotImplemented() |
py | 1a4e8e293b62bb826dc58bf6b4378b1c3e8acdab | from unittest import TestCase
from test.brick.movemonitor import MoveMonitor
from test.brick.testbrick import TBrick, DECIMALS
from cothread import Sleep
from datetime import datetime
# These tests verify deferred moves
class TestDeferred(TestCase):
def test_cs_defer(self):
"""
check timed deferred moves and also individual cs moves
"""
tb = TBrick()
tb.set_cs_group(tb.g3)
tb.cs3.set_deferred_moves(True)
tb.cs3.set_move_time(3000)
tb.height.go(5, wait=False)
tb.angle.go(1, wait=False)
# verify no motion yet
Sleep(1)
self.assertAlmostEqual(tb.height.pos, 0, DECIMALS)
self.assertAlmostEqual(tb.angle.pos, 0, DECIMALS)
m = MoveMonitor(tb.height.pv_root)
start = datetime.now()
tb.cs3.set_deferred_moves(False)
m.wait_for_one_move(10)
elapsed = datetime.now() - start
print(elapsed)
# verify motion
self.assertAlmostEqual(tb.angle.pos, 1, DECIMALS)
self.assertAlmostEqual(tb.height.pos, 5, DECIMALS)
# todo this seems to take longer than I would expect - is this an issue?
# todo YES - moves to real and virtual axes are taking an extra SLOW POLL
# todo before DMOV is set True
self.assertTrue(3 <= elapsed.seconds < 6)
# single axis move should be controlled by speed setting, not CsMoveTime
start = datetime.now()
tb.height.go(0)
elapsed = datetime.now() - start
print(elapsed)
self.assertAlmostEqual(tb.height.pos, 0, DECIMALS)
self.assertTrue(elapsed.seconds < 2)
def test_real_defer(self):
"""
check that real axes update as expected on virtual axis moves
"""
for _ in range(4): # retry for possible occasional race condition
tb = TBrick()
tb.set_cs_group(tb.g2)
tb.set_deferred_moves(True)
tb.jack1.go(5, wait=False)
tb.jack2.go(4, wait=False)
Sleep(1)
# verify no motion yet
self.assertAlmostEqual(tb.jack1.pos, 0, DECIMALS)
self.assertAlmostEqual(tb.jack2.pos, 0, DECIMALS)
m = MoveMonitor(tb.jack1.pv_root)
start = datetime.now()
tb.set_deferred_moves(False)
m.wait_for_one_move(10)
elapsed = datetime.now() - start
print(elapsed)
# verify motion
self.assertAlmostEqual(tb.jack1.pos, 5, DECIMALS)
self.assertAlmostEqual(tb.jack2.pos, 4, DECIMALS)
self.assertTrue(elapsed.seconds < 4)
|
py | 1a4e8e3e2a163bfff5c538cf352a71adbb75fb21 | #!/usr/bin/env python
"""
For more information on this API, please visit:
https://duo.com/docs/adminapi
-
Script Dependencies:
requests
Dependency Installation:
$ pip install -r requirements.txt
System Requirements:
    - Duo MFA, Duo Access or Duo Beyond account with administrator privileges.
- Duo Admin API enabled
Copyright (c) 2020 Cisco and/or its affiliates.
This software is licensed to you under the terms of the Cisco Sample
Code License, Version 1.1 (the "License"). You may obtain a copy of the
License at
https://developer.cisco.com/docs/licenses
All use of the material herein must be in accordance with the terms of
the License. All rights not expressly granted by the License are
reserved. Unless required by applicable law or agreed to separately in
writing, software distributed under the License is distributed on an "AS
IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
or implied.
"""
import json, base64, email.utils, hmac, hashlib, urllib3, urllib.parse
import requests
import pprint
import config
urllib3.disable_warnings()
# Imported API configuration variables
API_HOSTNAME = config.DUO_API_HOSTNAME
S_KEY = config.DUO_API_SECRET_KEY
I_KEY = config.DUO_API_INTEGRATION_KEY
# Script specific variables
METHOD = 'POST'
API_PATH = '/admin/v1/integrations'
NAME = 'Test Integration'
TYPE = 'authapi'
PARAMS = {
'name': NAME,
'type': TYPE
}
# Request signing helper function
def sign(method=METHOD,
host=API_HOSTNAME,
path=API_PATH,
params=PARAMS,
skey=S_KEY,
ikey=I_KEY):
"""
Return HTTP Basic Authentication ("Authorization" and "Date") headers.
method, host, path: strings from request
params: dict of request parameters
skey: secret key
ikey: integration key
"""
# create canonical string
now = email.utils.formatdate()
canon = [now, method.upper(), host.lower(), path]
args = []
for key in sorted(params.keys()):
val = params[key]
if isinstance(val, str):
val = val.encode("utf-8")
args.append(
'%s=%s' % (urllib.parse.quote(key, '~'), urllib.parse.quote(val, '~')))
canon.append('&'.join(args))
canon = '\n'.join(canon)
print(canon)
# sign canonical string
sig = hmac.new(skey.encode('utf-8'), canon.encode('utf-8'), hashlib.sha1)
auth = '%s:%s' % (ikey, sig.hexdigest())
print(auth)
encoded_auth = base64.b64encode(auth.encode('utf-8'))
# return headers
return {'Date': now, 'Authorization': 'Basic %s' % str(encoded_auth, 'UTF-8')}
if __name__ == "__main__":
url = "https://{}{}".format(API_HOSTNAME, API_PATH)
payload = PARAMS
request_headers = sign()
request_headers['Content-Type'] = 'application/x-www-form-urlencoded'
integration = requests.request(METHOD, url, data=payload, headers=request_headers, verify=False)
pprint.pprint(json.loads(integration.content)) |
py | 1a4e8e4eebf525ac9a7e60c580c0ca5841c800bb | """NICOS GUI default configuration."""
main_window = docked(
vsplit(
panel('nicos.clients.gui.panels.status.ScriptStatusPanel'),
# panel('nicos.clients.gui.panels.watch.WatchPanel'),
panel('nicos.clients.gui.panels.console.ConsolePanel'),
),
('NICOS devices',
panel('nicos.clients.gui.panels.devices.DevicesPanel', icons=True,
dockpos='right',)
),
('Experiment Information and Setup',
panel('nicos.clients.gui.panels.expinfo.ExpInfoPanel',)
),
)
windows = [
window('Editor', 'editor',
panel('nicos.clients.gui.panels.editor.EditorPanel')),
window('Scans', 'plotter',
panel('nicos.clients.gui.panels.scans.ScansPanel')),
window('History', 'find',
panel('nicos.clients.gui.panels.history.HistoryPanel')),
window('Logbook', 'table',
panel('nicos.clients.gui.panels.elog.ELogPanel')),
window('Log files', 'table',
panel('nicos.clients.gui.panels.logviewer.LogViewerPanel')),
window('Errors', 'errors',
panel('nicos.clients.gui.panels.errors.ErrorPanel')),
]
tools = [
tool('Calculator', 'nicos.clients.gui.tools.calculator.CalculatorTool'),
tool('Neutron cross-sections',
'nicos.clients.gui.tools.website.WebsiteTool',
url='http://www.ncnr.nist.gov/resources/n-lengths/'),
tool('Neutron activation', 'nicos.clients.gui.tools.website.WebsiteTool',
url='https://webapps.frm2.tum.de/intranet/activation/'),
tool('Neutron calculations', 'nicos.clients.gui.tools.website.WebsiteTool',
url='https://webapps.frm2.tum.de/intranet/neutroncalc/'),
tool('Report NICOS bug or request enhancement',
'nicos.clients.gui.tools.bugreport.BugreportTool'),
tool('Emergency stop button',
'nicos.clients.gui.tools.estop.EmergencyStopTool',
runatstartup=False),
]
|
py | 1a4e8e5b43bab1e1814343804655d29f39e46f0f |
# 升半音
def sharp_note(mynote, sharped):
'''
给一个音符升半音
'''
if sharped:
if mynote == '1':
return '2'
elif mynote == '2':
return '3'
elif mynote == '4':
return '5'
elif mynote == '5':
return '6'
elif mynote == '6':
return '7'
else:
return '(!)'
else:
if mynote == '3':
return '4'
elif mynote == '7':
return '[1]'
elif mynote == '1' or mynote == '2' or mynote == '4' or mynote == '5' or mynote == '6':
return '#' + mynote
else:
return mynote
def sharp_tune(old_tune):
'''
    Raise every note in a tune by a semitone.
'''
    old_tune = str(old_tune)
sharping = False
new_tune = ''
for i in old_tune:
if i == '#':
sharping = True
else:
new_tune = new_tune + sharp_note(i, sharping)
sharping = False
return new_tune
def sharp_tune_more(old_tune, times):
'''
    Raise a tune by a semitone the given number of times.
'''
for _ in range(times):
old_tune = sharp_tune(old_tune)
return old_tune
# print(sharp_tune(input('input:')))
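
# A minimal usage sketch (added for illustration; the example inputs are hypothetical):
if __name__ == '__main__':
    print(sharp_tune('13'))         # -> '#14'  ('1' gains a sharp, '3' becomes '4')
    print(sharp_tune('#4'))         # -> '5'    (an already-sharpened '4' resolves to '5')
    print(sharp_tune_more('1', 2))  # -> '2'    (two semitones = one whole tone)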
|
py | 1a4e8e6d7d48854af6873015a5f1f040f27a8ca2 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.14.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_node_config_status import V1NodeConfigStatus
class TestV1NodeConfigStatus(unittest.TestCase):
""" V1NodeConfigStatus unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1NodeConfigStatus(self):
"""
Test V1NodeConfigStatus
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubernetes.client.models.v1_node_config_status.V1NodeConfigStatus()
pass
if __name__ == '__main__':
unittest.main()
|
py | 1a4e8e92944932b5721c7cced7b6d0579eef438c | import collections
Set = set
try:
from collections import OrderedDict
except ImportError:
    # Imports needed by this pure-Python fallback (as in the original ActiveState
    # recipe): the thread ident used by __repr__ and the dict views returned by
    # viewkeys()/viewvalues()/viewitems().
    try:
        from thread import get_ident as _get_ident
    except ImportError:
        from dummy_thread import get_ident as _get_ident
    try:
        from _abcoll import KeysView, ValuesView, ItemsView
    except ImportError:
        pass

    class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.values():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
KEY, PREV, NEXT = range(3)
class OrderedSet(collections.MutableSet):
"""
From: http://code.activestate.com/recipes/576694/
"""
def __init__(self, iterable=None):
self.end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.map = {} # key --> [key, prev, next]
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.map)
def __contains__(self, key):
return key in self.map
def add(self, key):
if key not in self.map:
end = self.end
curr = end[PREV]
curr[NEXT] = end[PREV] = self.map[key] = [key, curr, end]
def discard(self, key):
if key in self.map:
key, prev, next = self.map.pop(key)
prev[NEXT] = next
next[PREV] = prev
def __iter__(self):
end = self.end
curr = end[NEXT]
while curr is not end:
yield curr[KEY]
curr = curr[NEXT]
def __reversed__(self):
end = self.end
curr = end[PREV]
while curr is not end:
yield curr[KEY]
curr = curr[PREV]
def pop(self, last=True):
if not self:
raise KeyError('set is empty')
key = next(reversed(self)) if last else next(iter(self))
self.discard(key)
return key
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
def __eq__(self, other):
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
return set(self) == set(other)
def __del__(self):
self.clear() # remove circular references
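
# Illustrative behaviour of the OrderedSet recipe above (example values only):
#   s = OrderedSet('abracadaba')   # OrderedSet(['a', 'b', 'r', 'c', 'd'])
#   t = OrderedSet('simsalabim')   # OrderedSet(['s', 'i', 'm', 'a', 'l', 'b'])
#   s | t                          # OrderedSet(['a', 'b', 'r', 'c', 'd', 's', 'i', 'm', 'l'])
#   s & t                          # OrderedSet(['a', 'b'])
#   s - t                          # OrderedSet(['r', 'c', 'd'])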
|
py | 1a4e8ec77af7de7ea7c4ae27169598df1085e222 | """
NI ELVIS III Pulse Width Modulation Example
This example illustrates how to generate a Pulse Width Modulation (PWM) signal
to an external peripheral through the PWM channels. The program first defines
the configuration for the PWM channels, and then generates the signal for 20
seconds.
The PWM configuration consists of two parameters: bank and channel. There are
two identical banks of PWM channels (A and B). The PWM shares the same channels
with DIO. Each bank contains 20 digital input and output channels.
This example uses:
Bank B, Channel DIO0.
Hardware setup:
No hardware is needed.
Result:
Generate a PWM signal from DIO0 on bank B.
"""
import time
from nielvis import PWM, Bank, DIOChannel
# specify the bank and channel for the PWM session
bank = Bank.B
channel = DIOChannel.DIO0
# configure a PWM session
with PWM(bank, channel) as pwm:
# specify the frequency (floating-point number) for the PWM signal. The
# FPGA automatically coerces it to the nearest possible frequency.
frequency = 1000
# specify the percentage of time the PWM signal remains high over one PWM
# cycle
duty_cycle = 0.7
# generate the PWM signal
pwm.generate(frequency, duty_cycle)
# begin to generate the PWM signal for 20 seconds
time.sleep(20)
# stop generating the PWM signal when the 'with' statement ends
|
py | 1a4e8f7607c113360432f07a068f1a0549ecfac7 | from setuptools import setup, find_packages
setup(
name="app",
install_requires="pluggy>=0.3,<1.0",
packages=find_packages(),
) |
py | 1a4e8fdacc870285eb865f807145839c07d1c4bf | from typing import Any, Dict
from ..base import BaseDistiller, DistillationResult
class JsonDistiller(BaseDistiller):
def __call__(
self,
source: Dict[str, Any],
context: Dict[str, Any] = None,
raise_validation_error: bool = False,
) -> DistillationResult:
raise NotImplementedError
|
py | 1a4e9076ac785a1d5c32fb581849b3edf434c2ca | import abc
from typing import TYPE_CHECKING
import jsonpickle
from .output.json_writer import ADD_VARIABLE, CHANGE_VARIABLE, EXECUTE_FRAME, NEW_FRAME, REMOVE_VARIABLE
if TYPE_CHECKING:
from .debugger import Debugger
class Replayer(abc.ABC):
def __init__(self: "Debugger"):
# Propagate initialization to other mixins
super().__init__()
def replay_events(self: "Debugger", events):
for event in events:
evt_type = event["event"]
if evt_type == NEW_FRAME:
self.out.write_cur_frame(event["frame_info"], event["output"])
elif evt_type == EXECUTE_FRAME:
frame_info = event["frame_info"]
exec_time = event["exec_time"]
self.out.write_frame_exec(frame_info, exec_time, event["exec_times"])
# Replay changes to frame_exec_times
if frame_info in self.frame_exec_times:
self.frame_exec_times[frame_info].append(exec_time)
else:
self.frame_exec_times[frame_info] = [exec_time]
elif evt_type == ADD_VARIABLE:
self.out.write_add(
event["var_name"], event["value"], event["history"], action=event["action"], plural=event["plural"],
)
elif evt_type == CHANGE_VARIABLE:
self.out.write_change(
event["var_name"],
event["value_before"],
event["value_after"],
event["history"],
action=event["action"],
)
elif evt_type == REMOVE_VARIABLE:
self.out.write_remove(event["var_name"], event["value"], event["history"], action=event["action"])
else:
raise ValueError(f"Unrecognized JSON event '{evt_type}'")
def replay_summary(self: "Debugger", data):
self.vars.update(data["var_history"])
self.out.write_variable_summary(self.vars)
if self.profiler_output:
self.out.write_profiler_summary(self.frame_exec_times)
self.out.write_time_summary(data["exec_start_time"], data["exec_stop_time"])
def replay(self: "Debugger", json_path):
with open(json_path, "r") as f:
data = jsonpickle.loads(f.read())
self.replay_events(data["events"])
self.replay_summary(data)
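
# Shape of the JSON consumed by replay(), as inferred from the reader above
# (illustrative skeleton; event type strings are the imported NEW_FRAME,
# EXECUTE_FRAME, ADD_VARIABLE, CHANGE_VARIABLE and REMOVE_VARIABLE constants):
#   {"events": [{"event": ..., "frame_info": ..., ...}, ...],
#    "var_history": {...},
#    "exec_start_time": ..., "exec_stop_time": ...}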
|
py | 1a4e915626663e94ff110f417826a5d4bdce6761 | from gym.spaces import Box
from ray.rllib.agents.dqn.distributional_q_tf_model import \
DistributionalQTFModel
from ray.rllib.agents.dqn.dqn_torch_model import \
DQNTorchModel
from ray.rllib.models.tf.fcnet import FullyConnectedNetwork
from ray.rllib.models.torch.fcnet import FullyConnectedNetwork as TorchFC
from ray.rllib.utils.framework import try_import_tf, try_import_torch
from ray.rllib.utils.numpy import LARGE_INTEGER
tf = try_import_tf()
torch, nn = try_import_torch()
class ParametricActionsModel(DistributionalQTFModel):
"""Parametric action model that handles the dot product and masking.
This assumes the outputs are logits for a single Categorical action dist.
Getting this to work with a more complex output (e.g., if the action space
is a tuple of several distributions) is also possible but left as an
exercise to the reader.
"""
def __init__(self,
obs_space,
action_space,
num_outputs,
model_config,
name,
true_obs_shape=(4, ),
action_embed_size=2,
**kw):
super(ParametricActionsModel, self).__init__(
obs_space, action_space, num_outputs, model_config, name, **kw)
self.action_embed_model = FullyConnectedNetwork(
Box(-1, 1, shape=true_obs_shape), action_space, action_embed_size,
model_config, name + "_action_embed")
self.register_variables(self.action_embed_model.variables())
def forward(self, input_dict, state, seq_lens):
# Extract the available actions tensor from the observation.
avail_actions = input_dict["obs"]["avail_actions"]
action_mask = input_dict["obs"]["action_mask"]
# Compute the predicted action embedding
action_embed, _ = self.action_embed_model({
"obs": input_dict["obs"]["cart"]
})
# Expand the model output to [BATCH, 1, EMBED_SIZE]. Note that the
# avail actions tensor is of shape [BATCH, MAX_ACTIONS, EMBED_SIZE].
intent_vector = tf.expand_dims(action_embed, 1)
# Batch dot product => shape of logits is [BATCH, MAX_ACTIONS].
action_logits = tf.reduce_sum(avail_actions * intent_vector, axis=2)
# Mask out invalid actions (use tf.float32.min for stability)
inf_mask = tf.maximum(tf.log(action_mask), tf.float32.min)
return action_logits + inf_mask, state
def value_function(self):
return self.action_embed_model.value_function()
class TorchParametricActionsModel(DQNTorchModel, nn.Module):
"""PyTorch version of above ParametricActionsModel."""
def __init__(self,
obs_space,
action_space,
num_outputs,
model_config,
name,
true_obs_shape=(4, ),
action_embed_size=2,
**kw):
nn.Module.__init__(self)
DQNTorchModel.__init__(self, obs_space, action_space, num_outputs,
model_config, name, **kw)
self.action_embed_model = TorchFC(
Box(-1, 1, shape=true_obs_shape), action_space, action_embed_size,
model_config, name + "_action_embed")
def forward(self, input_dict, state, seq_lens):
# Extract the available actions tensor from the observation.
avail_actions = input_dict["obs"]["avail_actions"]
action_mask = input_dict["obs"]["action_mask"]
# Compute the predicted action embedding
action_embed, _ = self.action_embed_model({
"obs": input_dict["obs"]["cart"]
})
# Expand the model output to [BATCH, 1, EMBED_SIZE]. Note that the
# avail actions tensor is of shape [BATCH, MAX_ACTIONS, EMBED_SIZE].
intent_vector = torch.unsqueeze(action_embed, 1)
# Batch dot product => shape of logits is [BATCH, MAX_ACTIONS].
action_logits = torch.sum(avail_actions * intent_vector, dim=2)
# Mask out invalid actions (use -LARGE_INTEGER to tag invalid).
# These are then recognized by the EpsilonGreedy exploration component
# as invalid actions that are not to be chosen.
inf_mask = torch.clamp(
torch.log(action_mask), -float(LARGE_INTEGER), float("inf"))
return action_logits + inf_mask, state
def value_function(self):
return self.action_embed_model.value_function()
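
# A minimal wiring sketch (assumed names, mirroring RLlib's parametric-actions
# examples; not part of this module):
#   from ray.rllib.models import ModelCatalog
#   ModelCatalog.register_custom_model("pa_model", ParametricActionsModel)
#   config = {
#       "model": {"custom_model": "pa_model"},
#       "hiddens": [],      # avoid extra Q-head layers on top of the masked logits
#       "dueling": False,
#   }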
|