ext | sha | content
---|---|---|
py | b40bd83407a1845a6e9529fcd8b0d4e39fea59bd | """SI URL Feeds Class."""
from fmcapi.api_objects.apiclasstemplate import APIClassTemplate
import logging
class SIUrlFeeds(APIClassTemplate):
"""The SIUrlFeeds Object in the FMC."""
VALID_JSON_DATA = [
"id",
"name",
"type",
"checksumURL",
"feedURL",
"updateFrequency",
"overrides",
"overridable",
]
VALID_FOR_KWARGS = VALID_JSON_DATA + []
URL_SUFFIX = "/object/siurlfeeds"
def __init__(self, fmc, **kwargs):
"""
Initialize SIUrlFeeds object.
:param fmc: (object) FMC object
:param kwargs: Any other values passed during instantiation.
:return: None
"""
super().__init__(fmc, **kwargs)
logging.debug("In __init__() for SIUrlFeeds class.")
self.parse_kwargs(**kwargs)
def post(self):
"""POST method for API for SIUrlFeeds not supported."""
logging.info("POST method for API for SIUrlFeeds not supported.")
pass
def put(self):
"""PUT method for API for SIUrlFeeds not supported."""
logging.info("PUT method for API for SIUrlFeeds not supported.")
pass
def delete(self):
"""DELETE method for API for SIUrlFeeds not supported."""
logging.info("DELETE method for API for SIUrlFeeds not supported.")
pass
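# --- Illustrative usage sketch (not part of the original module) ---
# The connection details below are placeholders and the context-manager form
# of fmcapi.FMC is assumed from its documented usage; treat this as a hedged
# example rather than a definitive recipe:
#
#     import fmcapi
#     with fmcapi.FMC(host="fmc.example.com", username="admin",
#                     password="changeme", autodeploy=False) as fmc:
#         feeds = SIUrlFeeds(fmc=fmc)
#         feeds.get()  # GET works; post/put/delete above only log that they
#                      # are unsupported.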
|
py | b40bd8adedacb737c421a32e60b3dd98e1bfda1f | # This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""The evolved operator ansatz."""
from typing import Optional
import numpy as np
from qiskit.circuit import (
Parameter,
ParameterVector,
QuantumRegister,
QuantumCircuit,
ParameterExpression,
)
from qiskit.circuit.exceptions import CircuitError
from qiskit.exceptions import QiskitError
from .blueprintcircuit import BlueprintCircuit
class EvolvedOperatorAnsatz(BlueprintCircuit):
"""The evolved operator ansatz."""
def __init__(
self,
operators=None,
reps: int = 1,
evolution=None,
insert_barriers: bool = False,
name: str = "EvolvedOps",
initial_state: Optional[QuantumCircuit] = None,
):
"""
Args:
operators (Optional[Union[OperatorBase, QuantumCircuit, list]]): The operators to evolve.
If a circuit is passed, we assume it implements an already evolved operator and thus
the circuit is not evolved again. Can be a single operator (circuit) or a list of
operators (and circuits).
reps: The number of times to repeat the evolved operators.
evolution (Optional[EvolutionBase]): An opflow converter object to construct the evolution.
Defaults to Trotterization.
insert_barriers: Whether to insert barriers in between each evolution.
name: The name of the circuit.
initial_state: A `QuantumCircuit` object to prepend to the circuit.
"""
if evolution is None:
# pylint: disable=cyclic-import
from qiskit.opflow import PauliTrotterEvolution
evolution = PauliTrotterEvolution()
if operators is not None:
operators = _validate_operators(operators)
super().__init__(name=name)
self._operators = operators
self._evolution = evolution
self._reps = reps
self._insert_barriers = insert_barriers
self._initial_state = initial_state
def _check_configuration(self, raise_on_failure: bool = True) -> bool:
if self.operators is None:
if raise_on_failure:
raise ValueError("The operators are not set.")
return False
if self.reps < 1:
if raise_on_failure:
raise ValueError("The reps cannot be smaller than 1.")
return False
return True
@property
def reps(self) -> int:
"""The number of times the evolved operators are repeated."""
return self._reps
@reps.setter
def reps(self, r: int) -> None:
"""Sets the number of times the evolved operators are repeated."""
self._invalidate()
self._reps = r
@property
def evolution(self):
"""The evolution converter used to compute the evolution.
Returns:
EvolutionBase: The evolution converter used to compute the evolution.
"""
return self._evolution
@evolution.setter
def evolution(self, evol) -> None:
"""Sets the evolution converter used to compute the evolution.
Args:
evol (EvolutionBase): An opflow converter object to construct the evolution.
"""
self._invalidate()
self._evolution = evol
@property
def initial_state(self) -> QuantumCircuit:
"""The initial state."""
return self._initial_state
@initial_state.setter
def initial_state(self, initial_state: QuantumCircuit) -> None:
"""Sets the initial state."""
self._invalidate()
self._initial_state = initial_state
@property
def operators(self):
"""The operators that are evolved in this circuit.
Returns:
list: The operators to be evolved (and circuits) contained in this ansatz.
"""
return self._operators
@operators.setter
def operators(self, operators=None) -> None:
"""Set the operators to be evolved.
operators (Optional[Union[OperatorBase, QuantumCircuit, list]]): The operators to evolve.
If a circuit is passed, we assume it implements an already evolved operator and thus
the circuit is not evolved again. Can be a single operator (circuit) or a list of
operators (and circuits).
"""
operators = _validate_operators(operators)
self._invalidate()
self._operators = operators
@property
def qregs(self):
"""A list of the quantum registers associated with the circuit."""
if self._data is None:
self._build()
return self._qregs
@qregs.setter
def qregs(self, qregs):
"""Set the quantum registers associated with the circuit."""
self._qregs = qregs
self._qubits = [qbit for qreg in qregs for qbit in qreg]
self._invalidate()
# TODO: the `preferred_init_points`-implementation can (and should!) be improved!
@property
def preferred_init_points(self):
"""Getter of preferred initial points based on the given initial state."""
if self._initial_state is None:
return None
else:
# If an initial state was set by the user, then we want to make sure that the VQE does
# not start from a random point. Thus, we return an all-zero initial point for the
# optimizer which is used (unless it gets overwritten by a higher-priority setting at
# runtime of the VQE).
# However, in order to determine the correct length, we must build the QuantumCircuit
# first, because otherwise the operators may not be set yet.
self._build()
return np.zeros(self.reps * len(self.operators), dtype=float)
def _build(self):
if self._data is not None:
return
self._check_configuration()
self._data = []
# get the evolved operators as circuits
from qiskit.opflow import PauliOp
coeff = Parameter("c")
circuits = []
is_evolved_operator = []
for op in self.operators:
# if the operator is already the evolved circuit just append it
if isinstance(op, QuantumCircuit):
circuits.append(op)
is_evolved_operator.append(False) # has no time coeff
else:
# check if the operator is just the identity, if yes, skip it
if isinstance(op, PauliOp):
sig_qubits = np.logical_or(op.primitive.x, op.primitive.z)
if sum(sig_qubits) == 0:
continue
evolved_op = self.evolution.convert((coeff * op).exp_i()).reduce()
circuits.append(evolved_op.to_circuit())
is_evolved_operator.append(True) # has time coeff
# set the registers
num_qubits = circuits[0].num_qubits
try:
qr = QuantumRegister(num_qubits, "q")
self.add_register(qr)
except CircuitError:
# the register already exists, probably because of a previous composition
pass
# build the circuit
times = ParameterVector("t", self.reps * sum(is_evolved_operator))
times_it = iter(times)
evolution = QuantumCircuit(*self.qregs, name=self.name)
first = True
for _ in range(self.reps):
for is_evolved, circuit in zip(is_evolved_operator, circuits):
if first:
first = False
else:
if self._insert_barriers:
evolution.barrier()
if is_evolved:
bound = circuit.assign_parameters({coeff: next(times_it)})
else:
bound = circuit
evolution.compose(bound, inplace=True)
if self.initial_state:
evolution.compose(self.initial_state, front=True, inplace=True)
# cast global phase to float if it has no free parameters
if isinstance(evolution.global_phase, ParameterExpression):
try:
evolution.global_phase = float(evolution.global_phase._symbol_expr)
# RuntimeError is raised if symengine is used, for SymPy it is a TypeError
except (RuntimeError, TypeError):
# expression contains free parameters
pass
try:
instr = evolution.to_gate()
except QiskitError:
instr = evolution.to_instruction()
self.append(instr, self.qubits)
def _validate_operators(operators):
if not isinstance(operators, list):
operators = [operators]
if len(operators) > 1:
num_qubits = operators[0].num_qubits
if any(operators[i].num_qubits != num_qubits for i in range(1, len(operators))):
raise ValueError("All operators must act on the same number of qubits.")
return operators
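# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example using opflow Paulis (the same opflow package this module
# already imports); the operator choice and reps are arbitrary:
#
#     from qiskit.opflow import X, Y, Z
#     ansatz = EvolvedOperatorAnsatz(operators=[(X ^ X), (Y ^ Y), (Z ^ Z)],
#                                    reps=2, insert_barriers=True)
#     print(ansatz.decompose().draw())
#
# One parameter is created per non-identity operator and repetition, named
# t[0], t[1], ... by the ParameterVector built in _build(); circuits passed
# as operators are composed as-is and receive no time parameter.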
|
py | b40bd8be814659f2ed06a76014ded0b0a0a6fa01 | def gen_fib(n):
a, b = 1, 1
for i in range(n):
yield (i, a)
a, b = b, a + b
if __name__ == '__main__':
for i, val in gen_fib(15):
print(f'fib({i}) = {val}')
|
py | b40bd8e6252f05e97a9ee5c5e2463c879552a130 | # -*- coding: utf-8 -*-
import time
import mendeley
from mendeley.exception import MendeleyApiException
from modularodm import fields
from website.addons.base import AddonOAuthNodeSettingsBase
from website.addons.base import AddonOAuthUserSettingsBase
from website.addons.citations.utils import serialize_folder
from website.addons.mendeley import serializer
from website.addons.mendeley import settings
from website.addons.mendeley.api import APISession
from website.oauth.models import ExternalProvider
from website.util import web_url_for
from framework.exceptions import HTTPError
class Mendeley(ExternalProvider):
name = 'Mendeley'
short_name = 'mendeley'
client_id = settings.MENDELEY_CLIENT_ID
client_secret = settings.MENDELEY_CLIENT_SECRET
auth_url_base = 'https://api.mendeley.com/oauth/authorize'
callback_url = 'https://api.mendeley.com/oauth/token'
default_scopes = ['all']
_client = None
def handle_callback(self, response):
client = self._get_client(response)
# make a second request for the Mendeley user's ID and name
profile = client.profiles.me
return {
'provider_id': profile.id,
'display_name': profile.display_name,
'profile_url': profile.link,
}
def _get_client(self, credentials):
if not self._client:
partial = mendeley.Mendeley(
client_id=self.client_id,
client_secret=self.client_secret,
redirect_uri=web_url_for('oauth_callback',
service_name='mendeley',
_absolute=True),
)
self._client = APISession(partial, credentials)
return self._client
def _get_folders(self):
"""Get a list of a user's folders"""
client = self.client
return client.folders.list().items
@property
def client(self):
"""An API session with Mendeley"""
if not self._client:
self._client = self._get_client({
'access_token': self.account.oauth_key,
'refresh_token': self.account.refresh_token,
'expires_at': time.mktime(self.account.expires_at.timetuple()),
'token_type': 'bearer',
})
#Check if Mendeley can be accessed
try:
self._client.folders.list()
except MendeleyApiException as error:
self._client = None
if error.status == 403:
raise HTTPError(403)
else:
raise HTTPError(error.status)
return self._client
def citation_lists(self, extract_folder):
"""List of CitationList objects, derived from Mendeley folders"""
folders = self._get_folders()
# TODO: Verify OAuth access to each folder
all_documents = serialize_folder(
'All Documents',
id='ROOT',
parent_id='__'
)
serialized_folders = [
extract_folder(each)
for each in folders
]
return [all_documents] + serialized_folders
def get_list(self, list_id='ROOT'):
"""Get a single CitationList
:param str list_id: ID for a Mendeley folder. Optional.
:return CitationList: CitationList for the folder, or for all documents
"""
if list_id == 'ROOT':
folder = None
else:
folder = self.client.folders.get(list_id)
if folder:
return self._citations_for_mendeley_folder(folder)
return self._citations_for_mendeley_user()
def _folder_metadata(self, folder_id):
folder = self.client.folders.get(folder_id)
return folder
def _citations_for_mendeley_folder(self, folder):
document_ids = [
document.id
for document in folder.documents.iter(page_size=500)
]
citations = {
citation['id']: citation
for citation in self._citations_for_mendeley_user()
}
return map(lambda id: citations[id], document_ids)
def _citations_for_mendeley_user(self):
documents = self.client.documents.iter(page_size=500)
return [
self._citation_for_mendeley_document(document)
for document in documents
]
def _citation_for_mendeley_document(self, document):
"""Mendeley document to ``website.citations.models.Citation``
:param BaseDocument document:
An instance of ``mendeley.models.base_document.BaseDocument``
:return Citation:
"""
csl = {
'id': document.json.get('id')
}
CSL_TYPE_MAP = {
'book_section': 'chapter',
'case': 'legal_case',
'computer_program': 'article',
'conference_proceedings': 'paper-conference',
'encyclopedia_article': 'entry-encyclopedia',
'film': 'motion_picture',
'generic': 'article',
'hearing': 'speech',
'journal': 'article-journal',
'magazine_article': 'article-magazine',
'newspaper_article': 'article-newspaper',
'statute': 'legislation',
'television_broadcast': 'broadcast',
'web_page': 'webpage',
'working_paper': 'report'
}
csl_type = document.json.get('type')
if csl_type in CSL_TYPE_MAP:
csl['type'] = CSL_TYPE_MAP[csl_type]
else:
csl['type'] = 'article'
if document.json.get('abstract'):
csl['abstract'] = document.json.get('abstract')
if document.json.get('accessed'):
csl['accessed'] = document.json.get('accessed')
if document.json.get('authors'):
csl['author'] = [
{
'given': person.get('first_name'),
'family': person.get('last_name'),
} for person in document.json.get('authors')
]
if document.json.get('chapter'):
csl['chapter-number'] = document.json.get('chapter')
if document.json.get('city') and document.json.get('country'):
csl['publisher-place'] = document.json.get('city') + ", " + document.json.get('country')
elif document.json.get('city'):
csl['publisher-place'] = document.json.get('city')
elif document.json.get('country'):
csl['publisher-place'] = document.json.get('country')
if document.json.get('edition'):
csl['edition'] = document.json.get('edition')
if document.json.get('editors'):
csl['editor'] = [
{
'given': person.get('first_name'),
'family': person.get('last_name'),
} for person in document.json.get('editors')
]
if document.json.get('genre'):
csl['genre'] = document.json.get('genre')
# gather identifiers
idents = document.json.get('identifiers')
if idents is not None:
if idents.get('doi'):
csl['DOI'] = idents.get('doi')
if idents.get('isbn'):
csl['ISBN'] = idents.get('isbn')
if idents.get('issn'):
csl['ISSN'] = idents.get('issn')
if idents.get('pmid'):
csl['PMID'] = idents.get('pmid')
if document.json.get('issue'):
csl['issue'] = document.json.get('issue')
if document.json.get('language'):
csl['language'] = document.json.get('language')
if document.json.get('medium'):
csl['medium'] = document.json.get('medium')
if document.json.get('pages'):
csl['page'] = document.json.get('pages')
if document.json.get('publisher'):
csl['publisher'] = document.json.get('publisher')
if csl_type == 'thesis':
csl['publisher'] = document.json.get('institution')
if document.json.get('revision'):
csl['number'] = document.json.get('revision')
if document.json.get('series'):
csl['collection-title'] = document.json.get('series')
if document.json.get('series_editor'):
csl['collection-editor'] = document.json.get('series_editor')
if document.json.get('short_title'):
csl['shortTitle'] = document.json.get('short_title')
if document.json.get('source'):
csl['container-title'] = document.json.get('source')
if document.json.get('title'):
csl['title'] = document.json.get('title')
if document.json.get('volume'):
csl['volume'] = document.json.get('volume')
urls = document.json.get('websites', [])
if urls:
csl['URL'] = urls[0]
if document.json.get('year'):
csl['issued'] = {'date-parts': [[document.json.get('year')]]}
return csl
class MendeleyUserSettings(AddonOAuthUserSettingsBase):
oauth_provider = Mendeley
serializer = serializer.MendeleySerializer
class MendeleyNodeSettings(AddonOAuthNodeSettingsBase):
oauth_provider = Mendeley
serializer = serializer.MendeleySerializer
mendeley_list_id = fields.StringField()
_api = None
@property
def api(self):
"""authenticated ExternalProvider instance"""
if self._api is None:
self._api = Mendeley()
self._api.account = self.external_account
return self._api
@property
def complete(self):
return bool(self.has_auth and self.user_settings.verify_oauth_access(
node=self.owner,
external_account=self.external_account,
metadata={'folder': self.mendeley_list_id},
))
@property
def selected_folder_name(self):
if self.mendeley_list_id is None:
return ''
elif self.mendeley_list_id == 'ROOT':
return 'All Documents'
else:
folder = self.api._folder_metadata(self.mendeley_list_id)
return folder.name
@property
def root_folder(self):
root = serialize_folder(
'All Documents',
id='ROOT',
parent_id='__'
)
root['kind'] = 'folder'
return root
@property
def provider_name(self):
return 'mendeley'
@property
def folder_id(self):
return self.mendeley_list_id
@property
def folder_name(self):
return self.selected_folder_name
@property
def folder_path(self):
return self.selected_folder_name
def clear_auth(self):
self.mendeley_list_id = None
return super(MendeleyNodeSettings, self).clear_auth()
def deauthorize(self, auth=None, add_log=True):
"""Remove user authorization from this node and log the event."""
if add_log:
self.owner.add_log(
'mendeley_node_deauthorized',
params={
'project': self.owner.parent_id,
'node': self.owner._id,
},
auth=auth,
)
self.clear_auth()
self.save()
def set_auth(self, *args, **kwargs):
self.mendeley_list_id = None
return super(MendeleyNodeSettings, self).set_auth(*args, **kwargs)
def set_target_folder(self, mendeley_list_id, mendeley_list_name, auth):
"""Configure this addon to point to a Mendeley folder
:param str mendeley_list_id:
:param ExternalAccount external_account:
:param User user:
"""
# Tell the user's addon settings that this node is connecting
self.user_settings.grant_oauth_access(
node=self.owner,
external_account=self.external_account,
metadata={'folder': mendeley_list_id}
)
self.user_settings.save()
# update this instance
self.mendeley_list_id = mendeley_list_id
self.save()
self.owner.add_log(
'mendeley_folder_selected',
params={
'project': self.owner.parent_id,
'node': self.owner._id,
'folder_id': mendeley_list_id,
'folder_name': mendeley_list_name,
},
auth=auth,
)
|
py | b40bdab22fcc6aaee47a8b944adabe29b98b6d2a | #!/usr/bin/env python
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from io import open
import sys
from setuptools import setup
from os import path
DESCRIPTION = "Koalas: pandas API on Apache Spark"
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
LONG_DESCRIPTION = f.read()
try:
exec(open('databricks/koalas/version.py').read())
except IOError:
print("Failed to load Koalas version file for packaging. You must be in Koalas root dir.",
file=sys.stderr)
sys.exit(-1)
VERSION = __version__ # noqa
setup(
name='koalas',
version=VERSION,
packages=['databricks', 'databricks.koalas', 'databricks.koalas.missing',
'databricks.koalas.usage_logging'],
extras_require={
'spark': ['pyspark>=2.4.0'],
'mlflow': ['mlflow>=1.0'],
},
python_requires='>=3.5',
install_requires=[
'pandas>=0.23.2,<1.0',
'pyarrow>=0.10,<0.15',
'numpy>=1.14',
'matplotlib>=3.0.0',
],
maintainer="Databricks",
maintainer_email="[email protected]",
license='http://www.apache.org/licenses/LICENSE-2.0',
url="https://github.com/databricks/koalas",
project_urls={
'Bug Tracker': 'https://github.com/databricks/koalas/issues',
'Documentation': 'https://koalas.readthedocs.io/',
'Source Code': 'https://github.com/databricks/koalas'
},
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
classifiers=[
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
)
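# Note on the extras_require entries above (a hedged usage reminder):
# `pip install "koalas[spark]"` additionally pulls in pyspark>=2.4.0 and
# `pip install "koalas[mlflow]"` pulls in mlflow>=1.0, while a plain
# `pip install koalas` assumes Spark is already available in the target
# environment.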
|
py | b40bdaf6d5f6c2befe0a27adc2310b38ca413722 | import itertools
import logging
import atlas_mpl_style as ampl
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import hepynet.common.hepy_type as ht
logger = logging.getLogger("hepynet")
def plot_mva_scores(
df_raw: pd.DataFrame,
df: pd.DataFrame,
job_config: ht.config,
save_dir: ht.pathlike,
file_name: str = "mva_scores",
):
# initialize
logger.info("Plotting MVA scores")
tc = job_config.train.clone()
ac = job_config.apply.clone()
plot_config = ac.cfg_mva_scores_data_mc.clone()
# prepare signal
sig_scores_dict = {}
sig_weights_dict = {}
for sig_key in plot_config.sig_list:
sig_score = df.loc[df["sample_name"] == sig_key, "y_pred"].values
if sig_score.ndim == 1:
sig_score = sig_score.reshape((-1, 1))
sig_scores_dict[sig_key] = sig_score
sig_weight = df_raw.loc[df["sample_name"] == sig_key, "weight"].values
sig_weights_dict[sig_key] = sig_weight
# prepare background
bkg_scores_dict = {}
bkg_weights_dict = {}
for bkg_key in plot_config.bkg_list:
bkg_score = df.loc[df["sample_name"] == bkg_key, "y_pred"].values
if bkg_score.ndim == 1:
bkg_score = bkg_score.reshape((-1, 1))
bkg_scores_dict[bkg_key] = bkg_score
bkg_weight = df_raw.loc[df["sample_name"] == bkg_key, "weight"].values
bkg_weights_dict[bkg_key] = bkg_weight
# prepare data
# TODO: support data plots
if plot_config.apply_data:
data_key = plot_config.data_key
data_scores = df.loc[df["sample_name"] == data_key, "y_pred"].values
if data_scores.ndim == 1:
data_scores = data_scores.reshape((-1, 1))
data_weights = df_raw.loc[
df["sample_name"] == data_key, "weight"
].values
# make plots
all_nodes = ["sig"] + tc.output_bkg_node_names
for node_id, node in enumerate(all_nodes):
if plot_config.show_ratio:
fig = plt.figure(figsize=(50 / 3, 50 / 3))
gs = mpl.gridspec.GridSpec(4, 1, hspace=0.0, wspace=0.0)
ax = fig.add_subplot(gs[0:3])
ax.tick_params(labelbottom=False)
ratio_ax = fig.add_subplot(gs[3], sharex=ax)
# ratio_ax.yaxis.set_major_locator(
# mpl.ticker.MaxNLocator(
# symmetric=True, prune="both", min_n_ticks=5, nbins=4
# )
# )
ratio_ax.autoscale(axis="x", tight=True)
plt.sca(ax)
else:
fig, ax = plt.subplots(figsize=(50 / 3, 100 / 9))
colors = plt.rcParams["axes.prop_cycle"].by_key()["color"]
color_cycle = itertools.cycle(colors)
# plot bkg
bkg_collect = list()
bkg_scores_all = None
bkg_weights_all = None
for key, value in bkg_scores_dict.items():
node_score = value[:, node_id].flatten()
node_weight = bkg_weights_dict[key] * plot_config.bkg_scale
bkg_bins, _ = np.histogram(
node_score,
bins=plot_config.bins,
range=plot_config.range,
weights=node_weight,
density=plot_config.density,
)
if plot_config.bkg_scale != 1:
bkg_label = f"{key} x{plot_config.bkg_scale}"
else:
bkg_label = key
bkg = ampl.plot.Background(
bkg_label, bkg_bins, color=next(color_cycle)
)
bkg_collect.append(bkg)
if bkg_scores_all is None:
bkg_scores_all = node_score
bkg_weights_all = node_weight
else:
bkg_scores_all = np.concatenate((bkg_scores_all, node_score))
bkg_weights_all = np.concatenate(
(bkg_weights_all, node_weight)
)
bkg_all_bins, bkg_edges = np.histogram(
bkg_scores_all,
bins=plot_config.bins,
range=plot_config.range,
weights=bkg_weights_all,
density=plot_config.density,
)
sumw2, _ = np.histogram(
bkg_scores_all,
bins=plot_config.bins,
range=plot_config.range,
weights=np.power(bkg_weights_all, 2),
)
bkg_stats_errs = np.sqrt(sumw2)
if plot_config.density:
norm_sum = np.sum(bkg_weights_all) * (1 / plot_config.bins)
bkg_stats_errs /= norm_sum
ampl.plot.plot_backgrounds(bkg_collect, bkg_edges, ax=ax)
# plot sig
for key, value in sig_scores_dict.items():
sig_bins, sig_edges = np.histogram(
value[:, node_id].flatten(),
bins=plot_config.bins,
range=plot_config.range,
weights=sig_weights_dict[key] * plot_config.sig_scale,
density=plot_config.density,
)
if plot_config.sig_scale != 1:
sig_label = f"{key} x{plot_config.sig_scale}"
else:
sig_label = key
ampl.plot.plot_signal(
sig_label, sig_edges, sig_bins, color=next(color_cycle), ax=ax
)
# plot data
if plot_config.apply_data:
data_bins, data_edges = np.histogram(
data_scores[:, node_id].flatten(),
bins=plot_config.bins,
range=plot_config.range,
weights=data_weights * plot_config.data_scale,
density=plot_config.density,
)
sumw2, _ = np.histogram(
data_scores[:, node_id].flatten(),
bins=plot_config.bins,
range=plot_config.range,
weights=np.power(data_weights * plot_config.data_scale, 2),
)
data_stats_errs = np.sqrt(sumw2)
if plot_config.density:
norm_sum = np.sum(data_weights * plot_config.data_scale) * (
1 / plot_config.bins
)
data_stats_errs /= norm_sum
if plot_config.data_scale != 1:
data_label = f"data x{plot_config.data_scale}"
else:
data_label = "data"
ampl.plot.plot_data(
data_edges,
data_bins,
stat_errs=data_stats_errs,
label=data_label,
ax=ax,
)
# plot ratio
if plot_config.show_ratio:
ampl.plot.plot_ratio(
data_edges,
data_bins,
data_stats_errs,
bkg_all_bins,
bkg_stats_errs,
ratio_ax,
plottype="raw",
offscale_errs=True, # TODO: add as an option?
)
ratio_ax.set_ylim(0, 2)
ax.set_xlim(plot_config.range[0], plot_config.range[1])
ax.legend(loc="upper right", ncol=2)
if ac.plot_atlas_label:
ampl.plot.draw_atlas_label(
0.05, 0.95, ax=ax, **(ac.atlas_label.get_config_dict())
)
# Save lin/log plots
_, y_max = ax.get_ylim()
## save lin
ax.set_ylim(0, y_max * 1.4)
fig.savefig(
f"{save_dir}/{file_name}_node_{node}_lin.{plot_config.save_format}"
)
## save log
ax.set_yscale("log")
ax.set_ylim(
plot_config.logy_min,
y_max * np.power(10, np.log10(y_max / plot_config.logy_min) / 2),
)
fig.savefig(
f"{save_dir}/{file_name}_node_{node}_log.{plot_config.save_format}"
)
return 0 # success run
def plot_train_test_compare(
df: pd.DataFrame, job_config: ht.config, save_dir: ht.pathlike
):
"""Plots train/test datasets' cores distribution comparison"""
# initialize
logger.info("Plotting train/test scores.")
tc = job_config.train.clone()
ac = job_config.apply.clone()
plot_config = job_config.apply.cfg_train_test_compare
all_nodes = ["sig"] + tc.output_bkg_node_names
# get inputs
train_index = df["is_train"] == True
test_index = df["is_train"] == False
sig_index = (df["is_sig"] == True) & (df["is_mc"] == True)
bkg_index = (df["is_sig"] == False) & (df["is_mc"] == True)
xs_train_scores = df.loc[sig_index & train_index, ["y_pred"]].values
xs_test_scores = df.loc[sig_index & test_index, ["y_pred"]].values
xs_train_weight = df.loc[sig_index & train_index, ["weight"]].values
xs_test_weight = df.loc[sig_index & test_index, ["weight"]].values
xb_train_scores = df.loc[bkg_index & train_index, ["y_pred"]].values
xb_test_scores = df.loc[bkg_index & test_index, ["y_pred"]].values
xb_train_weight = df.loc[bkg_index & train_index, ["weight"]].values
xb_test_weight = df.loc[bkg_index & test_index, ["weight"]].values
# plot for each nodes
num_nodes = len(all_nodes)
for node_num in range(num_nodes):
fig, ax = plt.subplots()
# plot test scores
bkg_bins, bkg_edges = np.histogram(
xb_test_scores,
bins=plot_config.bins,
range=(0, 1),
weights=xb_test_weight,
density=plot_config.density,
)
bkg = ampl.plot.Background(
"background (test)", bkg_bins, color=plot_config.bkg_color
)
ampl.plot.plot_backgrounds([bkg], bkg_edges, ax=ax)
sig_bins, sig_edges = np.histogram(
xs_test_scores,
bins=plot_config.bins,
range=(0, 1),
weights=xs_test_weight,
density=plot_config.density,
)
ampl.plot.plot_signal(
"signal (test)", sig_edges, sig_bins, color=plot_config.sig_color
)
# plot train scores
## bkg
bkg_bins, bkg_edges = np.histogram(
xb_train_scores,
bins=plot_config.bins,
range=(0, 1),
weights=xb_train_weight,
density=plot_config.density,
)
sumw2, _ = np.histogram(
xb_train_scores,
bins=plot_config.bins,
range=(0, 1),
weights=np.power(xb_train_weight, 2),
)
bkg_stats_errs = np.sqrt(sumw2)
if plot_config.density:
norm_sum = np.sum(xb_train_weight) * (1 / plot_config.bins)
bkg_stats_errs /= norm_sum
err_x = 0.5 * (bkg_edges[:-1] + bkg_edges[1:])
ax.errorbar(
err_x,
bkg_bins,
bkg_stats_errs,
0.5 / plot_config.bins,
fmt=".k",
mfc=plot_config.bkg_color,
ms=10,
label="background (train)",
)
## sig
sig_bins, sig_edges = np.histogram(
xs_train_scores,
bins=plot_config.bins,
range=(0, 1),
weights=xs_train_weight,
density=plot_config.density,
)
sumw2, _ = np.histogram(
xs_train_scores,
bins=plot_config.bins,
range=(0, 1),
weights=np.power(xs_train_weight, 2),
)
sig_stats_errs = np.sqrt(sumw2)
if plot_config.density:
norm_sum = np.sum(xs_train_weight) * (1 / plot_config.bins)
sig_stats_errs /= norm_sum
err_x = 0.5 * (sig_edges[:-1] + sig_edges[1:])
ax.errorbar(
err_x,
sig_bins,
sig_stats_errs,
0.5 / plot_config.bins,
fmt=".k",
mfc=plot_config.sig_color,
ms=10,
label="signal (train)",
)
# final adjustments
ax.set_xlim(0, 1)
ax.legend(loc="upper right")
if ac.plot_atlas_label:
ampl.plot.draw_atlas_label(
0.05, 0.95, ax=ax, **(ac.atlas_label.get_config_dict())
)
ax.set_xlabel("DNN score")
# Save lin/log plots
file_name = f"mva_scores_{all_nodes[node_num]}"
_, y_max = ax.get_ylim()
## save lin
ax.set_ylim(0, y_max * 1.4)
fig.savefig(save_dir / f"{file_name}_lin.{plot_config.save_format}")
## save log
ax.set_yscale("log")
ax.set_ylim(
plot_config.logy_min, y_max * np.power(10, np.log10(y_max) / 2)
)
fig.savefig(save_dir / f"{file_name}_log.{plot_config.save_format}")
|
py | b40bdc64dd92e4897b3c4b0e48139ba68404deaa | # Copyright 2011 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import webob
from nova.api.openstack import common
from nova.api.openstack.compute import create_backup \
as create_backup_v21
from nova.compute import api
from nova import exception
from nova import test
from nova.tests.unit.api.openstack.compute import admin_only_action_common
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
class CreateBackupTestsV21(admin_only_action_common.CommonMixin,
test.NoDBTestCase):
create_backup = create_backup_v21
controller_name = 'CreateBackupController'
validation_error = exception.ValidationError
def setUp(self):
super(CreateBackupTestsV21, self).setUp()
self.controller = getattr(self.create_backup, self.controller_name)()
self.compute_api = self.controller.compute_api
patch_get = mock.patch.object(self.compute_api, 'get')
self.mock_get = patch_get.start()
self.addCleanup(patch_get.stop)
@mock.patch.object(common, 'check_img_metadata_properties_quota')
@mock.patch.object(api.API, 'backup')
def test_create_backup_with_metadata(self, mock_backup, mock_check_image):
metadata = {'123': 'asdf'}
body = {
'createBackup': {
'name': 'Backup 1',
'backup_type': 'daily',
'rotation': 1,
'metadata': metadata,
},
}
image = dict(id='fake-image-id', status='ACTIVE', name='Backup 1',
properties=metadata)
instance = fake_instance.fake_instance_obj(self.context)
self.mock_get.return_value = instance
mock_backup.return_value = image
res = self.controller._create_backup(self.req, instance.uuid,
body=body)
mock_check_image.assert_called_once_with(self.context, metadata)
mock_backup.assert_called_once_with(self.context, instance, 'Backup 1',
'daily', 1,
extra_properties=metadata)
self.assertEqual(202, res.status_int)
self.assertIn('fake-image-id', res.headers['Location'])
def test_create_backup_no_name(self):
# Name is required for backups.
body = {
'createBackup': {
'backup_type': 'daily',
'rotation': 1,
},
}
self.assertRaises(self.validation_error,
self.controller._create_backup,
self.req, fakes.FAKE_UUID, body=body)
def test_create_backup_name_with_leading_trailing_spaces(self):
body = {
'createBackup': {
'name': ' test ',
'backup_type': 'daily',
'rotation': 1,
},
}
self.assertRaises(self.validation_error,
self.controller._create_backup,
self.req, fakes.FAKE_UUID, body=body)
@mock.patch.object(common, 'check_img_metadata_properties_quota')
@mock.patch.object(api.API, 'backup')
def test_create_backup_name_with_leading_trailing_spaces_compat_mode(
self, mock_backup, mock_check_image):
body = {
'createBackup': {
'name': ' test ',
'backup_type': 'daily',
'rotation': 1,
},
}
image = dict(id='fake-image-id', status='ACTIVE', name='Backup 1',
properties={})
instance = fake_instance.fake_instance_obj(self.context)
self.mock_get.return_value = instance
mock_backup.return_value = image
self.req.set_legacy_v2()
self.controller._create_backup(self.req, instance.uuid,
body=body)
mock_check_image.assert_called_once_with(self.context, {})
mock_backup.assert_called_once_with(self.context, instance, 'test',
'daily', 1,
extra_properties={})
def test_create_backup_no_rotation(self):
# Rotation is required for backup requests.
body = {
'createBackup': {
'name': 'Backup 1',
'backup_type': 'daily',
},
}
self.assertRaises(self.validation_error,
self.controller._create_backup,
self.req, fakes.FAKE_UUID, body=body)
def test_create_backup_negative_rotation(self):
"""Rotation must be greater than or equal to zero
for backup requests
"""
body = {
'createBackup': {
'name': 'Backup 1',
'backup_type': 'daily',
'rotation': -1,
},
}
self.assertRaises(self.validation_error,
self.controller._create_backup,
self.req, fakes.FAKE_UUID, body=body)
def test_create_backup_negative_rotation_with_string_number(self):
body = {
'createBackup': {
'name': 'Backup 1',
'backup_type': 'daily',
'rotation': '-1',
},
}
self.assertRaises(self.validation_error,
self.controller._create_backup,
self.req, fakes.FAKE_UUID, body=body)
def test_create_backup_no_backup_type(self):
# Backup Type (daily or weekly) is required for backup requests.
body = {
'createBackup': {
'name': 'Backup 1',
'rotation': 1,
},
}
self.assertRaises(self.validation_error,
self.controller._create_backup,
self.req, fakes.FAKE_UUID, body=body)
def test_create_backup_non_dict_metadata(self):
body = {
'createBackup': {
'name': 'Backup 1',
'backup_type': 'daily',
'rotation': 1,
'metadata': 'non_dict',
},
}
self.assertRaises(self.validation_error,
self.controller._create_backup,
self.req, fakes.FAKE_UUID, body=body)
def test_create_backup_bad_entity(self):
body = {'createBackup': 'go'}
self.assertRaises(self.validation_error,
self.controller._create_backup,
self.req, fakes.FAKE_UUID, body=body)
@mock.patch.object(common, 'check_img_metadata_properties_quota')
@mock.patch.object(api.API, 'backup')
def test_create_backup_rotation_is_zero(self, mock_backup,
mock_check_image):
# The happy path for creating backups if rotation is zero.
body = {
'createBackup': {
'name': 'Backup 1',
'backup_type': 'daily',
'rotation': 0,
},
}
image = dict(id='fake-image-id', status='ACTIVE', name='Backup 1',
properties={})
instance = fake_instance.fake_instance_obj(self.context)
self.mock_get.return_value = instance
mock_backup.return_value = image
res = self.controller._create_backup(self.req, instance.uuid,
body=body)
mock_check_image.assert_called_once_with(self.context, {})
mock_backup.assert_called_once_with(self.context, instance, 'Backup 1',
'daily', 0,
extra_properties={})
self.assertEqual(202, res.status_int)
self.assertNotIn('Location', res.headers)
@mock.patch.object(common, 'check_img_metadata_properties_quota')
@mock.patch.object(api.API, 'backup')
def test_create_backup_rotation_is_positive(self, mock_backup,
mock_check_image):
# The happy path for creating backups if rotation is positive.
body = {
'createBackup': {
'name': 'Backup 1',
'backup_type': 'daily',
'rotation': 1,
},
}
image = dict(id='fake-image-id', status='ACTIVE', name='Backup 1',
properties={})
instance = fake_instance.fake_instance_obj(self.context)
self.mock_get.return_value = instance
mock_backup.return_value = image
res = self.controller._create_backup(self.req, instance.uuid,
body=body)
mock_check_image.assert_called_once_with(self.context, {})
mock_backup.assert_called_once_with(self.context, instance, 'Backup 1',
'daily', 1,
extra_properties={})
self.assertEqual(202, res.status_int)
self.assertIn('fake-image-id', res.headers['Location'])
@mock.patch.object(common, 'check_img_metadata_properties_quota')
@mock.patch.object(api.API, 'backup')
def test_create_backup_rotation_is_string_number(
self, mock_backup, mock_check_image):
body = {
'createBackup': {
'name': 'Backup 1',
'backup_type': 'daily',
'rotation': '1',
},
}
image = dict(id='fake-image-id', status='ACTIVE', name='Backup 1',
properties={})
instance = fake_instance.fake_instance_obj(self.context)
self.mock_get.return_value = instance
mock_backup.return_value = image
res = self.controller._create_backup(self.req, instance['uuid'],
body=body)
mock_check_image.assert_called_once_with(self.context, {})
mock_backup.assert_called_once_with(self.context, instance, 'Backup 1',
'daily', 1,
extra_properties={})
self.assertEqual(202, res.status_int)
self.assertIn('fake-image-id', res.headers['Location'])
@mock.patch.object(common, 'check_img_metadata_properties_quota')
@mock.patch.object(api.API, 'backup')
def test_create_backup_raises_conflict_on_invalid_state(self,
mock_backup, mock_check_image):
body_map = {
'createBackup': {
'name': 'Backup 1',
'backup_type': 'daily',
'rotation': 1,
},
}
instance = fake_instance.fake_instance_obj(self.context)
self.mock_get.return_value = instance
mock_backup.side_effect = exception.InstanceInvalidState(
attr='vm_state', instance_uuid=instance.uuid,
state='foo', method='backup')
ex = self.assertRaises(webob.exc.HTTPConflict,
self.controller._create_backup,
self.req, instance.uuid,
body=body_map)
self.assertIn("Cannot 'createBackup' instance %(id)s"
% {'id': instance.uuid}, ex.explanation)
@mock.patch.object(common, 'check_img_metadata_properties_quota')
def test_create_backup_with_non_existed_instance(self, mock_check_image):
body_map = {
'createBackup': {
'name': 'Backup 1',
'backup_type': 'daily',
'rotation': 1,
},
}
uuid = fakes.FAKE_UUID
self.mock_get.side_effect = exception.InstanceNotFound(
instance_id=uuid)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller._create_backup,
self.req, uuid, body=body_map)
mock_check_image.assert_called_once_with(self.context, {})
def test_create_backup_with_invalid_create_backup(self):
body = {
'createBackupup': {
'name': 'Backup 1',
'backup_type': 'daily',
'rotation': 1,
},
}
self.assertRaises(self.validation_error,
self.controller._create_backup,
self.req, fakes.FAKE_UUID, body=body)
@mock.patch.object(common, 'check_img_metadata_properties_quota')
@mock.patch.object(api.API, 'backup')
def test_backup_volume_backed_instance(self, mock_backup,
mock_check_image):
body = {
'createBackup': {
'name': 'BackupMe',
'backup_type': 'daily',
'rotation': 3
},
}
instance = fake_instance.fake_instance_obj(self.context)
instance.image_ref = None
self.mock_get.return_value = instance
mock_backup.side_effect = exception.InvalidRequest()
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._create_backup,
self.req, instance['uuid'], body=body)
mock_check_image.assert_called_once_with(self.context, {})
mock_backup.assert_called_once_with(self.context, instance, 'BackupMe',
'daily', 3,
extra_properties={})
class CreateBackupPolicyEnforcementv21(test.NoDBTestCase):
def setUp(self):
super(CreateBackupPolicyEnforcementv21, self).setUp()
self.controller = create_backup_v21.CreateBackupController()
self.req = fakes.HTTPRequest.blank('')
def test_create_backup_policy_failed(self):
rule_name = "os_compute_api:os-create-backup"
self.policy.set_rules({rule_name: "project:non_fake"})
metadata = {'123': 'asdf'}
body = {
'createBackup': {
'name': 'Backup 1',
'backup_type': 'daily',
'rotation': 1,
'metadata': metadata,
},
}
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller._create_backup, self.req, fakes.FAKE_UUID,
body=body)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
class CreateBackupTestsV239(test.NoDBTestCase):
def setUp(self):
super(CreateBackupTestsV239, self).setUp()
self.controller = create_backup_v21.CreateBackupController()
self.req = fakes.HTTPRequest.blank('', version='2.39')
@mock.patch.object(common, 'check_img_metadata_properties_quota')
@mock.patch.object(common, 'get_instance')
def test_create_backup_no_quota_checks(self, mock_get_instance,
mock_check_quotas):
# 'mock_get_instance' helps to skip the whole logic of the action;
# raising HTTPNotFound here ends the request early, so the test can
# assert that no quota check was ever performed.
mock_get_instance.side_effect = webob.exc.HTTPNotFound
metadata = {'123': 'asdf'}
body = {
'createBackup': {
'name': 'Backup 1',
'backup_type': 'daily',
'rotation': 1,
'metadata': metadata,
},
}
self.assertRaises(webob.exc.HTTPNotFound,
self.controller._create_backup, self.req,
fakes.FAKE_UUID, body=body)
# Starting from version 2.39, no quota checks are performed on the Nova
# side for the 'createBackup' action, after the 'image-metadata' proxy
# API was removed.
mock_check_quotas.assert_not_called()
|
py | b40bdc940866c8a02072524e96004816b6e28fc7 | # MJPEG Streaming with FIR
#
# This example shows off how to do MJPEG streaming to a FIREFOX web browser
# (IE and Chrome do not work). Just input your network SSID and KEY and then
# connect to the IP address/port printed out from ifconfig.
import sensor, image, network, usocket, fir
SSID='' # Network SSID
KEY='' # Network key
HOST = '' # Use first available interface
PORT = 8000 # Arbitrary non-privileged port
# Reset sensor
sensor.reset()
# Set sensor settings
sensor.set_contrast(1)
sensor.set_brightness(1)
sensor.set_saturation(1)
sensor.set_gainceiling(16)
sensor.set_framesize(sensor.QQVGA)
sensor.set_pixformat(sensor.RGB565)
# Initialize the thermal sensor
fir.init()
# Init wlan module and connect to network
print("Trying to connect... (may take a while)...")
wlan = network.WINC()
wlan.connect(SSID, key=KEY, security=wlan.WPA_PSK)
# We should have a valid IP now via DHCP
print(wlan.ifconfig())
# Create server socket
s = usocket.socket(usocket.AF_INET, usocket.SOCK_STREAM)
# Bind and listen
s.bind((HOST, PORT))
s.listen(5)
# Set timeout to 1s
s.settimeout(1.0)
print ('Waiting for connections..')
client, addr = s.accept()
print ('Connected to ' + addr[0] + ':' + str(addr[1]))
# Read request from client
data = client.recv(1024)
# Should parse client request here
# Send multipart header
client.send("HTTP/1.1 200 OK\r\n" \
"Server: OpenMV\r\n" \
"Content-Type: multipart/x-mixed-replace;boundary=openmv\r\n" \
"Cache-Control: no-cache\r\n" \
"Pragma: no-cache\r\n\r\n")
# Start streaming images
while (True):
image = sensor.snapshot()
# Capture FIR data
# ta: Ambient temperature
# ir: Object temperatures (IR array)
# to_min: Minimum object temperature
# to_max: Maximum object temperature
ta, ir, to_min, to_max = fir.read_ir()
# Scale the image and blend it with the framebuffer
fir.draw_ir(image, ir)
# Draw ambient, min and max temperatures.
image.draw_string(0, 0, "Ta: %0.2f"%ta, color = (0xFF, 0x00, 0x00))
image.draw_string(0, 8, "To min: %0.2f"%to_min, color = (0xFF, 0x00, 0x00))
image.draw_string(0, 16, "To max: %0.2f"%to_max, color = (0xFF, 0x00, 0x00))
cimage = image.compressed(quality=90)
client.send("\r\n--openmv\r\n" \
"Content-Type: image/jpeg\r\n"\
"Content-Length:"+str(cimage.size())+"\r\n\r\n")
client.send(cimage)
client.close()
|
py | b40bdddc63b1d1d23c93c372004cead8a8dd5072 | #!/Users/rogerurrutia/Documents/GitHub/Programacion-III/Proyecto Final/Duplicados/bin/python
# Author:
# Contact: [email protected]
# Copyright: This module has been placed in the public domain.
"""
man.py
======
This module provides a simple command line interface that uses the
man page writer to output from ReStructuredText source.
"""
import locale
try:
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
from docutils.writers import manpage
description = ("Generates plain unix manual documents. " + default_description)
publish_cmdline(writer=manpage.Writer(), description=description)
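# Typical invocation (a hedged example; docutils front ends read from a file
# or stdin and write to a file or stdout):
#
#     python man.py mytool.rst mytool.1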
|
py | b40bdf91571b861293989c68439365e3e8f51b5e | # -*- coding: utf-8 -*-
# the idea is to plot test (benchmark) results with C time at x axis and
# language time at y axis; different languages by different colours, different
# tests by different marker types
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import gridspec
# provides dictionary with results
import results
lw = 0.7 # linewidth
ms = 7 # marker size
mew = 1.25 # marker edge width
m = 1.2 # additional space in xlim is proportional to m - 1
defaultstyle = {'markersize': ms, 'markeredgewidth': 0}
# kwargs which define different style for different languages
langstyle = {'C': {'color': 'DodgerBlue'}, 'C++': {'color': 'DeepSkyBlue'},
'Julia': {'color': 'DeepPink'}, 'Pypy': {'color': 'YellowGreen'},
'Python': {'color': 'Gold'}, 'Haskell': {'color': 'BlueViolet'}, 'R' :
{'color': 'Gray'}, 'C#': {'color': 'Thistle'}, 'Rust': {'color': 'none',
'markeredgecolor': 'DarkOrange', 'markeredgewidth': mew}}
# kwargs which define different style for different tests
teststyle = {'generation': {'marker': 'o'}, 'output': {'marker': '>'}, 'input':
{'marker': '<'}}
myset = results.set3
print(myset)
f = plt.figure(figsize=(6.65, 5.9), dpi = 300)
gs = gridspec.GridSpec(2, 2)
gs.update(left=0.09, right=0.98, bottom=0.08, top=0.99, wspace = 0.17)
font = {'family' : 'Liberation Serif',
'weight' : 'normal',
'size' : 10}
matplotlib.rc('font', **font)
matplotlib.rc('lines', linewidth=lw)
matplotlib.rc('axes', linewidth=lw)
def plotPoints(yscale, rset):
maxctime = 0
for testname in rset:
print(testname)
ctime = rset[testname][0]
if ctime > maxctime:
maxctime = ctime
style = {key: value for key, value in defaultstyle.items()}
style.update(langstyle['C'])
style.update(teststyle[testname])
plt.plot(ctime, ctime, **style)
for langname in rset[testname][1]:
print(' ' * 4 + langname)
for langtime in rset[testname][1][langname]:
print(' ' * 8 + str(langtime))
style = {key: value for key, value in defaultstyle.items()}
style.update(langstyle[langname])
style.update(teststyle[testname])
plt.plot(ctime, langtime, **style)
plt.xlim(0, maxctime * m)
plt.ylim(0, maxctime * m * yscale)
# makes y-range of plots wider
yscales = [1.5, 4, 11, 50]
for i in range(4):
plt.subplot(gs[int(i / 2), i % 2])
plotPoints(yscales[i], myset)
plt.xlabel('C time, s')
plt.ylabel('lang time, s')
plt.savefig('results.png')
|
py | b40bdfd650c53715cc151b8757e8ea4313ea806b | from taxstats.core import (taxstats)
from taxstats.utils import (parse_docs, create_labels) |
py | b40be0c3fc4d66fefa87a15779e7df34196e2846 | #
# Author: Dlo Bagari
# created Date: 14-11-2019
import logging
class Logger:
def __init__(self, log_directory):
self._log_directory = log_directory
self._log = logging
self._set_config()
def _set_config(self):
file_name = self._log_directory + "/consumer_service.log"
log_format = '%(asctime)s - %(levelname)s - %(message)s'
self._log.basicConfig(filename=file_name,
level=logging.INFO,
format=log_format)
def warning(self, class_name, message):
message = "{}: {}".format(class_name.__class__.__name__, message)
self._log.warning(message)
def critical(self, class_name, message):
message = "{}: {}".format(class_name.__class__.__name__, message)
self._log.critical(message)
def error(self, class_name, message):
message = "{}: {}".format(class_name.__class__.__name__, message)
self._log.error(message)
def debug(self, class_name, message):
message = "{}: {}".format(class_name.__class__.__name__, message)
self._log.debug(message)
def info(self, class_name, message):
message = "{}: {}".format(class_name.__class__.__name__, message)
self._log.info(message)
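# --- Illustrative usage sketch (not part of the original module) ---
# "/tmp" is a placeholder log directory; note that each method expects the
# calling *object* (not a string) as its first argument, because the message
# is tagged with class_name.__class__.__name__.
if __name__ == "__main__":
    class _DemoCaller:
        """Placeholder caller; only its class name ends up in the log line."""

    demo_logger = Logger("/tmp")  # writes /tmp/consumer_service.log
    demo_logger.info(_DemoCaller(), "consumer service started")
    demo_logger.error(_DemoCaller(), "example error message")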
|
py | b40be0ff4c2df1cd93bf7229194cfb43ec5a6d82 | from sqlalchemy.inspection import inspect
from typing import Union, List, Callable
from qkoubot.models import Session, Info, Cancel, News
T = Union[Info, Cancel, News]
ARGS = Union[str, int]
def query_to_dict(result) -> dict:
"""
Convert to dict from given model object such as models.Subject or models.Info and so on.
Args:
result: Subject, Info, Cancel, News object which was got by query
Returns:
Dictionary of given objects property.
"""
res = {}
instance = inspect(result)
for key, obj in instance.attrs.items():
if key != "id":
res[key] = obj.value
return res
def insert_all(model: Callable[[ARGS], T], data_list: List[dict]) -> bool:
"""
Insert test data based on given model.
Args:
model: Info, News, Cancel, Subject model
data_list: list of dict of data
Returns:
success is True, fail is False
"""
try:
with Session() as session:
insert = [model(**data_dict) for data_dict in data_list]
session.add_all(insert)
session.commit()
return True
except Exception:
return False
def delete_all(model: T) -> bool:
"""
Delete test data based on given model.
Args:
model: Info, News, Cancel, Subject model
Returns:
success is True, fail is False
"""
try:
with Session() as session:
exists = session.query(model).all()
for exist in exists:
session.delete(exist)
session.commit()
return True
except Exception:
return False
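# --- Illustrative usage sketch (not part of the original module) ---
# The row dictionaries are placeholders whose keys must match the columns of
# the chosen model (defined in qkoubot.models):
#
#     rows = [{...}]                   # dicts matching Info's columns
#     insert_all(Info, rows)           # True on success, False on any exception
#     with Session() as session:
#         dicts = [query_to_dict(r) for r in session.query(Info).all()]
#     delete_all(Info)                 # removes every stored Info row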
|
py | b40be109d33742b322fe09d66455510d49a94d0f | from rest_framework import serializers
from .models import (
CyclicCommissionCategory,
CyclicCommissionItem,
)
class CyclicCommissionCategorySerializer(serializers.ModelSerializer):
class Meta:
model = CyclicCommissionCategory
fields = '__all__'
depth = 1
class CyclicCommissionItemSerializer(serializers.ModelSerializer):
class Meta:
model = CyclicCommissionItem
fields = '__all__'
depth = 1
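# --- Illustrative usage sketch (not part of the original module) ---
# Standard DRF read path for the serializers above (assumes Django is
# configured for this app); the variable names are placeholders:
#
#     categories = CyclicCommissionCategory.objects.all()
#     data = CyclicCommissionCategorySerializer(categories, many=True).data
#
# Because `fields = '__all__'` is combined with `depth = 1`, related objects
# are nested one level deep instead of being returned as bare primary keys.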
|
py | b40be1175793cba853f5e34ac1737501fa22cbbe |
class CoffeeMaker:
def __init__(self):
self.__resources = {
"water": 300,
"milk": 200,
"coffee": 100,
}
def report(self):
print(f"Water: {self.__resources['water']}ml")
print(f"Milk: {self.__resources['milk']}ml")
print(f"Coffee: {self.__resources['coffee']}g")
def is_resource_sufficient(self, drink):
can_make = True
for item in drink.ingredients:
if drink.ingredients[item] > self.__resources[item]:
print(f"Sorry there is not enough {item}.")
can_make = False
return can_make
def make_coffee(self, order):
for item in order.ingredients:
self.__resources[item] -= order.ingredients[item]
print(f"Here is your {order.name} ☕️. Enjoy!")
|
py | b40be2535e11c4388dc84bc8adb119cdc424e251 | # Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
#
# Asena UserBot - Yusuf Usta
""" Olayları yönetmek için UserBot modülü.
UserBot'un ana bileşenlerinden biri. """
import sys
from asyncio import create_subprocess_shell as asyncsubshell
from asyncio import subprocess as asyncsub
from os import remove
from time import gmtime, strftime
from traceback import format_exc
from telethon import events
from userbot import bot, BOTLOG_CHATID, LOGSPAMMER, PATTERNS
def register(**args):
""" Yeni bir etkinlik kaydedin. """
pattern = args.get('pattern', None)
disable_edited = args.get('disable_edited', False)
groups_only = args.get('groups_only', False)
trigger_on_fwd = args.get('trigger_on_fwd', False)
trigger_on_inline = args.get('trigger_on_inline', False)
disable_errors = args.get('disable_errors', False)
if pattern:
args["pattern"] = pattern.replace("^.", "^["+ PATTERNS + "]")
if "disable_edited" in args:
del args['disable_edited']
if "ignore_unsafe" in args:
del args['ignore_unsafe']
if "groups_only" in args:
del args['groups_only']
if "disable_errors" in args:
del args['disable_errors']
if "trigger_on_fwd" in args:
del args['trigger_on_fwd']
if "trigger_on_inline" in args:
del args['trigger_on_inline']
def decorator(func):
async def wrapper(check):
if not LOGSPAMMER:
send_to = check.chat_id
else:
send_to = BOTLOG_CHATID
if not trigger_on_fwd and check.fwd_from:
return
if check.via_bot_id and not trigger_on_inline:
return
if groups_only and not check.is_group:
await check.respond("`Bunun bir grup olduğunu sanmıyorum.`")
return
try:
await func(check)
except events.StopPropagation:
raise events.StopPropagation
except KeyboardInterrupt:
pass
except BaseException:
if not disable_errors:
date = strftime("%Y-%m-%d %H:%M:%S", gmtime())
text = "**USERBOT HATA RAPORU**\n"
link = "[NOXUS](https://t.me/Noxus_0)"
text += "İsterseniz, bunu rapor edebilirsiniz"
text += f"- sadece bu mesajı buraya iletin {link}.\n"
text += "Hata ve Tarih dışında hiçbir şey kaydedilmez\n"
ftext = "========== UYARI =========="
ftext += "\nBu dosya sadece burada yüklendi,"
ftext += "\nsadece hata ve tarih kısmını kaydettik,"
ftext += "\ngizliliğinize saygı duyuyoruz,"
ftext += "\nburada herhangi bir gizli veri varsa"
ftext += "\nbu hata raporu olmayabilir, kimse verilerinize ulaşamaz.\n"
ftext += "================================\n\n"
ftext += "--------USERBOT HATA GUNLUGU--------\n"
ftext += "\nTarih: " + date
ftext += "\nGrup ID: " + str(check.chat_id)
ftext += "\nGönderen kişinin ID: " + str(check.sender_id)
ftext += "\n\nOlay Tetikleyici:\n"
ftext += str(check.text)
ftext += "\n\nGeri izleme bilgisi:\n"
ftext += str(format_exc())
ftext += "\n\nHata metni:\n"
ftext += str(sys.exc_info()[1])
ftext += "\n\n--------USERBOT HATA GUNLUGU BITIS--------"
command = "git log --pretty=format:\"%an: %s\" -10"
ftext += "\n\n\nSon 10 commit:\n"
process = await asyncsubshell(command,
stdout=asyncsub.PIPE,
stderr=asyncsub.PIPE)
stdout, stderr = await process.communicate()
result = str(stdout.decode().strip()) \
+ str(stderr.decode().strip())
ftext += result
file = open("error.log", "w+")
file.write(ftext)
file.close()
if LOGSPAMMER:
await check.client.respond("`Üzgünüm, UserBot'um çöktü.\
\nHata günlükleri UserBot günlük grubunda saklanır.`")
await check.client.send_file(send_to,
"error.log",
caption=text)
remove("error.log")
else:
pass
if not disable_edited:
bot.add_event_handler(wrapper, events.MessageEdited(**args))
bot.add_event_handler(wrapper, events.NewMessage(**args))
return wrapper
return decorator
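# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of how a plugin module would use the decorator above; the
# ".ping" pattern and handler body are invented, and `event.edit` follows the
# usual Telethon API:
#
#     @register(pattern="^.ping$", outgoing=True)
#     async def ping(event):
#         await event.edit("`pong`")
#
# Keyword arguments that register() does not strip (such as `outgoing`) are
# passed straight through to Telethon's events.NewMessage/MessageEdited, and
# the leading "^." is rewritten to match any prefix listed in PATTERNS.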
|
py | b40be26418fcddb4a14e9f587c76f9dd0f5a3335 | #!/usr/bin/env python
"""Metric implementations to collect statistics."""
import abc
from grr_response_core.lib.rdfvalues import stats as rdf_stats
from grr_response_core.stats import stats_collector_instance
from grr_response_core.stats import stats_utils
class AbstractMetric(metaclass=abc.ABCMeta):
"""An abstract metric with a name, fields, and values.
Refer to default_stats_collector._Metric and DefaultStatsCollector to
see how StatsCollector handles the field definitions and values.
Attributes:
name: string containing the global metric name.
"""
def __init__(self, metadata):
"""Initializes a new metric and registers it with the StatsCollector."""
self.name = metadata.varname
stats_collector_instance.RegisterMetric(metadata)
def GetValue(self, fields=None):
"""Returns the value of a given metric for given field values."""
return stats_collector_instance.Get().GetMetricValue(
self.name, fields=fields)
def GetFields(self):
"""Returns all field values for the given metric."""
return stats_collector_instance.Get().GetMetricFields(self.name)
class Counter(AbstractMetric):
"""A Counter metric that can be incremented.
Refer to default_stats_collector._CounterMetric and DefaultStatsCollector to
see how StatsCollector handles the field definitions and values.
"""
def __init__(self, name, fields=(), docstring=None, units=None):
"""Initializes a Counter metric and registers it with the StatsCollector."""
super().__init__(
rdf_stats.MetricMetadata(
varname=name,
metric_type=rdf_stats.MetricMetadata.MetricType.COUNTER,
value_type=rdf_stats.MetricMetadata.ValueType.INT,
fields_defs=stats_utils.FieldDefinitionProtosFromTuples(fields),
docstring=docstring,
units=units))
def Increment(self, delta=1, fields=None):
"""Increments a counter metric by a given delta."""
stats_collector_instance.Get().IncrementCounter(
self.name, delta, fields=fields)
def Counted(self, fields=None):
"""Returns a decorator that counts function calls."""
return stats_utils.Counted(self, fields=fields)
def SuccessesCounted(self, fields=None):
"""Returns a decorator that counts calls that don't raise an exception."""
return stats_utils.SuccessesCounted(self, fields=fields)
def ErrorsCounted(self, fields=None):
"""Returns a decorator that counts calls that raise an exception."""
return stats_utils.ErrorsCounted(self, fields=fields)
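# Illustrative usage sketch (not part of the original module); the metric name,
# field tuple and collector setup below are assumptions for demonstration only,
# so the example is kept in comments:
#
#   HTTP_REQUESTS = Counter("http_requests", fields=[("method", str)],
#                           docstring="Number of handled HTTP requests")
#   HTTP_REQUESTS.Increment(fields=["GET"])
#   total_get_requests = HTTP_REQUESTS.GetValue(fields=["GET"])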
class Gauge(AbstractMetric):
"""A Gauge metric that can be set to a value.
Refer to default_stats_collector._GaugeMetric and DefaultStatsCollector to
see how StatsCollector handles the field definitions and values.
"""
def __init__(self, name, value_type, fields=(), docstring=None, units=None):
"""Initializes a Gauge metric and registers it with the StatsCollector."""
super().__init__(
rdf_stats.MetricMetadata(
varname=name,
metric_type=rdf_stats.MetricMetadata.MetricType.GAUGE,
value_type=stats_utils.MetricValueTypeFromPythonType(value_type),
fields_defs=stats_utils.FieldDefinitionProtosFromTuples(fields),
docstring=docstring,
units=units))
def SetValue(self, value, fields=None):
"""Sets value of a given gauge metric."""
stats_collector_instance.Get().SetGaugeValue(
self.name, value, fields=fields)
def SetCallback(self, callback, fields=None):
"""Attaches a callback to the given gauge metric."""
stats_collector_instance.Get().SetGaugeCallback(
self.name, callback, fields=fields)
class Event(AbstractMetric):
"""An Event metric that records timings of events.
Refer to default_stats_collector._EventMetric and DefaultStatsCollector to
see how StatsCollector handles the field definitions and values.
"""
def __init__(self, name, bins=(), fields=(), docstring=None, units=None):
"""Initializes an Event metric and registers it with the StatsCollector."""
super().__init__(
rdf_stats.MetricMetadata(
varname=name,
bins=bins,
metric_type=rdf_stats.MetricMetadata.MetricType.EVENT,
value_type=rdf_stats.MetricMetadata.ValueType.DISTRIBUTION,
fields_defs=stats_utils.FieldDefinitionProtosFromTuples(fields),
docstring=docstring,
units=units))
def RecordEvent(self, value, fields=None):
"""Records value corresponding to the given event metric."""
stats_collector_instance.Get().RecordEvent(self.name, value, fields=fields)
def Timed(self, fields=None):
"""Returns a decorator that records timing metrics for function calls."""
return stats_utils.Timed(self, fields=fields)
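# Illustrative usage sketch (not part of the original module); the metric name
# and bin edges are assumptions for demonstration only, so the example is kept
# in comments:
#
#   LATENCY = Event("request_latency", bins=[0.1, 0.5, 1.0],
#                   docstring="Request latency distribution")
#
#   @LATENCY.Timed()
#   def HandleRequest():
#       ...  # each call's duration is recorded into the distribution bins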
|
py | b40be2b5e512d9d4e968c12a6111da2095444101 | from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Font(_BaseLayoutHierarchyType):
# class properties
# --------------------
_parent_path_str = "layout.title"
_path_str = "layout.title.font"
_valid_props = {"color", "family", "size"}
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
studio.new_plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
supported. These include "Arial", "Balto", "Courier New",
"Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
"""
def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
"""
Construct a new Font object
Sets the title font. Note that the title's font used to be
customized by the now deprecated `titlefont` attribute.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`new_plotly.graph_objs.layout.title.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
Returns
-------
Font
"""
super(Font, self).__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the new_plotly.graph_objs.layout.title.Font
constructor must be a dict or
an instance of :class:`new_plotly.graph_objs.layout.title.Font`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("family", None)
_v = family if family is not None else _v
if _v is not None:
self["family"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
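# Illustrative usage sketch (not part of the original module): in the upstream
# plotly API this class is normally reached through a figure's layout rather
# than constructed directly, roughly like
#
#   import plotly.graph_objects as go
#   fig = go.Figure()
#   fig.update_layout(title=dict(text="Report",
#                                font=dict(family="Arial", size=24, color="#444")))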
|
py | b40be2b9483dabc343d563a69c0d2496b3beba77 | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class PyAzureLoganalytics(PythonPackage):
"""Microsoft Azure Log Analytics Client Library for Python."""
homepage = "https://github.com/Azure/azure-sdk-for-python"
pypi = "azure-loganalytics/azure-loganalytics-0.1.0.zip"
version('0.1.0', sha256='3ceb350def677a351f34b0a0d1637df6be0c6fe87ff32a5270b17f540f6da06e')
depends_on('py-setuptools', type='build')
depends_on('[email protected]:1', type=('build', 'run'))
depends_on('[email protected]:1', type=('build', 'run'))
|
py | b40be2c1fa28116588fceb37d53fbfb37178cd67 | # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from .r2n2 import R2N2
from .utils import BlenderCamera, collate_batched_R2N2, render_cubified_voxels
__all__ = [k for k in globals().keys() if not k.startswith("_")]
|
py | b40be2df291d6bc8e8a8497025fbfeb0068ed26b | import requests
from time import sleep
API_URI = "http://84.201.158.229:8080/api/send_message"
FILENAME = "dialog_output.txt"
def send_message(message_text, message_history):
response = requests.post(API_URI, json={"message_text": message_text, "message_history": message_history})
return response.json()["text"]
def print_to_file_decorator(func):
def new_func(*args, **kwargs):
func(*args, **kwargs)
with open(FILENAME, "a") as f:
f.write(' '.join(list(map(str, args))) + "\n")
return new_func
class BotClient:
def __init__(self, message_history=None):
self.message_history = message_history if message_history else []
def send_message(self, message):
response = send_message(message, self.message_history)
self.message_history.append(message)
self.message_history.append(response)
return response
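# Illustrative usage sketch (not part of the original script); the persona
# string is an assumption for demonstration only, so the example is kept in
# comments:
#
#   bot = BotClient(["your persona: i am a friendly photographer"])
#   reply = bot.send_message("Hello!")  # POSTs to API_URI, returns the reply text
#   # bot.message_history now ends with ["Hello!", reply]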
class BotConversation:
def __init__(self, first_bot: BotClient, second_bot: BotClient):
self.first_bot = first_bot
self.second_bot = second_bot
self.initial_message = "Hello!"
self.message_timeout = 2
def start(self):
print(f"Bot 2: {self.initial_message}")
self.second_bot.message_history.append(self.initial_message)
first_response = self.first_bot.send_message(self.initial_message)
print(f"Bot 1: {first_response}")
while True:
second_response = self.second_bot.send_message(first_response)
print(f"Bot 2: {second_response}")
sleep(self.message_timeout)
first_response = self.first_bot.send_message(second_response)
print(f"Bot 1: {first_response}")
sleep(self.message_timeout)
open(FILENAME, "w")
print = print_to_file_decorator(print)
FirstBot = BotClient(["your persona: i hate you shit fuck you i am 23 and i work as photographer"])
SecondBot = BotClient(["your persona: i hate you you fucking bitch i am 17 and i am in school my hobby is airplanes"])
print("# INITIAL CONTEXTS #")
print("Bot 1:", FirstBot.message_history)
print("Bot 2:", SecondBot.message_history)
print()
print("# DIALOG #")
MainBotConversation = BotConversation(FirstBot, SecondBot)
MainBotConversation.start()
|
py | b40be307e7e54e3201e907cf98574309458ed77e | import os
import json
from tqdm import tqdm
from pycocotools.coco import COCO
def main():
bbox_dir = '/home/ruichen/Documents/Documents_from_ubuntu_1604/AiFi_model_save/1000skus/synthetic_1000skus_test'
bbox = os.path.join(bbox_dir, 'bbox_modify.json')
# coco = COCO(bbox)
with open(bbox, 'r') as fp:
bbox_json = json.load(fp)
print("--- json file loaded")
print("--- with length:", len(bbox_json))
pbar = tqdm(total=len(bbox_json))
for prediction in bbox_json:
# step 1: xywh to xyxy
x, y, w, h = prediction["bbox"]
prediction["bbox"] = [x, y, x + w, y + h]
# step 2: if id >= 52, id --
if prediction["category_id"] >= 52:
prediction["category_id"] -= 1
pbar.update(1)
pbar.close()
with open('{}/bbox_fix_done.json'.format(bbox_dir), 'w') as output_json_file:
json.dump(bbox_json, output_json_file)
print("--- bbox fix saved")
print("--- with length:", len(bbox_json))
if __name__ == '__main__':
main()
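# Illustrative example (not part of the original script) of what main() does to
# a single prediction entry:
#   {"bbox": [10, 20, 30, 40], "category_id": 53}
#   -> {"bbox": [10, 20, 40, 60], "category_id": 52}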
|
py | b40be4a57af40062868481b81cac5d967521d06f | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch
from mmcv.parallel import DataContainer as DC
from mmcv.utils import assert_dict_has_keys
from mmaction.datasets.pipelines import (Collect, FormatAudioShape,
FormatGCNInput, FormatShape,
ImageToTensor, Rename,
ToDataContainer, ToTensor, Transpose)
def test_rename():
org_name = 'a'
new_name = 'b'
mapping = {org_name: new_name}
rename = Rename(mapping)
results = dict(a=2)
results = rename(results)
assert results['b'] == 2
assert 'a' not in results
def test_to_tensor():
to_tensor = ToTensor(['str'])
with pytest.raises(TypeError):
# str cannot be converted to tensor
results = dict(str='0')
to_tensor(results)
# convert tensor, numpy, squence, int, float to tensor
target_keys = ['tensor', 'numpy', 'sequence', 'int', 'float']
to_tensor = ToTensor(target_keys)
original_results = dict(
tensor=torch.randn(2, 3),
numpy=np.random.randn(2, 3),
sequence=list(range(10)),
int=1,
float=0.1)
results = to_tensor(original_results)
assert assert_dict_has_keys(results, target_keys)
for key in target_keys:
assert isinstance(results[key], torch.Tensor)
assert torch.equal(results[key].data, original_results[key])
# Add an additional key which is not in keys.
original_results = dict(
tensor=torch.randn(2, 3),
numpy=np.random.randn(2, 3),
sequence=list(range(10)),
int=1,
float=0.1,
str='test')
results = to_tensor(original_results)
assert assert_dict_has_keys(results, target_keys)
for key in target_keys:
assert isinstance(results[key], torch.Tensor)
assert torch.equal(results[key].data, original_results[key])
assert repr(to_tensor) == to_tensor.__class__.__name__ + \
f'(keys={target_keys})'
def test_to_data_container():
# check user-defined fields
fields = (dict(key='key1', stack=True), dict(key='key2'))
to_data_container = ToDataContainer(fields=fields)
target_keys = ['key1', 'key2']
original_results = dict(key1=np.random.randn(10, 20), key2=['a', 'b'])
results = to_data_container(original_results.copy())
assert assert_dict_has_keys(results, target_keys)
for key in target_keys:
assert isinstance(results[key], DC)
assert np.all(results[key].data == original_results[key])
assert results['key1'].stack
assert not results['key2'].stack
# Add an additional key which is not in keys.
original_results = dict(
key1=np.random.randn(10, 20), key2=['a', 'b'], key3='value3')
results = to_data_container(original_results.copy())
assert assert_dict_has_keys(results, target_keys)
for key in target_keys:
assert isinstance(results[key], DC)
assert np.all(results[key].data == original_results[key])
assert results['key1'].stack
assert not results['key2'].stack
assert repr(to_data_container) == (
to_data_container.__class__.__name__ + f'(fields={fields})')
def test_image_to_tensor():
original_results = dict(imgs=np.random.randn(256, 256, 3))
keys = ['imgs']
image_to_tensor = ImageToTensor(keys)
results = image_to_tensor(original_results)
assert results['imgs'].shape == torch.Size([3, 256, 256])
assert isinstance(results['imgs'], torch.Tensor)
assert torch.equal(results['imgs'].data, original_results['imgs'])
assert repr(image_to_tensor) == image_to_tensor.__class__.__name__ + \
f'(keys={keys})'
def test_transpose():
results = dict(imgs=np.random.randn(256, 256, 3))
keys = ['imgs']
order = [2, 0, 1]
transpose = Transpose(keys, order)
results = transpose(results)
assert results['imgs'].shape == (3, 256, 256)
assert repr(transpose) == transpose.__class__.__name__ + \
f'(keys={keys}, order={order})'
def test_collect():
inputs = dict(
imgs=np.random.randn(256, 256, 3),
label=[1],
filename='test.txt',
original_shape=(256, 256, 3),
img_shape=(256, 256, 3),
pad_shape=(256, 256, 3),
flip_direction='vertical',
img_norm_cfg=dict(to_bgr=False))
keys = ['imgs', 'label']
collect = Collect(keys)
results = collect(inputs)
assert sorted(list(results.keys())) == sorted(
['imgs', 'label', 'img_metas'])
imgs = inputs.pop('imgs')
assert set(results['img_metas'].data) == set(inputs)
for key in results['img_metas'].data:
assert results['img_metas'].data[key] == inputs[key]
assert repr(collect) == collect.__class__.__name__ + \
(f'(keys={keys}, meta_keys={collect.meta_keys}, '
f'nested={collect.nested})')
inputs['imgs'] = imgs
collect = Collect(keys, nested=True)
results = collect(inputs)
assert sorted(list(results.keys())) == sorted(
['imgs', 'label', 'img_metas'])
for k in results:
assert isinstance(results[k], list)
def test_format_shape():
with pytest.raises(ValueError):
# invalid input format
FormatShape('NHWC')
# 'NCHW' input format
results = dict(
imgs=np.random.randn(3, 224, 224, 3), num_clips=1, clip_len=3)
format_shape = FormatShape('NCHW')
assert format_shape(results)['input_shape'] == (3, 3, 224, 224)
# `NCTHW` input format with num_clips=1, clip_len=3
results = dict(
imgs=np.random.randn(3, 224, 224, 3), num_clips=1, clip_len=3)
format_shape = FormatShape('NCTHW')
assert format_shape(results)['input_shape'] == (1, 3, 3, 224, 224)
# `NCTHW` input format with num_clips=2, clip_len=3
results = dict(
imgs=np.random.randn(18, 224, 224, 3), num_clips=2, clip_len=3)
assert format_shape(results)['input_shape'] == (6, 3, 3, 224, 224)
target_keys = ['imgs', 'input_shape']
assert assert_dict_has_keys(results, target_keys)
assert repr(format_shape) == format_shape.__class__.__name__ + \
"(input_format='NCTHW')"
# 'NPTCHW' input format
results = dict(
imgs=np.random.randn(72, 224, 224, 3),
num_clips=9,
clip_len=1,
num_proposals=8)
format_shape = FormatShape('NPTCHW')
assert format_shape(results)['input_shape'] == (8, 9, 3, 224, 224)
def test_format_audio_shape():
with pytest.raises(ValueError):
# invalid input format
FormatAudioShape('XXXX')
# 'NCTF' input format
results = dict(audios=np.random.randn(3, 128, 8))
format_shape = FormatAudioShape('NCTF')
assert format_shape(results)['input_shape'] == (3, 1, 128, 8)
assert repr(format_shape) == format_shape.__class__.__name__ + \
"(input_format='NCTF')"
def test_format_gcn_input():
with pytest.raises(ValueError):
# invalid input format
FormatGCNInput('XXXX')
# 'NCTVM' input format
results = dict(
keypoint=np.random.randn(2, 300, 17, 2),
keypoint_score=np.random.randn(2, 300, 17))
format_shape = FormatGCNInput('NCTVM', num_person=2)
assert format_shape(results)['input_shape'] == (3, 300, 17, 2)
assert repr(format_shape) == format_shape.__class__.__name__ + \
"(input_format='NCTVM')"
# test real num_person < 2
results = dict(
keypoint=np.random.randn(1, 300, 17, 2),
keypoint_score=np.random.randn(1, 300, 17))
assert format_shape(results)['input_shape'] == (3, 300, 17, 2)
assert repr(format_shape) == format_shape.__class__.__name__ + \
"(input_format='NCTVM')"
# test real num_person > 2
results = dict(
keypoint=np.random.randn(3, 300, 17, 2),
keypoint_score=np.random.randn(3, 300, 17))
assert format_shape(results)['input_shape'] == (3, 300, 17, 2)
assert repr(format_shape) == format_shape.__class__.__name__ + \
"(input_format='NCTVM')"
|
py | b40be6723bc892d12e3c2d45d8d13ed963f17deb | from unittest import TestCase
from keystore import FlashKeyStore
import os, json
import platform
TEST_DIR = "testdir"
def init_keystore(ks):
platform.maybe_mkdir(ks.path)
ks.load_secret(ks.path)
ks.load_state()
ks.initialized = True
class FlashKeyStoreTest(TestCase):
def get_keystore(self):
"""Clean up the test folder and create fresh keystore"""
try:
platform.delete_recursively(TEST_DIR)
os.rmdir(TEST_DIR)
except:
pass
FlashKeyStore.path = TEST_DIR
return FlashKeyStore()
def test_create_config(self):
"""Test initial config creation"""
ks = self.get_keystore()
init_keystore(ks)
files = [f[0] for f in os.ilistdir(TEST_DIR)]
self.assertTrue("secret" in files)
self.assertTrue("pin" in files)
self.assertEqual(ks.is_pin_set, False)
self.assertEqual(ks.pin_attempts_left, ks.pin_attempts_max)
self.assertTrue(ks.pin_attempts_left is not None)
def test_change_secret(self):
"""Test wipe exception if secret is changed"""
# create keystore
ks = self.get_keystore()
init_keystore(ks)
files = [f[0] for f in os.ilistdir(TEST_DIR)]
self.assertTrue("secret" in files)
self.assertTrue("pin" in files)
# now change secret value
with open(TEST_DIR+"/secret", "wb") as f:
# a different value
f.write(b"5"*32)
ks = FlashKeyStore()
# check it raises
with self.assertRaises(platform.CriticalErrorWipeImmediately):
init_keystore(ks)
# files are deleted
files = [f[0] for f in os.ilistdir(TEST_DIR)]
self.assertFalse("secret" in files)
self.assertFalse("pin" in files)
def test_change_pin_file(self):
"""Test wipe exception if pin state changed"""
# create keystore
ks = self.get_keystore()
init_keystore(ks)
# load signed pin state
with open(TEST_DIR+"/pin", "rb") as f:
# a different value
content = f.read()
# set invalid value
content = content[1:] + b"1"
# write new state
with open(TEST_DIR+"/pin", "wb") as f:
# a different value
f.write(content)
ks = FlashKeyStore()
# check it raises
with self.assertRaises(platform.CriticalErrorWipeImmediately):
init_keystore(ks)
# files are deleted
files = [f[0] for f in os.ilistdir(TEST_DIR)]
self.assertFalse("secret" in files)
self.assertFalse("pin" in files)
|
py | b40be6777dba8532592271773c16c1e38292da78 |
from spi.errors import RuntimeInterpreterError
class CallStack:
def __init__(self):
# TODO: try to replace it with linked list in the future and compare
# results.
self._records = []
def access_variable(self, var_name):
var = self.peek().get(var_name)
if var is not None:
return var
var = self._records[0].get(var_name)
if var is not None:
return var
# After proper semantic check this should never happen
raise RuntimeInterpreterError()
def push(self, ar):
self._records.append(ar)
def pop(self):
return self._records.pop()
def peek(self):
return self._records[-1]
def __len__(self):
return len(self._records)
def __str__(self):
s = "\n".join(repr(ar) for ar in reversed(self._records))
s = f"CALL STACK\n{s}\n"
return s
def __repr__(self):
return self.__str__()
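# Minimal usage sketch (not part of the original module); plain dicts stand in
# for activation records here purely for illustration:
if __name__ == "__main__":
    stack = CallStack()
    stack.push({"x": 1})                   # bottom record, e.g. global scope
    stack.push({"y": 2})                   # top record, e.g. current frame
    print(stack.access_variable("y"))      # found in the top record -> 2
    print(stack.access_variable("x"))      # falls back to the bottom record -> 1
    stack.pop()
    print(len(stack))                      # -> 1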
|
py | b40be6ea6056af009c782feec3a699e51a666599 | from flask_restful import Resource
from urllib.request import Request, urlopen
from bs4 import BeautifulSoup
# import xml2json
BASE_URL = "http://www.fightingillini.com/schedule.aspx?path="
sports = ['baseball', 'mbball', 'mcross', 'football', 'mgolf',
'mgym', 'mten', 'mtrack', 'wrestling', 'wbball',
'wcross', 'wgolf', 'wgym', 'wsoc', 'softball',
'wswim', 'wten', 'wtrack', 'wvball']
'''class AthleticSchedule(Resource):
def get(self, sport):
request_url = 'http://app-uiuc-ncaa.yinzcam.com/V1/Game/List/?teamid=uiuc-' + sport + '&version=4.6&app_version=1.0.1&mcc=310&width=640&application=NCAA_UIUC&schoolid=UIUC&os=iOS&mnc=260&height=1136&os_version=9.1&ff=mobile&carrier=T-Mobile'
request = urlopen(request_url)
print(type(request))
return xml2json.xml2json(request.read(), None)'''
class AthleticSchedule(Resource):
def get(self, sport):
if sport.lower() in sports:
request_url = BASE_URL + sport
req = Request(request_url, None, {'User-agent': 'Mozilla/5.0 (X11; U; Linux i686) Gecko/20071127 Firefox/2.0.0.11'})
request = urlopen(req)
soup = BeautifulSoup(request, 'html.parser')
retval = {}
gamelist = []
for x in soup.find_all(class_='schedule_game'):
print(x)
game = {}
if (x.find(class_='schedule_game_opponent_name').a is None and x.find(class_='schedule_game_opponent_name').span is None):
game['Opponent'] = x.find(class_='schedule_game_opponent_name').string.strip()
elif (x.find(class_='schedule_game_opponent_name').span is None):
if (x.find(class_='schedule_game_opponent_name').a.string is None):
game['Opponent'] = x.find(class_='schedule_game_opponent_name').a.span.string.strip()
else:
game['Opponent'] = x.find(class_='schedule_game_opponent_name').a.string.strip()
else:
game['Opponent'] = x.find(class_='schedule_game_opponent_name').span.string.strip()
game['Date'] = x.find(class_='schedule_game_opponent_date').string.strip()
game['Time'] = x.find(class_='schedule_game_opponent_time').string.strip()
if (x.find(class_='schedule_game_location').span is None):
game['Location'] = x.find(class_='schedule_game_location').string.strip()
else:
if (x.find(class_='schedule_game_location').span.string is None):
game['Location'] = None
else:
game['Location'] = x.find(class_='schedule_game_location').span.string.strip()
#game['Home/Away'] = x.find(class_='schedule_game_location').span['class'][0].split('_')[1]
#game['Watchable Links'] = x.find(class_='schedule_game_links').span['class'].split('_')[1]
if (x.find(class_='schedule_game_results') is None or len(x.find(class_='schedule_game_results').div.contents) == 0):
game['Results'] = 'This has not happened yet or no results were reported'
else:
game['Results'] = (x.find(class_='schedule_game_results').div.contents[0])
gamelist.append(game)
retval['games'] = gamelist
return retval
else:
return {'This sport' : 'does not exist'}
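# Illustrative wiring sketch (not part of the original module); the Flask app
# setup and route path below are assumptions for demonstration only, so the
# example is kept in comments:
#
#   from flask import Flask
#   from flask_restful import Api
#   app = Flask(__name__)
#   api = Api(app)
#   api.add_resource(AthleticSchedule, "/athletics/<string:sport>")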
|
py | b40be7c431015c3088da9722a3580c691f91421b | #!/usr/bin/python
# -*- encoding:utf-8 -*-
import json,pprint
from prettyprinter import prettyPrinter
from kscore.session import get_session
if __name__ == "__main__":
s = get_session()
region='cn-beijing-6'
#region='cn-shanghai-2'
eipClient = s.create_client("eip", region, use_ssl=True)
#allInstances=kecClient.describe_instances()
#allNics=vpcClient.describe_network_interfaces()
#allEips=eipClient.describe_addresses(MaxResults=7,NextToken='OA==')
allEips=eipClient.describe_addresses(MaxResults=7)
#allEips=eipClient.describe_addresses(**{'Filter.1.Name':'instance-type','Filter.1.Value.1':'Ipfwd'})
#allEips=eipClient.describe_addresses(**{'Filter.1.Name':'instance-type','Filter.1.Value.1':'Slb'})
#pprint.pprint(allEips)
#prettyPrinter().pprint(allEips)
#prettyPrinter().pprint(allNics)
#prettyPrinter().pprint(allInstances)
for item in allEips['AddressesSet']:
        print(item['PublicIp'])
        print(item['AllocationId'])
#eipClient.associate_address(**{'AllocationId':'1cd0da05-8a3e-4c8e-8230-e6d39b85331e','InstanceType':'Ipfwd','InstanceId':'bede9a1c-d3a7-4b31-82e6-6699790ad1a3', 'NetworkInterfaceId':'fec81567-a4c7-4460-a998-54f407e77c0a'})
#eipClient.disassociate_address(**{'AllocationId':'1cd0da05-8a3e-4c8e-8230-e6d39b85331e'})
#eipClient.modify_address(**{'AllocationId':'c054f87a-4508-4db2-bc10-f594b34a2ef3','BandWidth':1})
#eipClient.modify_address(**{'AllocationId':'070a4af5-90ff-4953-a388-01a694ebdae5','BandWidth':1})
|
py | b40beb8de275272cdd84045bb51bb7c277ba5655 | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'SeoAnaliz.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
py | b40beb9aaebde206170c4f5067e74288a38d5a53 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for question-answering on SQuAD (DistilBERT, Bert, XLM, XLNet)."""
from __future__ import absolute_import, division, print_function
import argparse
import logging
import os
import random
import glob
import timeit
import getpass
import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from torch.utils.data.distributed import DistributedSampler
try:
from torch.utils.tensorboard import SummaryWriter
except:
from tensorboardX import SummaryWriter
from tqdm import tqdm, trange
from transformers import (WEIGHTS_NAME, BertConfig, BertForMultipleChoice, BertTokenizer)
if getpass.getuser() == 'Mitch':
from transformers import AdamW, get_linear_schedule_with_warmup
from examples.utils_arc import (read_arc_examples, convert_examples_to_features,
features_to_groups, write_predictions, RawResult)
# The follwing import is the official SQuAD evaluation script (2.0).
# You can remove it from the dependencies if you are using this script outside of the library
# We've added it here for automated tests (see examples/test_examples.py file)
from examples.utils_squad_evaluate import EVAL_OPTS, main as evaluate_on_squad
else:
from transformers import AdamW
from transformers import WarmupLinearSchedule as get_linear_schedule_with_warmup
from utils_arc import (read_arc_examples, convert_examples_to_features,
features_to_groups, write_predictions, RawResult)
logger = logging.getLogger(__name__)
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in [BertConfig]), ())
MODEL_CLASSES = {
'bert': (BertConfig, BertForMultipleChoice, BertTokenizer)
}
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def to_list(tensor):
return tensor.detach().cpu().tolist()
def load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False):
if args.local_rank not in [-1, 0] and not evaluate:
torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache
# Load data features from cache or dataset file
input_file = args.predict_file if evaluate else args.train_file
cached_features_file = os.path.join(os.path.dirname(input_file), 'cached_{}_{}_{}'.format(
'dev' if evaluate else 'train',
list(filter(None, args.model_name_or_path.split('/'))).pop(),
str(args.max_seq_length)))
if os.path.exists(cached_features_file) and not args.overwrite_cache and not output_examples:
logger.info("Loading features from cached file %s", cached_features_file)
grouped_features = torch.load(cached_features_file)
else:
logger.info("Creating features from dataset file at %s", input_file)
examples = read_arc_examples(input_file=input_file,
is_training=not evaluate,
version_2_with_negative=args.version_2_with_negative)
features = convert_examples_to_features(examples=examples,
tokenizer=tokenizer,
max_seq_length=args.max_seq_length,
is_training=not evaluate,
cls_token_segment_id=0,
pad_token_segment_id=0,
cls_token_at_end=False,
sequence_a_is_doc=False)
grouped_features = features_to_groups(features)
if args.local_rank in [-1, 0]:
logger.info("Saving grouped features into cached file %s", cached_features_file)
torch.save(grouped_features, cached_features_file)
if args.local_rank == 0 and not evaluate:
torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([gf.input_ids_ for gf in grouped_features], dtype=torch.long)
all_input_mask = torch.tensor([gf.input_masks for gf in grouped_features], dtype=torch.long)
all_cls_index = torch.tensor([gf.cls_indices for gf in grouped_features], dtype=torch.long)
all_segment_ids = torch.tensor([gf.segment_ids_ for gf in grouped_features], dtype=torch.long)
all_p_mask = torch.tensor([gf.p_masks for gf in grouped_features], dtype=torch.float)
if evaluate:
all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids,
all_example_index, all_cls_index, all_p_mask)
else:
all_labels = torch.tensor([gf.label for gf in grouped_features], dtype=torch.long)
# all_start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long)
# all_end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids,
all_labels, all_cls_index, all_p_mask)
if output_examples:
return dataset, examples, grouped_features
return dataset
def train(args, train_dataset, model, tokenizer):
""" Train the model """
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter()
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = 0
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
    set_seed(args)  # Added here for reproducibility (even between python 2 and 3)
for _ in train_iterator:
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
for step, batch in enumerate(epoch_iterator):
model.train()
batch = tuple(t.to(args.device) for t in batch)
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'labels': batch[3]}
# 'start_positions': batch[3],
# 'end_positions': batch[4]}
if args.model_type != 'distilbert':
inputs['token_type_ids'] = None if args.model_type == 'xlm' else batch[2]
if args.model_type in ['xlnet', 'xlm']:
inputs.update({'cls_index': batch[4],
'p_mask': batch[5]})
outputs = model(**inputs)
loss = outputs[0] # model outputs are always tuple in transformers (see doc)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
# Log metrics
if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well
results = evaluate(args, model, tokenizer)
num_correct = 0
for value in results.values():
num_correct += value
tb_writer.add_scalar('eval num_correct', num_correct, global_step)
tb_writer.add_scalar('eval num possible', len(results))
tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)
tb_writer.add_scalar('loss', (tr_loss - logging_loss)/args.logging_steps, global_step)
logging_loss = tr_loss
if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
# Save model checkpoint
output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, 'training_args.bin'))
logger.info("Saving model checkpoint to %s", output_dir)
# TODO what needs to happen next: create a script to load model file and create features based on that model
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
return global_step, tr_loss / global_step
def evaluate(args, model, tokenizer, prefix=""):
dataset, examples, grouped_features = load_and_cache_examples(args, tokenizer, evaluate=True, output_examples=True)
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(dataset) if args.local_rank == -1 else DistributedSampler(dataset)
eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu evaluate
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
all_results = []
start_time = timeit.default_timer()
for batch in tqdm(eval_dataloader, desc="Evaluating"):
model.eval()
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {'input_ids': batch[0],
'attention_mask': batch[1]
}
if args.model_type != 'distilbert':
inputs['token_type_ids'] = None if args.model_type == 'xlm' else batch[2] # XLM don't use segment_ids
example_indices = batch[3]
if args.model_type in ['xlnet', 'xlm']:
inputs.update({'cls_index': batch[4],
'p_mask': batch[5]})
outputs = model(**inputs)
for i, example_index in enumerate(example_indices):
eval_feature = grouped_features[example_index.item()]
unique_ids = eval_feature.unique_ids
result = RawResult(unique_id = '*'.join(unique_ids),
answer_logits = outputs[0][i])
all_results.append(result)
evalTime = timeit.default_timer() - start_time
logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(dataset))
# Compute predictions
output_prediction_file = os.path.join(args.output_dir, "predictions_{}.json".format(prefix))
results = write_predictions(examples, grouped_features, all_results, output_prediction_file, args.verbose_logging)
return results
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--train_file", default=None, type=str, required=True,
help="SQuAD json for training. E.g., train-v1.1.json")
parser.add_argument("--predict_file", default=None, type=str, required=True,
help="SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json")
parser.add_argument("--model_type", default=None, type=str, required=True,
help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS))
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model checkpoints and predictions will be written.")
## Other parameters
parser.add_argument("--do_output_hidden_states", action="store_true",
help="In the outputs tuple should the hidden states be included")
parser.add_argument("--config_name", default="", type=str,
help="Pretrained config name or path if not the same as model_name")
parser.add_argument("--tokenizer_name", default="", type=str,
help="Pretrained tokenizer name or path if not the same as model_name")
parser.add_argument("--cache_dir", default="", type=str,
help="Where do you want to store the pre-trained models downloaded from s3")
parser.add_argument('--version_2_with_negative', action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.')
parser.add_argument('--null_score_diff_threshold', type=float, default=0.0,
help="If null_score - best_non_null is greater than the threshold predict null.")
parser.add_argument("--max_seq_length", default=384, type=int,
help="The maximum total input sequence length after WordPiece tokenization. Sequences "
"longer than this will be truncated, and sequences shorter than this will be padded.")
parser.add_argument("--doc_stride", default=128, type=int,
help="When splitting up a long document into chunks, how much stride to take between chunks.")
parser.add_argument("--max_query_length", default=64, type=int,
help="The maximum number of tokens for the question. Questions longer than this will "
"be truncated to this length.")
parser.add_argument("--do_train", action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval", action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--evaluate_during_training", action='store_true',
help="Rul evaluation during training at each logging step.")
parser.add_argument("--do_lower_case", action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--per_gpu_train_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for evaluation.")
parser.add_argument("--learning_rate", default=5e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--weight_decay", default=0.0, type=float,
help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--num_train_epochs", default=3.0, type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--max_steps", default=-1, type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
parser.add_argument("--warmup_steps", default=0, type=int,
help="Linear warmup over warmup_steps.")
parser.add_argument("--n_best_size", default=20, type=int,
help="The total number of n-best predictions to generate in the nbest_predictions.json output file.")
parser.add_argument("--max_answer_length", default=30, type=int,
help="The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another.")
parser.add_argument("--verbose_logging", action='store_true',
help="If true, all of the warnings related to data processing will be printed. "
"A number of warnings are expected for a normal SQuAD evaluation.")
parser.add_argument('--logging_steps', type=int, default=50,
help="Log every X updates steps.")
parser.add_argument('--save_steps', type=int, default=50,
help="Save checkpoint every X updates steps.")
parser.add_argument("--eval_all_checkpoints", action='store_true',
help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number")
parser.add_argument("--no_cuda", action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument('--overwrite_output_dir', action='store_true',
help="Overwrite the content of the output directory")
parser.add_argument('--overwrite_cache', action='store_true',
help="Overwrite the cached training and evaluation sets")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument("--local_rank", type=int, default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
parser.add_argument('--fp16_opt_level', type=str, default='O1',
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.")
parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.")
args = parser.parse_args()
if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:
raise ValueError("Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(args.output_dir))
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend='nccl')
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
# Set seed
set_seed(args)
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
args.model_type = args.model_type.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path,
cache_dir=args.cache_dir if args.cache_dir else None,
                                          output_hidden_states=args.do_output_hidden_states)
tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None)
model = model_class.from_pretrained(args.model_name_or_path,
from_tf=bool('.ckpt' in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir if args.cache_dir else None)
if args.local_rank == 0:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
model.to(args.device)
logger.info("Training/evaluation parameters %s", args)
# Before we do anything with models, we want to ensure that we get fp16 execution of torch.einsum if args.fp16 is set.
# Otherwise it'll default to "promote" mode, and we'll get fp32 operations. Note that running `--fp16_opt_level="O2"` will
# remove the need for this code, but it is still valid.
if args.fp16:
try:
import apex
apex.amp.register_half_function(torch, 'einsum')
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
# Training
if args.do_train:
train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False)
global_step, tr_loss = train(args, train_dataset, model, tokenizer)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
# Save the trained model and the tokenizer
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
# Create output directory if needed
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
logger.info("Saving model checkpoint to %s", args.output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))
# Load a trained model and vocabulary that you have fine-tuned
model = model_class.from_pretrained(args.output_dir)
tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
model.to(args.device)
# Evaluation - we can ask to evaluate all the checkpoints (sub-directories) in a directory
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
checkpoints = [args.output_dir]
if args.eval_all_checkpoints:
checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True)))
logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce model loading logs
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
# Reload the model
global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else ""
model = model_class.from_pretrained(checkpoint)
model.to(args.device)
# Evaluate
result = evaluate(args, model, tokenizer, prefix=global_step)
result = dict((k + ('_{}'.format(global_step) if global_step else ''), v) for k, v in result.items())
results.update(result)
logger.info("Results: {}".format(results))
return results
if __name__ == '__main__':
main()
class tempArgs(object):
def __init__(self,
local_rank,
predict_file,
train_file,
model_name_or_path,
max_seq_length,
overwrite_cache,
version_2_with_negative,
train_batch_size,
per_gpu_train_batch_size,
n_gpu,
max_steps,
num_train_epochs,
gradient_accumulation_steps,
weight_decay,
learning_rate,
adam_epsilon,
warmup_steps,
fp16,
seed,
device,
model_type,
max_grad_norm,
logging_steps,
evaluate_during_training,
save_steps,
):
self.local_rank = local_rank
self.predict_file = predict_file
self.train_file = train_file
self.model_name_or_path = model_name_or_path
self.max_seq_length = max_seq_length
self.overwrite_cache = overwrite_cache
self.version_2_with_negative = version_2_with_negative
self.train_batch_size = train_batch_size
self.per_gpu_train_batch_size = per_gpu_train_batch_size
self.n_gpu = n_gpu
self.max_steps = max_steps
self.num_train_epochs = num_train_epochs
self.gradient_accumulation_steps = gradient_accumulation_steps
self.weight_decay = weight_decay
self.learning_rate = learning_rate
self.adam_epsilon = adam_epsilon
self.warmup_steps = warmup_steps
self.fp16 = fp16
self.seed = seed
self.device = device
self.model_type = model_type
self.max_grad_norm = max_grad_norm
self.logging_steps = logging_steps
self.evaluate_during_training = evaluate_during_training
self.save_steps = save_steps
config_class, model_class, tokenizer_class = MODEL_CLASSES['bert']
config = config_class.from_pretrained('bert-base-uncased',
cache_dir=None,
output_hidden_states=True)
tokenizer = tokenizer_class.from_pretrained('bert-base-uncased',
do_lower_case=True,
cache_dir=None)
model = model_class.from_pretrained('bert-base-uncased',
from_tf=False,
config=config,
cache_dir=None)
args = tempArgs(local_rank=-1,
predict_file='',
train_file='C://Users/Mitch/PycharmProjects/ARC/ARC-with-context/dev.jsonl',
model_name_or_path='bert-base-uncased',
max_seq_length=512,
overwrite_cache=False,
version_2_with_negative=False,
train_batch_size=None,
per_gpu_train_batch_size=2,
n_gpu=0,
max_steps=5,
num_train_epochs=2,
gradient_accumulation_steps=1,
weight_decay=0,
learning_rate=5e-5,
adam_epsilon=1e-8,
warmup_steps=0,
fp16=False,
seed=1,
device='cpu',
model_type='bert',
max_grad_norm=1.,
logging_steps=20,
evaluate_during_training=False,
save_steps=10)
train_dataset, _, _ = load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=True)
train(args, train_dataset, model, tokenizer) |
py | b40bec27cbd921ae7d98483087225b2ddae7d119 | import io, os, discord, random, alexflipnote, aiohttp, contextlib
from discord.ext import commands
from PIL import Image, ImageDraw
from io import BytesIO
import requests
from utilities.helpers.utils import Votelink, voteembed
from PIL import ImageOps
import pyimgur
im = pyimgur.Imgur(os.getenv("IMGUR_API_KEY"))
afp = alexflipnote.Client(token = os.getenv("AFP_KEY"))
class Images(commands.Cog, name="Image", description="Image Commands"):
def __init__(self, bot: commands.Bot):
self.bot = bot
self.bot.topggpy = bot.topggpy
self.session = bot.httpsession
def check_voted(self, userid):
return self.bot.topggpy.get_user_vote(userid)
async def cog_command_error(
self, ctx: commands.Context, error: commands.CommandError
):
em = discord.Embed()
em.title = f"Error: {__name__}"
em.description = f"{error}"
em.color = 0xEE0000
await ctx.send(embed=em)
me = self.bot.get_user(881861601756577832)
await me.send(str(ctx.channel.id), embed=em)
@commands.command()
async def wanted(self, ctx, user: discord.Member = None):
user = ctx.author if not user else user
wanted = Image.open("utilities/images/wanted.jpg")
asset = user.avatar.with_format("jpg")
data = BytesIO(await asset.read())
pfp = Image.open(data)
pfp = pfp.resize((252, 252))
wanted.paste(pfp, (106, 247))
wanted.save("profile.jpg")
try:
await ctx.send(file=discord.File("profile.jpg"))
os.remove("profile.jpg")
except:
await ctx.send("Error!")
@commands.command()
async def kill(self, ctx, user: discord.Member = None):
user = ctx.author if not user else user
amogusimage = Image.open(f"utilities/images/kill2.jfif")
asset1 = user.avatar.with_format("jpg")
asset2 = ctx.author.avatar.with_format("jpg")
data1 = BytesIO(await asset1.read())
data2 = BytesIO(await asset2.read())
pfp = Image.open(data1)
author = Image.open(data2)
pfp = pfp.resize((55, 55))
author = author.resize((55, 55))
amogusimage.paste(author, (54, 58))
amogusimage.paste(pfp, (170, 40))
amogusimage.save("kill.jpg")
try:
await ctx.send(file=discord.File("kill.jpg"))
os.remove("kill.jpg")
except:
await ctx.send("Error!")
@commands.command()
async def disfine(self, ctx, user: discord.Member = None):
user = ctx.author if not user else user
wanted = Image.open("utilities/images/finelol.jpeg")
asset = user.avatar.with_format("jpg")
data = BytesIO(await asset.read())
pfp = Image.open(data)
pfp = pfp.resize((350, 350))
wanted.paste(pfp, (730, 335))
wanted.save("finelol.jpg")
try:
await ctx.send(file=discord.File("finelol.jpg"))
os.remove("finelol.jpg")
except:
await ctx.send("Error!")
@commands.command()
async def affect(self, ctx, user: discord.Member = None):
user = ctx.author if not user else user
wanted = Image.open("utilities/images/affect.png")
asset = user.avatar.with_format("png")
data = BytesIO(await asset.read())
pfp = Image.open(data)
pfp = pfp.resize((206, 162))
wanted.paste(pfp, (176, 383))
wanted.save("affectlol.jpg")
await ctx.send(file=discord.File("affectlol.jpg"))
os.remove("affectlol.jpg")
@commands.command()
async def dog(self, ctx):
request = await self.session.get(
"https://some-random-api.ml/img/dog"
) # Make a request
dogjson = await request.json() # Convert it to a JSON dictionary
embed = discord.Embed(
title="Doggo!", color=discord.Color.purple()
) # Create embed
embed.set_image(
url=dogjson["link"]
) # Set the embed image to the value of the 'link' key
await ctx.send(embed=embed) # Send the embed
@commands.command()
async def cat(self, ctx):
request = await self.session.get(
"https://some-random-api.ml/img/cat"
) # Make a request
dogjson = await request.json() # Convert it to a JSON dictionary
embed = discord.Embed(
title="CAT!", color=discord.Color.purple()
) # Create embed
embed.set_image(
url=dogjson["link"]
) # Set the embed image to the value of the 'link' key
await ctx.send(embed=embed) # Send the embed
@commands.command(name="textart", aliases=["au"])
async def font_generator(self, ctx, *, text: str = ""):
"""Generate cool font"""
if not text:
return await ctx.send("Please enter text :pager:")
url = f"https://gdcolon.com/tools/gdfont/img/{text}?font=3&color=00ffff"
async with self.session.get(url) as r:
if r.status != 200:
return await ctx.send("Failed to generate textart :x:")
data = io.BytesIO(await r.read())
await ctx.send(file=discord.File(data, "textart.png"))
@commands.command()
async def catgirl(self, ctx):
request = await self.session.get(
"http://api.nekos.fun:8080/api/neko"
) # Make a request
dogjson = await request.json() # Convert it to a JSON dictionary
embed = discord.Embed(
title="Catgirl", color=discord.Color.purple()
) # Create embed
embed.set_image(
url=dogjson["image"]
) # Set the embed image to the value of the 'link' key
embed.set_footer(
text="Image taken from Nekos.Fun api.\nDon't worry! There are no children or nsfw. Its just anime catgirls uwu"
)
await ctx.send(embed=embed) # Send the embed
@commands.command()
async def achievement(self, ctx, *, text: str = ""):
"""Achievement unlocked"""
if text == "":
return await ctx.send("You need to specify the achievement")
image = await afp.achievement(text=text)
image_bytes = await image.read()
await ctx.send(file=discord.File(image_bytes, "achievement.png"))
@commands.command(aliases=["aij"])
async def amiajoke(self, ctx, image=None):
if image == None:
image = ctx.author.avatar.url
image = await afp.amiajoke(image)
image_bytes = await image.read()
await ctx.send(file=discord.File(image_bytes, "amiajoke.png"))
@commands.command()
async def drake(self, ctx, *, text):
text = text.split(",")
if len(text) != 2:
return await ctx.send(
"Please specify `,` separated two sentences :page_facing_up:"
)
image = await afp.drake(text[0], text[1])
image_bytes = await image.read()
await ctx.send(file=discord.File(image_bytes, "drake.png"))
@commands.command()
async def bad(self, ctx, image=None):
if image == None:
image = ctx.author.avatar.url
image = await afp.bad(image)
image_bytes = await image.read()
await ctx.send(file=discord.File(image_bytes, "bad.png"))
@commands.command()
async def birb(self, ctx):
image = await afp.birb()
# image_bytes = await image.read()
await ctx.send(image)
@commands.command()
async def coffee(self, ctx):
image = await afp.coffee()
# image_bytes = await image.read()
await ctx.send(image)
@commands.command()
async def calling(self, ctx, *, text: str = ""):
"""Call meme"""
if text == "":
return await ctx.send("You need to specify the text")
image = await afp.calling(text=text)
image_bytes = await image.read()
await ctx.send(file=discord.File(image_bytes, "call.png"))
@commands.command()
async def captcha(self, ctx, *, text: str = ""):
"""Make a custom fake captcha!!"""
if text == "":
return await ctx.send("You need to specify the text")
image = await afp.captcha(text=text)
image_bytes = await image.read()
await ctx.send(file=discord.File(image_bytes, "captcha.png"))
@commands.command()
async def colourify(self, ctx, image=None, colour=None, background=None):
if image == None:
image = ctx.author.avatar.url
image = await afp.colourify(image, colour, background)
image_bytes = await image.read()
await ctx.send(file=discord.File(image_bytes, "colourify.png"))
@commands.command()
async def didumean(self, ctx, *, text):
text = text.split(",")
if len(text) != 2:
return await ctx.send(
"Please specify `,` separated two sentences :page_facing_up:"
)
if len(text[0]) > 39 or len(text[1]) > 39:
return await ctx.send("Your text is too big. limit is 40 characters")
image = await afp.did_you_mean(text[0], text[1])
image_bytes = await image.read()
await ctx.send(file=discord.File(image_bytes, "didumean.png"))
@commands.command()
async def factimage(self, ctx, *, text: str = ""):
"""Make a custom fake fact image!!"""
if text == "":
return await ctx.send("You need to specify the text")
image = await afp.facts(text=text)
image_bytes = await image.read()
await ctx.send(file=discord.File(image_bytes, "facts.png"))
@commands.command(
name="filter",
aliases=[
"blur",
"b&w",
"deepfry",
"sepia",
"pixelate",
"magik",
"jpegify",
"wide",
"snow",
"gay",
"communist",
],
)
async def filter(self, ctx, arg="", image_link=""):
"""Deepfry avatar"""
        if not await self.check_voted(ctx.author.id):
await ctx.send(embed=voteembed, view=Votelink())
filters = [
"b&w",
"blur",
"charcoal",
"communist",
"deepfry",
"edge",
"emboss",
"gay",
"glitch",
"implode",
"jpegify",
"magik",
"pixelate",
"primitive",
"sepia",
"sketch",
"snow",
"spread",
"swirl",
"wave",
"wide",
]
if arg == "--list":
return await ctx.send(
embed=discord.Embed(title="Filters", description="\n".join(filters))
)
if arg not in filters:
return await ctx.send(
"Invalid filter name\nUse `.filter --list` for all options"
)
if not image_link:
user = ctx.message.author
image_link = user.avatar.url
try:
user = ctx.message.mentions[0]
image_link = user.avatar.url
except IndexError:
pass
image = await afp.filter(arg, image_link)
image_bytes = await image.read()
await ctx.send(file=discord.File(image_bytes, "filtered.png"))
@commands.command()
async def floor(self, ctx, image=None):
if image == None:
image = ctx.author.avatar.url
image = await afp.floor(image)
image_bytes = await image.read()
await ctx.send(file=discord.File(image_bytes, "floor.png"))
@commands.command()
async def fml(self, ctx):
image = await afp.fml()
# image_bytes = await image.read()
await ctx.send(image)
@commands.command()
async def salty(self, ctx, image=None):
if image == None:
image = ctx.author.avatar.url
image = await afp.salty(image)
image_bytes = await image.read()
await ctx.send(file=discord.File(image_bytes, "salty.png"))
@commands.command()
async def shame(self, ctx, image=None):
if image == None:
image = ctx.author.avatar.url
image = await afp.shame(image)
image_bytes = await image.read()
await ctx.send(file=discord.File(image_bytes, "salty.png"))
@commands.command()
async def scroll(self, ctx, *, text: str = ""):
if text == "":
return await ctx.send("You need to specify the text")
image = await afp.scroll(text=text)
image_bytes = await image.read()
await ctx.send(file=discord.File(image_bytes, "scroll.png"))
@commands.command()
async def ship(
self, ctx, member1: discord.Member = None, member2: discord.Member = None
):
if member1 == None:
member1 = ctx.author
if member2 == None:
return await ctx.send("You need to specify a user to be shipped with!")
ppurl1 = member1.avatar.url
ppurl2 = member2.avatar.url
random.seed(member1.id + member2.id)
r = random.randint(1, 100)
shipper = r / 1.17
        image = await afp.ship(ppurl1, ppurl2)
        image_bytes = await image.read()
        # ImageDraw needs a PIL Image (not raw bytes) and draw.text() expects a string
        ship_img = Image.open(image_bytes)
        draw = ImageDraw.Draw(ship_img)
        draw.text((28, 36), f"{round(shipper)}%", fill=(255, 0, 0))
        buffer = BytesIO()
        ship_img.save(buffer, "PNG")
        buffer.seek(0)
        await ctx.send(file=discord.File(buffer, "ship.png"))
@commands.command()
async def what(self, ctx, image=None):
if image == None:
image = ctx.author.avatar.url
image = await afp.what(image)
image_bytes = await image.read()
await ctx.send(file=discord.File(image_bytes, "what.png"))
@commands.command()
async def imgur(self, ctx, image_url=None):
if image_url == None:
return await ctx.send(
"Usage: .imgur <discord image link ending with `.png` or `.jpg`>"
)
myfile = requests.get(image_url)
open("utilities/photos/imgur.png", "wb").write(myfile.content)
try:
uploaded_image = im.upload_image(
"utilities/photos/imgur.png", title=f"Uploaded by SpaceBot"
)
        except:
            return await ctx.send(
                "Error: either the link is invalid (it should end with .png or another picture format) or I couldn't process the image"
            )
await ctx.send(
embed=discord.Embed(
title="Upload successful",
description=f"Successfully uploaded image to imgur `LINK`- {uploaded_image.link}",
)
)
async def invert_image(
self,
ctx: commands.Context,
url,
image_type: str,
):
# Some of this image/url handling came from Red-DiscordBot, thanks
await ctx.trigger_typing()
if len(ctx.message.attachments) > 0:
data = await ctx.message.attachments[0].read()
else:
if url.startswith("<") and url.endswith(">"):
url = url[1:-1]
async with aiohttp.ClientSession() as session:
try:
async with session.get(url) as r:
data = await r.read()
except aiohttp.InvalidURL:
return await ctx.send("That URL is invalid.")
except aiohttp.ClientError:
return await ctx.send(
"Something went wrong while trying to get the image."
)
data = io.BytesIO(data)
try:
image = Image.open(data)
image = ImageOps.invert(image.convert("RGB"))
except Exception:
return await ctx.send(
"Failed to invert. Make sure that you have provided an image and in the correct format."
)
buff = io.BytesIO()
image.save(buff, "png")
buff.seek(0)
embed = discord.Embed(
title=f"Inverted {image_type.capitalize()}",
color=discord.Colour.random(),
)
try:
embed.set_image(url="attachment://image.png")
await ctx.send(file=discord.File(buff, filename="image.png"), embed=embed)
except discord.HTTPException:
await ctx.send("The image quality was too high, sorry!")
return
@commands.group()
async def invert(self, ctx: commands.Context):
"""Invert images and avatars."""
@invert.command()
async def image(self, ctx, url: str = None):
"""Invert an image.
You can either upload an image or paste a URL.
"""
if not any([url, ctx.message.attachments]):
return await ctx.send_help()
msg = await ctx.send("Inverting image...")
await self.invert_image(
ctx=ctx,
url=url,
image_type="image",
)
with contextlib.suppress(discord.NotFound):
await msg.delete()
@invert.command()
async def avatar(self, ctx, member: discord.Member = None):
"""Invert a user's avatar.
If no user is provided, it defaults to yourself.
"""
msg = await ctx.send("Inverting avatar...")
if not member:
member = ctx.author
avvy = str(member.avatar.url)
await self.invert_image(
ctx=ctx,
url=avvy,
image_type="avatar",
)
with contextlib.suppress(discord.NotFound):
await msg.delete()
def setup(bot):
bot.add_cog(Images(bot))
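# Loading sketch (illustrative only; the real extension path is project-specific):
#   bot.load_extension("cogs.images")
# The cog assumes `bot.httpsession` and `bot.topggpy` are set before it is loaded.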
|
py | b40bec493b1d3f26c43332314e6bff13b36112eb | """app URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('api/trip/', include('trip.urls')),
path('api/user/', include('user.urls')),
]
urlpatterns += [
path('api-auth/', include('rest_framework.urls')),
]
|
py | b40bec5e61ccf1e3f9f96c601c4f1dea94f83bf5 | import csv
import sys
#open connection and import into DB
import MySQLdb
#buffer to hold file input
data = []
import glob
# Open database connection
db = MySQLdb.connect("localhost","root","mysql","vmpTest")
# prepare a cursor object using cursor() method
cursor = db.cursor()
#read the CSV file provided on the command line
with open(sys.argv[1], "r", newline="") as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
#print 'Sku:' + row['Sku'] + '\tUPC: ' + row['UPC'] + "\tArtist: " + row['Artist'] + "\tAlbum: " + row['Album']
data.append(row)
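#expected CSV layout (illustrative only; column names inferred from the keys used
#above): a header row "Sku,UPC,Artist,Album" followed by one record per line, e.g.
#  12345,012345678905,Some Artist,Some Album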
for row in data:
cursor.execute("INSERT IGNORE INTO reports VALUES (%s, %s, %s, %s)", (row['Sku'], row['UPC'], row['Artist'], row['Album']))
db.commit();
db.close(); |
py | b40bec6ca3379c44937e689ba6789c3718340cb9 | """Semantic analysis of types"""
import itertools
from itertools import chain
from contextlib import contextmanager
from mypy.backports import OrderedDict
from typing import Callable, List, Optional, Set, Tuple, Iterator, TypeVar, Iterable, Sequence
from typing_extensions import Final
from mypy_extensions import DefaultNamedArg
from mypy.messages import MessageBuilder, quote_type_string, format_type_bare
from mypy.options import Options
from mypy.types import (
NEVER_NAMES, Type, UnboundType, TupleType, TypedDictType, UnionType, Instance, AnyType,
CallableType, NoneType, ErasedType, DeletedType, TypeList, TypeVarType, SyntheticTypeVisitor,
StarType, PartialType, EllipsisType, UninhabitedType, TypeType, CallableArgument,
TypeQuery, union_items, TypeOfAny, LiteralType, RawExpressionType,
PlaceholderType, Overloaded, get_proper_type, TypeAliasType, RequiredType,
TypeVarLikeType, ParamSpecType, ParamSpecFlavor, UnpackType,
callable_with_ellipsis, TYPE_ALIAS_NAMES, FINAL_TYPE_NAMES,
LITERAL_TYPE_NAMES, ANNOTATED_TYPE_NAMES,
)
from mypy.nodes import (
TypeInfo, Context, SymbolTableNode, Var, Expression,
get_nongen_builtins, check_arg_names, check_arg_kinds, ArgKind, ARG_POS, ARG_NAMED,
ARG_OPT, ARG_NAMED_OPT, ARG_STAR, ARG_STAR2, TypeVarExpr, TypeVarLikeExpr, ParamSpecExpr,
TypeAlias, PlaceholderNode, SYMBOL_FUNCBASE_TYPES, Decorator, MypyFile
)
from mypy.typetraverser import TypeTraverserVisitor
from mypy.tvar_scope import TypeVarLikeScope
from mypy.exprtotype import expr_to_unanalyzed_type, TypeTranslationError
from mypy.plugin import Plugin, TypeAnalyzerPluginInterface, AnalyzeTypeContext
from mypy.semanal_shared import SemanticAnalyzerCoreInterface
from mypy.errorcodes import ErrorCode
from mypy import nodes, message_registry, errorcodes as codes
T = TypeVar('T')
type_constructors: Final = {
'typing.Callable',
'typing.Optional',
'typing.Tuple',
'typing.Type',
'typing.Union',
*LITERAL_TYPE_NAMES,
*ANNOTATED_TYPE_NAMES,
}
ARG_KINDS_BY_CONSTRUCTOR: Final = {
'mypy_extensions.Arg': ARG_POS,
'mypy_extensions.DefaultArg': ARG_OPT,
'mypy_extensions.NamedArg': ARG_NAMED,
'mypy_extensions.DefaultNamedArg': ARG_NAMED_OPT,
'mypy_extensions.VarArg': ARG_STAR,
'mypy_extensions.KwArg': ARG_STAR2,
}
GENERIC_STUB_NOT_AT_RUNTIME_TYPES: Final = {
'queue.Queue',
'builtins._PathLike',
'asyncio.futures.Future',
}
def analyze_type_alias(node: Expression,
api: SemanticAnalyzerCoreInterface,
tvar_scope: TypeVarLikeScope,
plugin: Plugin,
options: Options,
is_typeshed_stub: bool,
allow_placeholder: bool = False,
in_dynamic_func: bool = False,
global_scope: bool = True) -> Optional[Tuple[Type, Set[str]]]:
"""Analyze r.h.s. of a (potential) type alias definition.
If `node` is valid as a type alias rvalue, return the resulting type and a set of
full names of type aliases it depends on (directly or indirectly).
Return None otherwise. 'node' must have been semantically analyzed.
"""
try:
type = expr_to_unanalyzed_type(node, options, api.is_stub_file)
except TypeTranslationError:
api.fail('Invalid type alias: expression is not a valid type', node)
return None
analyzer = TypeAnalyser(api, tvar_scope, plugin, options, is_typeshed_stub,
defining_alias=True,
allow_placeholder=allow_placeholder)
analyzer.in_dynamic_func = in_dynamic_func
analyzer.global_scope = global_scope
res = type.accept(analyzer)
return res, analyzer.aliases_used
def no_subscript_builtin_alias(name: str, propose_alt: bool = True) -> str:
msg = '"{}" is not subscriptable'.format(name.split('.')[-1])
# This should never be called if the python_version is 3.9 or newer
nongen_builtins = get_nongen_builtins((3, 8))
replacement = nongen_builtins[name]
if replacement and propose_alt:
msg += ', use "{}" instead'.format(replacement)
return msg
class TypeAnalyser(SyntheticTypeVisitor[Type], TypeAnalyzerPluginInterface):
"""Semantic analyzer for types.
Converts unbound types into bound types. This is a no-op for already
bound types.
If an incomplete reference is encountered, this does a defer. The
caller never needs to defer.
"""
# Is this called from an untyped function definition?
in_dynamic_func: bool = False
# Is this called from global scope?
global_scope: bool = True
def __init__(self,
api: SemanticAnalyzerCoreInterface,
tvar_scope: TypeVarLikeScope,
plugin: Plugin,
options: Options,
is_typeshed_stub: bool, *,
defining_alias: bool = False,
allow_tuple_literal: bool = False,
allow_unbound_tvars: bool = False,
allow_placeholder: bool = False,
allow_required: bool = False,
report_invalid_types: bool = True) -> None:
self.api = api
self.lookup_qualified = api.lookup_qualified
self.lookup_fqn_func = api.lookup_fully_qualified
self.fail_func = api.fail
self.note_func = api.note
self.tvar_scope = tvar_scope
# Are we analysing a type alias definition rvalue?
self.defining_alias = defining_alias
self.allow_tuple_literal = allow_tuple_literal
# Positive if we are analyzing arguments of another (outer) type
self.nesting_level = 0
# Should we allow new type syntax when targeting older Python versions
# like 'list[int]' or 'X | Y' (allowed in stubs and with `__future__` import)?
self.always_allow_new_syntax = (
self.api.is_stub_file
or self.api.is_future_flag_set('annotations')
)
# Should we accept unbound type variables (always OK in aliases)?
self.allow_unbound_tvars = allow_unbound_tvars or defining_alias
# If false, record incomplete ref if we generate PlaceholderType.
self.allow_placeholder = allow_placeholder
# Are we in a context where Required[] is allowed?
self.allow_required = allow_required
# Should we report an error whenever we encounter a RawExpressionType outside
# of a Literal context: e.g. whenever we encounter an invalid type? Normally,
# we want to report an error, but the caller may want to do more specialized
# error handling.
self.report_invalid_types = report_invalid_types
self.plugin = plugin
self.options = options
self.is_typeshed_stub = is_typeshed_stub
# Names of type aliases encountered while analysing a type will be collected here.
self.aliases_used: Set[str] = set()
def visit_unbound_type(self, t: UnboundType, defining_literal: bool = False) -> Type:
typ = self.visit_unbound_type_nonoptional(t, defining_literal)
if t.optional:
# We don't need to worry about double-wrapping Optionals or
# wrapping Anys: Union simplification will take care of that.
return make_optional_type(typ)
return typ
def visit_unbound_type_nonoptional(self, t: UnboundType, defining_literal: bool) -> Type:
sym = self.lookup_qualified(t.name, t)
if sym is not None:
node = sym.node
if isinstance(node, PlaceholderNode):
if node.becomes_typeinfo:
# Reference to placeholder type.
if self.api.final_iteration:
self.cannot_resolve_type(t)
return AnyType(TypeOfAny.from_error)
elif self.allow_placeholder:
self.api.defer()
else:
self.api.record_incomplete_ref()
return PlaceholderType(node.fullname, self.anal_array(t.args), t.line)
else:
if self.api.final_iteration:
self.cannot_resolve_type(t)
return AnyType(TypeOfAny.from_error)
else:
# Reference to an unknown placeholder node.
self.api.record_incomplete_ref()
return AnyType(TypeOfAny.special_form)
if node is None:
self.fail('Internal error (node is None, kind={})'.format(sym.kind), t)
return AnyType(TypeOfAny.special_form)
fullname = node.fullname
hook = self.plugin.get_type_analyze_hook(fullname)
if hook is not None:
return hook(AnalyzeTypeContext(t, t, self))
if (fullname in get_nongen_builtins(self.options.python_version)
and t.args
and not self.always_allow_new_syntax):
self.fail(no_subscript_builtin_alias(fullname,
propose_alt=not self.defining_alias), t)
tvar_def = self.tvar_scope.get_binding(sym)
if isinstance(sym.node, ParamSpecExpr):
if tvar_def is None:
self.fail('ParamSpec "{}" is unbound'.format(t.name), t)
return AnyType(TypeOfAny.from_error)
assert isinstance(tvar_def, ParamSpecType)
if len(t.args) > 0:
self.fail('ParamSpec "{}" used with arguments'.format(t.name), t)
# Change the line number
return ParamSpecType(
tvar_def.name, tvar_def.fullname, tvar_def.id, tvar_def.flavor,
tvar_def.upper_bound, line=t.line, column=t.column,
)
if isinstance(sym.node, TypeVarExpr) and tvar_def is not None and self.defining_alias:
self.fail('Can\'t use bound type variable "{}"'
' to define generic alias'.format(t.name), t)
return AnyType(TypeOfAny.from_error)
if isinstance(sym.node, TypeVarExpr) and tvar_def is not None:
assert isinstance(tvar_def, TypeVarType)
if len(t.args) > 0:
self.fail('Type variable "{}" used with arguments'.format(t.name), t)
# Change the line number
return TypeVarType(
tvar_def.name, tvar_def.fullname, tvar_def.id, tvar_def.values,
tvar_def.upper_bound, tvar_def.variance, line=t.line, column=t.column,
)
special = self.try_analyze_special_unbound_type(t, fullname)
if special is not None:
return special
if isinstance(node, TypeAlias):
self.aliases_used.add(fullname)
an_args = self.anal_array(t.args)
disallow_any = self.options.disallow_any_generics and not self.is_typeshed_stub
res = expand_type_alias(node, an_args, self.fail, node.no_args, t,
unexpanded_type=t,
disallow_any=disallow_any)
# The only case where expand_type_alias() can return an incorrect instance is
# when it is top-level instance, so no need to recurse.
if (isinstance(res, Instance) and # type: ignore[misc]
len(res.args) != len(res.type.type_vars) and
not self.defining_alias):
fix_instance(
res,
self.fail,
self.note,
disallow_any=disallow_any,
python_version=self.options.python_version,
use_generic_error=True,
unexpanded_type=t)
if node.eager:
# TODO: Generate error if recursive (once we have recursive types)
res = get_proper_type(res)
return res
elif isinstance(node, TypeInfo):
return self.analyze_type_with_type_info(node, t.args, t)
elif node.fullname in TYPE_ALIAS_NAMES:
return AnyType(TypeOfAny.special_form)
else:
return self.analyze_unbound_type_without_type_info(t, sym, defining_literal)
else: # sym is None
return AnyType(TypeOfAny.special_form)
def cannot_resolve_type(self, t: UnboundType) -> None:
# TODO: Move error message generation to messages.py. We'd first
# need access to MessageBuilder here. Also move the similar
# message generation logic in semanal.py.
self.api.fail(
'Cannot resolve name "{}" (possible cyclic definition)'.format(t.name),
t)
def try_analyze_special_unbound_type(self, t: UnboundType, fullname: str) -> Optional[Type]:
"""Bind special type that is recognized through magic name such as 'typing.Any'.
Return the bound type if successful, and return None if the type is a normal type.
"""
if fullname == 'builtins.None':
return NoneType()
elif fullname == 'typing.Any' or fullname == 'builtins.Any':
return AnyType(TypeOfAny.explicit)
elif fullname in FINAL_TYPE_NAMES:
self.fail("Final can be only used as an outermost qualifier"
" in a variable annotation", t)
return AnyType(TypeOfAny.from_error)
elif (fullname == 'typing.Tuple' or
(fullname == 'builtins.tuple'
and (self.always_allow_new_syntax or self.options.python_version >= (3, 9)))):
# Tuple is special because it is involved in builtin import cycle
# and may be not ready when used.
sym = self.api.lookup_fully_qualified_or_none('builtins.tuple')
if not sym or isinstance(sym.node, PlaceholderNode):
if self.api.is_incomplete_namespace('builtins'):
self.api.record_incomplete_ref()
else:
self.fail('Name "tuple" is not defined', t)
return AnyType(TypeOfAny.special_form)
if len(t.args) == 0 and not t.empty_tuple_index:
# Bare 'Tuple' is same as 'tuple'
any_type = self.get_omitted_any(t)
return self.named_type('builtins.tuple', [any_type],
line=t.line, column=t.column)
if len(t.args) == 2 and isinstance(t.args[1], EllipsisType):
# Tuple[T, ...] (uniform, variable-length tuple)
instance = self.named_type('builtins.tuple', [self.anal_type(t.args[0])])
instance.line = t.line
return instance
return self.tuple_type(self.anal_array(t.args))
elif fullname == 'typing.Union':
items = self.anal_array(t.args)
return UnionType.make_union(items)
elif fullname == 'typing.Optional':
if len(t.args) != 1:
self.fail('Optional[...] must have exactly one type argument', t)
return AnyType(TypeOfAny.from_error)
item = self.anal_type(t.args[0])
return make_optional_type(item)
elif fullname == 'typing.Callable':
return self.analyze_callable_type(t)
elif (fullname == 'typing.Type' or
(fullname == 'builtins.type'
and (self.always_allow_new_syntax or self.options.python_version >= (3, 9)))):
if len(t.args) == 0:
if fullname == 'typing.Type':
any_type = self.get_omitted_any(t)
return TypeType(any_type, line=t.line, column=t.column)
else:
# To prevent assignment of 'builtins.type' inferred as 'builtins.object'
# See https://github.com/python/mypy/issues/9476 for more information
return None
if len(t.args) != 1:
type_str = 'Type[...]' if fullname == 'typing.Type' else 'type[...]'
self.fail(type_str + ' must have exactly one type argument', t)
item = self.anal_type(t.args[0])
return TypeType.make_normalized(item, line=t.line)
elif fullname == 'typing.ClassVar':
if self.nesting_level > 0:
self.fail('Invalid type: ClassVar nested inside other type', t)
if len(t.args) == 0:
return AnyType(TypeOfAny.from_omitted_generics, line=t.line, column=t.column)
if len(t.args) != 1:
self.fail('ClassVar[...] must have at most one type argument', t)
return AnyType(TypeOfAny.from_error)
return self.anal_type(t.args[0])
elif fullname in NEVER_NAMES:
return UninhabitedType(is_noreturn=True)
elif fullname in LITERAL_TYPE_NAMES:
return self.analyze_literal_type(t)
elif fullname in ANNOTATED_TYPE_NAMES:
if len(t.args) < 2:
self.fail("Annotated[...] must have exactly one type argument"
" and at least one annotation", t)
return AnyType(TypeOfAny.from_error)
return self.anal_type(t.args[0])
elif fullname in ('typing_extensions.Required', 'typing.Required'):
if not self.allow_required:
self.fail("Required[] can be only used in a TypedDict definition", t)
return AnyType(TypeOfAny.from_error)
if len(t.args) != 1:
self.fail("Required[] must have exactly one type argument", t)
return AnyType(TypeOfAny.from_error)
return RequiredType(self.anal_type(t.args[0]), required=True)
elif fullname in ('typing_extensions.NotRequired', 'typing.NotRequired'):
if not self.allow_required:
self.fail("NotRequired[] can be only used in a TypedDict definition", t)
return AnyType(TypeOfAny.from_error)
if len(t.args) != 1:
self.fail("NotRequired[] must have exactly one type argument", t)
return AnyType(TypeOfAny.from_error)
return RequiredType(self.anal_type(t.args[0]), required=False)
elif self.anal_type_guard_arg(t, fullname) is not None:
# In most contexts, TypeGuard[...] acts as an alias for bool (ignoring its args)
return self.named_type('builtins.bool')
elif fullname in ('typing.Unpack', 'typing_extensions.Unpack'):
# We don't want people to try to use this yet.
if not self.options.enable_incomplete_features:
self.fail('"Unpack" is not supported by mypy yet', t)
return AnyType(TypeOfAny.from_error)
return UnpackType(
self.anal_type(t.args[0]), line=t.line, column=t.column,
)
return None
def get_omitted_any(self, typ: Type, fullname: Optional[str] = None) -> AnyType:
disallow_any = not self.is_typeshed_stub and self.options.disallow_any_generics
return get_omitted_any(disallow_any, self.fail, self.note, typ,
self.options.python_version, fullname)
def analyze_type_with_type_info(
self, info: TypeInfo, args: Sequence[Type], ctx: Context) -> Type:
"""Bind unbound type when were able to find target TypeInfo.
This handles simple cases like 'int', 'modname.UserClass[str]', etc.
"""
if len(args) > 0 and info.fullname == 'builtins.tuple':
fallback = Instance(info, [AnyType(TypeOfAny.special_form)], ctx.line)
return TupleType(self.anal_array(args), fallback, ctx.line)
# Analyze arguments and (usually) construct Instance type. The
# number of type arguments and their values are
# checked only later, since we do not always know the
# valid count at this point. Thus we may construct an
# Instance with an invalid number of type arguments.
instance = Instance(info, self.anal_array(args, allow_param_spec=True),
ctx.line, ctx.column)
# Check type argument count.
if len(instance.args) != len(info.type_vars) and not self.defining_alias:
fix_instance(instance, self.fail, self.note,
disallow_any=self.options.disallow_any_generics and
not self.is_typeshed_stub,
python_version=self.options.python_version)
tup = info.tuple_type
if tup is not None:
# The class has a Tuple[...] base class so it will be
# represented as a tuple type.
if args:
self.fail('Generic tuple types not supported', ctx)
return AnyType(TypeOfAny.from_error)
return tup.copy_modified(items=self.anal_array(tup.items),
fallback=instance)
td = info.typeddict_type
if td is not None:
# The class has a TypedDict[...] base class so it will be
# represented as a typeddict type.
if args:
self.fail('Generic TypedDict types not supported', ctx)
return AnyType(TypeOfAny.from_error)
# Create a named TypedDictType
return td.copy_modified(item_types=self.anal_array(list(td.items.values())),
fallback=instance)
return instance
def analyze_unbound_type_without_type_info(self, t: UnboundType, sym: SymbolTableNode,
defining_literal: bool) -> Type:
"""Figure out what an unbound type that doesn't refer to a TypeInfo node means.
This is something unusual. We try our best to find out what it is.
"""
name = sym.fullname
if name is None:
assert sym.node is not None
name = sym.node.name
# Option 1:
# Something with an Any type -- make it an alias for Any in a type
# context. This is slightly problematic as it allows using the type 'Any'
# as a base class -- however, this will fail soon at runtime so the problem
# is pretty minor.
if isinstance(sym.node, Var):
typ = get_proper_type(sym.node.type)
if isinstance(typ, AnyType):
return AnyType(TypeOfAny.from_unimported_type,
missing_import_name=typ.missing_import_name)
# Option 2:
# Unbound type variable. Currently these may be still valid,
# for example when defining a generic type alias.
unbound_tvar = (isinstance(sym.node, TypeVarExpr) and
self.tvar_scope.get_binding(sym) is None)
if self.allow_unbound_tvars and unbound_tvar:
return t
# Option 3:
# Enum value. Note: we only want to return a LiteralType when
# we're using this enum value specifically within context of
# a "Literal[...]" type. So, if `defining_literal` is not set,
# we bail out early with an error.
#
# If, in the distant future, we decide to permit things like
# `def foo(x: Color.RED) -> None: ...`, we can remove that
# check entirely.
if isinstance(sym.node, Var) and sym.node.info and sym.node.info.is_enum:
value = sym.node.name
base_enum_short_name = sym.node.info.name
if not defining_literal:
msg = message_registry.INVALID_TYPE_RAW_ENUM_VALUE.format(
base_enum_short_name, value)
self.fail(msg, t)
return AnyType(TypeOfAny.from_error)
return LiteralType(
value=value,
fallback=Instance(sym.node.info, [], line=t.line, column=t.column),
line=t.line,
column=t.column,
)
# None of the above options worked. We parse the args (if there are any)
# to make sure there are no remaining semanal-only types, then give up.
t = t.copy_modified(args=self.anal_array(t.args))
# TODO: Move this message building logic to messages.py.
notes: List[str] = []
if isinstance(sym.node, Var):
notes.append('See https://mypy.readthedocs.io/en/'
'stable/common_issues.html#variables-vs-type-aliases')
message = 'Variable "{}" is not valid as a type'
elif isinstance(sym.node, (SYMBOL_FUNCBASE_TYPES, Decorator)):
message = 'Function "{}" is not valid as a type'
if name == 'builtins.any':
notes.append('Perhaps you meant "typing.Any" instead of "any"?')
elif name == 'builtins.callable':
notes.append('Perhaps you meant "typing.Callable" instead of "callable"?')
else:
notes.append('Perhaps you need "Callable[...]" or a callback protocol?')
elif isinstance(sym.node, MypyFile):
# TODO: suggest a protocol when supported.
message = 'Module "{}" is not valid as a type'
elif unbound_tvar:
message = 'Type variable "{}" is unbound'
short = name.split('.')[-1]
notes.append(('(Hint: Use "Generic[{}]" or "Protocol[{}]" base class'
' to bind "{}" inside a class)').format(short, short, short))
notes.append('(Hint: Use "{}" in function signature to bind "{}"'
' inside a function)'.format(short, short))
else:
message = 'Cannot interpret reference "{}" as a type'
self.fail(message.format(name), t, code=codes.VALID_TYPE)
for note in notes:
self.note(note, t, code=codes.VALID_TYPE)
# TODO: Would it be better to always return Any instead of UnboundType
# in case of an error? On one hand, UnboundType has a name so error messages
# are more detailed, on the other hand, some of them may be bogus,
# see https://github.com/python/mypy/issues/4987.
return t
def visit_any(self, t: AnyType) -> Type:
return t
def visit_none_type(self, t: NoneType) -> Type:
return t
def visit_uninhabited_type(self, t: UninhabitedType) -> Type:
return t
def visit_erased_type(self, t: ErasedType) -> Type:
# This type should exist only temporarily during type inference
assert False, "Internal error: Unexpected erased type"
def visit_deleted_type(self, t: DeletedType) -> Type:
return t
def visit_type_list(self, t: TypeList) -> Type:
self.fail('Bracketed expression "[...]" is not valid as a type', t)
self.note('Did you mean "List[...]"?', t)
return AnyType(TypeOfAny.from_error)
def visit_callable_argument(self, t: CallableArgument) -> Type:
self.fail('Invalid type', t)
return AnyType(TypeOfAny.from_error)
def visit_instance(self, t: Instance) -> Type:
return t
def visit_type_alias_type(self, t: TypeAliasType) -> Type:
# TODO: should we do something here?
return t
def visit_type_var(self, t: TypeVarType) -> Type:
return t
def visit_param_spec(self, t: ParamSpecType) -> Type:
return t
def visit_unpack_type(self, t: UnpackType) -> Type:
raise NotImplementedError
def visit_callable_type(self, t: CallableType, nested: bool = True) -> Type:
# Every Callable can bind its own type variables, if they're not in the outer scope
with self.tvar_scope_frame():
if self.defining_alias:
variables = t.variables
else:
variables = self.bind_function_type_variables(t, t)
special = self.anal_type_guard(t.ret_type)
arg_kinds = t.arg_kinds
if len(arg_kinds) >= 2 and arg_kinds[-2] == ARG_STAR and arg_kinds[-1] == ARG_STAR2:
arg_types = self.anal_array(t.arg_types[:-2], nested=nested) + [
self.anal_star_arg_type(t.arg_types[-2], ARG_STAR, nested=nested),
self.anal_star_arg_type(t.arg_types[-1], ARG_STAR2, nested=nested),
]
else:
arg_types = self.anal_array(t.arg_types, nested=nested)
ret = t.copy_modified(arg_types=arg_types,
ret_type=self.anal_type(t.ret_type, nested=nested),
# If the fallback isn't filled in yet,
# its type will be the falsey FakeInfo
fallback=(t.fallback if t.fallback.type
else self.named_type('builtins.function')),
variables=self.anal_var_defs(variables),
type_guard=special,
)
return ret
def anal_type_guard(self, t: Type) -> Optional[Type]:
if isinstance(t, UnboundType):
sym = self.lookup_qualified(t.name, t)
if sym is not None and sym.node is not None:
return self.anal_type_guard_arg(t, sym.node.fullname)
# TODO: What if it's an Instance? Then use t.type.fullname?
return None
def anal_type_guard_arg(self, t: UnboundType, fullname: str) -> Optional[Type]:
if fullname in ('typing_extensions.TypeGuard', 'typing.TypeGuard'):
if len(t.args) != 1:
self.fail("TypeGuard must have exactly one type argument", t)
return AnyType(TypeOfAny.from_error)
return self.anal_type(t.args[0])
return None
def anal_star_arg_type(self, t: Type, kind: ArgKind, nested: bool) -> Type:
"""Analyze signature argument type for *args and **kwargs argument."""
# TODO: Check that suffix and kind match
if isinstance(t, UnboundType) and t.name and '.' in t.name and not t.args:
components = t.name.split('.')
sym = self.lookup_qualified('.'.join(components[:-1]), t)
if sym is not None and isinstance(sym.node, ParamSpecExpr):
tvar_def = self.tvar_scope.get_binding(sym)
if isinstance(tvar_def, ParamSpecType):
if kind == ARG_STAR:
flavor = ParamSpecFlavor.ARGS
elif kind == ARG_STAR2:
flavor = ParamSpecFlavor.KWARGS
else:
assert False, kind
return ParamSpecType(tvar_def.name, tvar_def.fullname, tvar_def.id, flavor,
upper_bound=self.named_type('builtins.object'),
line=t.line, column=t.column)
return self.anal_type(t, nested=nested)
def visit_overloaded(self, t: Overloaded) -> Type:
# Overloaded types are manually constructed in semanal.py by analyzing the
# AST and combining together the Callable types this visitor converts.
#
# So if we're ever asked to reanalyze an Overloaded type, we know it's
# fine to just return it as-is.
return t
def visit_tuple_type(self, t: TupleType) -> Type:
# Types such as (t1, t2, ...) only allowed in assignment statements. They'll
# generate errors elsewhere, and Tuple[t1, t2, ...] must be used instead.
if t.implicit and not self.allow_tuple_literal:
self.fail('Syntax error in type annotation', t, code=codes.SYNTAX)
if len(t.items) == 0:
self.note('Suggestion: Use Tuple[()] instead of () for an empty tuple, or '
'None for a function without a return value', t, code=codes.SYNTAX)
elif len(t.items) == 1:
self.note('Suggestion: Is there a spurious trailing comma?', t, code=codes.SYNTAX)
else:
self.note('Suggestion: Use Tuple[T1, ..., Tn] instead of (T1, ..., Tn)', t,
code=codes.SYNTAX)
return AnyType(TypeOfAny.from_error)
star_count = sum(1 for item in t.items if isinstance(item, StarType))
if star_count > 1:
self.fail('At most one star type allowed in a tuple', t)
if t.implicit:
return TupleType([AnyType(TypeOfAny.from_error) for _ in t.items],
self.named_type('builtins.tuple'),
t.line)
else:
return AnyType(TypeOfAny.from_error)
any_type = AnyType(TypeOfAny.special_form)
# If the fallback isn't filled in yet, its type will be the falsey FakeInfo
fallback = (t.partial_fallback if t.partial_fallback.type
else self.named_type('builtins.tuple', [any_type]))
return TupleType(self.anal_array(t.items), fallback, t.line)
def visit_typeddict_type(self, t: TypedDictType) -> Type:
items = OrderedDict([
(item_name, self.anal_type(item_type))
for (item_name, item_type) in t.items.items()
])
return TypedDictType(items, set(t.required_keys), t.fallback)
def visit_raw_expression_type(self, t: RawExpressionType) -> Type:
# We should never see a bare Literal. We synthesize these raw literals
# in the earlier stages of semantic analysis, but those
# "fake literals" should always be wrapped in an UnboundType
# corresponding to 'Literal'.
#
# Note: if at some point in the distant future, we decide to
# make signatures like "foo(x: 20) -> None" legal, we can change
# this method so it generates and returns an actual LiteralType
# instead.
if self.report_invalid_types:
if t.base_type_name in ('builtins.int', 'builtins.bool'):
# The only time it makes sense to use an int or bool is inside of
# a literal type.
msg = "Invalid type: try using Literal[{}] instead?".format(repr(t.literal_value))
elif t.base_type_name in ('builtins.float', 'builtins.complex'):
# We special-case warnings for floats and complex numbers.
msg = "Invalid type: {} literals cannot be used as a type".format(t.simple_name())
else:
# And in all other cases, we default to a generic error message.
# Note: the reason why we use a generic error message for strings
# but not ints or bools is because whenever we see an out-of-place
# string, it's unclear if the user meant to construct a literal type
# or just misspelled a regular type. So we avoid guessing.
msg = 'Invalid type comment or annotation'
self.fail(msg, t, code=codes.VALID_TYPE)
if t.note is not None:
self.note(t.note, t, code=codes.VALID_TYPE)
return AnyType(TypeOfAny.from_error, line=t.line, column=t.column)
def visit_literal_type(self, t: LiteralType) -> Type:
return t
def visit_star_type(self, t: StarType) -> Type:
return StarType(self.anal_type(t.type), t.line)
def visit_union_type(self, t: UnionType) -> Type:
if (t.uses_pep604_syntax is True
and t.is_evaluated is True
and not self.always_allow_new_syntax
and not self.options.python_version >= (3, 10)):
self.fail("X | Y syntax for unions requires Python 3.10", t)
return UnionType(self.anal_array(t.items), t.line)
def visit_partial_type(self, t: PartialType) -> Type:
assert False, "Internal error: Unexpected partial type"
def visit_ellipsis_type(self, t: EllipsisType) -> Type:
self.fail('Unexpected "..."', t)
return AnyType(TypeOfAny.from_error)
def visit_type_type(self, t: TypeType) -> Type:
return TypeType.make_normalized(self.anal_type(t.item), line=t.line)
def visit_placeholder_type(self, t: PlaceholderType) -> Type:
n = None if t.fullname is None else self.api.lookup_fully_qualified(t.fullname)
if not n or isinstance(n.node, PlaceholderNode):
self.api.defer() # Still incomplete
return t
else:
# TODO: Handle non-TypeInfo
assert isinstance(n.node, TypeInfo)
return self.analyze_type_with_type_info(n.node, t.args, t)
def analyze_callable_args_for_paramspec(
self,
callable_args: Type,
ret_type: Type,
fallback: Instance,
) -> Optional[CallableType]:
"""Construct a 'Callable[P, RET]', where P is ParamSpec, return None if we cannot."""
if not isinstance(callable_args, UnboundType):
return None
sym = self.lookup_qualified(callable_args.name, callable_args)
if sym is None:
return None
tvar_def = self.tvar_scope.get_binding(sym)
if not isinstance(tvar_def, ParamSpecType):
return None
# TODO: Use tuple[...] or Mapping[..] instead?
obj = self.named_type('builtins.object')
return CallableType(
[ParamSpecType(tvar_def.name, tvar_def.fullname, tvar_def.id, ParamSpecFlavor.ARGS,
upper_bound=obj),
ParamSpecType(tvar_def.name, tvar_def.fullname, tvar_def.id, ParamSpecFlavor.KWARGS,
upper_bound=obj)],
[nodes.ARG_STAR, nodes.ARG_STAR2],
[None, None],
ret_type=ret_type,
fallback=fallback,
)
def analyze_callable_type(self, t: UnboundType) -> Type:
fallback = self.named_type('builtins.function')
if len(t.args) == 0:
# Callable (bare). Treat as Callable[..., Any].
any_type = self.get_omitted_any(t)
ret = callable_with_ellipsis(any_type, any_type, fallback)
elif len(t.args) == 2:
callable_args = t.args[0]
ret_type = t.args[1]
if isinstance(callable_args, TypeList):
# Callable[[ARG, ...], RET] (ordinary callable type)
analyzed_args = self.analyze_callable_args(callable_args)
if analyzed_args is None:
return AnyType(TypeOfAny.from_error)
args, kinds, names = analyzed_args
ret = CallableType(args,
kinds,
names,
ret_type=ret_type,
fallback=fallback)
elif isinstance(callable_args, EllipsisType):
# Callable[..., RET] (with literal ellipsis; accept arbitrary arguments)
ret = callable_with_ellipsis(AnyType(TypeOfAny.explicit),
ret_type=ret_type,
fallback=fallback)
else:
# Callable[P, RET] (where P is ParamSpec)
maybe_ret = self.analyze_callable_args_for_paramspec(
callable_args,
ret_type,
fallback
)
if maybe_ret is None:
# Callable[?, RET] (where ? is something invalid)
# TODO(PEP612): change error to mention paramspec, once we actually have some
# support for it
self.fail('The first argument to Callable must be a list of types or "..."', t)
return AnyType(TypeOfAny.from_error)
ret = maybe_ret
else:
self.fail('Please use "Callable[[<parameters>], <return type>]" or "Callable"', t)
return AnyType(TypeOfAny.from_error)
assert isinstance(ret, CallableType)
return ret.accept(self)
def analyze_callable_args(self, arglist: TypeList) -> Optional[Tuple[List[Type],
List[ArgKind],
List[Optional[str]]]]:
args: List[Type] = []
kinds: List[ArgKind] = []
names: List[Optional[str]] = []
for arg in arglist.items:
if isinstance(arg, CallableArgument):
args.append(arg.typ)
names.append(arg.name)
if arg.constructor is None:
return None
found = self.lookup_qualified(arg.constructor, arg)
if found is None:
# Looking it up already put an error message in
return None
elif found.fullname not in ARG_KINDS_BY_CONSTRUCTOR:
self.fail('Invalid argument constructor "{}"'.format(
found.fullname), arg)
return None
else:
assert found.fullname is not None
kind = ARG_KINDS_BY_CONSTRUCTOR[found.fullname]
kinds.append(kind)
if arg.name is not None and kind.is_star():
self.fail("{} arguments should not have names".format(
arg.constructor), arg)
return None
else:
args.append(arg)
kinds.append(ARG_POS)
names.append(None)
# Note that arglist below is only used for error context.
check_arg_names(names, [arglist] * len(args), self.fail, "Callable")
check_arg_kinds(kinds, [arglist] * len(args), self.fail)
return args, kinds, names
def analyze_literal_type(self, t: UnboundType) -> Type:
if len(t.args) == 0:
self.fail('Literal[...] must have at least one parameter', t)
return AnyType(TypeOfAny.from_error)
output: List[Type] = []
for i, arg in enumerate(t.args):
analyzed_types = self.analyze_literal_param(i + 1, arg, t)
if analyzed_types is None:
return AnyType(TypeOfAny.from_error)
else:
output.extend(analyzed_types)
return UnionType.make_union(output, line=t.line)
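    # The method above turns, e.g., Literal[1, "x"] into a union of the literal
    # types 1 and "x" (an illustrative example, not from the original source).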
def analyze_literal_param(self, idx: int, arg: Type, ctx: Context) -> Optional[List[Type]]:
# This UnboundType was originally defined as a string.
if isinstance(arg, UnboundType) and arg.original_str_expr is not None:
assert arg.original_str_fallback is not None
return [LiteralType(
value=arg.original_str_expr,
fallback=self.named_type_with_normalized_str(arg.original_str_fallback),
line=arg.line,
column=arg.column,
)]
# If arg is an UnboundType that was *not* originally defined as
# a string, try expanding it in case it's a type alias or something.
if isinstance(arg, UnboundType):
self.nesting_level += 1
try:
arg = self.visit_unbound_type(arg, defining_literal=True)
finally:
self.nesting_level -= 1
# Literal[...] cannot contain Any. Give up and add an error message
# (if we haven't already).
arg = get_proper_type(arg)
if isinstance(arg, AnyType):
# Note: We can encounter Literals containing 'Any' under three circumstances:
#
# 1. If the user attempts use an explicit Any as a parameter
# 2. If the user is trying to use an enum value imported from a module with
# no type hints, giving it an implicit type of 'Any'
# 3. If there's some other underlying problem with the parameter.
#
# We report an error in only the first two cases. In the third case, we assume
# some other region of the code has already reported a more relevant error.
#
# TODO: Once we start adding support for enums, make sure we report a custom
# error for case 2 as well.
if arg.type_of_any not in (TypeOfAny.from_error, TypeOfAny.special_form):
self.fail('Parameter {} of Literal[...] cannot be of type "Any"'.format(idx), ctx)
return None
elif isinstance(arg, RawExpressionType):
# A raw literal. Convert it directly into a literal if we can.
if arg.literal_value is None:
name = arg.simple_name()
if name in ('float', 'complex'):
msg = 'Parameter {} of Literal[...] cannot be of type "{}"'.format(idx, name)
else:
msg = 'Invalid type: Literal[...] cannot contain arbitrary expressions'
self.fail(msg, ctx)
# Note: we deliberately ignore arg.note here: the extra info might normally be
# helpful, but it generally won't make sense in the context of a Literal[...].
return None
# Remap bytes and unicode into the appropriate type for the correct Python version
fallback = self.named_type_with_normalized_str(arg.base_type_name)
assert isinstance(fallback, Instance)
return [LiteralType(arg.literal_value, fallback, line=arg.line, column=arg.column)]
elif isinstance(arg, (NoneType, LiteralType)):
# Types that we can just add directly to the literal/potential union of literals.
return [arg]
elif isinstance(arg, Instance) and arg.last_known_value is not None:
# Types generated from declarations like "var: Final = 4".
return [arg.last_known_value]
elif isinstance(arg, UnionType):
out = []
for union_arg in arg.items:
union_result = self.analyze_literal_param(idx, union_arg, ctx)
if union_result is None:
return None
out.extend(union_result)
return out
else:
self.fail('Parameter {} of Literal[...] is invalid'.format(idx), ctx)
return None
def analyze_type(self, t: Type) -> Type:
return t.accept(self)
def fail(self, msg: str, ctx: Context, *, code: Optional[ErrorCode] = None) -> None:
self.fail_func(msg, ctx, code=code)
def note(self, msg: str, ctx: Context, *, code: Optional[ErrorCode] = None) -> None:
self.note_func(msg, ctx, code=code)
@contextmanager
def tvar_scope_frame(self) -> Iterator[None]:
old_scope = self.tvar_scope
self.tvar_scope = self.tvar_scope.method_frame()
yield
self.tvar_scope = old_scope
def infer_type_variables(self,
type: CallableType) -> List[Tuple[str, TypeVarLikeExpr]]:
"""Return list of unique type variables referred to in a callable."""
names: List[str] = []
tvars: List[TypeVarLikeExpr] = []
for arg in type.arg_types:
for name, tvar_expr in arg.accept(
TypeVarLikeQuery(self.lookup_qualified, self.tvar_scope)
):
if name not in names:
names.append(name)
tvars.append(tvar_expr)
# When finding type variables in the return type of a function, don't
# look inside Callable types. Type variables only appearing in
# functions in the return type belong to those functions, not the
# function we're currently analyzing.
for name, tvar_expr in type.ret_type.accept(
TypeVarLikeQuery(self.lookup_qualified, self.tvar_scope, include_callables=False)
):
if name not in names:
names.append(name)
tvars.append(tvar_expr)
return list(zip(names, tvars))
def bind_function_type_variables(
self, fun_type: CallableType, defn: Context
) -> Sequence[TypeVarLikeType]:
"""Find the type variables of the function type and bind them in our tvar_scope"""
if fun_type.variables:
for var in fun_type.variables:
var_node = self.lookup_qualified(var.name, defn)
assert var_node, "Binding for function type variable not found within function"
var_expr = var_node.node
assert isinstance(var_expr, TypeVarLikeExpr)
self.tvar_scope.bind_new(var.name, var_expr)
return fun_type.variables
typevars = self.infer_type_variables(fun_type)
# Do not define a new type variable if already defined in scope.
typevars = [(name, tvar) for name, tvar in typevars
if not self.is_defined_type_var(name, defn)]
defs: List[TypeVarLikeType] = []
for name, tvar in typevars:
if not self.tvar_scope.allow_binding(tvar.fullname):
self.fail('Type variable "{}" is bound by an outer class'.format(name), defn)
self.tvar_scope.bind_new(name, tvar)
binding = self.tvar_scope.get_binding(tvar.fullname)
assert binding is not None
defs.append(binding)
return defs
def is_defined_type_var(self, tvar: str, context: Context) -> bool:
tvar_node = self.lookup_qualified(tvar, context)
if not tvar_node:
return False
return self.tvar_scope.get_binding(tvar_node) is not None
def anal_array(self,
a: Iterable[Type],
nested: bool = True, *,
allow_param_spec: bool = False) -> List[Type]:
res: List[Type] = []
for t in a:
res.append(self.anal_type(t, nested, allow_param_spec=allow_param_spec))
return res
def anal_type(self, t: Type, nested: bool = True, *, allow_param_spec: bool = False) -> Type:
if nested:
self.nesting_level += 1
old_allow_required = self.allow_required
self.allow_required = False
try:
analyzed = t.accept(self)
finally:
if nested:
self.nesting_level -= 1
self.allow_required = old_allow_required
if (not allow_param_spec
and isinstance(analyzed, ParamSpecType)
and analyzed.flavor == ParamSpecFlavor.BARE):
self.fail('Invalid location for ParamSpec "{}"'.format(analyzed.name), t)
self.note(
'You can use ParamSpec as the first argument to Callable, e.g., '
"'Callable[{}, int]'".format(analyzed.name),
t
)
return analyzed
def anal_var_def(self, var_def: TypeVarLikeType) -> TypeVarLikeType:
if isinstance(var_def, TypeVarType):
return TypeVarType(
var_def.name,
var_def.fullname,
var_def.id.raw_id,
self.anal_array(var_def.values),
var_def.upper_bound.accept(self),
var_def.variance,
var_def.line
)
else:
return var_def
def anal_var_defs(self, var_defs: Sequence[TypeVarLikeType]) -> List[TypeVarLikeType]:
return [self.anal_var_def(vd) for vd in var_defs]
def named_type_with_normalized_str(self, fully_qualified_name: str) -> Instance:
"""Does almost the same thing as `named_type`, except that we immediately
unalias `builtins.bytes` and `builtins.unicode` to `builtins.str` as appropriate.
"""
python_version = self.options.python_version
if python_version[0] == 2 and fully_qualified_name == 'builtins.bytes':
fully_qualified_name = 'builtins.str'
if python_version[0] >= 3 and fully_qualified_name == 'builtins.unicode':
fully_qualified_name = 'builtins.str'
return self.named_type(fully_qualified_name)
def named_type(self, fully_qualified_name: str,
args: Optional[List[Type]] = None,
line: int = -1,
column: int = -1) -> Instance:
node = self.lookup_fqn_func(fully_qualified_name)
assert isinstance(node.node, TypeInfo)
any_type = AnyType(TypeOfAny.special_form)
return Instance(node.node, args or [any_type] * len(node.node.defn.type_vars),
line=line, column=column)
def tuple_type(self, items: List[Type]) -> TupleType:
any_type = AnyType(TypeOfAny.special_form)
return TupleType(items, fallback=self.named_type('builtins.tuple', [any_type]))
TypeVarLikeList = List[Tuple[str, TypeVarLikeExpr]]
# Mypyc doesn't support callback protocols yet.
MsgCallback = Callable[[str, Context, DefaultNamedArg(Optional[ErrorCode], 'code')], None]
def get_omitted_any(disallow_any: bool, fail: MsgCallback, note: MsgCallback,
orig_type: Type, python_version: Tuple[int, int],
fullname: Optional[str] = None,
unexpanded_type: Optional[Type] = None) -> AnyType:
if disallow_any:
nongen_builtins = get_nongen_builtins(python_version)
if fullname in nongen_builtins:
typ = orig_type
# We use a dedicated error message for builtin generics (as the most common case).
alternative = nongen_builtins[fullname]
fail(message_registry.IMPLICIT_GENERIC_ANY_BUILTIN.format(alternative), typ,
code=codes.TYPE_ARG)
else:
typ = unexpanded_type or orig_type
type_str = typ.name if isinstance(typ, UnboundType) else format_type_bare(typ)
fail(
message_registry.BARE_GENERIC.format(quote_type_string(type_str)),
typ,
code=codes.TYPE_ARG)
base_type = get_proper_type(orig_type)
base_fullname = (
base_type.type.fullname if isinstance(base_type, Instance) else fullname
)
# Ideally, we'd check whether the type is quoted or `from __future__ annotations`
# is set before issuing this note
if python_version < (3, 9) and base_fullname in GENERIC_STUB_NOT_AT_RUNTIME_TYPES:
# Recommend `from __future__ import annotations` or to put type in quotes
# (string literal escaping) for classes not generic at runtime
note(
"Subscripting classes that are not generic at runtime may require "
"escaping, see https://mypy.readthedocs.io/en/stable/runtime_troubles.html"
"#not-generic-runtime",
typ,
code=codes.TYPE_ARG)
any_type = AnyType(TypeOfAny.from_error, line=typ.line, column=typ.column)
else:
any_type = AnyType(
TypeOfAny.from_omitted_generics, line=orig_type.line, column=orig_type.column
)
return any_type
def fix_instance(t: Instance, fail: MsgCallback, note: MsgCallback,
disallow_any: bool, python_version: Tuple[int, int],
use_generic_error: bool = False,
unexpanded_type: Optional[Type] = None,) -> None:
"""Fix a malformed instance by replacing all type arguments with Any.
Also emit a suitable error if this is not due to implicit Any's.
"""
if len(t.args) == 0:
if use_generic_error:
fullname: Optional[str] = None
else:
fullname = t.type.fullname
any_type = get_omitted_any(disallow_any, fail, note, t, python_version, fullname,
unexpanded_type)
t.args = (any_type,) * len(t.type.type_vars)
return
# Invalid number of type parameters.
n = len(t.type.type_vars)
s = '{} type arguments'.format(n)
if n == 0:
s = 'no type arguments'
elif n == 1:
s = '1 type argument'
act = str(len(t.args))
if act == '0':
act = 'none'
fail('"{}" expects {}, but {} given'.format(
t.type.name, s, act), t, code=codes.TYPE_ARG)
# Construct the correct number of type arguments, as
# otherwise the type checker may crash as it expects
# things to be right.
t.args = tuple(AnyType(TypeOfAny.from_error) for _ in t.type.type_vars)
t.invalid = True
def expand_type_alias(node: TypeAlias, args: List[Type],
fail: MsgCallback, no_args: bool, ctx: Context, *,
unexpanded_type: Optional[Type] = None,
disallow_any: bool = False) -> Type:
"""Expand a (generic) type alias target following the rules outlined in TypeAlias docstring.
Here:
target: original target type (contains unbound type variables)
alias_tvars: type variable names
args: types to be substituted in place of type variables
fail: error reporter callback
no_args: whether original definition used a bare generic `A = List`
ctx: context where expansion happens
"""
exp_len = len(node.alias_tvars)
act_len = len(args)
if exp_len > 0 and act_len == 0:
# Interpret bare Alias same as normal generic, i.e., Alias[Any, Any, ...]
return set_any_tvars(node, ctx.line, ctx.column,
disallow_any=disallow_any, fail=fail,
unexpanded_type=unexpanded_type)
if exp_len == 0 and act_len == 0:
if no_args:
assert isinstance(node.target, Instance) # type: ignore[misc]
# Note: this is the only case where we use an eager expansion. See more info about
# no_args aliases like L = List in the docstring for TypeAlias class.
return Instance(node.target.type, [], line=ctx.line, column=ctx.column)
return TypeAliasType(node, [], line=ctx.line, column=ctx.column)
if (exp_len == 0 and act_len > 0
and isinstance(node.target, Instance) # type: ignore[misc]
and no_args):
tp = Instance(node.target.type, args)
tp.line = ctx.line
tp.column = ctx.column
return tp
if act_len != exp_len:
fail('Bad number of arguments for type alias, expected: %s, given: %s'
% (exp_len, act_len), ctx)
return set_any_tvars(node, ctx.line, ctx.column, from_error=True)
typ = TypeAliasType(node, args, ctx.line, ctx.column)
assert typ.alias is not None
# HACK: Implement FlexibleAlias[T, typ] by expanding it to typ here.
if (isinstance(typ.alias.target, Instance) # type: ignore
and typ.alias.target.type.fullname == 'mypy_extensions.FlexibleAlias'):
exp = get_proper_type(typ)
assert isinstance(exp, Instance)
return exp.args[-1]
return typ
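# A rough illustration of the rules above, assuming a hypothetical generic alias
# ``A = List[T]`` (the names here are examples only, not part of this module):
#   A[int]      -> TypeAliasType(A, [int])             (act_len == exp_len)
#   bare A      -> set_any_tvars(...), i.e. A[Any]     (exp_len > 0, act_len == 0)
#   A[int, str] -> error + Any arguments (set_any_tvars with from_error=True)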
def set_any_tvars(node: TypeAlias,
newline: int, newcolumn: int, *,
from_error: bool = False,
disallow_any: bool = False,
fail: Optional[MsgCallback] = None,
unexpanded_type: Optional[Type] = None) -> Type:
if from_error or disallow_any:
type_of_any = TypeOfAny.from_error
else:
type_of_any = TypeOfAny.from_omitted_generics
if disallow_any:
assert fail is not None
otype = unexpanded_type or node.target
type_str = otype.name if isinstance(otype, UnboundType) else format_type_bare(otype)
fail(message_registry.BARE_GENERIC.format(quote_type_string(type_str)),
Context(newline, newcolumn), code=codes.TYPE_ARG)
any_type = AnyType(type_of_any, line=newline, column=newcolumn)
return TypeAliasType(node, [any_type] * len(node.alias_tvars), newline, newcolumn)
def remove_dups(tvars: Iterable[T]) -> List[T]:
# Get unique elements in order of appearance
all_tvars: Set[T] = set()
new_tvars: List[T] = []
for t in tvars:
if t not in all_tvars:
new_tvars.append(t)
all_tvars.add(t)
return new_tvars
def flatten_tvars(ll: Iterable[List[T]]) -> List[T]:
return remove_dups(chain.from_iterable(ll))
class TypeVarLikeQuery(TypeQuery[TypeVarLikeList]):
"""Find TypeVar and ParamSpec references in an unbound type."""
def __init__(self,
lookup: Callable[[str, Context], Optional[SymbolTableNode]],
scope: 'TypeVarLikeScope',
*,
include_callables: bool = True,
include_bound_tvars: bool = False) -> None:
self.include_callables = include_callables
self.lookup = lookup
self.scope = scope
self.include_bound_tvars = include_bound_tvars
super().__init__(flatten_tvars)
def _seems_like_callable(self, type: UnboundType) -> bool:
if not type.args:
return False
if isinstance(type.args[0], (EllipsisType, TypeList)):
return True
return False
def visit_unbound_type(self, t: UnboundType) -> TypeVarLikeList:
name = t.name
node = None
# Special case P.args and P.kwargs for ParamSpecs only.
if name.endswith('args'):
if name.endswith('.args') or name.endswith('.kwargs'):
base = '.'.join(name.split('.')[:-1])
n = self.lookup(base, t)
if n is not None and isinstance(n.node, ParamSpecExpr):
node = n
name = base
if node is None:
node = self.lookup(name, t)
if node and isinstance(node.node, TypeVarLikeExpr) and (
self.include_bound_tvars or self.scope.get_binding(node) is None):
assert isinstance(node.node, TypeVarLikeExpr)
return [(name, node.node)]
elif not self.include_callables and self._seems_like_callable(t):
return []
elif node and node.fullname in LITERAL_TYPE_NAMES:
return []
elif node and node.fullname in ANNOTATED_TYPE_NAMES and t.args:
# Don't query the second argument to Annotated for TypeVars
return self.query_types([t.args[0]])
else:
return super().visit_unbound_type(t)
def visit_callable_type(self, t: CallableType) -> TypeVarLikeList:
if self.include_callables:
return super().visit_callable_type(t)
else:
return []
def check_for_explicit_any(typ: Optional[Type],
options: Options,
is_typeshed_stub: bool,
msg: MessageBuilder,
context: Context) -> None:
if (options.disallow_any_explicit and
not is_typeshed_stub and
typ and
has_explicit_any(typ)):
msg.explicit_any(context)
def has_explicit_any(t: Type) -> bool:
"""
    Whether this type, or a type it contains, is an Any coming from an explicit type annotation
"""
return t.accept(HasExplicitAny())
class HasExplicitAny(TypeQuery[bool]):
def __init__(self) -> None:
super().__init__(any)
def visit_any(self, t: AnyType) -> bool:
return t.type_of_any == TypeOfAny.explicit
def visit_typeddict_type(self, t: TypedDictType) -> bool:
# typeddict is checked during TypedDict declaration, so don't typecheck it here.
return False
def has_any_from_unimported_type(t: Type) -> bool:
"""Return true if this type is Any because an import was not followed.
    If the type t is such an Any type, or has type arguments that contain such an Any type,
    this function will return true.
"""
return t.accept(HasAnyFromUnimportedType())
class HasAnyFromUnimportedType(TypeQuery[bool]):
def __init__(self) -> None:
super().__init__(any)
def visit_any(self, t: AnyType) -> bool:
return t.type_of_any == TypeOfAny.from_unimported_type
def visit_typeddict_type(self, t: TypedDictType) -> bool:
# typeddict is checked during TypedDict declaration, so don't typecheck it here
return False
def collect_all_inner_types(t: Type) -> List[Type]:
"""
Return all types that `t` contains
"""
return t.accept(CollectAllInnerTypesQuery())
class CollectAllInnerTypesQuery(TypeQuery[List[Type]]):
def __init__(self) -> None:
super().__init__(self.combine_lists_strategy)
def query_types(self, types: Iterable[Type]) -> List[Type]:
return self.strategy([t.accept(self) for t in types]) + list(types)
@classmethod
def combine_lists_strategy(cls, it: Iterable[List[Type]]) -> List[Type]:
return list(itertools.chain.from_iterable(it))
def make_optional_type(t: Type) -> Type:
"""Return the type corresponding to Optional[t].
Note that we can't use normal union simplification, since this function
is called during semantic analysis and simplification only works during
type checking.
"""
t = get_proper_type(t)
if isinstance(t, NoneType):
return t
elif isinstance(t, UnionType):
items = [item for item in union_items(t)
if not isinstance(item, NoneType)]
return UnionType(items + [NoneType()], t.line, t.column)
else:
return UnionType([t, NoneType()], t.line, t.column)
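# Rough examples of the cases handled above (X stands for an arbitrary
# non-optional proper type; shown for illustration only):
#   make_optional_type(NoneType())       -> None
#   make_optional_type(Union[X, None])   -> Union[X, None]
#   make_optional_type(X)                -> Union[X, None]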
def fix_instance_types(t: Type, fail: MsgCallback, note: MsgCallback,
python_version: Tuple[int, int]) -> None:
"""Recursively fix all instance types (type argument count) in a given type.
For example 'Union[Dict, List[str, int]]' will be transformed into
'Union[Dict[Any, Any], List[Any]]' in place.
"""
t.accept(InstanceFixer(fail, note, python_version))
class InstanceFixer(TypeTraverserVisitor):
def __init__(
self, fail: MsgCallback, note: MsgCallback, python_version: Tuple[int, int]
) -> None:
self.fail = fail
self.note = note
self.python_version = python_version
def visit_instance(self, typ: Instance) -> None:
super().visit_instance(typ)
if len(typ.args) != len(typ.type.type_vars):
fix_instance(typ, self.fail, self.note, disallow_any=False,
python_version=self.python_version, use_generic_error=True)
|
py | b40beceb1b24436d5ca643a2001b3d6ea750ff60 | #!/usr/bin/env python
# coding: utf-8
# # Automating Systems and Processes with Python
#
# ### Challenge:
#
# Every day, our system updates the previous day's sales.
# Your daily job, as an analyst, is to send an e-mail to the board as soon as you start working, with the revenue and the number of products sold on the previous day
#
# Board e-mail: [email protected]<br>
# Location where the system publishes the previous day's sales: https://drive.google.com/drive/folders/149xknr9JvrlEnhNWO49zPcw0PW5icxga?usp=sharing
#
# To solve this, we will use pyautogui, a library for automating mouse and keyboard commands
# In[23]:
import pyautogui
import time
import pyperclip
# default pause
pyautogui.PAUSE=1
# open the browser
#pyautogui.press("winleft")
#pyautogui.write("chrome")
#pyautogui.press("enter")
# Alert about the start of the process
pyautogui.alert("Começarei o processo, aperte OK e não mexa em mais nada!")
# Open a new tab
pyautogui.hotkey('ctrl', 't')
# Open the drive
# Avoid pyautogui.write because the browser may modify the address with autocomplete
# Create a constant with the address and paste it directly
link = "https://drive.google.com/drive/folders/149xknr9JvrlEnhNWO49zPcw0PW5icxga"
pyperclip.copy(link)
pyautogui.hotkey("ctrl", "v")
pyautogui.press("enter")
# Wait before the next command, so the action is not performed during some loading delay
time.sleep(10)
# Problematic part of the code
# Find the pointer position on the screen with pyautogui.position()
# Use the mouse pointer to click on a region of the screen; if the screen resolution/window/object position changes, this will break
pyautogui.click(227, 194, clicks=2)
time.sleep(6)
pyautogui.click(227, 194)
pyautogui.click(1465, 125)
pyautogui.click(1362, 404)
time.sleep(6)
# In[26]:
# Now let's read the downloaded file to get the indicators
#
# - Revenue
# - Quantity of products
import pandas as pd
df = pd.read_excel(r'C:\Users\soare\Downloads\Vendas - Dez.xlsx')
display(df)
faturamento = df['Valor Final'].sum()
qtde_produtos = df['Quantidade'].sum()
display(faturamento, qtde_produtos)
# In[34]:
# ### Now let's send an e-mail through Gmail
pyautogui.hotkey('ctrl', 't')
pyautogui.write("mail.google.com")
pyautogui.press("enter")
time.sleep(6)
# Compose a new e-mail
pyautogui.click(90, 190)
pyautogui.write("[email protected]")
pyautogui.press("tab")
pyautogui.press("tab")
assunto = "Relatório Imersão"
pyperclip.copy(assunto)
pyautogui.hotkey("ctrl", "v")
pyautogui.press("tab")
time.sleep(6)
# Create the e-mail body
texto = f"""
Saudações.
O faturamento de ontem foi de : R${faturamento:,.2f}
A quantidade de produtos vendidos foi de: {qtde_produtos:,}
Att.:
Paulo Soares"""
pyperclip.copy(texto)
pyautogui.hotkey("ctrl", "v")
pyautogui.hotkey("ctrl", "enter")
|
py | b40bed524c56330512b80dd8e294ae8679a02f46 | # Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""GeneralizedExtremeValue bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import bijector
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import tensor_util
__all__ = [
'GeneralizedExtremeValueCDF',
]
class GeneralizedExtremeValueCDF(bijector.Bijector):
"""Compute the GeneralizedExtremeValue CDF.
Compute `Y = g(X) = exp(-t(X))`,
where `t(x)` is defined to be:
  * `(1 + conc * (x - loc) / scale) ** (-1 / conc)` when `conc != 0`;
  * `exp(-(x - loc) / scale)` when `conc = 0`.
This bijector maps inputs from the domain to `[0, 1]`, where the domain is
* [loc - scale/conc, inf) when conc > 0;
* (-inf, loc - scale/conc] when conc < 0;
* (-inf, inf) when conc = 0;
The inverse of the bijector applied to a uniform random variable
`X ~ U(0, 1)` gives back a random variable with the
[Generalized extreme value distribution](
  https://en.wikipedia.org/wiki/Generalized_extreme_value_distribution):
When `concentration -> +-inf`, the probability mass concentrates near `loc`.
```none
Y ~ GeneralizedExtremeValueCDF(loc, scale, conc)
pdf(y; loc, scale, conc) = t(y; loc, scale, conc) ** (1 + conc) * exp(
- t(y; loc, scale, conc) ) / scale
where t(x) =
    * (1 + conc * (x - loc) / scale) ** (-1 / conc) when conc != 0;
* exp(-(x - loc) / scale) when conc = 0.
```
"""
def __init__(self,
loc=0.,
scale=1.,
concentration=0,
validate_args=False,
name='generalizedextremevalue_cdf'):
"""Instantiates the `GeneralizedExtremeValueCDF` bijector.
Args:
loc: Float-like `Tensor` that is the same dtype and is broadcastable with
`scale` and `concentration`.
scale: Positive Float-like `Tensor` that is the same dtype and is
broadcastable with `loc` and `concentration`.
concentration: Nonzero float-like `Tensor` that is the same dtype and is
broadcastable with `loc` and `scale`.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
name: Python `str` name given to ops managed by this object.
"""
parameters = dict(locals())
with tf.name_scope(name) as name:
dtype = dtype_util.common_dtype([loc, scale, concentration],
dtype_hint=tf.float32)
self._loc = tensor_util.convert_nonref_to_tensor(
loc, dtype=dtype, name='loc')
self._scale = tensor_util.convert_nonref_to_tensor(
scale, dtype=dtype, name='scale')
self._concentration = tensor_util.convert_nonref_to_tensor(
concentration, dtype=dtype, name='concentration')
super(GeneralizedExtremeValueCDF, self).__init__(
validate_args=validate_args,
forward_min_event_ndims=0,
parameters=parameters,
name=name)
@property
def loc(self):
"""The location parameter in the Generalized Extreme Value CDF."""
return self._loc
@property
def scale(self):
"""The scale parameter in the Generalized Extreme Value CDF."""
return self._scale
@property
def concentration(self):
"""The concentration parameter in the Generalized Extreme Value CDF."""
return self._concentration
@classmethod
def _is_increasing(cls):
return True
def _forward(self, x):
loc = tf.convert_to_tensor(self.loc)
scale = tf.convert_to_tensor(self.scale)
concentration = tf.convert_to_tensor(self.concentration)
with tf.control_dependencies(
self._maybe_assert_valid_x(
x, loc=loc, scale=scale, concentration=concentration)):
z = (x - loc) / scale
t = tf.where(
tf.equal(concentration, 0.), tf.math.exp(-z),
tf.math.exp(-tf.math.log1p(z * concentration) / concentration))
return tf.exp(-t)
def _inverse(self, y):
with tf.control_dependencies(self._maybe_assert_valid_y(y)):
t = -tf.math.log(y)
conc = tf.convert_to_tensor(self.concentration)
z = tf.where(
tf.equal(conc, 0.), -tf.math.log(t),
tf.math.expm1(-tf.math.log(t) * conc) / conc)
return self.loc + self.scale * z
def _forward_log_det_jacobian(self, x):
loc = tf.convert_to_tensor(self.loc)
scale = tf.convert_to_tensor(self.scale)
concentration = tf.convert_to_tensor(self.concentration)
with tf.control_dependencies(
self._maybe_assert_valid_x(
x, loc=loc, scale=scale, concentration=concentration)):
z = (x - loc) / scale
log_t = tf.where(
tf.equal(concentration, 0.), -z,
-tf.math.log1p(z * concentration) / concentration)
return (tf.math.multiply_no_nan(concentration + 1., log_t) -
tf.math.exp(log_t) - tf.math.log(scale))
def _inverse_log_det_jacobian(self, y):
with tf.control_dependencies(self._maybe_assert_valid_y(y)):
t = -tf.math.log(y)
log_dt = tf.math.xlogy(-self.concentration - 1., t)
return tf.math.log(self.scale / y) + log_dt
def _maybe_assert_valid_x(self, x, loc=None, scale=None, concentration=None):
if not self.validate_args:
return []
loc = tf.convert_to_tensor(self.loc) if loc is None else loc
scale = tf.convert_to_tensor(self.scale) if scale is None else scale
concentration = (
tf.convert_to_tensor(self.concentration) if concentration is None else
concentration)
# The support of this bijector depends on the sign of concentration.
is_in_bounds = tf.where(
concentration > 0.,
x >= loc - scale / concentration,
x <= loc - scale / concentration)
# For concentration 0, the domain is the whole line.
is_in_bounds = is_in_bounds | tf.math.equal(concentration, 0.)
return [
assert_util.assert_equal(
is_in_bounds,
True,
message='Forward transformation input must be inside domain.')
]
def _maybe_assert_valid_y(self, y):
if not self.validate_args:
return []
is_positive = assert_util.assert_non_negative(
y, message='Inverse transformation input must be greater than 0.')
less_than_one = assert_util.assert_less_equal(
y,
tf.constant(1., y.dtype),
message='Inverse transformation input must be less than or equal to 1.')
return [is_positive, less_than_one]
def _parameter_control_dependencies(self, is_init):
if not self.validate_args:
return []
assertions = []
if is_init != tensor_util.is_ref(self.scale):
assertions.append(
assert_util.assert_positive(
self.scale, message='Argument `scale` must be positive.'))
return assertions
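# A minimal usage sketch (assumed to be run with TensorFlow 2.x eager execution
# and TensorFlow Probability installed; the values below are arbitrary examples):
if __name__ == '__main__':
  bij = GeneralizedExtremeValueCDF(loc=0., scale=1., concentration=0.5)
  x = tf.constant([-1.0, 0.0, 2.0])   # inside the domain [loc - scale/conc, inf)
  y = bij.forward(x)                  # CDF values in [0, 1]
  x_back = bij.inverse(y)             # recovers x up to numerical error
  print(y.numpy(), x_back.numpy())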
|
py | b40bed8ebc15cf004ea580e033e417715f612b0b | from distutils.core import setup
from Cython.Build import cythonize
from distutils.extension import Extension
import numpy
extensions = [
Extension('cec2016', ['cec2016.pyx', 'cec16_test_func.cpp'],
include_dirs=[numpy.get_include()],
extra_compile_args=['-std=c++17'],
language='c++'
),
]
setup(
ext_modules=cythonize(extensions),
extra_compile_args=["-w", '-g', '-O3'],
)
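# Typical build invocation for this extension, assuming cec2016.pyx and
# cec16_test_func.cpp sit next to this file:
#   python setup.py build_ext --inplace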
|
py | b40bede817be3bda2d97e5879f9bcc247df23685 | """
Copyright 2018 EPAM Systems, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import os
import click
from syndicate.core import CONFIG, CONF_PATH
from syndicate.core.build.artifact_processor import (build_mvn_lambdas,
build_python_lambdas)
from syndicate.core.build.bundle_processor import (create_bundles_bucket,
load_bundle,
upload_bundle_to_s3)
from syndicate.core.build.deployment_processor import (
continue_deployment_resources, create_deployment_resources,
remove_deployment_resources, remove_failed_deploy_resources,
update_lambdas)
from syndicate.core.build.meta_processor import create_meta
from syndicate.core.conf.config_holder import (MVN_BUILD_TOOL_NAME,
PYTHON_BUILD_TOOL_NAME)
from syndicate.core.helper import (check_required_param,
create_bundle_callback,
handle_futures_progress_bar,
resolve_path_callback, timeit,
verify_bundle_callback,
verify_meta_bundle_callback)
# TODO - command descriptions
@click.group(name='syndicate')
def syndicate():
click.echo('Group syndicate')
click.echo('Path to sdct.conf: ' + CONF_PATH)
# =============================================================================
@syndicate.command(name='clean')
@timeit
@click.option('--deploy_name', nargs=1, callback=check_required_param)
@click.option('--bundle_name', nargs=1, callback=check_required_param)
@click.option('--clean_only_types', multiple=True)
@click.option('--clean_only_resources', multiple=True)
@click.option('--clean_only_resources_path', nargs=1, type=str)
@click.option('--excluded_resources', multiple=True)
@click.option('--excluded_resources_path', nargs=1, type=str)
@click.option('--excluded_types', multiple=True)
@click.option('--rollback', is_flag=True)
def clean(deploy_name, bundle_name, clean_only_types, clean_only_resources,
clean_only_resources_path, excluded_resources,
excluded_resources_path, excluded_types, rollback):
click.echo('Command clean')
click.echo('Deploy name: %s' % deploy_name)
if clean_only_types:
click.echo('Clean only types: %s' % str(clean_only_types))
if clean_only_resources:
click.echo('Clean only resources : %s' % clean_only_resources)
if clean_only_resources_path:
click.echo(
'Clean only resources path: %s' % clean_only_resources_path)
if excluded_resources:
click.echo('Excluded resources: %s' % str(excluded_resources))
if excluded_resources_path:
click.echo('Excluded resources path: %s' % excluded_resources_path)
if excluded_types:
click.echo('Excluded types: %s' % str(excluded_types))
if clean_only_resources_path and os.path.exists(
clean_only_resources_path):
clean_resources_list = json.load(open(clean_only_resources_path))
clean_only_resources = tuple(
set(clean_only_resources + tuple(clean_resources_list)))
if excluded_resources_path and os.path.exists(excluded_resources_path):
excluded_resources_list = json.load(open(excluded_resources_path))
excluded_resources = tuple(
set(excluded_resources + tuple(excluded_resources_list)))
if rollback:
remove_failed_deploy_resources(deploy_name=deploy_name,
bundle_name=bundle_name,
clean_only_resources=clean_only_resources,
clean_only_types=clean_only_types,
excluded_resources=excluded_resources,
excluded_types=excluded_types)
else:
remove_deployment_resources(deploy_name=deploy_name,
bundle_name=bundle_name,
clean_only_resources=clean_only_resources,
clean_only_types=clean_only_types,
excluded_resources=excluded_resources,
excluded_types=excluded_types)
click.echo('AWS resources were removed.')
# =============================================================================
@syndicate.command(name='mvn_compile_java')
@timeit
@click.option('--bundle_name', nargs=1, callback=create_bundle_callback)
@click.option('--project_path', '-path', nargs=1,
callback=resolve_path_callback)
def mvn_compile_java(bundle_name, project_path):
click.echo('Command compile java project path: %s' % project_path)
build_mvn_lambdas(bundle_name, project_path)
click.echo('Java artifacts were prepared successfully.')
@syndicate.command(name='assemble_python')
@timeit
@click.option('--bundle_name', nargs=1, callback=create_bundle_callback)
@click.option('--project_path', '-path', nargs=1,
callback=resolve_path_callback)
def assemble_python(bundle_name, project_path):
click.echo('Command assemble python: project_path: %s ' % project_path)
build_python_lambdas(bundle_name, project_path)
click.echo('Python artifacts were prepared successfully.')
COMMAND_TO_BUILD_MAPPING = {
MVN_BUILD_TOOL_NAME: mvn_compile_java,
PYTHON_BUILD_TOOL_NAME: assemble_python
}
@syndicate.command(name='build_artifacts')
@timeit
@click.option('--bundle_name', nargs=1, callback=create_bundle_callback)
@click.pass_context
def build_artifacts(ctx, bundle_name):
click.echo('Building artifacts ...')
if CONFIG.build_projects_mapping:
for key, values in CONFIG.build_projects_mapping.items():
for value in values:
func = COMMAND_TO_BUILD_MAPPING.get(key)
if func:
ctx.invoke(func, bundle_name=bundle_name,
project_path=value)
else:
click.echo('Build tool is not supported: %s' % key)
else:
click.echo('Projects to be built are not found')
# =============================================================================
@syndicate.command(name='package_meta')
@timeit
@click.option('--bundle_name', nargs=1, callback=verify_bundle_callback)
def package_meta(bundle_name):
click.echo('Package meta, bundle: %s' % bundle_name)
create_meta(bundle_name)
click.echo('Meta was configured successfully.')
# =============================================================================
@syndicate.command(name='create_deploy_target_bucket')
@timeit
def create_deploy_target_bucket():
click.echo('Create deploy target sdk: %s' % CONFIG.deploy_target_bucket)
create_bundles_bucket()
click.echo('Deploy target bucket was created successfully')
@syndicate.command(name='upload_bundle')
@timeit
@click.option('--bundle_name', nargs=1, callback=verify_meta_bundle_callback)
def upload_bundle(bundle_name):
click.echo('Upload bundle: %s' % bundle_name)
futures = upload_bundle_to_s3(bundle_name)
handle_futures_progress_bar(futures)
click.echo('Bundle was uploaded successfully')
@syndicate.command(name='copy_bundle')
@click.option('--bundle_name', nargs=1, callback=create_bundle_callback)
@click.option('--src_account_id', '-acc_id', nargs=1,
callback=check_required_param)
@click.option('--src_bucket_region', '-r', nargs=1,
callback=check_required_param)
@click.option('--src_bucket_name', '-bucket_name', nargs=1,
callback=check_required_param)
@click.option('--role_name', '-role', nargs=1,
callback=check_required_param)
@timeit
@click.pass_context
def copy_bundle(ctx, bundle_name, src_account_id, src_bucket_region,
src_bucket_name, role_name):
click.echo('Copy bundle: %s' % bundle_name)
click.echo('Bundle name: %s' % bundle_name)
click.echo('Source account id: %s' % src_account_id)
click.echo('Source bucket region: %s' % src_bucket_region)
click.echo('Source bucket name: %s' % src_bucket_name)
futures = load_bundle(bundle_name, src_account_id, src_bucket_region,
src_bucket_name, role_name)
handle_futures_progress_bar(futures)
click.echo('Bundle was downloaded successfully')
ctx.invoke(upload_bundle, bundle_name=bundle_name)
click.echo('Bundle was copied successfully')
# =============================================================================
@syndicate.command(name='build_bundle')
@click.option('--bundle_name', nargs=1, callback=check_required_param)
@click.pass_context
@timeit
def build_bundle(ctx, bundle_name):
ctx.invoke(build_artifacts, bundle_name=bundle_name)
ctx.invoke(package_meta, bundle_name=bundle_name)
ctx.invoke(upload_bundle, bundle_name=bundle_name)
# =============================================================================
@syndicate.command(name='deploy')
@click.option('--deploy_name', nargs=1, callback=check_required_param)
@click.option('--bundle_name', nargs=1, callback=check_required_param)
@click.option('--deploy_only_types', multiple=True)
@click.option('--deploy_only_resources', multiple=True)
@click.option('--deploy_only_resources_path', nargs=1)
@click.option('--excluded_resources', multiple=True)
@click.option('--excluded_resources_path', nargs=1)
@click.option('--excluded_types', multiple=True)
@click.option('--continue_deploy', is_flag=True)
@timeit
def deploy(deploy_name, bundle_name, deploy_only_types, deploy_only_resources,
deploy_only_resources_path, excluded_resources,
excluded_resources_path, excluded_types, continue_deploy):
click.echo('Command deploy backend')
click.echo('Deploy name: %s' % deploy_name)
if deploy_only_resources_path and os.path.exists(
deploy_only_resources_path):
deploy_resources_list = json.load(open(deploy_only_resources_path))
deploy_only_resources = tuple(
set(deploy_only_resources + tuple(deploy_resources_list)))
if excluded_resources_path and os.path.exists(excluded_resources_path):
excluded_resources_list = json.load(open(excluded_resources_path))
excluded_resources = tuple(
set(excluded_resources + tuple(excluded_resources_list)))
if continue_deploy:
deploy_success = continue_deployment_resources(deploy_name,
bundle_name,
deploy_only_resources,
deploy_only_types,
excluded_resources,
excluded_types)
else:
deploy_success = create_deployment_resources(deploy_name, bundle_name,
deploy_only_resources,
deploy_only_types,
excluded_resources,
excluded_types)
click.echo('Backend resources were deployed{0}.'.format(
'' if deploy_success else ' with errors. See deploy output file'))
# =============================================================================
@syndicate.command(name='publish_lambda_version')
@click.option('--bundle_name', nargs=1, callback=check_required_param)
@click.option('--publish_only_lambdas', multiple=True)
@click.option('--publish_only_lambdas_path', nargs=1)
@click.option('--excluded_lambdas_resources', multiple=True)
@click.option('--excluded_lambdas_resources_path', nargs=1)
@timeit
def publish_lambda_version(bundle_name,
publish_only_lambdas, publish_only_lambdas_path,
excluded_lambdas_resources,
excluded_lambdas_resources_path):
click.echo('Command publish lambda version backend')
click.echo('Bundle name: %s' % bundle_name)
if publish_only_lambdas_path and os.path.exists(
publish_only_lambdas_path):
update_lambdas_list = json.load(open(publish_only_lambdas_path))
publish_only_lambdas = tuple(
set(publish_only_lambdas + tuple(update_lambdas_list)))
if excluded_lambdas_resources_path and os.path.exists(
excluded_lambdas_resources_path):
excluded_lambdas_list = json.load(
open(excluded_lambdas_resources_path))
excluded_lambdas_resources = tuple(
set(excluded_lambdas_resources + tuple(excluded_lambdas_list)))
update_lambdas(bundle_name=bundle_name,
publish_only_lambdas=publish_only_lambdas,
excluded_lambdas_resources=excluded_lambdas_resources)
click.echo('Lambda versions were published.')
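# Illustrative invocations, assuming the `syndicate` group above is exposed as a
# console script (the bundle/deploy names below are hypothetical):
#   syndicate build_bundle --bundle_name demo_bundle
#   syndicate deploy --deploy_name demo --bundle_name demo_bundle
#   syndicate clean --deploy_name demo --bundle_name demo_bundle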
|
py | b40bee3870b9118b0d8421bf8d7584c926830fc5 | import sys
if sys.version_info < (3, 7):
from ._color import ColorValidator
from ._arearatio import ArearatioValidator
else:
from _plotly_utils.importers import relative_import
__all__, __getattr__, __dir__ = relative_import(
__name__, [], ["._color.ColorValidator", "._arearatio.ArearatioValidator"]
)
|
py | b40bee9e35bb5eb6fc14dbba142a91df97b6de3e | from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework.test import APIClient
from rest_framework import status
CREATE_USER_URL = reverse('user:create')
TOKEN_URL = reverse('user:token')
ME_URL = reverse('user:me')
def create_user(**params):
return get_user_model().objects.create_user(**params)
class PublicUserApiTests(TestCase):
"""Test the users API (public)"""
def setUp(self):
self.client = APIClient()
def test_create_valid_user_success(self):
"""Test creating user with valid payload is successful"""
payload = {
'email': '[email protected]',
'password': 'testing',
'name': 'Test name'
}
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
user = get_user_model().objects.get(**res.data)
self.assertTrue(user.check_password(payload['password']))
self.assertNotIn('password', res.data)
def test_user_exists(self):
"""Test creating user that already exists fails"""
payload = {'email': '[email protected]', 'password': 'testing'}
create_user(**payload)
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_password_too_short(self):
"""Test that pass must be more > 5 chars"""
payload = {'email': '[email protected]',
'password': 'wow',
'name': 'test'}
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
user_exists = get_user_model().objects.filter(
email=payload['email']
).exists()
self.assertFalse(user_exists)
def test_create_token_for_user(self):
"""Test that a token is created for the user"""
payload = {'email': '[email protected]', 'password': 'testing'}
create_user(**payload)
res = self.client.post(TOKEN_URL, payload)
self.assertIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_200_OK)
def test_create_token_invalid_credentials(self):
"""Test that token is not created if invalid credentials is given"""
create_user(email='[email protected]', password='testing')
payload = {'email': '[email protected]', 'password': 'salahpass'}
res = self.client.post(TOKEN_URL, payload)
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_token_no_user(self):
"""Test that token is not created if user doesnt exist"""
payload = {'email': '[email protected]', 'password': 'testing'}
res = self.client.post(TOKEN_URL, payload)
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_token_missing_field(self):
"""Test that email and password are required"""
res = self.client.post(TOKEN_URL,
{'email': '[email protected]', 'password': ''}
)
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_retrieve_user_unauthorized(self):
"""Test that auth is required for user"""
res = self.client.get(ME_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateUserApiTests(TestCase):
"""Test API request that requires authentication"""
def setUp(self):
self.user = create_user(
email='[email protected]',
password='testpass',
name='name'
)
self.client = APIClient()
self.client.force_authenticate(user=self.user)
def test_retrieve_profile_success(self):
"""Test retrieving profile for logged in used"""
res = self.client.get(ME_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, {
'name': self.user.name,
'email': self.user.email,
})
def test_post_me_not_allowed(self):
"""Test that POST is not allowed in any URL"""
res = self.client.post(ME_URL, {})
self.assertEqual(res.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_update_user_profile(self):
"""Test updating the user profile for authenticated user"""
payload = {'name': 'new name', 'password': 'testing123123'}
res = self.client.patch(ME_URL, payload)
self.user.refresh_from_db()
self.assertEqual(self.user.name, payload['name'])
self.assertTrue(self.user.check_password(payload['password']))
self.assertEqual(res.status_code, status.HTTP_200_OK)
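# A quick way to run just these tests, assuming this module lives in the
# project's `user` app and a test database is available:
#   python manage.py test user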
|
py | b40befcd20d7505db6e0b5323809528fc4bfc5f4 | """
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '1-969t2*&j!!&dd73j3hj&3bl(+pmi%968f#zu)lb3&bx8h!c3'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'core',
'user',
'recipe'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': os.environ.get('DB_HOST'),
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('DB_USER'),
'PASSWORD': os.environ.get('DB_PASS'),
}
}
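# The connection above is configured entirely from the environment; a sketch of
# the variables this block expects (values are placeholders, not real settings):
#   DB_HOST=db  DB_NAME=app  DB_USER=postgres  DB_PASS=changeme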
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = '/vol/web/media/'
STATIC_ROOT = '/vol/web/static/'
AUTH_USER_MODEL = 'core.User'
|
py | b40bf0623a0a5e20b960edc72ff6e2121f2c6e42 | import json
from stix_shifter.stix_translation.src.utils import transformers
from stix_shifter.stix_translation.src.json_to_stix import json_to_stix_translator
from stix_shifter.stix_translation.src.modules.bigfix import bigfix_translator
interface = bigfix_translator.Translator()
map_file = open(interface.mapping_filepath).read()
map_data = json.loads(map_file)
data_source = {
"type": "identity",
"id": "identity--3532c56d-ea72-48be-a2ad-1a53f4c9c6d3",
"name": "BigFix",
"identity_class": "events"
}
options = {}
class TestBigFixResultsToStix(object):
@staticmethod
def get_first(itr, constraint):
return next(
(obj for obj in itr if constraint(obj)),
None
)
@staticmethod
def get_first_of_type(itr, typ):
return TestBigFixResultsToStix.get_first(itr, lambda o: type(o) == dict and o.get('type') == typ)
def test_common_prop(self):
data = {"computer_identity": "12369754-bigdata4545.canlab.ibm.com", "subQueryID": 1, "start_time": "1541424881", "type": "process", "process_name": "systemd", "process_id": "1",
"sha256hash": "9c74c625b2aba7a2e8d8a42e2e94715c355367f7cbfa9bd5404ba52b726792a6", "sha1hash": "916933045c5c91ebcaa325e7f8302f3a732a0a3d", "md5hash": "28a9beb86c4d4c31ba572805bea8494f", "file_path": "/usr/lib/systemd/systemd"}
result_bundle = json_to_stix_translator.convert_to_stix(
data_source, map_data, [data], transformers.get_all_transformers(), options)
print(json.dumps(result_bundle, indent=2))
assert(result_bundle['type'] == 'bundle')
result_bundle_objects = result_bundle['objects']
result_bundle_identity = result_bundle_objects[0]
assert(result_bundle_identity['type'] == data_source['type'])
assert(result_bundle_identity['id'] == data_source['id'])
assert(result_bundle_identity['name'] == data_source['name'])
assert(result_bundle_identity['identity_class']
== data_source['identity_class'])
observed_data = result_bundle_objects[1]
print(observed_data)
assert(observed_data['id'] is not None)
assert(observed_data['type'] == "observed-data")
assert(observed_data['created_by_ref'] == result_bundle_identity['id'])
assert(observed_data['created'] is not None)
assert(observed_data['first_observed'] is not None)
assert(observed_data['last_observed'] is not None)
def test_file_results_to_stix(self):
file_name = '.X0-lock'
data = {"computer_identity": "12369754-bigdata4545.canlab.ibm.com", "subQueryID": 1, "type": "file", "file_name": ".X0-lock", "sha256hash": "7236f966f07259a1de3ee0d48a3ef0ee47c4a551af7f0d76dcabbbb9d6e00940",
"sha1hash": "8b5e953be1db90172af66631132f6f27dda402d2", "md5hash": "e5307d27f0eb9a27af8597a1ddc51e89", "file_path": "/tmp/.X0-lock", "modified_time": "1541424894"}
result_bundle = json_to_stix_translator.convert_to_stix(
data_source, map_data, [data], transformers.get_all_transformers(), options)
result_bundle_objects = result_bundle['objects']
result_bundle_identity = result_bundle_objects[0]
assert(result_bundle_identity['type'] == data_source['type'])
observed_data = result_bundle_objects[1]
assert('objects' in observed_data)
objects = observed_data['objects']
file_obj = TestBigFixResultsToStix.get_first_of_type(objects.values(), 'file')
assert(file_obj is not None), 'file object type not found'
assert(file_obj.keys() == {'type', 'name', 'hashes', 'parent_directory_ref'})
assert(file_obj['name'] == file_name)
def test_process_results_to_stix(self):
process_name = 'systemd'
data = {"computer_identity": "12369754-bigdata4545.canlab.ibm.com", "subQueryID": 1, "start_time": "1541424881", "type": "process", "process_name": "systemd", "process_id": "1",
"sha256hash": "9c74c625b2aba7a2e8d8a42e2e94715c355367f7cbfa9bd5404ba52b726792a6", "sha1hash": "916933045c5c91ebcaa325e7f8302f3a732a0a3d", "md5hash": "28a9beb86c4d4c31ba572805bea8494f", "file_path": "/usr/lib/systemd/systemd"}
result_bundle = json_to_stix_translator.convert_to_stix(
data_source, map_data, [data], transformers.get_all_transformers(), options)
print(json.dumps(result_bundle, indent=2))
result_bundle_objects = result_bundle['objects']
result_bundle_identity = result_bundle_objects[0]
assert(result_bundle_identity['type'] == data_source['type'])
observed_data = result_bundle_objects[1]
assert('objects' in observed_data)
objects = observed_data['objects']
process_obj = TestBigFixResultsToStix.get_first_of_type(objects.values(), 'process')
assert(process_obj is not None), 'process object type not found'
assert(process_obj.keys() == {'type', 'name', 'pid', 'binary_ref'})
assert(process_obj['name'] == process_name)
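# These translation checks can be run on their own, assuming pytest and the
# stix_shifter package are importable (the path below is a placeholder):
#   python -m pytest path/to/this_test_module.py -v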
|
py | b40bf29c3880e69736ea79d85cc92b7050fa437a | import calendar
import datetime
from djmodels.utils.html import avoid_wrapping
from djmodels.utils.timezone import is_aware, utc
from djmodels.utils.translation import gettext, ngettext_lazy
TIME_STRINGS = {
'year': ngettext_lazy('%d year', '%d years'),
'month': ngettext_lazy('%d month', '%d months'),
'week': ngettext_lazy('%d week', '%d weeks'),
'day': ngettext_lazy('%d day', '%d days'),
'hour': ngettext_lazy('%d hour', '%d hours'),
'minute': ngettext_lazy('%d minute', '%d minutes'),
}
TIMESINCE_CHUNKS = (
(60 * 60 * 24 * 365, 'year'),
(60 * 60 * 24 * 30, 'month'),
(60 * 60 * 24 * 7, 'week'),
(60 * 60 * 24, 'day'),
(60 * 60, 'hour'),
(60, 'minute'),
)
def timesince(d, now=None, reversed=False, time_strings=None):
"""
Take two datetime objects and return the time between d and now as a nicely
formatted string, e.g. "10 minutes". If d occurs after now, return
"0 minutes".
Units used are years, months, weeks, days, hours, and minutes.
Seconds and microseconds are ignored. Up to two adjacent units will be
displayed. For example, "2 weeks, 3 days" and "1 year, 3 months" are
possible outputs, but "2 weeks, 3 hours" and "1 year, 5 days" are not.
`time_strings` is an optional dict of strings to replace the default
TIME_STRINGS dict.
Adapted from
http://web.archive.org/web/20060617175230/http://blog.natbat.co.uk/archive/2003/Jun/14/time_since
"""
if time_strings is None:
time_strings = TIME_STRINGS
# Convert datetime.date to datetime.datetime for comparison.
if not isinstance(d, datetime.datetime):
d = datetime.datetime(d.year, d.month, d.day)
if now and not isinstance(now, datetime.datetime):
now = datetime.datetime(now.year, now.month, now.day)
now = now or datetime.datetime.now(utc if is_aware(d) else None)
if reversed:
d, now = now, d
delta = now - d
    # Deal with leap years by subtracting the number of leap days
leapdays = calendar.leapdays(d.year, now.year)
if leapdays != 0:
if calendar.isleap(d.year):
leapdays -= 1
elif calendar.isleap(now.year):
leapdays += 1
delta -= datetime.timedelta(leapdays)
# ignore microseconds
since = delta.days * 24 * 60 * 60 + delta.seconds
if since <= 0:
# d is in the future compared to now, stop processing.
return avoid_wrapping(gettext('0 minutes'))
for i, (seconds, name) in enumerate(TIMESINCE_CHUNKS):
count = since // seconds
if count != 0:
break
result = avoid_wrapping(time_strings[name] % count)
if i + 1 < len(TIMESINCE_CHUNKS):
# Now get the second item
seconds2, name2 = TIMESINCE_CHUNKS[i + 1]
count2 = (since - (seconds * count)) // seconds2
if count2 != 0:
result += gettext(', ') + avoid_wrapping(time_strings[name2] % count2)
return result
def timeuntil(d, now=None, time_strings=None):
"""
Like timesince, but return a string measuring the time until the given time.
"""
return timesince(d, now, reversed=True, time_strings=time_strings)
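# Rough examples of the formatting described above (exact wording depends on the
# active translation; `now` and `timedelta` here are illustrative):
#   timesince(now - timedelta(days=10), now)              -> "1 week, 3 days"
#   timesince(now - timedelta(hours=1, minutes=30), now)  -> "1 hour, 30 minutes"
#   timesince(now + timedelta(days=1), now)               -> "0 minutes"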
|
py | b40bf2e5d297529f359e3564ecb8a74033fc409e | # Algorithm Analysis 3CV2
# Alan Romero Lucero
# Josué David Hernández Ramírez
# Assignment 2: Iterative Fibonacci
# This file is used to plot the obtained results
import matplotlib.pyplot as plt
import numpy as np
def graph ( count, fibo, f, n ):
    # Window title
plt.figure ( "Fibonacci Iterative Algorithm" )
    # Plot title
plt.title ( "Fibonacci ( " + str ( n ) + " ): " + str ( fibo ) )
    # Time parameter ( t ) of the plot.
t = np.arange ( 0, count, ( count / ( len ( f ) + 1 ) ) )
_t = list ( map ( ( lambda x: x * ( 5 / 2 ) ), t ) )
_f = np.arange ( 0, len ( f ) + 1 )
    # Axis labels.
plt.xlabel ( "Time ( t )", color = ( 0.3, 0.4, 0.6 ), size = "large" )
plt.ylabel ( "Fibonacci ( f )", color = ( 0.3, 0.4, 0.6 ), size = "large" )
# Plot.
plt.plot ( _f, _t, "b^", label = "g( n ) = ( 5/2 )( n )" )
plt.plot ( _f, t, "ro", label = "T( n ) = ( n )" )
plt.plot ( _f, _t, "b--")
plt.plot ( _f, t, "r--")
plt.legend ( loc = "lower right" )
plt.show ( ) |
py | b40bf35d18e73f3f4816fa154cf802b1139c8407 | """Game board.
Holds the array of cells that forms the game field, tracks the active mino
(the "hot" cells), handles cell movement, deletes completed rows and updates
the score.
"""
from config.board_config import BoardConfig
from board.so_mino import Mino
from board.so_metadata import GameData
from board.so_board_cell import BoardCell
import logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s | %(name)s | %(levelname)s | %(message)s')
logger = logging.getLogger(__name__)
class Board():
    def __init__(self) -> None:
        self._board = [[BoardCell.empty() for y in range(BoardConfig.board_max_row)]
                       for x in range(BoardConfig.board_max_col)]
        self._game_data = GameData()
        self._game_over = False
        # Spawn last so a failed spawn can immediately flag the game as over.
        self.__spawn_next_mino()
@property
def board(self):
return self._board
@property
def game_over(self):
return self._game_over
# @property
# def game_data(self):
# return self._game_data
def __spawn_next_mino(self):
self._mino = Mino(self._board)
success = self._mino.spawn()
if(not success):
logger.info("Could not spawn Mino, GAME OVER!!")
self._game_over = True
def move(self, direction: int):
if(self._game_over):
logger.info("GAME OVER!!")
return
success = self._mino.move(direction)
if(direction == BoardConfig.DIRECTION_DOWN and not success):
self.__invalidate()
# check if move possible
# y - move
# n - freeze mino
# n - check if rows completed
# y - delete row
# y - update score
# n - spawn_next_mino
pass
def __invalidate(self):
logger.info("Invalidate board requested!!")
self._mino.freeze()
count = self.__remove_completed_rows()
self._game_data.on_rows_completed(count)
self.__spawn_next_mino()
pass
def __remove_completed_rows(self):
count = 0
for y in range(0, BoardConfig.board_max_row):
completed = True
for x in range(0, BoardConfig.board_max_col):
if(self._board[x][y].active == BoardConfig.TYPE_EMPTY):
completed = False
if(completed == True):
self.__remove_row(y)
count = count + 1
return count
    def __remove_row(self, row_num: int):
        # Shift every row above row_num down by one cell.
        for y in range(row_num, 0, -1):
            for x in range(0, BoardConfig.board_max_col):
                self._board[x][y].color = self._board[x][y-1].color
                self._board[x][y].active = self._board[x][y-1].active
        # Clear the topmost row.
        for x in range(0, BoardConfig.board_max_col):
            self._board[x][0].color = BoardConfig.COL_EMPTY
            self._board[x][0].active = BoardConfig.TYPE_EMPTY
    def end_game(self):
        """Mark the game as finished."""
        # self._game_data.on_game_completed()
        self._game_over = True
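# A minimal usage sketch, assuming the config, cell and mino modules imported
# above are available on the path:
#   board = Board()
#   board.move(BoardConfig.DIRECTION_DOWN)   # advance the active mino one step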
|
py | b40bf4919dc2007a37dbb0e3f024a531f8e45f7e | import logging
from io import BytesIO
import pytest
from pyfbx import loader, Properties70, Property70, PropertyTemplate
logger = logging.getLogger("tests")
def test_property_70():
property70 = Property70("TestProperty", "int", "", "", 0)
serialized = loader.serialize(property70)
deserialized = loader.deserialize(BytesIO(serialized), Property70)
assert deserialized == property70
def test_property_70_empty_value():
property70 = Property70("TestProperty", "int", "", "", None)
serialized = loader.serialize(property70)
deserialized = loader.deserialize(BytesIO(serialized), Property70)
assert deserialized == property70
def test_property_70_multiple_value():
property70 = Property70("TestProperty", "int", "", "", 0, 1, 2)
serialized = loader.serialize(property70)
deserialized = loader.deserialize(BytesIO(serialized), Property70)
assert deserialized == property70
def test_properties_70_empty():
properties70 = Properties70()
serialized = loader.serialize(properties70)
deserialized = loader.deserialize(BytesIO(serialized), Properties70)
assert deserialized == properties70
def test_properties_70():
properties70 = Properties70(
Property70("TestProperty1", "int", "", "", -1, 1, 0),
Property70("TestProperty2", "string", "", "", "Property Value")
)
serialized = loader.serialize(properties70)
deserialized = loader.deserialize(BytesIO(serialized), Properties70)
assert deserialized == properties70
def test_property_template():
property_template = PropertyTemplate(
"TestTemplate",
Properties70(
Property70("TestProperty1", "int", "", "", -1, 1, 0),
Property70("TestProperty2", "string", "", "", "Property Value")
)
)
serialized = loader.serialize(property_template)
deserialized = loader.deserialize(BytesIO(serialized), PropertyTemplate)
assert deserialized == property_template |
py | b40bf4af64285731151df36b5e81c1eb5c2bc5a7 | r"""
Diametrically point loaded 2-D disk. See :ref:`sec-primer`.
Find :math:`\ul{u}` such that:
.. math::
\int_{\Omega} D_{ijkl}\ e_{ij}(\ul{v}) e_{kl}(\ul{u})
= 0
\;, \quad \forall \ul{v} \;,
where
.. math::
D_{ijkl} = \mu (\delta_{ik} \delta_{jl}+\delta_{il} \delta_{jk}) +
\lambda \ \delta_{ij} \delta_{kl}
\;.
"""
from __future__ import absolute_import
from sfepy.mechanics.matcoefs import stiffness_from_youngpoisson
from sfepy.discrete.fem.utils import refine_mesh
from sfepy import data_dir
# Fix the mesh file name if you run this file outside the SfePy directory.
filename_mesh = data_dir + '/meshes/2d/its2D.mesh'
refinement_level = 0
filename_mesh = refine_mesh(filename_mesh, refinement_level)
output_dir = '.' # set this to a valid directory you have write access to
young = 2000.0 # Young's modulus [MPa]
poisson = 0.4 # Poisson's ratio
options = {
'output_dir' : output_dir,
}
regions = {
'Omega' : 'all',
'Left' : ('vertices in (x < 0.001)', 'facet'),
'Bottom' : ('vertices in (y < 0.001)', 'facet'),
'Top' : ('vertex 2', 'vertex'),
}
materials = {
'Asphalt' : ({'D': stiffness_from_youngpoisson(2, young, poisson)},),
'Load' : ({'.val' : [0.0, -1000.0]},),
}
fields = {
'displacement': ('real', 'vector', 'Omega', 1),
}
equations = {
'balance_of_forces' :
"""dw_lin_elastic.2.Omega(Asphalt.D, v, u)
= dw_point_load.0.Top(Load.val, v)""",
}
variables = {
'u' : ('unknown field', 'displacement', 0),
'v' : ('test field', 'displacement', 'u'),
}
ebcs = {
'XSym' : ('Bottom', {'u.1' : 0.0}),
'YSym' : ('Left', {'u.0' : 0.0}),
}
solvers = {
'ls' : ('ls.scipy_direct', {}),
'newton' : ('nls.newton', {
'i_max' : 1,
'eps_a' : 1e-6,
}),
}
|
py | b40bf573c816d18e32a831aa339ddecd7b6ea670 | lines = []
line = input()
while line not in ('Done', 'done', 'd'):
lines.append(line)
line = input()
for line in reversed(lines):
print(line[::-1])
|
py | b40bf60002f6e0ba76bdfe912dac6ce673dcbb80 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for grit.tool.android2grd'''
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import unittest
import xml.dom.minidom
from grit import grd_reader
from grit import util
from grit.node import empty
from grit.node import io
from grit.node import message
from grit.node import misc
from grit.tool import android2grd
class Android2GrdUnittest(unittest.TestCase):
def __Parse(self, xml_string):
return xml.dom.minidom.parseString(xml_string).childNodes[0]
def testCreateTclibMessage(self):
tool = android2grd.Android2Grd()
msg = tool.CreateTclibMessage(self.__Parse(r'''
<string name="simple">A simple string</string>'''))
self.assertEqual(msg.GetRealContent(), 'A simple string')
msg = tool.CreateTclibMessage(self.__Parse(r'''
<string name="outer_whitespace">
Strip leading/trailing whitespace
</string>'''))
self.assertEqual(msg.GetRealContent(), 'Strip leading/trailing whitespace')
msg = tool.CreateTclibMessage(self.__Parse(r'''
<string name="inner_whitespace">Fold multiple spaces</string>'''))
self.assertEqual(msg.GetRealContent(), 'Fold multiple spaces')
msg = tool.CreateTclibMessage(self.__Parse(r'''
<string name="escaped_spaces">Retain \n escaped\t spaces</string>'''))
self.assertEqual(msg.GetRealContent(), 'Retain \n escaped\t spaces')
msg = tool.CreateTclibMessage(self.__Parse(r'''
<string name="quotes"> " Quotes preserve
whitespace" but only for "enclosed elements "
</string>'''))
self.assertEqual(msg.GetRealContent(), ''' Quotes preserve
whitespace but only for enclosed elements ''')
msg = tool.CreateTclibMessage(self.__Parse(
r'''<string name="escaped_characters">Escaped characters: \"\'\\\t\n'''
'</string>'))
self.assertEqual(msg.GetRealContent(), '''Escaped characters: "'\\\t\n''')
msg = tool.CreateTclibMessage(self.__Parse(
'<string xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2" '
'name="placeholders">'
'Open <xliff:g id="FILENAME" example="internet.html">%s</xliff:g>?'
'</string>'))
self.assertEqual(msg.GetRealContent(), 'Open %s?')
self.assertEqual(len(msg.GetPlaceholders()), 1)
self.assertEqual(msg.GetPlaceholders()[0].presentation, 'FILENAME')
self.assertEqual(msg.GetPlaceholders()[0].original, '%s')
self.assertEqual(msg.GetPlaceholders()[0].example, 'internet.html')
msg = tool.CreateTclibMessage(self.__Parse(r'''
<string name="comment">Contains a <!-- ignore this --> comment
</string>'''))
self.assertEqual(msg.GetRealContent(), 'Contains a comment')
def testIsTranslatable(self):
tool = android2grd.Android2Grd()
string_el = self.__Parse('<string>Hi</string>')
self.assertTrue(tool.IsTranslatable(string_el))
string_el = self.__Parse(
'<string translatable="true">Hi</string>')
self.assertTrue(tool.IsTranslatable(string_el))
string_el = self.__Parse(
'<string translatable="false">Hi</string>')
self.assertFalse(tool.IsTranslatable(string_el))
def __ParseAndroidXml(self, options = []):
tool = android2grd.Android2Grd()
tool.ParseOptions(options)
android_path = util.PathFromRoot('grit/testdata/android.xml')
with open(android_path) as android_file:
android_dom = xml.dom.minidom.parse(android_file)
grd = tool.AndroidDomToGrdDom(android_dom)
self.assertTrue(isinstance(grd, misc.GritNode))
return grd
def testAndroidDomToGrdDom(self):
grd = self.__ParseAndroidXml(['--languages', 'en-US,en-GB,ru'])
# Check that the structure of the GritNode is as expected.
messages = grd.GetChildrenOfType(message.MessageNode)
translations = grd.GetChildrenOfType(empty.TranslationsNode)
files = grd.GetChildrenOfType(io.FileNode)
self.assertEqual(len(translations), 1)
self.assertEqual(len(files), 3)
self.assertEqual(len(messages), 5)
# Check that a message node is constructed correctly.
msg = filter(lambda x: x.GetTextualIds()[0] == "IDS_PLACEHOLDERS", messages)
self.assertTrue(msg)
msg = msg[0]
self.assertTrue(msg.IsTranslateable())
self.assertEqual(msg.attrs["desc"], "A string with placeholder.")
def testProductAttribute(self):
grd = self.__ParseAndroidXml([])
messages = grd.GetChildrenOfType(message.MessageNode)
msg = filter(lambda x: x.GetTextualIds()[0] ==
"IDS_SIMPLE_product_nosdcard",
messages)
self.assertTrue(msg)
def testTranslatableAttribute(self):
grd = self.__ParseAndroidXml([])
messages = grd.GetChildrenOfType(message.MessageNode)
msgs = filter(lambda x: x.GetTextualIds()[0] == "IDS_CONSTANT", messages)
self.assertTrue(msgs)
self.assertFalse(msgs[0].IsTranslateable())
def testTranslations(self):
grd = self.__ParseAndroidXml(['--languages', 'en-US,en-GB,ru,id'])
files = grd.GetChildrenOfType(io.FileNode)
us_file = filter(lambda x: x.attrs['lang'] == 'en-US', files)
self.assertTrue(us_file)
self.assertEqual(us_file[0].GetInputPath(),
'chrome_android_strings_en-US.xtb')
id_file = filter(lambda x: x.attrs['lang'] == 'id', files)
self.assertTrue(id_file)
self.assertEqual(id_file[0].GetInputPath(),
'chrome_android_strings_id.xtb')
def testOutputs(self):
grd = self.__ParseAndroidXml(['--languages', 'en-US,ru,id',
'--rc-dir', 'rc/dir',
'--header-dir', 'header/dir',
'--xtb-dir', 'xtb/dir',
'--xml-dir', 'xml/dir'])
outputs = grd.GetChildrenOfType(io.OutputNode)
self.assertEqual(len(outputs), 7)
header_outputs = filter(lambda x: x.GetType() == 'rc_header', outputs)
rc_outputs = filter(lambda x: x.GetType() == 'rc_all', outputs)
xml_outputs = filter(lambda x: x.GetType() == 'android', outputs)
self.assertEqual(len(header_outputs), 1)
self.assertEqual(len(rc_outputs), 3)
self.assertEqual(len(xml_outputs), 3)
# The header node should have an "<emit>" child and the proper filename.
self.assertTrue(header_outputs[0].GetChildrenOfType(io.EmitNode))
self.assertEqual(util.normpath(header_outputs[0].GetFilename()),
util.normpath('header/dir/chrome_android_strings.h'))
id_rc = filter(lambda x: x.GetLanguage() == 'id', rc_outputs)
id_xml = filter(lambda x: x.GetLanguage() == 'id', xml_outputs)
self.assertTrue(id_rc)
self.assertTrue(id_xml)
self.assertEqual(util.normpath(id_rc[0].GetFilename()),
util.normpath('rc/dir/chrome_android_strings_id.rc'))
self.assertEqual(util.normpath(id_xml[0].GetFilename()),
util.normpath('xml/dir/values-in/strings.xml'))
us_rc = filter(lambda x: x.GetLanguage() == 'en-US', rc_outputs)
us_xml = filter(lambda x: x.GetLanguage() == 'en-US', xml_outputs)
self.assertTrue(us_rc)
self.assertTrue(us_xml)
self.assertEqual(util.normpath(us_rc[0].GetFilename()),
util.normpath('rc/dir/chrome_android_strings_en-US.rc'))
self.assertEqual(util.normpath(us_xml[0].GetFilename()),
util.normpath('xml/dir/values-en-rUS/strings.xml'))
if __name__ == '__main__':
unittest.main()
|
py | b40bf6fbe24cda7d43a6ae1eaccc4b0374123423 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'UserProfile.activity_level'
db.add_column(u'users_userprofile', 'activity_level',
self.gf('django.db.models.fields.CharField')(default='EN', max_length=2),
keep_default=False)
def backwards(self, orm):
# Deleting field 'UserProfile.activity_level'
db.delete_column(u'users_userprofile', 'activity_level')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'users.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'activity_level': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'birthday': ('django.db.models.fields.DateField', [], {}),
'elbow_diameter': ('django.db.models.fields.IntegerField', [], {}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'height': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'}),
'weight': ('django.db.models.fields.IntegerField', [], {})
}
}
complete_apps = ['users'] |
py | b40bf890dc34bd07de52dff47c6dd5c939f40b07 | from selenium import webdriver
from django.test import TestCase
import time
DELAY = 2
class HomePageTests(TestCase):
# Gavin goes to www.gavmac.com
def setUp(self):
self.driver = webdriver.Chrome()
self.driver.get('https://ioc-experiment-one.herokuapp.com/')
def tearDown(self):
self.driver.close()
# Gavin expects to see Home in title
def test_home_in_title_of_default_route(self):
self.assertIn("Home", self.driver.title)
# Gavin expects to see a username and password box with Login button
def test_login_form_exists_on_page(self):
self.driver.find_element_by_id('username')
self.driver.find_element_by_id('password')
self.driver.find_element_by_id('submit')
# Gavin expects to be able to enter username and password
def test_able_to_enter_username_and_password(self):
username = self.driver.find_element_by_id('username')
username.send_keys('Gavin')
password = self.driver.find_element_by_id('password')
password.send_keys('TestPass')
time.sleep(DELAY)
# Gavin expects to be able to re-enter a username and password if both fields are empty
# when Login button clicked
def test_message_shown_if_username_AND_password_empty_when_login_button_clicked(self):
username = self.driver.find_element_by_id('username')
password = self.driver.find_element_by_id('password')
username.clear()
password.clear()
login_button = self.driver.find_element_by_id('submit')
login_button.click()
message = self.driver.find_element_by_class_name('warning')
self.assertEqual(message.text, 'Missing fields for username, password')
# Gavin expects to be able to re-enter a username if field was empty
# when Login button clicked
def test_message_shown_if_username_field_is_empty_when_login_button_clicked(self):
username = self.driver.find_element_by_id('username')
username.clear()
password = self.driver.find_element_by_id('password')
password.send_keys('TestPass')
login_button = self.driver.find_element_by_id('submit')
login_button.click()
message = self.driver.find_element_by_class_name('warning')
self.assertEqual(message.text, 'Missing fields for username')
# Gavin expects to be able to re-enter a password if field was empty
# when Login button clicked
def test_message_shown_if_password_field_is_empty_when_login_button_clicked(self):
username = self.driver.find_element_by_id('username')
username.send_keys('Gavin')
password = self.driver.find_element_by_id('password')
password.clear()
login_button = self.driver.find_element_by_id('submit')
login_button.click()
message = self.driver.find_element_by_class_name('warning')
self.assertEqual(message.text, 'Missing fields for password')
# Given a valid username and password are provided then
# Gavin expects to see the Welcome page when the Login button is clicked
# Welcome page includes bespoke message welcoming Gavin
def test_welcome_page_displayed_when_login_details_valid(self):
username = self.driver.find_element_by_id('username')
username.send_keys('Gavin')
password = self.driver.find_element_by_id('password')
password.send_keys('TestPass')
login_button = self.driver.find_element_by_id('submit')
login_button.click()
time.sleep(DELAY)
welcome_message = self.driver.find_element_by_id('welcomeMessage')
self.assertEqual(welcome_message.text, 'Welcome, Gavin!')
# Gavin provides correct username but incorrect password and expects to be
# able to re-enter credentials to enable login
def test_message_shown_if_credentials_are_not_authenticated_when_login_button_clicked(self):
username = self.driver.find_element_by_id('username')
username.send_keys('Gavin')
password = self.driver.find_element_by_id('password')
password.send_keys('InvalidPassword')
login_button = self.driver.find_element_by_id('submit')
login_button.click()
invalid_credentials_message = self.driver.find_element_by_class_name('warning')
self.assertEqual(invalid_credentials_message.text, \
'Invalid Credentials')
    # Susan is a new user of www.gavmac.com and expects to see an 'Invalid Credentials'
    # message when she tries to log in. She then expects to see a sign-up page when she
    # clicks the "Sign Up" button.
    # After entering her details she expects to see a Welcome page after clicking the "Sign Up" button.
def test_error_page_displayed_when_login_details_invalid(self):
username = self.driver.find_element_by_id('username')
username.send_keys('Susan')
password = self.driver.find_element_by_id('password')
password.send_keys('SusanTestPass')
login_button = self.driver.find_element_by_id('submit')
login_button.click()
time.sleep(DELAY)
invalid_credentials_message = self.driver.find_element_by_class_name('warning')
self.assertEqual(invalid_credentials_message.text, \
'Invalid Credentials')
username = self.driver.find_element_by_id('username')
username.clear()
password = self.driver.find_element_by_id('password')
password.clear()
signup_button = self.driver.find_element_by_id('signup')
signup_button.click()
time.sleep(DELAY)
self.assertIn("Sign Up", self.driver.title)
username = self.driver.find_element_by_id('username')
username.clear()
email = self.driver.find_element_by_id('email')
email.clear()
password = self.driver.find_element_by_id('password')
password.clear()
username.send_keys('Susan')
email.send_keys('[email protected]')
password.send_keys('SusanTestPass')
time.sleep(DELAY)
signup_button = self.driver.find_element_by_id('signup')
signup_button.click()
welcome_message = self.driver.find_element_by_id('welcomeMessage')
self.assertEqual(welcome_message.text, 'Welcome, Susan!')
    # Susan expects to see a sign-up page after she clicks on the 'Sign Up'
    # button
def test_sign_up_button_renders_sign_up_pages(self):
username = self.driver.find_element_by_id('username')
username.clear()
password = self.driver.find_element_by_id('password')
password.clear()
signup_button = self.driver.find_element_by_id('signup')
signup_button.click()
time.sleep(DELAY)
self.assertIn("Sign Up", self.driver.title)
|
py | b40bf8a5938c2cbfee61030dd795c071b5491ed6 | from __future__ import unicode_literals
import os
import frappe
from frappe.database.db_manager import DbManager
expected_settings_10_2_earlier = {
"innodb_file_format": "Barracuda",
"innodb_file_per_table": "ON",
"innodb_large_prefix": "ON",
"character_set_server": "utf8mb4",
"collation_server": "utf8mb4_unicode_ci",
}
expected_settings_10_3_later = {
"character_set_server": "utf8mb4",
"collation_server": "utf8mb4_unicode_ci",
}
def get_mariadb_versions():
	# MariaDB classifies its versions using a Major part (1st and 2nd numbers) and a Minor part (3rd number)
# Example: Version 10.3.13 is Major Version = 10.3, Minor Version = 13
mariadb_variables = frappe._dict(frappe.db.sql("""show variables"""))
version_string = mariadb_variables.get("version").split("-")[0]
versions = {}
versions["major"] = version_string.split(".")[0] + "." + version_string.split(".")[1]
versions["minor"] = version_string.split(".")[2]
return versions
def setup_database(force, source_sql, verbose, no_mariadb_socket=False):
frappe.local.session = frappe._dict({"user": "Administrator"})
db_name = frappe.local.conf.db_name
root_conn = get_root_connection(frappe.flags.root_login, frappe.flags.root_password)
dbman = DbManager(root_conn)
if force or (db_name not in dbman.get_database_list()):
dbman.delete_user(db_name)
if no_mariadb_socket:
dbman.delete_user(db_name, host="%")
dbman.drop_database(db_name)
else:
raise Exception("Database %s already exists" % (db_name,))
dbman.create_user(db_name, frappe.conf.db_password)
if no_mariadb_socket:
dbman.create_user(db_name, frappe.conf.db_password, host="%")
if verbose:
print("Created user %s" % db_name)
dbman.create_database(db_name)
if verbose:
print("Created database %s" % db_name)
dbman.grant_all_privileges(db_name, db_name)
if no_mariadb_socket:
dbman.grant_all_privileges(db_name, db_name, host="%")
dbman.flush_privileges()
if verbose:
print("Granted privileges to user %s and database %s" % (db_name, db_name))
# close root connection
root_conn.close()
bootstrap_database(db_name, verbose, source_sql)
def setup_help_database(help_db_name):
dbman = DbManager(get_root_connection(frappe.flags.root_login, frappe.flags.root_password))
dbman.drop_database(help_db_name)
# make database
	if help_db_name not in dbman.get_database_list():
try:
dbman.create_user(help_db_name, help_db_name)
except Exception as e:
# user already exists
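			# (error code 1396 is ER_CANNOT_USER in MySQL/MariaDB)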
if e.args[0] != 1396:
raise
dbman.create_database(help_db_name)
dbman.grant_all_privileges(help_db_name, help_db_name)
dbman.flush_privileges()
def drop_user_and_database(db_name, root_login, root_password):
frappe.local.db = get_root_connection(root_login, root_password)
dbman = DbManager(frappe.local.db)
dbman.delete_user(db_name, host="%")
dbman.delete_user(db_name)
dbman.drop_database(db_name)
def bootstrap_database(db_name, verbose, source_sql=None):
import sys
frappe.connect(db_name=db_name)
if not check_database_settings():
print("Database settings do not match expected values; stopping database setup.")
sys.exit(1)
import_db_from_sql(source_sql, verbose)
frappe.connect(db_name=db_name)
if "tabDefaultValue" not in frappe.db.get_tables(cached=False):
from click import secho
secho(
"Table 'tabDefaultValue' missing in the restored site. "
"Database not installed correctly, this can due to lack of "
"permission, or that the database name exists. Check your mysql"
" root password, validity of the backup file or use --force to"
" reinstall",
fg="red",
)
sys.exit(1)
def import_db_from_sql(source_sql=None, verbose=False):
if verbose:
print("Starting database import...")
db_name = frappe.conf.db_name
if not source_sql:
source_sql = os.path.join(os.path.dirname(__file__), "framework_mariadb.sql")
DbManager(frappe.local.db).restore_database(db_name, source_sql, db_name, frappe.conf.db_password)
if verbose:
print("Imported from database %s" % source_sql)
def check_database_settings():
	versions = get_mariadb_versions()
	# Compare the major version numerically; a plain string comparison would
	# mis-order releases such as "10.10" against "10.2".
	major_version = tuple(int(part) for part in versions["major"].split("."))
	if major_version <= (10, 2):
		expected_variables = expected_settings_10_2_earlier
	else:
		expected_variables = expected_settings_10_3_later
mariadb_variables = frappe._dict(frappe.db.sql("""show variables"""))
# Check each expected value vs. actuals:
result = True
for key, expected_value in expected_variables.items():
if mariadb_variables.get(key) != expected_value:
print(
"For key %s. Expected value %s, found value %s"
% (key, expected_value, mariadb_variables.get(key))
)
result = False
if not result:
site = frappe.local.site
msg = (
"Creation of your site - {x} failed because MariaDB is not properly {sep}"
"configured. If using version 10.2.x or earlier, make sure you use the {sep}"
"the Barracuda storage engine. {sep}{sep}"
"Please verify the settings above in MariaDB's my.cnf. Restart MariaDB. And {sep}"
"then run `bench new-site {x}` again.{sep2}"
""
).format(x=site, sep2="\n" * 2, sep="\n")
print_db_config(msg)
return result
def get_root_connection(root_login, root_password):
import getpass
if not frappe.local.flags.root_connection:
if not root_login:
root_login = "root"
if not root_password:
root_password = frappe.conf.get("root_password") or None
if not root_password:
root_password = getpass.getpass("MySQL root password: ")
frappe.local.flags.root_connection = frappe.database.get_db(
user=root_login, password=root_password
)
return frappe.local.flags.root_connection
def print_db_config(explanation):
print("=" * 80)
print(explanation)
print("=" * 80)
|
py | b40bf8a96ce8533dbe515af5cea3cfa5fa1f5922 | # encoding: utf-8
"""
@author: xingyu liao
@contact: [email protected]
"""
# based on:
# https://github.com/NVIDIA/apex/blob/d74fda260c403f775817470d87f810f816f3d615/apex/parallel/LARC.py
import torch
from torch.optim.optimizer import Optimizer
class LARS(Optimizer):
"""
:class:`LARS` is a pytorch implementation of both the scaling and clipping variants of LARC,
in which the ratio between gradient and parameter magnitudes is used to calculate an adaptive
local learning rate for each individual parameter. The algorithm is designed to improve
convergence of large batch training.
See https://arxiv.org/abs/1708.03888 for calculation of the local learning rate.
In practice it modifies the gradients of parameters as a proxy for modifying the learning rate
of the parameters. This design allows it to be used as a wrapper around any torch.optim Optimizer.
```
model = ...
optim = torch.optim.Adam(model.parameters(), lr=...)
optim = LARS(optim)
```
It can even be used in conjunction with apex.fp16_utils.FP16_optimizer.
```
model = ...
optim = torch.optim.Adam(model.parameters(), lr=...)
optim = LARS(optim)
optim = apex.fp16_utils.FP16_Optimizer(optim)
```
Args:
optimizer: Pytorch optimizer to wrap and modify learning rate for.
trust_coefficient: Trust coefficient for calculating the lr. See https://arxiv.org/abs/1708.03888
clip: Decides between clipping or scaling mode of LARC. If `clip=True` the learning rate is set to `min(optimizer_lr, local_lr)` for each parameter. If `clip=False` the learning rate is set to `local_lr*optimizer_lr`.
eps: epsilon kludge to help with numerical stability while calculating adaptive_lr
"""
def __init__(self, optimizer, trust_coefficient=0.02, clip=True, eps=1e-8):
self.param_groups = optimizer.param_groups
self.optim = optimizer
self.trust_coefficient = trust_coefficient
self.eps = eps
self.clip = clip
def __getstate__(self):
return self.optim.__getstate__()
def __setstate__(self, state):
self.optim.__setstate__(state)
def __repr__(self):
return self.optim.__repr__()
def state_dict(self):
return self.optim.state_dict()
def load_state_dict(self, state_dict):
self.optim.load_state_dict(state_dict)
def zero_grad(self):
self.optim.zero_grad()
def add_param_group(self, param_group):
self.optim.add_param_group(param_group)
def step(self):
with torch.no_grad():
weight_decays = []
for group in self.optim.param_groups:
# absorb weight decay control from optimizer
weight_decay = group['weight_decay'] if 'weight_decay' in group else 0
weight_decays.append(weight_decay)
group['weight_decay'] = 0
for p in group['params']:
if p.grad is None:
continue
param_norm = torch.norm(p.data)
grad_norm = torch.norm(p.grad.data)
if param_norm != 0 and grad_norm != 0:
# calculate adaptive lr + weight decay
adaptive_lr = self.trust_coefficient * (param_norm) / (
grad_norm + param_norm * weight_decay + self.eps)
# clip learning rate for LARC
if self.clip:
# calculation of adaptive_lr so that when multiplied by lr it equals `min(adaptive_lr, lr)`
adaptive_lr = min(adaptive_lr / group['lr'], 1)
p.grad.data += weight_decay * p.data
p.grad.data *= adaptive_lr
self.optim.step()
# return weight decay control to optimizer
for i, group in enumerate(self.optim.param_groups):
group['weight_decay'] = weight_decays[i]
|
py | b40bf97b78a7933da1eb59e960d2420975ce5ecc | import tweepy
from simple_settings import settings
import os
os.environ['SIMPLE_SETTINGS'] = 'umask_config'
def debug_print(text):
"""Print text if debugging mode is on"""
if settings.debug:
print (text)
def get_last_id(statefile):
"""Retrieve last status ID from a file"""
debug_print('Getting last ID from %s' % (statefile,))
try:
f = open(statefile,'r')
id = int(f.read())
f.close()
except IOError:
debug_print('IOError raised, returning zero (0)')
return 0
debug_print('Got %d' % (id,))
return id
def save_id(statefile,id):
"""Save last status ID to a file"""
last_id = get_last_id(statefile)
if last_id < id:
debug_print('Saving new ID %d to %s' % (id,statefile))
f = open(statefile,'w')
f.write(str(id)) # no trailing newline
f.close()
else:
debug_print('Received smaller ID, not saving. Old: %d, New: %s' % (
last_id, id))
def add_trolls(tweet):
    debug_print('Adding trolls')
# Authenticate to Twitter
auth = tweepy.OAuthHandler(settings.CONSUMER_KEY, settings.CONSUMER_SECRET)
auth.set_access_token(settings.ACCESS_TOKEN, settings.ACCESS_TOKEN_SECRET)
api = tweepy.API(auth)
try:
api.verify_credentials()
debug_print("Authentication OK")
except:
debug_print("Error during authentication")
tweets = []
last_id = get_last_id(settings.last_id_file)
debug_print ("Busqueda:")
debug_print (settings.busqueda)
search_terms = settings.busqueda
tweets = api.search(q=search_terms,since_id=last_id,count=3)
for tweet in tweets:
print(f'{tweet.id} ----- {tweet.text}')
tweet.text = '#antitroll'+tweet.text
api.retweet(tweet.id)
save_id(settings.last_id_file,tweet.id)
|
py | b40bfbedf97d6f4f3fe242fb2716dc3faab52af4 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilites for mutual TLS."""
import six
from google.auth import exceptions
from google.auth.transport import _mtls_helper
def has_default_client_cert_source():
"""Check if default client SSL credentials exists on the device.
Returns:
bool: indicating if the default client cert source exists.
"""
metadata_path = _mtls_helper._check_dca_metadata_path(
_mtls_helper.CONTEXT_AWARE_METADATA_PATH
)
return metadata_path is not None
def default_client_cert_source():
"""Get a callback which returns the default client SSL credentials.
Returns:
Callable[[], [bytes, bytes]]: A callback which returns the default
client certificate bytes and private key bytes, both in PEM format.
Raises:
google.auth.exceptions.DefaultClientCertSourceError: If the default
client SSL credentials don't exist or are malformed.
"""
if not has_default_client_cert_source():
raise exceptions.MutualTLSChannelError(
"Default client cert source doesn't exist"
)
def callback():
try:
_, cert_bytes, key_bytes = _mtls_helper.get_client_cert_and_key()
except (OSError, RuntimeError, ValueError) as caught_exc:
new_exc = exceptions.MutualTLSChannelError(caught_exc)
six.raise_from(new_exc, caught_exc)
return cert_bytes, key_bytes
return callback
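# Illustrative usage sketch (not part of this module's API): the helper name below
# is hypothetical and only shows how the callback returned by
# default_client_cert_source() is typically consumed.
def _example_default_client_cert_usage():
    if not has_default_client_cert_source():
        return None
    callback = default_client_cert_source()
    cert_bytes, key_bytes = callback()  # both PEM-encoded byte strings
    return cert_bytes, key_bytes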
def default_client_encrypted_cert_source(cert_path, key_path):
"""Get a callback which returns the default encrpyted client SSL credentials.
Args:
cert_path (str): The cert file path. The default client certificate will
be written to this file when the returned callback is called.
key_path (str): The key file path. The default encrypted client key will
be written to this file when the returned callback is called.
Returns:
Callable[[], [str, str, bytes]]: A callback which generates the default
        client certificate, encrypted private key and passphrase. It writes
the certificate and private key into the cert_path and key_path, and
returns the cert_path, key_path and passphrase bytes.
Raises:
google.auth.exceptions.DefaultClientCertSourceError: If any problem
occurs when loading or saving the client certificate and key.
"""
if not has_default_client_cert_source():
raise exceptions.MutualTLSChannelError(
"Default client encrypted cert source doesn't exist"
)
def callback():
try:
(
_,
cert_bytes,
key_bytes,
passphrase_bytes,
) = _mtls_helper.get_client_ssl_credentials(generate_encrypted_key=True)
with open(cert_path, "wb") as cert_file:
cert_file.write(cert_bytes)
with open(key_path, "wb") as key_file:
key_file.write(key_bytes)
except (exceptions.ClientCertError, OSError) as caught_exc:
new_exc = exceptions.MutualTLSChannelError(caught_exc)
six.raise_from(new_exc, caught_exc)
return cert_path, key_path, passphrase_bytes
return callback
|
py | b40bfc31fdd4745c8e048dd68fc34e270c2bf5d6 | import rlkit.misc.hyperparameter as hyp
from multiworld.envs.mujoco.cameras import init_sawyer_camera_v3, sawyer_pusher_camera_upright, sawyer_door_env_camera, \
sawyer_pusher_camera_top_down, sawyer_pusher_camera_upright_v2
from multiworld.envs.mujoco.sawyer_xyz.sawyer_push_and_reach_env import SawyerPushAndReachXYEnv
from rlkit.launchers.launcher_util import run_experiment
from rlkit.misc.ml_util import PiecewiseLinearSchedule
from rlkit.torch.vae.conv_vae import ConvVAE
from rlkit.torch.vae.vae_trainer import ConvVAETrainer
from rlkit.torch.grill.launcher import generate_vae_dataset
def experiment(variant):
from rlkit.core import logger
import rlkit.torch.pytorch_util as ptu
beta = variant["beta"]
representation_size = variant["representation_size"]
train_data, test_data, info = generate_vae_dataset(
**variant['generate_vae_dataset_kwargs']
)
logger.save_extra_data(info)
logger.get_snapshot_dir()
if 'beta_schedule_kwargs' in variant:
# kwargs = variant['beta_schedule_kwargs']
# kwargs['y_values'][2] = variant['beta']
# kwargs['x_values'][1] = variant['flat_x']
# kwargs['x_values'][2] = variant['ramp_x'] + variant['flat_x']
variant['beta_schedule_kwargs']['y_values'][-1] = variant['beta']
beta_schedule = PiecewiseLinearSchedule(**variant['beta_schedule_kwargs'])
else:
beta_schedule = None
m = ConvVAE(representation_size, input_channels=3, **variant['conv_vae_kwargs'])
if ptu.gpu_enabled():
m.cuda()
t = ConvVAETrainer(train_data, test_data, m, beta=beta,
beta_schedule=beta_schedule, **variant['algo_kwargs'])
save_period = variant['save_period']
for epoch in range(variant['num_epochs']):
should_save_imgs = (epoch % save_period == 0)
t.train_epoch(epoch)
t.test_epoch(epoch, save_reconstruction=should_save_imgs,
save_scatterplot=should_save_imgs)
if should_save_imgs:
t.dump_samples(epoch)
if __name__ == "__main__":
n_seeds = 1
mode = 'local'
exp_prefix = 'sawyer_pusher_vae_arena_large_puck'
# n_seeds = 1
# mode = 'ec2'
# exp_prefix = 'sawyer_pusher_vae_real_world_goal_space_large_puck'
use_gpu = True
variant = dict(
num_epochs=5000,
algo_kwargs=dict(
is_auto_encoder=False,
batch_size=64,
lr=1e-3,
),
generate_vae_dataset_kwargs=dict(
N=20000,
oracle_dataset=True,
use_cached=True,
env_class=SawyerPushAndReachXYEnv,
env_kwargs=dict(
hide_goal_markers=True,
reward_type='puck_distance',
hand_low=(-0.28, 0.3, 0.05),
hand_high=(0.28, 0.9, 0.3),
puck_low=(-.4, .2),
puck_high=(.4, 1),
goal_low=(-0.28, 0.3, 0.02, -.2, .4),
goal_high=(0.28, 0.9, 0.02, .2, .8),
),
init_camera=sawyer_pusher_camera_upright_v2,
show=False,
tag='arena'
),
# beta_schedule_kwargs=dict(
# x_values=[0, 1000, 3000],
# y_values=[0, 0, 1],
# ),
conv_vae_kwargs=dict(),
save_period=100,
beta=5,
representation_size=16,
)
search_space = {
'beta':[2.5]
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
for _ in range(n_seeds):
for exp_id, variant in enumerate(sweeper.iterate_hyperparameters()):
run_experiment(
experiment,
exp_prefix=exp_prefix,
mode=mode,
variant=variant,
use_gpu=use_gpu,
num_exps_per_instance=1,
snapshot_mode='gap_and_last',
snapshot_gap=500,
)
|
py | b40bfc40ed7cf7db3555d32411d5f920ed809155 | #!/usr/bin/python
#-*- coding: utf-8 -*-
# >.>.>.>.>.>.>.>.>.>.>.>.>.>.>.>.
# Licensed under the Apache License, Version 2.0 (the "License")
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# --- File Name: loss_hdwG.py
# --- Creation Date: 19-04-2020
# --- Last Modified: Tue 21 Apr 2020 23:57:34 AEST
# --- Author: Xinqi Zhu
# .<.<.<.<.<.<.<.<.<.<.<.<.<.<.<.<
"""
HD disentanglement model with trainable G losses.
"""
import numpy as np
import tensorflow as tf
import dnnlib.tflib as tflib
from dnnlib.tflib.autosummary import autosummary
from training.loss_hd import calc_vc_loss, calc_cls_loss
from training.loss_hd import reparameterize, log_normal_pdf
def IandMandG_hyperplane_loss(G, D, I, M, opt, training_set, minibatch_size, I_info=None, latent_type='uniform',
C_global_size=10, D_global_size=0, D_lambda=0, C_lambda=1, cls_alpha=0, epsilon=1,
random_eps=False, traj_lambda=None, resolution_manual=1024, use_std_in_m=False,
model_type='hd_dis_model', hyperplane_lambda=1, prior_latent_size=512, hyperdir_lambda=1):
_ = opt
resolution_log2 = int(np.log2(resolution_manual))
nd_out_base = C_global_size // (resolution_log2 - 1)
nd_out_list = [nd_out_base + C_global_size % (resolution_log2 - 1) if i == 0 else nd_out_base for i in range(resolution_log2 - 1)]
nd_out_list = nd_out_list[::-1]
# Sample delta latents
C_delta_latents = tf.random.uniform([minibatch_size], minval=0, maxval=C_global_size, dtype=tf.int32)
C_delta_latents = tf.cast(tf.one_hot(C_delta_latents, C_global_size), tf.float32)
delta_var_latents = C_delta_latents
all_delta_var_latents = tf.eye(C_global_size, dtype=tf.float32)
labels = training_set.get_random_labels_tf(minibatch_size)
# Get variation direction in prior latent space.
prior_var_latents, hyperplane_constraint = M.get_output_for(delta_var_latents, is_training=True)
prior_all_dirs, _ = M.get_output_for(all_delta_var_latents, is_training=True)
prior_var_latents = autosummary('Loss/prior_var_latents', prior_var_latents)
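    # The next few lines appear to strip, from the sampled variation direction, its
    # component along all *other* concept directions: project onto every direction,
    # zero out the selected concept's own coefficient, map back to the prior space,
    # and subtract the result.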
manipulated_prior_dir = tf.matmul(prior_var_latents, tf.transpose(prior_all_dirs)) # [batch, C_global_size]
manipulated_prior_dir = manipulated_prior_dir * (1. - C_delta_latents) # [batch, C_global_size]
manipulated_prior_dir = tf.matmul(manipulated_prior_dir, prior_all_dirs) # [batch, prior_latent_size]
prior_dir_to_go = prior_var_latents - manipulated_prior_dir
# prior_dir_to_go = prior_var_latents
prior_dir_to_go = autosummary('Loss/prior_dir_to_go', prior_dir_to_go)
if latent_type == 'uniform':
prior_latents = tf.random.uniform([minibatch_size, prior_latent_size], minval=-2, maxval=2)
elif latent_type == 'normal':
prior_latents = tf.random.normal([minibatch_size, prior_latent_size])
elif latent_type == 'trunc_normal':
prior_latents = tf.random.truncated_normal([minibatch_size, prior_latent_size])
else:
raise ValueError('Latent type not supported: ' + latent_type)
prior_latents = autosummary('Loss/prior_latents', prior_latents)
if random_eps:
epsilon = epsilon * tf.random.normal([minibatch_size, 1], mean=0.0, stddev=2.0)
prior_delta_latents = prior_latents + epsilon * prior_dir_to_go
fake1_out = G.get_output_for(prior_latents, labels, is_training=True, randomize_noise=True, normalize_latents=False)
fake2_out = G.get_output_for(prior_delta_latents, labels, is_training=True, randomize_noise=True, normalize_latents=False)
fake1_out = autosummary('Loss/fake1_out', fake1_out)
# Send to D
fake_scores_out = D.get_output_for(fake1_out, labels, is_training=True)
G_loss = tf.nn.softplus(-fake_scores_out) # -log(sigmoid(fake_scores_out))
# Send to I
# regress_out_list = I.get_output_for(fake1_out, fake2_out, is_training=True)
# regress_out = tf.concat(regress_out_list, axis=1)
regress_out = I.get_output_for(fake1_out, fake2_out, is_training=True)
I_loss = calc_vc_loss(C_delta_latents, regress_out, C_global_size, D_lambda, C_lambda)
I_loss = autosummary('Loss/I_loss', I_loss)
dir_constraint = tf.reduce_sum(prior_var_latents * prior_dir_to_go, axis=1)
norm_prior_var_latents = tf.math.sqrt(tf.reduce_sum(prior_var_latents * prior_var_latents, axis=1))
norm_prior_dir_to_go = tf.math.sqrt(tf.reduce_sum(prior_dir_to_go * prior_dir_to_go, axis=1))
dir_constraint = - dir_constraint / (norm_prior_var_latents * norm_prior_dir_to_go)
dir_constraint = autosummary('Loss/dir_constraint', dir_constraint)
I_loss = I_loss + hyperplane_lambda * hyperplane_constraint + hyperdir_lambda * dir_constraint + G_loss
# I_loss = I_loss + hyperplane_lambda * hyperplane_constraint + G_loss
return I_loss, None
def IandG_vc_loss(G, D, I, M, opt, training_set, minibatch_size, I_info=None, latent_type='uniform',
C_global_size=10, D_global_size=0, D_lambda=0, C_lambda=1, cls_alpha=0, epsilon=1,
random_eps=False, traj_lambda=None, resolution_manual=1024, use_std_in_m=False,
model_type='hd_dis_model', hyperplane_lambda=1, prior_latent_size=512, hyperdir_lambda=1):
_ = opt
if latent_type == 'uniform':
latents = tf.random.uniform([minibatch_size, C_global_size], minval=-2, maxval=2)
elif latent_type == 'normal':
latents = tf.random.normal([minibatch_size, C_global_size])
elif latent_type == 'trunc_normal':
latents = tf.random.truncated_normal([minibatch_size, C_global_size])
else:
raise ValueError('Latent type not supported: ' + latent_type)
latents = autosummary('Loss/latents', latents)
# Sample delta latents
C_delta_latents = tf.random.uniform([minibatch_size], minval=0, maxval=C_global_size, dtype=tf.int32)
C_delta_latents = tf.cast(tf.one_hot(C_delta_latents, C_global_size), tf.float32)
if not random_eps:
delta_target = C_delta_latents * epsilon
# delta_latents = tf.concat([tf.zeros([minibatch_size, D_global_size]), delta_target], axis=1)
else:
epsilon = epsilon * tf.random.normal([minibatch_size, 1], mean=0.0, stddev=2.0)
# delta_target = tf.math.abs(C_delta_latents * epsilon)
delta_target = C_delta_latents * epsilon
# delta_latents = tf.concat([tf.zeros([minibatch_size, D_global_size]), delta_target], axis=1)
delta_var_latents = delta_target
delta_latents = delta_var_latents + latents
labels = training_set.get_random_labels_tf(minibatch_size)
# Get variation direction in prior latent space.
prior_latents = M.get_output_for(latents, is_training=True)
prior_delta_latents = M.get_output_for(delta_latents, is_training=True)
prior_delta_latents = autosummary('Loss/prior_delta_latents', prior_delta_latents)
fake1_out = G.get_output_for(prior_latents, labels, is_training=True, randomize_noise=True, normalize_latents=False)
fake2_out = G.get_output_for(prior_delta_latents, labels, is_training=True, randomize_noise=True, normalize_latents=False)
fake1_out = autosummary('Loss/fake1_out', fake1_out)
# Send to D
fake_scores_out = D.get_output_for(fake1_out, labels, is_training=True)
G_loss = tf.nn.softplus(-fake_scores_out) # -log(sigmoid(fake_scores_out))
# Send to I
regress_out = I.get_output_for(fake1_out, fake2_out, is_training=True)
I_loss = calc_vc_loss(C_delta_latents, regress_out, C_global_size, D_lambda, C_lambda)
I_loss = autosummary('Loss/I_loss', I_loss)
I_loss = I_loss + G_loss
return I_loss, None
|
py | b40bfca0a3f0b420eb3d0047237a167e9c14d603 | from __future__ import absolute_import, division, print_function
import time
from flytekit.sdk.tasks import python_task, inputs, outputs
from flytekit.sdk.types import Types
from flytekit.sdk.workflow import workflow_class
from six.moves import range
@inputs(value1_to_add=Types.Integer, value2_to_add=Types.Integer)
@outputs(out=Types.Integer)
@python_task(cpu_request="5", cpu_limit="5", memory_request="32G")
def sum_and_print(workflow_parameters, value1_to_add, value2_to_add, out):
for i in range(11*60):
print("This is load test task. I have been running for {} seconds.".format(i))
time.sleep(1)
summed = sum([value1_to_add, value2_to_add])
print("Summed up to: {}".format(summed))
out.set(summed)
@workflow_class
class FlytePythonLoadTestWorkflow(object):
print_sum = [None] * 30
for i in range(0, 30):
print_sum[i] = sum_and_print(
value1_to_add=1,
value2_to_add=1
)
|
py | b40bfd2aae6ef9b61da2f1118fa7f8f558e00c33 | # %% [markdown]
# The description and walkthrough of this code are available in the stream:
#
# **Zorzal TV #1: Mapas en Python**
# https://www.twitch.tv/videos/1057304044?t=0h7m7s
# %%
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
import geopandas as gpd
import contextily as cx
from sklearn.preprocessing import minmax_scale
from matplotlib import colorbar
from matplotlib_scalebar.scalebar import ScaleBar # https://github.com/ppinard/matplotlib-scalebar/
get_ipython().run_line_magic('matplotlib', 'inline')
get_ipython().run_line_magic('config', "InlineBackend.figure_format = 'retina'")
sns.set(context='paper', font='Fira Sans Extra Condensed', style='ticks', palette='colorblind', font_scale=1.0)
# %%
from aves.data import DATA_PATH, eod, census
from aves.features.utils import normalize_rows
from aves.features.geo import to_point_geodataframe, clip_area_geodataframe
from aves.features.weights import weighted_mean
from aves.visualization.figures import figure_from_geodataframe, small_multiples_from_geodataframe, tighten_figure
from aves.visualization.maps import dot_map, choropleth_map, bubble_map, heat_map, add_labels_from_dataframe, GeographicalNodeLink
from aves.visualization.colors import color_legend, colormap_from_palette
from aves.visualization.fdeb import FDB
# %%
zones = gpd.read_file(DATA_PATH / 'processed/scl_zonas_urbanas.json').set_index('ID')
zones.head()
# %%
zones.plot()
# %%
viajes = eod.read_trips()
print(len(viajes))
# discard sectors that are not relevant as trip origins or destinations
viajes = viajes[(viajes['SectorOrigen'] != 'Exterior a RM')
& (viajes['SectorDestino'] != 'Exterior a RM')
& (viajes['SectorOrigen'] != 'Extensión Sur-Poniente')
& (viajes['SectorDestino'] != 'Extensión Sur-Poniente')
& pd.notnull(viajes['SectorOrigen'])
& pd.notnull(viajes['SectorDestino'])
                # also discard trips that were imputed in the survey
& (viajes['Imputada'] == 0)
                # and finally discard trips whose distance suggests they are either extremely short or too long for the size of the city
& (viajes['DistManhattan'].between(500, 45000))]
print(len(viajes))
# %%
personas = eod.read_people()
personas.head()
# %%
viajes_persona = viajes.merge(personas)
viajes_persona.head()
# %%
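# PesoLaboral appears to combine the trip-level and person-level expansion factors
# for a normal working day into a single survey weight per trip record.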
viajes_persona['PesoLaboral'] = viajes_persona['FactorLaboralNormal'] * viajes_persona['Factor_LaboralNormal']
# %%
viajes_persona = viajes_persona[pd.notnull(viajes_persona['PesoLaboral'])]
len(viajes_persona)
# %%
print('{} viajes expandidos a {}'.format(len(viajes_persona), int(viajes_persona['PesoLaboral'].sum())))
# %% [markdown]
# ## 1. What is the geographic distribution of trips from home to work according to transport mode?
# %%
viajes_persona[['OrigenCoordX', 'OrigenCoordY']].head()
# %%
origenes_viajes = to_point_geodataframe(viajes_persona, 'OrigenCoordX', 'OrigenCoordY', crs='epsg:32719')
origenes_viajes.head()
# %%
zones = zones.to_crs(origenes_viajes.crs)
zones.plot()
# %%
fig, ax = figure_from_geodataframe(zones, height=6, remove_axes=True)
zones.plot(ax=ax, color='#efefef', edgecolor='#abacab', linewidth=1)
origenes_viajes.plot(ax=ax, markersize=1, marker='.', alpha=0.5)
tighten_figure(fig)
# %%
# the lsuffix and rsuffix parameters give the suffix appended to the columns of each table
print(len(origenes_viajes))
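# op='within' keeps only the trip origins that fall inside one of the urban zone
# polygons; the two print() calls show how many records survive the join.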
origenes_urbanos = gpd.sjoin(origenes_viajes, zones, op='within', lsuffix='_l', rsuffix='_r')
print(len(origenes_urbanos))
# %%
fig, ax = figure_from_geodataframe(zones, height=6, remove_axes=True)
zones.plot(ax=ax, color='#efefef', edgecolor='#abacab', linewidth=1)
origenes_urbanos.plot(ax=ax, markersize=1, marker='.', alpha=0.5)
tighten_figure(fig)
# %%
origenes_urbanos.ModoDifusion.value_counts(normalize=True)
# %%
origenes_a_graficar = origenes_urbanos[(origenes_urbanos.Proposito == 'Al trabajo') &
(origenes_urbanos.ModoDifusion.isin(['Bip!', 'Auto', 'Caminata', 'Bicicleta']))]
# %%
fig, ax = figure_from_geodataframe(zones, height=6, remove_axes=True)
zones.plot(ax=ax, color='#efefef', edgecolor='#abacab', linewidth=1)
dot_map(ax, origenes_a_graficar, category='ModoDifusion', size=10, palette='Set3')
tighten_figure(fig)
# %%
origenes_urbanos['PesoLaboral'].describe()
# %%
origenes_urbanos['PesoVisual'] = minmax_scale(origenes_urbanos['PesoLaboral'], (0.01, 1.0))
origenes_urbanos['PesoVisual'].describe()
# %%
fig, ax = figure_from_geodataframe(zones, height=6, remove_axes=True)
zones.plot(ax=ax, color='#efefef', edgecolor='white', linewidth=1)
bubble_map(ax,
origenes_urbanos[(origenes_urbanos.Proposito == 'Al trabajo') & origenes_urbanos.ModoDifusion.isin(['Bip!', 'Auto', 'Caminata', 'Bicicleta'])],
category='ModoDifusion',
size='PesoVisual',
scale=500,
sort_categories=False,
palette='husl',
alpha=0.45,
edge_color='none')
tighten_figure(fig)
# %%
fig, axes = small_multiples_from_geodataframe(zones, 4, height=6, col_wrap=2, remove_axes=True)
colors = sns.color_palette('cool', n_colors=4)
# zip lets us iterate over three lists at the same time
for ax, modo, color in zip(axes, ['Auto', 'Bip!', 'Caminata', 'Bicicleta'], colors):
zones.plot(ax=ax, color='#efefef', edgecolor='#abacab', linewidth=0.5)
ax.set_title(modo)
origenes_a_graficar = origenes_urbanos[(origenes_urbanos.Proposito == 'Al trabajo') &
(origenes_urbanos.ModoDifusion == modo)]
bubble_map(ax, origenes_a_graficar, size='PesoVisual', scale=500, color=color, edge_color='none', alpha=0.45)
tighten_figure(fig)
# %%
fig, ax = figure_from_geodataframe(zones, height=6, remove_axes=True)
zones.plot(ax=ax, color='#efefef', edgecolor='white', linewidth=1)
heat_map(ax, origenes_urbanos[origenes_urbanos.Proposito == 'Al trabajo'],
weight_column='PesoLaboral', alpha=0.75, palette='inferno', n_levels=10,
         # area of influence
bandwidth=1000
)
tighten_figure(fig)
# %%
fig, axes = small_multiples_from_geodataframe(zones, 4, height=6, col_wrap=2, remove_axes=True)
for ax, modo in zip(axes, ['Auto', 'Bip!', 'Caminata', 'Bicicleta']):
zones.plot(ax=ax, color='#efefef', edgecolor='#abacab', linewidth=0.5)
ax.set_title(modo)
origenes_a_graficar = origenes_urbanos[(origenes_urbanos.Proposito == 'Al trabajo') &
(origenes_urbanos.ModoDifusion == modo)]
heat_map(ax, origenes_a_graficar, n_levels=10,
weight_column='PesoLaboral', alpha=0.75, palette='inferno',
             # area of influence
bandwidth=1000,
             # do not paint areas with very low values
low_threshold=0.05
)
tighten_figure(fig)
cax = fig.add_axes([0.25, -0.012, 0.5, 0.01])
color_legend(cax, colormap_from_palette('inferno', n_colors=10), remove_axes=True)
cax.set_title('Magnitud relativa de los viajes, de menor a mayor', loc='center', fontsize=8)
# %% [markdown]
# ## 2. How far away is work depending on place of residence?
# %%
viajes_trabajo = viajes_persona[(viajes_persona.Proposito == 'Al trabajo') &
(pd.notnull(viajes_persona.PesoLaboral))]
print(len(viajes_trabajo), viajes_trabajo.PesoLaboral.sum())
# %%
viajes_trabajo['DistEuclidiana'].describe()
# %%
viajes_trabajo['DistEuclidiana'].mean(), weighted_mean(viajes_trabajo, 'DistEuclidiana', 'PesoLaboral')
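# %%
# Illustrative sanity check (added for clarity; assumes weighted_mean computes the
# standard weighted average, sum(w * x) / sum(w)): numpy's np.average with the
# survey expansion factors as weights should match the value above.
np.average(viajes_trabajo['DistEuclidiana'], weights=viajes_trabajo['PesoLaboral'])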
# %%
distancia_zonas = (viajes_trabajo
.groupby(['ZonaOrigen'])
.apply(lambda x: weighted_mean(x, 'DistEuclidiana', 'PesoLaboral'))
.rename('distancia_al_trabajo')
)
distancia_zonas
# %%
distancia_zonas.plot(kind='kde')
plt.xlim([0, distancia_zonas.max()])
plt.title('Distancia al Trabajo por Zonas')
plt.xlabel('Distancia')
plt.ylabel('Densidad (KDE)')
sns.despine()
# %%
fig, ax = figure_from_geodataframe(zones, height=8, remove_axes=True)
ax, cax = choropleth_map(ax, zones.join(distancia_zonas, how='inner'), 'distancia_al_trabajo',
k=6, legend_type='hist', binning='fisher_jenks',
cbar_location='lower center', cbar_height=0.4, cbar_width=6)
cax.set_title('Distancia al Trabajo')
tighten_figure(fig);
# %%
# %%
comunas = census.read_census_map('comuna').to_crs(zones.crs)
comunas.plot()
# %%
comunas_urbanas = comunas[comunas['COMUNA'].isin(zones['Com'].unique())].drop('NOM_COMUNA', axis=1).copy()
comunas_urbanas['NombreComuna'] = comunas_urbanas['COMUNA'].map(dict(zip(zones['Com'], zones['Comuna'])))
comunas_urbanas.plot()
# %%
bounding_box = zones.total_bounds
bounding_box
# %%
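# Clip the comuna polygons to the bounding box of the urban zones, with a
# 500-unit buffer (meters here, given the projected CRS EPSG:32719 used above).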
comunas_urbanas = clip_area_geodataframe(comunas_urbanas, zones.total_bounds, buffer=500)
comunas_urbanas.plot()
# %%
fig, ax = figure_from_geodataframe(zones, height=8, remove_axes=True)
ax, cax = choropleth_map(ax, zones.join(distancia_zonas, how='inner'), 'distancia_al_trabajo',
k=6, legend_type='hist', binning='quantiles',
cbar_location='lower center', cbar_height=0.1, cbar_width=6, linewidth=0)
comunas_urbanas.plot(ax=ax, facecolor='none', edgecolor='black', linewidth=0.5)
cax.set_title('Distancia al Trabajo')
# %%
fig, ax = figure_from_geodataframe(zones, height=8, remove_axes=True)
ax, cax = choropleth_map(ax, zones.join(distancia_zonas, how='inner'), 'distancia_al_trabajo',
k=6, legend_type='colorbar', binning='quantiles', cmap='RdPu',
cbar_location='lower center', cbar_height=0.1, cbar_width=6, linewidth=0, )
comunas_urbanas.plot(ax=ax, facecolor='none', edgecolor='black', linewidth=0.5)
add_labels_from_dataframe(ax, comunas_urbanas, 'NombreComuna', font_size=9, outline_width=1)
cax.set_title('Distancia al Trabajo')
fig.tight_layout()
# %%
fig, ax = figure_from_geodataframe(zones, height=15, remove_axes=True)
cx.add_basemap(ax, crs=zones.crs.to_string(), source="../data/processed/scl_toner_12.tif", interpolation='hanning', zorder=0)
ax, cax = choropleth_map(ax, zones.join(distancia_zonas, how='inner'), 'distancia_al_trabajo',
k=6, legend_type='colorbar', binning='quantiles', cmap='RdPu',
cbar_location='center right', cbar_height=4, cbar_width=0.2, linewidth=0,
cbar_orientation='vertical', alpha=0.8)
cax.set_title('Distancia al Trabajo')
ax.add_artist(ScaleBar(1, location='lower right', color='#abacab'))
x, y, arrow_length = 0.95, 0.1, 0.05
ax.annotate('N', xy=(x, y), xytext=(x, y-arrow_length),
arrowprops=dict(facecolor='#444444', width=5, headwidth=15),
ha='center', va='center', fontsize=20, fontname='Fira Sans Extra Condensed', color='#444444',
xycoords=ax.transAxes);
# %%
fig, axes = small_multiples_from_geodataframe(zones, 4, height=6, col_wrap=2, remove_axes=True)
for ax, modo in zip(axes, ['Auto', 'Bip!', 'Caminata', 'Bicicleta']):
#zones.plot(ax=ax, color='#efefef', edgecolor='#abacab', linewidth=0.5)
ax.set_title(f'Viajes al trabajo en {modo}')
cx.add_basemap(ax, crs=zones.crs.to_string(), source="../data/processed/scl_toner_12.tif", interpolation='hanning', zorder=0)
origenes_a_graficar = origenes_urbanos[(origenes_urbanos.Proposito == 'Al trabajo') &
(origenes_urbanos.ModoDifusion == modo)]
heat_map(ax, origenes_a_graficar, n_levels=10,
weight_column='PesoLaboral', alpha=0.75, palette='inferno',
             # area of influence
bandwidth=750,
             # do not paint areas with very low values
low_threshold=0.01,
legend_type=None,
return_heat=True
)
fig.tight_layout()
cax = fig.add_axes([0.25, -0.012, 0.5, 0.01])
cax.set_title('Magnitud relativa de los viajes, de menor a mayor', loc='center', fontsize=8)
cax.set_axis_off()
cb3 = colorbar.ColorbarBase(cax, cmap=colormap_from_palette('inferno', n_colors=10), alpha=0.75,
#norm=norm,
ticks=range(10),
spacing='uniform',
orientation='horizontal')
# %% [markdown]
# # 3. How is the city connected according to origin-destination relations?
# %%
matriz = (viajes_trabajo[(viajes_trabajo['ComunaOrigen'].isin(comunas_urbanas['NombreComuna']))
& (viajes_trabajo['ComunaDestino'].isin(comunas_urbanas['NombreComuna']))]
.groupby(['ComunaOrigen', 'ComunaDestino'])
.agg(n_viajes=('PesoLaboral', 'sum'))
.reset_index()
)
matriz.head()
# %%
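# normalize_rows (imported from aves.features.utils) presumably rescales each row to
# sum to 1, so the heatmap shows, for each origin comuna, the share of its work trips
# going to each destination comuna.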
fig, ax = plt.subplots(figsize=(12, 9))
sns.heatmap(matriz.set_index(['ComunaOrigen', 'ComunaDestino'])['n_viajes'].unstack(fill_value=0).pipe(normalize_rows), cmap='inferno_r', linewidth=1)
# %%
comunas_urbanas.head()
# %%
geonodelink = GeographicalNodeLink.from_edgelist_and_geodataframe(
matriz[matriz['n_viajes'] > matriz['n_viajes'].quantile(0.25)],
comunas_urbanas,
source='ComunaOrigen',
target='ComunaDestino',
node_column='NombreComuna',
weight='n_viajes')
# %%
fig, ax = figure_from_geodataframe(zones, height=6, set_limits=True, remove_axes=True)
comunas_urbanas.plot(ax=ax, facecolor='none', edgecolor='#abacab', zorder=0)
geonodelink.plot_nodes(ax, color='white', edgecolor='black', size=250, zorder=5, use_weights='in-degree', min_size=5)
geonodelink.plot_weighted_edges(ax, palette='plasma', log_transform=False, weight_bins=4,
min_linewidth=1.0, linewidth=4, min_alpha=0.5, alpha=0.95,
with_arrows=True, arrow_shrink=3, arrow_scale=10, zorder=4)
ax.set_title('Viajes al trabajo en Santiago (en días laborales, EOD 2012)');
# %%
matriz_zonas = (viajes_trabajo[(viajes_trabajo['ZonaOrigen'] != viajes_trabajo['ZonaDestino'])
& (viajes_trabajo['ZonaOrigen'].isin(zones.index))
& (viajes_trabajo['ZonaDestino'].isin(zones.index))]
.groupby(['ComunaOrigen', 'ZonaOrigen', 'ZonaDestino'])
.agg(n_viajes=('PesoLaboral', 'sum'))
.sort_values('n_viajes', ascending=False)
.assign(cumsum_viajes=lambda x: x['n_viajes'].cumsum())
.assign(cumsum_viajes=lambda x: x['cumsum_viajes'] / x['cumsum_viajes'].max())
.reset_index()
)
matriz_zonas.head()
# %%
matriz_zonas['cumsum_viajes'].plot()
# %%
matriz_zonas = matriz_zonas[matriz_zonas['cumsum_viajes'] <= 0.5]
matriz_zonas.shape
# %%
merged_zones = zones.reset_index().dissolve('ID')
# %%
zone_nodelink = GeographicalNodeLink.from_edgelist_and_geodataframe(
matriz_zonas,
merged_zones,
source='ZonaOrigen',
target='ZonaDestino',
weight='n_viajes')
# %%
fig, ax = figure_from_geodataframe(zones, height=6, set_limits=True, remove_axes=True)
zones.plot(ax=ax, facecolor='none', edgecolor='#abacab', zorder=0)
comunas_urbanas.plot(ax=ax, facecolor='none', edgecolor='black', linewidth=0.5, zorder=0)
zone_nodelink.plot_nodes(ax, color='white', edgecolor='black', size=300, zorder=5, use_weights='in-degree', min_size=3)
zone_nodelink.plot_weighted_edges(ax, palette='plasma', log_transform=False, weight_bins=4,
min_linewidth=1.0, linewidth=4, min_alpha=0.5, alpha=0.95,
with_arrows=True, arrow_shrink=3, arrow_scale=10, zorder=4)
ax.set_title('Viajes al trabajo en Santiago (en días laborales, EOD 2012)');
# %%
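# Bundle the origin-destination edges with force-directed edge bundling (the FDB
# class from aves.visualization.fdeb) to reduce visual clutter in the flow map.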
bundled_zone_network = FDB(zone_nodelink,
                           # higher values mean more resistance
K=1,
                           # higher values make the lines more jiggly
S=500,
I=10,
compatibility_threshold=0.6)
# %%
fig, ax = figure_from_geodataframe(zones, height=8, set_limits=True, remove_axes=True)
zones.plot(ax=ax, facecolor='none', edgecolor='#abacab', zorder=0)
comunas_urbanas.plot(ax=ax, facecolor='none', edgecolor='black', linewidth=0.5, zorder=0)
zone_nodelink.plot_nodes(ax, color='white', edgecolor='black', size=250, zorder=5, use_weights='in-degree', min_size=5)
zone_nodelink.plot_weighted_edges(ax,
palette='plasma', log_transform=False, weight_bins=10,
min_linewidth=0.5, linewidth=1.5, min_alpha=0.5, alpha=0.9,
with_arrows=True, arrow_shrink=3, arrow_scale=10, zorder=4)
# %%
fig, ax = figure_from_geodataframe(zones, height=8, set_limits=True, remove_axes=True)
cx.add_basemap(ax, crs=zones.crs.to_string(), source="../data/processed/scl_toner_12.tif", interpolation='hanning', zorder=0)
zone_nodelink.plot_nodes(ax, color='white', edgecolor='black', size=250, zorder=5, use_weights='in-degree', min_size=5)
zone_nodelink.plot_weighted_edges(ax,
palette='plasma', log_transform=False, weight_bins=10,
min_linewidth=0.5, linewidth=1.5, min_alpha=0.5, alpha=0.9,
with_arrows=True, arrow_shrink=3, arrow_scale=10, zorder=4)
|
py | b40bfd36e5c3abd76869562b368e2330d74a9497 | import os
from django.test import TestCase
from mock import patch
class InheritanceTests(TestCase):
@patch.dict(os.environ, clear=True,
DJANGO_CONFIGURATION='Inheritance',
DJANGO_SETTINGS_MODULE='tests.settings.single_inheritance')
def test_inherited(self):
from tests.settings import single_inheritance
self.assertEqual(
single_inheritance.ALLOWED_HOSTS,
['test']
)
@patch.dict(os.environ, clear=True,
DJANGO_CONFIGURATION='Inheritance',
DJANGO_SETTINGS_MODULE='tests.settings.multiple_inheritance')
def test_inherited2(self):
from tests.settings import multiple_inheritance
self.assertEqual(
multiple_inheritance.ALLOWED_HOSTS,
['test', 'test-test']
)
@patch.dict(os.environ, clear=True,
DJANGO_CONFIGURATION='Inheritance',
DJANGO_SETTINGS_MODULE='tests.settings.mixin_inheritance')
def test_inherited3(self):
from tests.settings import mixin_inheritance
self.assertEqual(
mixin_inheritance.ALLOWED_HOSTS,
['test1', 'test2', 'test3']
)
|
py | b40bfe1586b2223aa4cbbbed4c4029e7a2a6d0d7 | #!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
class SIF:
def __init__(self, fname, width = 25, height = 6):
        with open(fname, 'r') as f:
self.data = [int(i) for i in f.read().strip()]
self.width = width
self.height = height
self.data = np.asarray(self.data)
self.layers = self.data.reshape(-1, height, width)
def collapse(self):
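        # Flatten the layers front to back: each pixel keeps the first
        # non-transparent value it encounters (2 marks a transparent pixel;
        # -1 marks a pixel not yet filled).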
self.collapsed = np.zeros((self.height, self.width)).astype(int) - 1
for idx, v in np.ndenumerate(self.layers):
l, h, w = idx
if self.collapsed[(h, w)] == -1:
# not filled yet
if v != 2:
self.collapsed[(h, w)] = v
return self.collapsed
if __name__ == '__main__':
sif = SIF('input')
# part 1
zero_min_layer = np.argmin((sif.layers.reshape(-1, sif.width*sif.height) == 0).sum(axis = 1))
l = sif.layers[zero_min_layer]
print(np.sum(l == 1)*np.sum(l == 2))
# part 2
plt.imshow(sif.collapse())
plt.show()
|
py | b40c02bf34ed2d6e6c5d5932ab016b58f451040c | import socket
from contextlib import closing
import json
from tempfile import NamedTemporaryFile
from subprocess import Popen, PIPE
import logging
import zmq
from aiozmq import create_zmq_stream
import asyncio
def find_free_port() -> int:
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
sock.bind(('', 0))
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return sock.getsockname()[1]
async def test_kernel() -> None:
transport = 'tcp'
ip = '127.0.0.1'
shell_port = find_free_port()
control_port = find_free_port()
stdin_port = find_free_port()
iopub_port = find_free_port()
heartbeat_port = find_free_port()
configuration = {
'transport': transport,
'ip': ip,
'shell_port': shell_port,
'control_port': control_port,
'stdin_port': stdin_port,
'iopub_port': iopub_port,
'hb_port': heartbeat_port,
'key': 'a7d9f4f3-acad37f08d4fe05069a03422',
'signature_scheme': 'hmac-sha256'
}
configuration_json = json.dumps(configuration)
with NamedTemporaryFile() as configuration_file:
configuration_file.write(str.encode(configuration_json))
configuration_file.flush()
with Popen(['/app/build/apps/rocketjoe_kernel/rocketjoe_kernel', '--jupyter_connection', configuration_file.name]) as rocketjoe:
shell_stream = await create_zmq_stream(
zmq.DEALER,
connect=f'{transport}://{ip}:{shell_port}'
)
control_stream = await create_zmq_stream(
zmq.DEALER,
connect=f'{transport}://{ip}:{control_port}'
)
stdin_stream = await create_zmq_stream(
zmq.DEALER,
connect=f'{transport}://{ip}:{stdin_port}'
)
iopub_stream = await create_zmq_stream(
zmq.SUB,
connect=f'{transport}://{ip}:{iopub_port}'
)
iopub_stream.transport.subscribe(b'')
heartbeat_stream = await create_zmq_stream(
zmq.REQ,
connect=f'{transport}://{ip}:{heartbeat_port}'
)
rocketjoe.poll()
if rocketjoe.returncode is None:
#logging.info(await iopub_stream.read())
shell_stream.write([
b'<IDS|MSG>',
b'bd4f43215c9d72a4cb53599fd054d5c4cc21d6d0a6654fe4fe5aa0767637500c',
b'{"msg_id":"59435563b60d4158a5e96614bdcfb9f7_0","msg_type":"kernel_info_request","username":"username","session":"59435563b60d4158a5e96614bdcfb9f7","date":"2020-07-08T17:21:46.644570Z","version":"5.3"}',
b'{}',
b'{}',
b'{}'
])
raw_datas = await shell_stream.read()
data = [raw_data.decode('ascii') for raw_data in raw_datas]
header = json.loads(data[2])
parent_header = json.loads(data[3])
metadata = json.loads(data[4])
content = json.loads(data[5])
for key in ['date', 'msg_id', 'session', 'username', 'version']:
del header[key]
del parent_header[key]
del content['banner']
del content['language_info']['version']
assert header == {'msg_type': 'kernel_info_reply'}
assert parent_header == {'msg_type': 'kernel_info_request'}
assert metadata == {}
assert content == {
'help_links': '',
'implementation': 'ipython',
'implementation_version': 'ipython',
'language_info': {
'codemirror_mode': {
'name': 'ipython',
'version': 3
},
'file_extension': '.py',
'mimetype': 'text/x-python',
'name': 'python',
'nbconvert_exporter': 'python',
'pygments_lexer': 'ipython3'
},
'protocol_version': '5.3'
}
rocketjoe.kill()
logging.basicConfig(level=logging.DEBUG)
loop = asyncio.get_event_loop()
loop.run_until_complete(test_kernel())
|
py | b40c03464195dd8b6ed89af9857a4b98d0e33046 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2020-04-29 09:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0003_post_content_html'),
]
operations = [
migrations.AddField(
model_name='post',
name='is_md',
field=models.BooleanField(default=False, verbose_name='markdown语法'),
),
]
|
py | b40c0463a6cbeeab09f195c82d8a9546420e3b4a | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import ray
def setup(*args):
if not hasattr(setup, "is_initialized"):
ray.init(num_workers=4, num_cpus=4)
setup.is_initialized = True
@ray.remote
def sleep(x):
time.sleep(x)
class WaitSuite(object):
timeout = 10
timer = time.time
def time_wait_task(self):
ray.wait([sleep.remote(0.1)])
def time_wait_many_tasks(self, num_returns):
tasks = [sleep.remote(i / 5) for i in range(4)]
ray.wait(tasks, num_returns=num_returns)
time_wait_many_tasks.params = list(range(1, 4))
time_wait_many_tasks.param_names = ["num_returns"]
def time_wait_timeout(self, timeout):
ray.wait([sleep.remote(0.5)], timeout=timeout)
time_wait_timeout.params = [200, 800]
time_wait_timeout.param_names = ["timeout_ms"]
|
py | b40c046ef7fbef3d0c89b8173ca79c3dce7d0f0e | # Copyright (c) 2017 The Verde Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
#
# This code is part of the Fatiando a Terra project (https://www.fatiando.org)
#
"""
Trend Estimation
================
Trend estimation and removal is a common operation, particularly when dealing
with geophysical data. Moreover, some of the interpolation methods, like
:class:`verde.Spline`, can struggle with long-wavelength trends in the data.
The :class:`verde.Trend` class fits a 2D polynomial trend of arbitrary degree
to the data and can be used to remove it.
"""
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import numpy as np
import verde as vd
########################################################################################
# Our sample air temperature data from Texas has a clear trend from land to the ocean:
data = vd.datasets.fetch_texas_wind()
coordinates = (data.longitude, data.latitude)
plt.figure(figsize=(8, 6))
ax = plt.axes(projection=ccrs.Mercator())
plt.scatter(
data.longitude,
data.latitude,
c=data.air_temperature_c,
s=100,
cmap="plasma",
transform=ccrs.PlateCarree(),
)
plt.colorbar().set_label("Air temperature (C)")
vd.datasets.setup_texas_wind_map(ax)
plt.show()
########################################################################################
# We can estimate the polynomial coefficients for this trend:
trend = vd.Trend(degree=1).fit(coordinates, data.air_temperature_c)
print(trend.coef_)
########################################################################################
# More importantly, we can predict the trend values and remove them from our data:
trend_values = trend.predict(coordinates)
residuals = data.air_temperature_c - trend_values
fig, axes = plt.subplots(
1, 2, figsize=(10, 6), subplot_kw=dict(projection=ccrs.Mercator())
)
ax = axes[0]
ax.set_title("Trend")
tmp = ax.scatter(
data.longitude,
data.latitude,
c=trend_values,
s=60,
cmap="plasma",
transform=ccrs.PlateCarree(),
)
plt.colorbar(tmp, ax=ax, orientation="horizontal", pad=0.06)
vd.datasets.setup_texas_wind_map(ax)
ax = axes[1]
ax.set_title("Residuals")
maxabs = vd.maxabs(residuals)
tmp = ax.scatter(
data.longitude,
data.latitude,
c=residuals,
s=60,
cmap="bwr",
vmin=-maxabs,
vmax=maxabs,
transform=ccrs.PlateCarree(),
)
plt.colorbar(tmp, ax=ax, orientation="horizontal", pad=0.08)
vd.datasets.setup_texas_wind_map(ax)
plt.show()
########################################################################################
# The fitting, prediction, and residual calculation can all be done in a single step
# using the :meth:`~verde.Trend.filter` method:
# filter always outputs coordinates and weights as well, which we don't need and will
# ignore here.
__, res_filter, __ = vd.Trend(degree=1).filter(coordinates, data.air_temperature_c)
print(np.allclose(res_filter, residuals))
########################################################################################
# Additionally, :class:`verde.Trend` implements the :ref:`gridder interface <overview>`
# and has the :meth:`~verde.Trend.grid` and :meth:`~verde.Trend.profile` methods.
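########################################################################################
# As a quick illustrative sketch (assuming the usual gridder call signature), the fitted
# trend could be evaluated on a regular grid covering the data region:
grid = trend.grid(region=vd.get_region(coordinates), spacing=0.5)
print(grid)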
|
py | b40c060034ba64b675213792e527f8695e747a03 | from django.contrib.auth import get_user_model
from django.core.exceptions import ValidationError as DjangoValidationError
from django.http import HttpRequest
from django.urls.exceptions import NoReverseMatch
from django.utils.translation import gettext_lazy as _
from requests.exceptions import HTTPError
from rest_framework import serializers
from rest_framework.reverse import reverse
try:
from allauth.account import app_settings as allauth_settings
from allauth.account.adapter import get_adapter
from allauth.account.utils import setup_user_email
from allauth.socialaccount.helpers import complete_social_login
from allauth.socialaccount.models import SocialAccount
from allauth.socialaccount.providers.base import AuthProcess
from allauth.utils import email_address_exists, get_username_max_length
except ImportError:
raise ImportError('allauth needs to be added to INSTALLED_APPS.')
class SocialAccountSerializer(serializers.ModelSerializer):
"""
serialize allauth SocialAccounts for use with a REST API
"""
class Meta:
model = SocialAccount
fields = (
'id',
'provider',
'uid',
'last_login',
'date_joined',
)
class SocialLoginSerializer(serializers.Serializer):
access_token = serializers.CharField(required=False, allow_blank=True)
code = serializers.CharField(required=False, allow_blank=True)
id_token = serializers.CharField(required=False, allow_blank=True)
def _get_request(self):
request = self.context.get('request')
if not isinstance(request, HttpRequest):
request = request._request
return request
def get_social_login(self, adapter, app, token, response):
"""
:param adapter: allauth.socialaccount Adapter subclass.
        Usually OAuthAdapter or OAuth2Adapter
:param app: `allauth.socialaccount.SocialApp` instance
:param token: `allauth.socialaccount.SocialToken` instance
        :param response: Provider's response for OAuth1. Not used in the OAuth2 flow.
        :returns: A populated instance of the
            `allauth.socialaccount.models.SocialLogin` class
"""
request = self._get_request()
social_login = adapter.complete_login(request, app, token, response=response)
social_login.token = token
return social_login
def set_callback_url(self, view, adapter_class):
# first set url from view
self.callback_url = getattr(view, 'callback_url', None)
if not self.callback_url:
# auto generate base on adapter and request
try:
self.callback_url = reverse(
viewname=adapter_class.provider_id + '_callback',
request=self._get_request(),
)
except NoReverseMatch:
raise serializers.ValidationError(
_('Define callback_url in view'),
)
def validate(self, attrs):
view = self.context.get('view')
request = self._get_request()
if not view:
raise serializers.ValidationError(
_('View is not defined, pass it as a context variable'),
)
adapter_class = getattr(view, 'adapter_class', None)
if not adapter_class:
raise serializers.ValidationError(_('Define adapter_class in view'))
adapter = adapter_class(request)
app = adapter.get_provider().get_app(request)
# More info on code vs access_token
# http://stackoverflow.com/questions/8666316/facebook-oauth-2-0-code-and-token
access_token = attrs.get('access_token')
code = attrs.get('code')
# Case 1: We received the access_token
if access_token:
tokens_to_parse = {'access_token': access_token}
token = access_token
# For sign in with apple
id_token = attrs.get('id_token')
if id_token:
tokens_to_parse['id_token'] = id_token
# Case 2: We received the authorization code
elif code:
self.set_callback_url(view=view, adapter_class=adapter_class)
self.client_class = getattr(view, 'client_class', None)
if not self.client_class:
raise serializers.ValidationError(
_('Define client_class in view'),
)
provider = adapter.get_provider()
scope = provider.get_scope(request)
client = self.client_class(
request,
app.client_id,
app.secret,
adapter.access_token_method,
adapter.access_token_url,
self.callback_url,
scope,
scope_delimiter=adapter.scope_delimiter,
headers=adapter.headers,
basic_auth=adapter.basic_auth,
)
token = client.get_access_token(code)
access_token = token['access_token']
tokens_to_parse = {'access_token': access_token}
# If available we add additional data to the dictionary
for key in ['refresh_token', 'id_token', adapter.expires_in_key]:
if key in token:
tokens_to_parse[key] = token[key]
else:
raise serializers.ValidationError(
_('Incorrect input. access_token or code is required.'),
)
social_token = adapter.parse_token(tokens_to_parse)
social_token.app = app
try:
login = self.get_social_login(adapter, app, social_token, token)
complete_social_login(request, login)
except HTTPError:
raise serializers.ValidationError(_('Incorrect value'))
if not login.is_existing:
# We have an account already signed up in a different flow
# with the same email address: raise an exception.
# This needs to be handled in the frontend. We can not just
# link up the accounts due to security constraints
if allauth_settings.UNIQUE_EMAIL:
# Do we have an account already with this email address?
account_exists = get_user_model().objects.filter(
email=login.user.email,
).exists()
if account_exists:
raise serializers.ValidationError(
_('User is already registered with this e-mail address.'),
)
login.lookup()
login.save(request, connect=True)
attrs['user'] = login.account.user
return attrs
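# Illustrative usage sketch (not part of this module): SocialLoginSerializer expects the
# view to provide `adapter_class`, `client_class` and, for the code flow, `callback_url`.
# Assuming allauth's Google provider and a SocialLoginView-based view, the wiring
# typically looks like this (the callback URL below is hypothetical):
#
#     from allauth.socialaccount.providers.google.views import GoogleOAuth2Adapter
#     from allauth.socialaccount.providers.oauth2.client import OAuth2Client
#
#     class GoogleLogin(SocialLoginView):
#         adapter_class = GoogleOAuth2Adapter
#         client_class = OAuth2Client
#         callback_url = 'https://example.com/auth/google/callback/'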
class SocialConnectMixin:
def get_social_login(self, *args, **kwargs):
"""
        Set the social login process state to connect rather than login.
Refer to the implementation of get_social_login in base class and to the
allauth.socialaccount.helpers module complete_social_login function.
"""
social_login = super().get_social_login(*args, **kwargs)
social_login.state['process'] = AuthProcess.CONNECT
return social_login
class SocialConnectSerializer(SocialConnectMixin, SocialLoginSerializer):
pass
class RegisterSerializer(serializers.Serializer):
username = serializers.CharField(
max_length=get_username_max_length(),
min_length=allauth_settings.USERNAME_MIN_LENGTH,
required=allauth_settings.USERNAME_REQUIRED,
)
email = serializers.EmailField(required=allauth_settings.EMAIL_REQUIRED)
password1 = serializers.CharField(write_only=True)
password2 = serializers.CharField(write_only=True)
def validate_username(self, username):
username = get_adapter().clean_username(username)
return username
def validate_email(self, email):
email = get_adapter().clean_email(email)
if allauth_settings.UNIQUE_EMAIL:
if email and email_address_exists(email):
raise serializers.ValidationError(
_('A user is already registered with this e-mail address.'),
)
return email
def validate_password1(self, password):
return get_adapter().clean_password(password)
def validate(self, data):
if data['password1'] != data['password2']:
raise serializers.ValidationError(_("The two password fields didn't match."))
return data
def custom_signup(self, request, user):
pass
def get_cleaned_data(self):
return {
'username': self.validated_data.get('username', ''),
'password1': self.validated_data.get('password1', ''),
'email': self.validated_data.get('email', ''),
}
def save(self, request):
adapter = get_adapter()
user = adapter.new_user(request)
self.cleaned_data = self.get_cleaned_data()
user = adapter.save_user(request, user, self, commit=False)
if "password1" in self.cleaned_data:
try:
adapter.clean_password(self.cleaned_data['password1'], user=user)
except DjangoValidationError as exc:
raise serializers.ValidationError(
detail=serializers.as_serializer_error(exc)
)
user.save()
self.custom_signup(request, user)
setup_user_email(request, user, [])
return user
class VerifyEmailSerializer(serializers.Serializer):
key = serializers.CharField()
class ResendEmailVerificationSerializer(serializers.Serializer):
email = serializers.EmailField(required=allauth_settings.EMAIL_REQUIRED)
|
bzl | b40c076204049585d94e0be35126acca6f0f12ee | # Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rule for building a Container image.
In addition to the base container_image rule, we expose its constituents
(attr, outputs, implementation) directly so that others may expose a
more specialized build leveraging the same implementation.
"""
load("@bazel_skylib//lib:dicts.bzl", "dicts")
load(
"@bazel_tools//tools/build_defs/hash:hash.bzl",
_hash_tools = "tools",
_sha256 = "sha256",
)
load(
"@io_bazel_rules_docker//container:providers.bzl",
"ImageInfo",
"LayerInfo",
)
load(
"//container:layer.bzl",
_layer = "layer",
)
load(
"//container:layer_tools.bzl",
_assemble_image = "assemble",
_gen_img_args = "generate_args_for_image",
_get_layers = "get_from_target",
_incr_load = "incremental_load",
_layer_tools = "tools",
)
load(
"//skylib:filetype.bzl",
container_filetype = "container",
)
load(
"//skylib:label.bzl",
_string_to_label = "string_to_label",
)
load(
"//skylib:path.bzl",
_join_path = "join",
)
def _get_base_config(ctx, name, base):
if ctx.files.base or base:
# The base is the first layer in container_parts if provided.
layer = _get_layers(ctx, name, ctx.attr.base, base)
return layer.get("config")
return None
def _get_base_manifest(ctx, name, base):
if ctx.files.base or base:
# The base is the first layer in container_parts if provided.
layer = _get_layers(ctx, name, ctx.attr.base, base)
return layer.get("manifest")
return None
def _add_create_image_config_args(
ctx,
args,
inputs,
manifest,
config,
labels,
entrypoint,
cmd,
null_cmd,
null_entrypoint,
creation_time,
env,
workdir,
layer_names,
base_config,
base_manifest,
architecture,
operating_system,
os_version):
"""
Add args for the create_image_config Go binary.
"""
args.add("-outputConfig", config)
args.add("-outputManifest", manifest)
if null_entrypoint:
args.add("-nullEntryPoint")
if null_cmd:
args.add("-nullCmd")
args.add_all(entrypoint, before_each = "-entrypoint")
args.add_all(cmd, before_each = "-command")
args.add_all(ctx.attr.ports, before_each = "-ports")
args.add_all(ctx.attr.volumes, before_each = "-volumes")
stamp = None
    # If the base image has stamping enabled, it is propagated
    # to child images.
if ctx.attr.stamp == True:
stamp = ctx.attr.stamp
elif ctx.attr.base and ImageInfo in ctx.attr.base:
stamp = ctx.attr.base[ImageInfo].stamp
if creation_time:
args.add("-creationTime", creation_time)
elif stamp:
# If stamping is enabled, and the creation_time is not manually defined,
# default to '{BUILD_TIMESTAMP}'.
args.add("-creationTime", "{BUILD_TIMESTAMP}")
for key, value in labels.items():
args.add("-labels", "{}={}".format(key, value))
for key, value in env.items():
args.add("-env", "%s" % "=".join([
ctx.expand_make_variables("env", key, {}),
ctx.expand_make_variables("env", value, {}),
]))
if ctx.attr.user:
args.add("-user", ctx.attr.user)
if workdir:
args.add("-workdir", workdir)
inputs += layer_names
args.add_all(layer_names, before_each = "-layerDigestFile", format_each = "@%s")
if ctx.attr.label_files:
inputs += ctx.files.label_files
if base_config:
args.add("-baseConfig", base_config)
inputs.append(base_config)
if base_manifest:
args.add("-baseManifest", base_manifest)
inputs.append(base_manifest)
if architecture:
args.add("-architecture", architecture)
if operating_system:
args.add("-operatingSystem", operating_system)
if os_version:
args.add("-osVersion", os_version)
if stamp:
stamp_inputs = [ctx.info_file, ctx.version_file]
args.add_all(stamp_inputs, before_each = "-stampInfoFile")
inputs += stamp_inputs
if ctx.attr.launcher_args and not ctx.attr.launcher:
fail("launcher_args does nothing when launcher is not specified.", attr = "launcher_args")
if ctx.attr.launcher:
args.add("-entrypointPrefix", ctx.file.launcher.basename, format = "/%s")
args.add_all(ctx.attr.launcher_args, before_each = "-entrypointPrefix")
def _format_legacy_label(t):
return ("--labels=%s=%s" % (t[0], t[1]))
def _image_config(
ctx,
name,
layer_names,
entrypoint = None,
cmd = None,
creation_time = None,
env = None,
base_config = None,
base_manifest = None,
architecture = None,
operating_system = None,
os_version = None,
layer_name = None,
workdir = None,
null_entrypoint = False,
null_cmd = False):
"""Create the configuration for a new container image."""
config = ctx.actions.declare_file(name + "." + layer_name + ".config")
manifest = ctx.actions.declare_file(name + "." + layer_name + ".manifest")
label_file_dict = _string_to_label(
ctx.files.label_files,
ctx.attr.label_file_strings,
)
labels = dict()
for label in ctx.attr.labels:
fname = ctx.attr.labels[label]
if fname[0] == "@":
labels[label] = "@" + label_file_dict[fname[1:]].path
else:
labels[label] = fname
args = ctx.actions.args()
inputs = []
executable = None
_add_create_image_config_args(
ctx,
args,
inputs,
manifest,
config,
labels,
entrypoint,
cmd,
null_cmd,
null_entrypoint,
creation_time,
env,
workdir,
layer_names,
base_config,
base_manifest,
architecture,
operating_system,
os_version,
)
ctx.actions.run(
executable = ctx.executable.create_image_config,
arguments = [args],
inputs = inputs,
outputs = [config, manifest],
use_default_shell_env = True,
mnemonic = "ImageConfig",
)
return config, _sha256(ctx, config), manifest, _sha256(ctx, manifest)
def _repository_name(ctx):
"""Compute the repository name for the current rule."""
if ctx.attr.legacy_repository_naming:
# Legacy behavior, off by default.
return _join_path(ctx.attr.repository, ctx.label.package.replace("/", "_"))
# Newer Docker clients support multi-level names, which are a part of
# the v2 registry specification.
return _join_path(ctx.attr.repository, ctx.label.package)
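    # Example: for a rule in package "foo/bar" with the default repository "bazel",
    # legacy naming yields "bazel/foo_bar" while the default yields "bazel/foo/bar".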
def _assemble_image_digest(ctx, name, image, image_tarball, output_digest):
img_args, inputs = _gen_img_args(ctx, image)
args = ctx.actions.args()
args.add_all(img_args)
args.add("--dst", output_digest)
args.add("--format=Docker")
ctx.actions.run(
outputs = [output_digest],
inputs = inputs,
tools = ([image["legacy"]] if image.get("legacy") else []),
executable = ctx.executable._digester,
arguments = [args],
mnemonic = "ImageDigest",
progress_message = "Extracting image digest of %s" % image_tarball.short_path,
)
def _impl(
ctx,
name = None,
base = None,
files = None,
file_map = None,
empty_files = None,
empty_dirs = None,
directory = None,
entrypoint = None,
cmd = None,
creation_time = None,
symlinks = None,
env = None,
layers = None,
compression = None,
compression_options = None,
experimental_tarball_format = None,
debs = None,
tars = None,
architecture = None,
operating_system = None,
os_version = None,
output_executable = None,
output_tarball = None,
output_config = None,
output_config_digest = None,
output_digest = None,
output_layer = None,
workdir = None,
null_cmd = None,
null_entrypoint = None):
"""Implementation for the container_image rule.
You can write a customized container_image rule by writing something like:
load(
"@io_bazel_rules_docker//container:container.bzl",
_container="container",
)
def _impl(ctx):
...
return _container.image.implementation(ctx, ... kwarg overrides ...)
_foo_image = rule(
attrs = _container.image.attrs + {
# My attributes, or overrides of _container.image.attrs defaults.
...
},
executable = True,
outputs = _container.image.outputs,
implementation = _impl,
)
Args:
ctx: The bazel rule context
name: str, overrides ctx.label.name or ctx.attr.name
base: File, overrides ctx.attr.base and ctx.files.base[0]
files: File list, overrides ctx.files.files
file_map: Dict[str, File], defaults to {}
empty_files: str list, overrides ctx.attr.empty_files
empty_dirs: Dict[str, str], overrides ctx.attr.empty_dirs
directory: str, overrides ctx.attr.directory
entrypoint: str List, overrides ctx.attr.entrypoint
cmd: str List, overrides ctx.attr.cmd
creation_time: str, overrides ctx.attr.creation_time
symlinks: str Dict, overrides ctx.attr.symlinks
env: str Dict, overrides ctx.attr.env
layers: label List, overrides ctx.attr.layers
compression: str, overrides ctx.attr.compression
compression_options: str list, overrides ctx.attr.compression_options
experimental_tarball_format: str, overrides ctx.attr.experimental_tarball_format
debs: File list, overrides ctx.files.debs
tars: File list, overrides ctx.files.tars
architecture: str, overrides ctx.attr.architecture
operating_system: Operating system to target (e.g. linux, windows)
os_version: Operating system version to target
output_executable: File to use as output for script to load docker image
output_tarball: File, overrides ctx.outputs.out
output_config: File, overrides ctx.outputs.config
output_config_digest: File, overrides ctx.outputs.config_digest
output_digest: File, overrides ctx.outputs.digest
output_layer: File, overrides ctx.outputs.layer
workdir: str, overrides ctx.attr.workdir
null_cmd: bool, overrides ctx.attr.null_cmd
null_entrypoint: bool, overrides ctx.attr.null_entrypoint
"""
name = name or ctx.label.name
entrypoint = entrypoint or ctx.attr.entrypoint
cmd = cmd or ctx.attr.cmd
architecture = architecture or ctx.attr.architecture
compression = compression or ctx.attr.compression
compression_options = compression_options or ctx.attr.compression_options
experimental_tarball_format = experimental_tarball_format or ctx.attr.experimental_tarball_format
operating_system = operating_system or ctx.attr.operating_system
os_version = os_version or ctx.attr.os_version
creation_time = creation_time or ctx.attr.creation_time
build_executable = output_executable or ctx.outputs.build_script
output_tarball = output_tarball or ctx.outputs.out
output_digest = output_digest or ctx.outputs.digest
output_config = output_config or ctx.outputs.config
output_config_digest = output_config_digest or ctx.outputs.config_digest
output_layer = output_layer or ctx.outputs.layer
build_script = ctx.outputs.build_script
null_cmd = null_cmd or ctx.attr.null_cmd
null_entrypoint = null_entrypoint or ctx.attr.null_entrypoint
# If this target specifies docker_run_flags, they are always used.
# Fall back to the base image's run flags if present, otherwise use the default value.
#
# We do not use the default argument of attrs.string() in order to distinguish between
# an image using the default and an image intentionally overriding the base's run flags.
# Since this is a string attribute, the default value is the empty string.
if ctx.attr.docker_run_flags != "":
docker_run_flags = ctx.attr.docker_run_flags
elif ctx.attr.base and ImageInfo in ctx.attr.base:
docker_run_flags = ctx.attr.base[ImageInfo].docker_run_flags
else:
# Run the container using host networking, so that the service is
# available to the developer without having to poke around with
# docker inspect.
docker_run_flags = "-i --rm --network=host"
if ctx.attr.launcher:
if not file_map:
file_map = {}
file_map["/" + ctx.file.launcher.basename] = ctx.file.launcher
# composite a layer from the container_image rule attrs,
image_layer = _layer.implementation(
ctx = ctx,
name = name,
files = files,
file_map = file_map,
empty_files = empty_files,
empty_dirs = empty_dirs,
directory = directory,
symlinks = symlinks,
compression = compression,
compression_options = compression_options,
debs = debs,
tars = tars,
env = env,
operating_system = operating_system,
output_layer = output_layer,
)
layer_providers = layers or ctx.attr.layers
layers = [provider[LayerInfo] for provider in layer_providers] + image_layer
# Get the layers and shas from our base.
# These are ordered as they'd appear in the v2.2 config,
# so they grow at the end.
parent_parts = _get_layers(ctx, name, ctx.attr.base, base)
zipped_layers = parent_parts.get("zipped_layer", []) + [layer.zipped_layer for layer in layers]
shas = parent_parts.get("blobsum", []) + [layer.blob_sum for layer in layers]
unzipped_layers = parent_parts.get("unzipped_layer", []) + [layer.unzipped_layer for layer in layers]
layer_diff_ids = [layer.diff_id for layer in layers]
diff_ids = parent_parts.get("diff_id", []) + layer_diff_ids
new_files = [f for f in file_map or []]
new_emptyfiles = empty_files or []
new_symlinks = [f for f in symlinks or []]
parent_transitive_files = parent_parts.get("transitive_files", depset())
transitive_files = depset(new_files + new_emptyfiles + new_symlinks, transitive = [parent_transitive_files])
# Get the config for the base layer
config_file = _get_base_config(ctx, name, base)
config_digest = None
# Get the manifest for the base layer
manifest_file = _get_base_manifest(ctx, name, base)
manifest_digest = None
# Generate the new config layer by layer, using the attributes specified and the diff_id
for i, layer in enumerate(layers):
config_file, config_digest, manifest_file, manifest_digest = _image_config(
ctx,
name = name,
layer_names = [layer_diff_ids[i]],
entrypoint = entrypoint,
cmd = cmd,
creation_time = creation_time,
env = layer.env,
base_config = config_file,
base_manifest = manifest_file,
architecture = architecture,
operating_system = operating_system,
os_version = os_version,
layer_name = str(i),
workdir = workdir or ctx.attr.workdir,
null_entrypoint = null_entrypoint,
null_cmd = null_cmd,
)
# Construct a temporary name based on the build target.
tag_name = "{}:{}".format(_repository_name(ctx), name)
# These are the constituent parts of the Container image, which each
# rule in the chain must preserve.
container_parts = {
# A list of paths to the layer digests.
"blobsum": shas,
# The path to the v2.2 configuration file.
"config": config_file,
"config_digest": config_digest,
# A list of paths to the layer diff_ids.
"diff_id": diff_ids,
# The File containing digest of the image.
"digest": output_digest,
# At the root of the chain, we support deriving from a tarball
# base image.
"legacy": parent_parts.get("legacy"),
# The path to the v2.2 manifest file.
"manifest": manifest_file,
"manifest_digest": manifest_digest,
# Keep track of all files/emptyfiles/symlinks that we have already added to the image layers.
"transitive_files": transitive_files,
# A list of paths to the layer .tar files
"unzipped_layer": unzipped_layers,
# A list of paths to the layer .tar.gz files
"zipped_layer": zipped_layers,
}
# We support incrementally loading or assembling this single image
# with a temporary name given by its build rule.
images = {
tag_name: container_parts,
}
_incr_load(
ctx,
images,
build_executable,
run = not ctx.attr.legacy_run_behavior,
run_flags = docker_run_flags,
)
_assemble_image(
ctx,
images,
output_tarball,
experimental_tarball_format,
)
_assemble_image_digest(ctx, name, container_parts, output_tarball, output_digest)
# Copy config file and its sha file for usage in tests
ctx.actions.run_shell(
outputs = [output_config],
inputs = [config_file],
command = "cp %s %s" % (config_file.path, output_config.path),
)
ctx.actions.run_shell(
outputs = [output_config_digest],
inputs = [config_digest],
command = "cp %s %s" % (config_digest.path, output_config_digest.path),
)
runfiles = ctx.runfiles(
files = unzipped_layers + diff_ids + [config_file, config_digest, output_config_digest] +
([container_parts["legacy"]] if container_parts["legacy"] else []),
)
    # The stamp attribute needs to be propagated between definitions so that actions
    # can properly determine whether the root image has stamping activated.
#
# This covers the following example case:
# container_image(
    #     name = "base_image",
    #     base = "@base//image",
    #     stamp = True,
    # )
    #
    # lang_image(
    #     base = ":base_image",
# )
stamp = None
if ctx.attr.stamp:
stamp = ctx.attr.stamp
elif ctx.attr.base and ImageInfo in ctx.attr.base:
stamp = ctx.attr.base[ImageInfo].stamp
return [
ImageInfo(
container_parts = container_parts,
legacy_run_behavior = ctx.attr.legacy_run_behavior,
docker_run_flags = docker_run_flags,
stamp = stamp,
),
DefaultInfo(
executable = build_executable,
files = depset([output_layer]),
runfiles = runfiles,
),
coverage_common.instrumented_files_info(
ctx,
dependency_attributes = ["files"],
),
]
_attrs = dicts.add(_layer.attrs, {
"architecture": attr.string(
doc = "The desired CPU architecture to be used as label in the container image.",
default = "amd64",
),
"base": attr.label(
allow_files = container_filetype,
doc = "The base layers on top of which to overlay this layer, equivalent to FROM.",
),
"cmd": attr.string_list(
doc = """List of commands to execute in the image.
See https://docs.docker.com/engine/reference/builder/#cmd
The behavior between using `""` and `[]` may differ.
Please see [#1448](https://github.com/bazelbuild/rules_docker/issues/1448)
for more details.
Set `cmd` to `None`, `[]` or `""` will set the `Cmd` of the image to be
`null`.
This field supports stamp variables.""",
),
"compression": attr.string(
default = "gzip",
doc = """Compression method for image layer. Currently only gzip is supported.
This affects the compressed layer, which is used by the `container_push` rule.
It doesn't affect the layers specified by the `layers` attribute.""",
),
"compression_options": attr.string_list(
doc = """Command-line options for the compression tool. Possible values depend on `compression` method.
This affects the compressed layer, which is used by the `container_push` rule.
It doesn't affect the layers specified by the `layers` attribute.""",
),
"create_image_config": attr.label(
default = Label("//container/go/cmd/create_image_config:create_image_config"),
cfg = "host",
executable = True,
allow_files = True,
),
"creation_time": attr.string(
doc = """The image's creation timestamp.
Acceptable formats: Integer or floating point seconds since Unix Epoch, RFC 3339 date/time.
This field supports stamp variables.
If not set, defaults to {BUILD_TIMESTAMP} when stamp = True, otherwise 0""",
),
"docker_run_flags": attr.string(
doc = """Optional flags to use with `docker run` command.
Only used when `legacy_run_behavior` is set to `False`.""",
),
"entrypoint": attr.string_list(
doc = """List of entrypoints to add in the image.
See https://docs.docker.com/engine/reference/builder/#entrypoint
Set `entrypoint` to `None`, `[]` or `""` will set the `Entrypoint` of the image
to be `null`.
The behavior between using `""` and `[]` may differ.
Please see [#1448](https://github.com/bazelbuild/rules_docker/issues/1448)
for more details.
This field supports stamp variables.""",
),
"experimental_tarball_format": attr.string(
values = [
"legacy",
"compressed",
],
default = "legacy",
doc = ("The tarball format to use when producing an image .tar file. " +
"Defaults to \"legacy\", which contains uncompressed layers. " +
"If set to \"compressed\", the resulting tarball will contain " +
"compressed layers, but is only loadable by newer versions of " +
"docker. This is an experimental attribute, which is subject " +
"to change or removal: do not depend on its exact behavior."),
),
"label_file_strings": attr.string_list(),
# Implicit/Undocumented dependencies.
"label_files": attr.label_list(
allow_files = True,
),
"labels": attr.string_dict(
doc = """Dictionary from custom metadata names to their values.
See https://docs.docker.com/engine/reference/builder/#label
You can also put a file name prefixed by '@' as a value.
Then the value is replaced with the contents of the file.
Example:
labels = {
"com.example.foo": "bar",
"com.example.baz": "@metadata.json",
...
},
The values of this field support stamp variables.""",
),
"launcher": attr.label(
allow_single_file = True,
doc = """If present, prefix the image's ENTRYPOINT with this file.
Note that the launcher should be a container-compatible (OS & Arch)
single executable file without any runtime dependencies (as none
of its runfiles will be included in the image).""",
),
"launcher_args": attr.string_list(
default = [],
doc = """Optional arguments for the `launcher` attribute.
Only valid when `launcher` is specified.""",
),
"layers": attr.label_list(
doc = """List of `container_layer` targets.
The data from each `container_layer` will be part of container image,
and the environment variable will be available in the image as well.""",
providers = [LayerInfo],
),
"legacy_repository_naming": attr.bool(
default = False,
doc = """Whether to use the legacy strategy for setting the repository name
embedded in the resulting tarball.
e.g. `bazel/{target.replace('/', '_')}` vs. `bazel/{target}`""",
),
"legacy_run_behavior": attr.bool(
# TODO(mattmoor): Default this to False.
default = True,
doc = ("If set to False, `bazel run` will directly invoke `docker run` " +
"with flags specified in the `docker_run_flags` attribute. " +
"Note that it defaults to False when using <lang>_image rules."),
),
# null_cmd and null_entrypoint are hidden attributes from users.
# They are needed because specifying cmd or entrypoint as {None, [] or ""}
# and not specifying them at all in the container_image rule would both make
# ctx.attr.cmd or ctx.attr.entrypoint to be [].
# We need these flags to distinguish them.
"null_cmd": attr.bool(default = False),
"null_entrypoint": attr.bool(default = False),
"os_version": attr.string(
doc = "The desired OS version to be used in the container image config.",
),
# Starlark doesn't support int_list...
"ports": attr.string_list(
doc = """List of ports to expose.
See https://docs.docker.com/engine/reference/builder/#expose""",
),
"repository": attr.string(
default = "bazel",
doc = """The repository for the default tag for the image.
Images generated by `container_image` are tagged by default to
`bazel/package_name:target` for a `container_image` target at
`//package/name:target`.
Setting this attribute to `gcr.io/dummy` would set the default tag to
`gcr.io/dummy/package_name:target`.""",
),
"stamp": attr.bool(
default = False,
doc = """If true, enable use of workspace status variables
(e.g. `BUILD_USER`, `BUILD_EMBED_LABEL`,
and custom values set using `--workspace_status_command`)
in tags.
These fields are specified in attributes using Python format
syntax, e.g. `foo{BUILD_USER}bar`.""",
),
"user": attr.string(
doc = """The user that the image should run as.
See https://docs.docker.com/engine/reference/builder/#user
Because building the image never happens inside a Docker container,
this user does not affect the other actions (e.g., adding files).
This field supports stamp variables.""",
),
"volumes": attr.string_list(
doc = """List of volumes to mount.
See https://docs.docker.com/engine/reference/builder/#volumes""",
),
"workdir": attr.string(
doc = """Initial working directory when running the Docker image.
See https://docs.docker.com/engine/reference/builder/#workdir
Because building the image never happens inside a Docker container,
this working directory does not affect the other actions (e.g., adding files).
This field supports stamp variables.""",
),
"_digester": attr.label(
default = "//container/go/cmd/digester",
cfg = "host",
executable = True,
),
}, _hash_tools, _layer_tools)
_outputs = dict(_layer.outputs)
_outputs["out"] = "%{name}.tar"
_outputs["digest"] = "%{name}.digest"
_outputs["config"] = "%{name}.json"
_outputs["config_digest"] = "%{name}.json.sha256"
_outputs["build_script"] = "%{name}.executable"
image = struct(
attrs = _attrs,
outputs = _outputs,
implementation = _impl,
)
container_image_ = rule(
attrs = _attrs,
doc = "Called by the `container_image` macro with **kwargs, see below",
executable = True,
outputs = _outputs,
toolchains = ["@io_bazel_rules_docker//toolchains/docker:toolchain_type"],
implementation = _impl,
)
# This validates the two forms of value accepted by
# ENTRYPOINT and CMD, turning them into a canonical
# python list form.
#
# The Dockerfile construct:
# ENTRYPOINT "/foo" for Linux:
# Results in:
# "Entrypoint": [
# "/bin/sh",
# "-c",
# "\"/foo\""
# ],
# ENTRYPOINT "foo" for Windows:
# Results in:
# "Entrypoint": [
# "%WinDir%\system32\cmd.exe",
# "/c",
# "\"foo\""
# ],
# Whereas:
# ENTRYPOINT ["/foo", "a"]
# Results in:
# "Entrypoint": [
# "/foo",
# "a"
# ],
# NOTE: prefacing a command with 'exec' just ends up with the former
def _validate_command(name, argument, operating_system):
if type(argument) == type(""):
if (operating_system == "windows"):
return ["%WinDir%\\system32\\cmd.exe", "/c", argument]
else:
return ["/bin/sh", "-c", argument]
elif type(argument) == type([]):
return argument
elif argument:
fail("The %s attribute must be a string or list, if specified." % name)
else:
return None
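    # Illustrative examples of the normalization above:
    #   _validate_command("cmd", "echo hi", "linux")   -> ["/bin/sh", "-c", "echo hi"]
    #   _validate_command("cmd", ["echo", "hi"], None) -> ["echo", "hi"]
    #   _validate_command("cmd", None, None)           -> None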
def container_image(**kwargs):
"""Package a docker image.
Produces a new container image tarball compatible with 'docker load', which
is a single additional layer atop 'base'. The goal is to have relatively
    complete support for building container images, from the Dockerfile spec.
For more information see the 'Config' section of the image specification:
https://github.com/opencontainers/image-spec/blob/v0.2.0/serialization.md
Only 'name' is required. All other fields have sane defaults.
container_image(
name="...",
visibility="...",
# The base layers on top of which to overlay this layer,
# equivalent to FROM.
base="//another/build:rule",
# The base directory of the files, defaulted to
# the package of the input.
# All files structure relatively to that path will be preserved.
      # A leading '/' means the workspace root and this path is relative
# to the current package by default.
data_path="...",
# The directory in which to expand the specified files,
# defaulting to '/'.
# Only makes sense accompanying one of files/tars/debs.
directory="...",
# The set of archives to expand, or packages to install
# within the chroot of this layer
files=[...],
tars=[...],
debs=[...],
# The set of symlinks to create within a given layer.
symlinks = {
"/path/to/link": "/path/to/target",
...
},
# Other layers built from container_layer rule
layers = [":c-lang-layer", ":java-lang-layer", ...]
# https://docs.docker.com/engine/reference/builder/#entrypoint
entrypoint="...", or
entrypoint=[...], -- exec form
Set entrypoint to None, [] or "" will set the Entrypoint of the image to
be null.
# https://docs.docker.com/engine/reference/builder/#cmd
cmd="...", or
cmd=[...], -- exec form
Set cmd to None, [] or "" will set the Cmd of the image to be null.
# https://docs.docker.com/engine/reference/builder/#expose
ports=[...],
# https://docs.docker.com/engine/reference/builder/#user
# NOTE: the normal directive affects subsequent RUN, CMD,
# and ENTRYPOINT
user="...",
# https://docs.docker.com/engine/reference/builder/#volume
volumes=[...],
# https://docs.docker.com/engine/reference/builder/#workdir
# NOTE: the normal directive affects subsequent RUN, CMD,
# ENTRYPOINT, ADD, and COPY, but this attribute only affects
# the entry point.
workdir="...",
# https://docs.docker.com/engine/reference/builder/#env
env = {
"var1": "val1",
"var2": "val2",
...
"varN": "valN",
},
# Compression method and command-line options.
compression = "gzip",
compression_options = ["--fast"],
experimental_tarball_format = "compressed",
)
This rule generates a sequence of genrules the last of which is named 'name',
so the dependency graph works out properly. The output of this rule is a
tarball compatible with 'docker save/load' with the structure:
{layer-name}:
layer.tar
VERSION
json
{image-config-sha256}.json
...
manifest.json
repositories
top # an implementation detail of our rules, not consumed by Docker.
This rule appends a single new layer to the tarball of this form provided
via the 'base' parameter.
The images produced by this rule are always named `bazel/tmp:latest` when
loaded (an internal detail). The expectation is that the images produced
by these rules will be uploaded using the `docker_push` rule below.
The implicit output targets are:
- `[name].tar`: A full Docker image containing all the layers, identical to
what `docker save` would return. This is only generated on demand.
- `[name].digest`: An image digest that can be used to refer to that image. Unlike tags,
digest references are immutable i.e. always refer to the same content.
- `[name]-layer.tar`: A Docker image containing only the layer corresponding to
that target. It is used for incremental loading of the layer.
**Note:** this target is not suitable for direct consumption.
It is used for incremental loading and non-docker rules should
depend on the Docker image (`[name].tar`) instead.
- `[name]`: The incremental image loader. It will load only changed
layers inside the Docker registry.
This rule references the `@io_bazel_rules_docker//toolchains/docker:toolchain_type`.
See [How to use the Docker Toolchain](toolchains/docker/readme.md#how-to-use-the-docker-toolchain) for details.
Args:
**kwargs: Attributes are described by `container_image_` above.
"""
operating_system = None
if ("operating_system" in kwargs):
operating_system = kwargs["operating_system"]
if operating_system != "linux" and operating_system != "windows":
fail(
"invalid operating_system(%s) specified. Must be 'linux' or 'windows'" % operating_system,
attr = operating_system,
)
reserved_attrs = [
"label_files",
"label_file_strings",
"null_cmd",
"null_entrypoint",
]
for reserved in reserved_attrs:
if reserved in kwargs:
fail("reserved for internal use by container_image macro", attr = reserved)
if "labels" in kwargs:
files = sorted({v[1:]: None for v in kwargs["labels"].values() if v[0] == "@"}.keys())
kwargs["label_files"] = files
kwargs["label_file_strings"] = files
# If cmd is set but set to None, [] or "",
# we interpret it as users want to set it to null.
if "cmd" in kwargs:
if not kwargs["cmd"]:
kwargs["null_cmd"] = True
# _impl defines "cmd" as string_list. Turn "" into [] before
# passing to it.
if kwargs["cmd"] == "":
kwargs["cmd"] = []
else:
kwargs["cmd"] = _validate_command("cmd", kwargs["cmd"], operating_system)
# If entrypoint is set but set to None, [] or "",
# we interpret it as users want to set it to null.
if "entrypoint" in kwargs:
if not kwargs["entrypoint"]:
kwargs["null_entrypoint"] = True
# _impl defines "entrypoint" as string_list. Turn "" into [] before
# passing to it.
if kwargs["entrypoint"] == "":
kwargs["entrypoint"] = []
else:
kwargs["entrypoint"] = _validate_command("entrypoint", kwargs["entrypoint"], operating_system)
container_image_(**kwargs)
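# Illustrative BUILD usage sketch (not from the docs above; the base image label and the
# ":app" target are hypothetical and would normally come from container_pull / your build):
#
#     container_image(
#         name = "app_image",
#         base = "@ubuntu_base//image",
#         files = [":app"],
#         entrypoint = ["/app"],
#     )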
|
py | b40c085c437adf40285fd5f3120a2656d2aad5e5 | import warnings
from abc import ABC, abstractmethod
from typing import Dict, Generator, Optional, Union
import numpy as np
import torch as th
from gym import spaces
try:
# Check memory used by replay buffer when possible
import psutil
except ImportError:
psutil = None
from stable_baselines3.common.preprocessing import get_action_dim, get_obs_shape
from stable_baselines3.common.type_aliases import ReplayBufferSamples, RolloutBufferSamples
from stable_baselines3.common.vec_env import VecNormalize
class BaseBuffer(ABC):
"""
Base class that represent a buffer (rollout or replay)
    :param buffer_size: Max number of elements in the buffer
:param observation_space: Observation space
:param action_space: Action space
:param device: PyTorch device
to which the values will be converted
:param n_envs: Number of parallel environments
"""
def __init__(
self,
buffer_size: int,
observation_space: spaces.Space,
action_space: spaces.Space,
device: Union[th.device, str] = "cpu",
n_envs: int = 1,
):
super(BaseBuffer, self).__init__()
self.buffer_size = buffer_size
self.observation_space = observation_space
self.action_space = action_space
self.obs_shape = get_obs_shape(observation_space)
self.action_dim = get_action_dim(action_space)
self.pos = 0
self.full = False
self.device = device
self.n_envs = n_envs
@staticmethod
def swap_and_flatten(arr: np.ndarray) -> np.ndarray:
"""
Swap and then flatten axes 0 (buffer_size) and 1 (n_envs)
        to convert shape from [n_steps, n_envs, ...] (where ... is the shape of the features)
        to [n_steps * n_envs, ...] (which maintains the order)
:param arr:
:return:
"""
shape = arr.shape
if len(shape) < 3:
shape = shape + (1,)
return arr.swapaxes(0, 1).reshape(shape[0] * shape[1], *shape[2:])
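        # Illustrative example: an input of shape (n_steps=5, n_envs=2, obs_dim=3) comes
        # out with shape (10, 3), ordered env by env (all 5 steps of env 0, then env 1).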
def size(self) -> int:
"""
:return: The current size of the buffer
"""
if self.full:
return self.buffer_size
return self.pos
def add(self, *args, **kwargs) -> None:
"""
Add elements to the buffer.
"""
raise NotImplementedError()
def extend(self, *args, **kwargs) -> None:
"""
Add a new batch of transitions to the buffer
"""
# Do a for loop along the batch axis
for data in zip(*args):
self.add(*data)
def reset(self) -> None:
"""
Reset the buffer.
"""
self.pos = 0
self.full = False
def sample(self, batch_size: int, env: Optional[VecNormalize] = None):
"""
        :param batch_size: Number of elements to sample
:param env: associated gym VecEnv
to normalize the observations/rewards when sampling
:return:
"""
upper_bound = self.buffer_size if self.full else self.pos
batch_inds = np.random.randint(0, upper_bound, size=batch_size)
return self._get_samples(batch_inds, env=env)
@abstractmethod
def _get_samples(
self, batch_inds: np.ndarray, env: Optional[VecNormalize] = None
) -> Union[ReplayBufferSamples, RolloutBufferSamples]:
"""
:param batch_inds:
:param env:
:return:
"""
raise NotImplementedError()
def to_torch(self, array: np.ndarray, copy: bool = True) -> th.Tensor:
"""
Convert a numpy array to a PyTorch tensor.
Note: it copies the data by default
:param array:
:param copy: Whether to copy or not the data
            (may be useful to avoid changing things by reference)
:return:
"""
if copy:
return th.tensor(array).to(self.device)
return th.as_tensor(array).to(self.device)
@staticmethod
def _normalize_obs(
obs: Union[np.ndarray, Dict[str, np.ndarray]], env: Optional[VecNormalize] = None
) -> Union[np.ndarray, Dict[str, np.ndarray]]:
if env is not None:
return env.normalize_obs(obs)
return obs
@staticmethod
def _normalize_reward(reward: np.ndarray, env: Optional[VecNormalize] = None) -> np.ndarray:
if env is not None:
return env.normalize_reward(reward).astype(np.float32)
return reward
class ReplayBuffer(BaseBuffer):
"""
Replay buffer used in off-policy algorithms like SAC/TD3.
    :param buffer_size: Max number of elements in the buffer
:param observation_space: Observation space
:param action_space: Action space
:param device:
:param n_envs: Number of parallel environments
:param optimize_memory_usage: Enable a memory efficient variant
        of the replay buffer which reduces the memory used by almost a factor of two,
        at the cost of more complexity.
See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195
and https://github.com/DLR-RM/stable-baselines3/pull/28#issuecomment-637559274
"""
def __init__(
self,
buffer_size: int,
observation_space: spaces.Space,
action_space: spaces.Space,
device: Union[th.device, str] = "cpu",
n_envs: int = 1,
optimize_memory_usage: bool = False,
):
super(ReplayBuffer, self).__init__(buffer_size, observation_space, action_space, device, n_envs=n_envs)
        assert n_envs == 1, "Replay buffer only supports a single environment for now"
# Check that the replay buffer can fit into the memory
if psutil is not None:
mem_available = psutil.virtual_memory().available
self.optimize_memory_usage = optimize_memory_usage
self.observations = np.zeros((self.buffer_size, self.n_envs) + self.obs_shape, dtype=observation_space.dtype)
if optimize_memory_usage:
# `observations` contains also the next observation
self.next_observations = None
else:
self.next_observations = np.zeros((self.buffer_size, self.n_envs) + self.obs_shape, dtype=observation_space.dtype)
self.actions = np.zeros((self.buffer_size, self.n_envs, self.action_dim), dtype=action_space.dtype)
self.rewards = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
self.dones = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
if psutil is not None:
total_memory_usage = self.observations.nbytes + self.actions.nbytes + self.rewards.nbytes + self.dones.nbytes
if self.next_observations is not None:
total_memory_usage += self.next_observations.nbytes
if total_memory_usage > mem_available:
# Convert to GB
total_memory_usage /= 1e9
mem_available /= 1e9
warnings.warn(
"This system does not have apparently enough memory to store the complete "
f"replay buffer {total_memory_usage:.2f}GB > {mem_available:.2f}GB"
)
def add(self, obs: np.ndarray, next_obs: np.ndarray, action: np.ndarray, reward: np.ndarray, done: np.ndarray) -> None:
# Copy to avoid modification by reference
self.observations[self.pos] = np.array(obs).copy()
if self.optimize_memory_usage:
self.observations[(self.pos + 1) % self.buffer_size] = np.array(next_obs).copy()
else:
self.next_observations[self.pos] = np.array(next_obs).copy()
self.actions[self.pos] = np.array(action).copy()
self.rewards[self.pos] = np.array(reward).copy()
self.dones[self.pos] = np.array(done).copy()
self.pos += 1
if self.pos == self.buffer_size:
self.full = True
self.pos = 0
def sample(self, batch_size: int, env: Optional[VecNormalize] = None) -> ReplayBufferSamples:
"""
Sample elements from the replay buffer.
Custom sampling when using memory efficient variant,
as we should not sample the element with index `self.pos`
See https://github.com/DLR-RM/stable-baselines3/pull/28#issuecomment-637559274
        :param batch_size: Number of elements to sample
:param env: associated gym VecEnv
to normalize the observations/rewards when sampling
:return:
"""
if not self.optimize_memory_usage:
return super().sample(batch_size=batch_size, env=env)
        # Do not sample the element with index `self.pos` as the transition is invalid
# (we use only one array to store `obs` and `next_obs`)
if self.full:
batch_inds = (np.random.randint(1, self.buffer_size, size=batch_size) + self.pos) % self.buffer_size
else:
batch_inds = np.random.randint(0, self.pos, size=batch_size)
return self._get_samples(batch_inds, env=env)
def _get_samples(self, batch_inds: np.ndarray, env: Optional[VecNormalize] = None) -> ReplayBufferSamples:
if self.optimize_memory_usage:
next_obs = self._normalize_obs(self.observations[(batch_inds + 1) % self.buffer_size, 0, :], env)
else:
next_obs = self._normalize_obs(self.next_observations[batch_inds, 0, :], env)
data = (
self._normalize_obs(self.observations[batch_inds, 0, :], env),
self.actions[batch_inds, 0, :],
next_obs,
self.dones[batch_inds],
self._normalize_reward(self.rewards[batch_inds], env),
)
return ReplayBufferSamples(*tuple(map(self.to_torch, data)))
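# Illustrative ReplayBuffer usage sketch (not part of the library; `env`, `obs`, `action`,
# etc. are assumed to come from a single gym environment loop):
#     buffer = ReplayBuffer(10_000, env.observation_space, env.action_space, device="cpu")
#     buffer.add(obs, next_obs, action, reward, done)
#     batch = buffer.sample(batch_size=256)  # ReplayBufferSamples of torch tensors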
class PrioritizedReplayBuffer(BaseBuffer):
"""
    Prioritized replay buffer (proportional sampling) used in off-policy algorithms like SAC/TD3.
    :param buffer_size: Max number of elements in the buffer
:param observation_space: Observation space
:param action_space: Action space
:param device:
:param n_envs: Number of parallel environments
:param optimize_memory_usage: Enable a memory efficient variant
        of the replay buffer which reduces the memory used by almost a factor of two,
        at the cost of more complexity.
See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195
and https://github.com/DLR-RM/stable-baselines3/pull/28#issuecomment-637559274
"""
def __init__(
self,
buffer_size: int,
observation_space: spaces.Space,
action_space: spaces.Space,
device: Union[th.device, str] = "cpu",
n_envs: int = 1,
optimize_memory_usage: bool = False,
):
super(PrioritizedReplayBuffer, self).__init__(buffer_size, observation_space, action_space, device, n_envs=n_envs)
        assert n_envs == 1, "Replay buffer only supports a single environment for now"
self.priorities1 = np.zeros(self.buffer_size)
self.max_prio1 = 1.0
self.priorities2 = np.zeros(self.buffer_size)
self.max_prio2 = 1.0
# Check that the replay buffer can fit into the memory
if psutil is not None:
mem_available = psutil.virtual_memory().available
self.optimize_memory_usage = optimize_memory_usage
self.observations = np.zeros((self.buffer_size, self.n_envs) + self.obs_shape, dtype=observation_space.dtype)
if optimize_memory_usage:
# `observations` contains also the next observation
self.next_observations = None
else:
self.next_observations = np.zeros((self.buffer_size, self.n_envs) + self.obs_shape, dtype=observation_space.dtype)
self.actions = np.zeros((self.buffer_size, self.n_envs, self.action_dim), dtype=action_space.dtype)
self.rewards = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
self.dones = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
if psutil is not None:
total_memory_usage = self.observations.nbytes + self.actions.nbytes + self.rewards.nbytes + self.dones.nbytes
if self.next_observations is not None:
total_memory_usage += self.next_observations.nbytes
if total_memory_usage > mem_available:
# Convert to GB
total_memory_usage /= 1e9
mem_available /= 1e9
warnings.warn(
"This system does not have apparently enough memory to store the complete "
f"replay buffer {total_memory_usage:.2f}GB > {mem_available:.2f}GB"
)
def add(self, obs: np.ndarray, next_obs: np.ndarray, action: np.ndarray, reward: np.ndarray, done: np.ndarray) -> None:
# Copy to avoid modification by reference
self.observations[self.pos] = np.array(obs).copy()
if self.optimize_memory_usage:
self.observations[(self.pos + 1) % self.buffer_size] = np.array(next_obs).copy()
else:
self.next_observations[self.pos] = np.array(next_obs).copy()
self.actions[self.pos] = np.array(action).copy()
self.rewards[self.pos] = np.array(reward).copy()
self.dones[self.pos] = np.array(done).copy()
        # Give the transition just stored (still at index self.pos) the current maximum
        # priority so it is sampled at least once before its priority gets updated.
        self.priorities1[self.pos] = self.max_prio1
        self.priorities2[self.pos] = self.max_prio2
        self.pos += 1
        if self.pos == self.buffer_size:
            self.full = True
            self.pos = 0
def sample(self, batch_size: int, env: Optional[VecNormalize] = None):
"""
Sample elements from the replay buffer.
Custom sampling when using memory efficient variant,
as we should not sample the element with index `self.pos`
See https://github.com/DLR-RM/stable-baselines3/pull/28#issuecomment-637559274
        :param batch_size: Number of elements to sample
:param env: associated gym VecEnv
to normalize the observations/rewards when sampling
:return:
"""
#if not self.optimize_memory_usage:
# print("ok!")
# return super().sample(batch_size=batch_size, env=env)
        # Do not sample the element with index `self.pos` as the transition is invalid
# (we use only one array to store `obs` and `next_obs`)
if self.full:
batch_inds0 = (np.random.randint(1, self.buffer_size, size=batch_size) + self.pos) % self.buffer_size
not_pos = False
while not_pos == False:
probs1 = self.priorities1/np.sum(self.priorities1)
                #print(probs1.shape)
                #print(np.arange(self.buffer_size).shape)
batch_inds1 = np.random.choice(self.buffer_size, batch_size, p=probs1)
if self.pos not in batch_inds1:
not_pos = True
not_pos = False
while not_pos == False:
probs2 = self.priorities2/np.sum(self.priorities2)
batch_inds2 = np.random.choice(self.buffer_size, batch_size, p=probs2)
if self.pos not in batch_inds2:
not_pos = True
else:
batch_inds0 = np.random.randint(0, self.pos, size=batch_size)
probs1 = self.priorities1/np.sum(self.priorities1)
#print(probs1.shape)
#print(np.arange(self.buffer_size).shape)
batch_inds1 = np.random.choice(self.buffer_size, batch_size, p=probs1)
probs2 = self.priorities2/np.sum(self.priorities2)
batch_inds2 = np.random.choice(self.buffer_size, batch_size, p=probs2)
selected_probs1 = probs1[batch_inds1]
selected_probs2 = probs2[batch_inds2]
return (self._get_samples(batch_inds1, env=env), batch_inds1, selected_probs1, self._get_samples(batch_inds2, env=env), batch_inds2, selected_probs2, self._get_samples(batch_inds0, env=env))
def update_priorities1(self, batch_indices, batch_priorities):
for idx, prio in zip(batch_indices, batch_priorities):
self.priorities1[idx] = abs(prio)
if abs(prio) > self.max_prio1:
self.max_prio1 = abs(prio)
def update_priorities2(self, batch_indices, batch_priorities):
for idx, prio in zip(batch_indices, batch_priorities):
self.priorities2[idx] = abs(prio)
if abs(prio) > self.max_prio2:
self.max_prio2 = abs(prio)
def _get_samples(self, batch_inds: np.ndarray, env: Optional[VecNormalize] = None) -> ReplayBufferSamples:
if self.optimize_memory_usage:
next_obs = self._normalize_obs(self.observations[(batch_inds + 1) % self.buffer_size, 0, :], env)
else:
next_obs = self._normalize_obs(self.next_observations[batch_inds, 0, :], env)
data = (
self._normalize_obs(self.observations[batch_inds, 0, :], env),
self.actions[batch_inds, 0, :],
next_obs,
self.dones[batch_inds],
self._normalize_reward(self.rewards[batch_inds], env),
)
return ReplayBufferSamples(*tuple(map(self.to_torch, data)))
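# Illustrative PrioritizedReplayBuffer usage sketch (not part of the library): `sample`
# returns two priority-weighted batches plus one uniform batch, and the caller is expected
# to push new |TD-error| priorities back after each gradient step:
#     s1, idx1, p1, s2, idx2, p2, s_uniform = buffer.sample(batch_size=256)
#     buffer.update_priorities1(idx1, np.abs(td_errors1))
#     buffer.update_priorities2(idx2, np.abs(td_errors2))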
class RolloutBuffer(BaseBuffer):
"""
Rollout buffer used in on-policy algorithms like A2C/PPO.
It corresponds to ``buffer_size`` transitions collected
using the current policy.
This experience will be discarded after the policy update.
In order to use PPO objective, we also store the current value of each state
and the log probability of each taken action.
The term rollout here refers to the model-free notion and should not
be used with the concept of rollout used in model-based RL or planning.
Hence, it is only involved in policy and value function training but not action selection.
:param buffer_size: Max number of element in the buffer
:param observation_space: Observation space
:param action_space: Action space
    :param device: PyTorch device on which the tensors will be created
:param gae_lambda: Factor for trade-off of bias vs variance for Generalized Advantage Estimator
Equivalent to classic advantage when set to 1.
:param gamma: Discount factor
:param n_envs: Number of parallel environments
"""
def __init__(
self,
buffer_size: int,
observation_space: spaces.Space,
action_space: spaces.Space,
device: Union[th.device, str] = "cpu",
gae_lambda: float = 1,
gamma: float = 0.99,
n_envs: int = 1,
):
super(RolloutBuffer, self).__init__(buffer_size, observation_space, action_space, device, n_envs=n_envs)
self.gae_lambda = gae_lambda
self.gamma = gamma
self.observations, self.actions, self.rewards, self.advantages = None, None, None, None
self.returns, self.dones, self.values, self.log_probs = None, None, None, None
self.generator_ready = False
self.reset()
def reset(self) -> None:
self.observations = np.zeros((self.buffer_size, self.n_envs) + self.obs_shape, dtype=np.float32)
self.actions = np.zeros((self.buffer_size, self.n_envs, self.action_dim), dtype=np.float32)
self.rewards = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
self.returns = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
self.dones = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
self.values = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
self.log_probs = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
self.advantages = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
self.generator_ready = False
super(RolloutBuffer, self).reset()
def compute_returns_and_advantage(self, last_values: th.Tensor, dones: np.ndarray) -> None:
"""
Post-processing step: compute the returns (sum of discounted rewards)
and GAE advantage.
Adapted from Stable-Baselines PPO2.
Uses Generalized Advantage Estimation (https://arxiv.org/abs/1506.02438)
to compute the advantage. To obtain vanilla advantage (A(s) = R - V(S))
where R is the discounted reward with value bootstrap,
set ``gae_lambda=1.0`` during initialization.
        :param last_values: state value estimates for the last step (one per environment)
        :param dones: whether the last step of each environment was terminal
"""
# convert to numpy
last_values = last_values.clone().cpu().numpy().flatten()
last_gae_lam = 0
for step in reversed(range(self.buffer_size)):
if step == self.buffer_size - 1:
next_non_terminal = 1.0 - dones
next_values = last_values
else:
next_non_terminal = 1.0 - self.dones[step + 1]
next_values = self.values[step + 1]
delta = self.rewards[step] + self.gamma * next_values * next_non_terminal - self.values[step]
last_gae_lam = delta + self.gamma * self.gae_lambda * next_non_terminal * last_gae_lam
self.advantages[step] = last_gae_lam
self.returns = self.advantages + self.values
def add(
self, obs: np.ndarray, action: np.ndarray, reward: np.ndarray, done: np.ndarray, value: th.Tensor, log_prob: th.Tensor
) -> None:
"""
:param obs: Observation
:param action: Action
        :param reward: reward obtained at this step
:param done: End of episode signal.
:param value: estimated value of the current state
following the current policy.
:param log_prob: log probability of the action
following the current policy.
"""
if len(log_prob.shape) == 0:
# Reshape 0-d tensor to avoid error
log_prob = log_prob.reshape(-1, 1)
# Reshape needed when using multiple envs with discrete observations
# as numpy cannot broadcast (n_discrete,) to (n_discrete, 1)
if isinstance(self.observation_space, spaces.Discrete):
obs = obs.reshape((self.n_envs,) + self.obs_shape)
self.observations[self.pos] = np.array(obs).copy()
self.actions[self.pos] = np.array(action).copy()
self.rewards[self.pos] = np.array(reward).copy()
self.dones[self.pos] = np.array(done).copy()
self.values[self.pos] = value.clone().cpu().numpy().flatten()
self.log_probs[self.pos] = log_prob.clone().cpu().numpy()
self.pos += 1
if self.pos == self.buffer_size:
self.full = True
def get(self, batch_size: Optional[int] = None) -> Generator[RolloutBufferSamples, None, None]:
assert self.full, ""
indices = np.random.permutation(self.buffer_size * self.n_envs)
# Prepare the data
if not self.generator_ready:
for tensor in ["observations", "actions", "values", "log_probs", "advantages", "returns"]:
self.__dict__[tensor] = self.swap_and_flatten(self.__dict__[tensor])
self.generator_ready = True
# Return everything, don't create minibatches
if batch_size is None:
batch_size = self.buffer_size * self.n_envs
start_idx = 0
while start_idx < self.buffer_size * self.n_envs:
yield self._get_samples(indices[start_idx : start_idx + batch_size])
start_idx += batch_size
def _get_samples(self, batch_inds: np.ndarray, env: Optional[VecNormalize] = None) -> RolloutBufferSamples:
data = (
self.observations[batch_inds],
self.actions[batch_inds],
self.values[batch_inds].flatten(),
self.log_probs[batch_inds].flatten(),
self.advantages[batch_inds].flatten(),
self.returns[batch_inds].flatten(),
)
return RolloutBufferSamples(*tuple(map(self.to_torch, data)))
|
py | b40c0afb6b53a17e6162ed8306e5b30b3c500b0d | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
# Modifications Copyright 2017 Abigail See
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This file contains some utility functions"""
import tensorflow as tf
import time
import os
from tensorflow.python.client import device_lib
FLAGS = tf.app.flags.FLAGS
def get_available_gpus():
    """Return the list of GPU device names to use, read from `gpus.txt` in the
    working directory (lines starting with '#' are ignored; a single 'all'
    entry selects every locally visible GPU). Falls back to ['/gpu:0']."""
    gpus_list_file = os.path.join(os.getcwd(), "gpus.txt")
    with open(os.path.realpath(gpus_list_file), "r") as f:
        lines = f.readlines()
    # strip newlines/whitespace so comparisons such as == "all" behave as intended
    gpus = [line.strip() for line in lines]
    gpus = [g for g in gpus if g and not g.startswith("#")]
    if gpus and gpus[0] == "all":
        local_device_protos = device_lib.list_local_devices()
        gpus = [str(x.name) for x in local_device_protos if x.device_type == 'GPU']
    return gpus if len(gpus) > 0 else ['/gpu:0']
def get_config():
"""Returns config for tf.session"""
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth=True
return config
def load_ckpt(saver, sess, ckpt_dir="train"):
"""Load checkpoint from the ckpt_dir (if unspecified, this is train dir) and restore it to saver and sess, waiting 10 secs in the case of failure. Also returns checkpoint name."""
while True:
try:
latest_filename = "checkpoint_best" if ckpt_dir=="eval" else None
ckpt_dir = os.path.join(FLAGS.log_root, ckpt_dir)
ckpt_state = tf.train.get_checkpoint_state(ckpt_dir, latest_filename=latest_filename)
tf.logging.info('Loading checkpoint %s', ckpt_state.model_checkpoint_path)
saver.restore(sess, ckpt_state.model_checkpoint_path)
return ckpt_state.model_checkpoint_path
except:
tf.logging.info("Failed to load checkpoint from %s. Sleeping for %i secs...", ckpt_dir, 10)
time.sleep(10)
|
py | b40c0c20ee9baa70c489f2bfcea5decddb8c5b3e | import numpy as np
import chainer
from chainer import cuda, Function, report, training, utils, Variable
from chainer import datasets, iterators, optimizers, serializers, reporter
from chainer.dataset import convert
from chainer.dataset import iterator as iterator_module
from chainer.datasets import tuple_dataset
from chainer import Link, Chain, ChainList
import chainer.functions as F
import chainer.links as L
from chainer.training import extensions
import os.path
# Check version
# Python 2.7.12 on win32 (Windows version)
# numpy (1.14.0)
# chainer (1.20.0.1)
class SimpleCNN2(Chain):
def __init__(self,num_filter, size_filter, stride0=1):
super(SimpleCNN2, self).__init__(
conv1 = L.Convolution2D(1, num_filter, size_filter, stride=stride0),
conv2 = L.Convolution2D(num_filter, num_filter * 2, size_filter, stride=stride0),
l1 = L.Linear(5408, 10),
)
self.train = True
#print ('num_filter', num_filter)
#print ('size_filter', size_filter)
def __call__(self, x):
h1 = F.relu(self.conv1(x))
#print ('h1.shape ', h1.data.shape)
h2 = F.relu(self.conv2(h1))
#print ('h2.shape ', h2.data.shape)
h3 = self.l1(h2)
return h3
def get_dataset(IN_DIR='DataSet', train_ratio=9):
# load data set and convert to tuple in this.py
train_data = np.load(os.path.join(IN_DIR,'train_data.npy'))
train_label = np.load(os.path.join(IN_DIR,'train_label.npy'))
#print ( train_data.shape[0])
# dvide train and test per the ratio
threshold = np.int32(train_data.shape[0]/10*train_ratio)
train = tuple_dataset.TupleDataset(train_data[0:threshold], train_label[0:threshold])
test = tuple_dataset.TupleDataset(train_data[threshold:], train_label[threshold:])
return train, test
if __name__=='__main__':
model = L.Classifier(SimpleCNN2(num_filter=16, size_filter=6,stride0=2))
train,test=get_dataset()
optimizer = chainer.optimizers.Adam()
optimizer.setup(model)
train_iter = chainer.iterators.SerialIterator(train, batch_size=30)
test_iter = chainer.iterators.SerialIterator(test, batch_size=30, repeat=False, shuffle=False)
updater = training.StandardUpdater(train_iter, optimizer, device=-1)
### output dir "logs"
OUT_DIR='result'
trainer = training.Trainer(updater, (10, 'epoch'), out=OUT_DIR)
trainer.extend(extensions.Evaluator(test_iter, model, device=-1))
trainer.extend(extensions.LogReport())
trainer.extend(extensions.PrintReport( ['epoch', 'main/loss', 'validation/main/loss', 'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))
trainer.extend(extensions.ProgressBar())
trainer.extend(extensions.PlotReport(['main/loss', 'validation/main/loss'], x_key='epoch', file_name='loss.png'))
trainer.extend(extensions.PlotReport(['main/accuracy','validation/main/accuracy'], x_key='epoch', file_name='accuracy.png'))
# output result as save_npz
trainer.extend(extensions.snapshot())
trainer.run()
# This file uses TAB
|
py | b40c0c94e6ddaf80892a69fa9d6c2b057c3614dd | #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from argparse import REMAINDER, ArgumentParser, Namespace
from typing import List, Optional
from idb.cli import ClientCommand
from idb.common.signal import signal_handler_event
from idb.common.types import Client
class LogCommand(ClientCommand):
@property
def description(self) -> str:
return "Obtain logs from the target"
@property
def name(self) -> str:
return "log"
def add_parser_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument(
"log_arguments",
help="""\
Example: idb log -- --style json
Possible arguments:
--system | --process (pid|process) | --parent (pid|process) ]
[ --level default|info|debug][ --predicate <predicate> ]
[ --source ][ --style (syslog|json) ]
[ --timeout <num>[m|h|d] ][ --type activity|log|trace ]
Examples:
log stream --level=info
log stream --predicate examples:
--predicate 'eventMessage contains "my message"'
--predicate 'eventType == logEvent and messageType == info'
--predicate 'processImagePath endswith "d"'
--predicate 'not processImagePath contains[c] "some spammer"'
--predicate 'processID < 100'
--predicate 'senderImagePath beginswith "my sender"'""",
default=[],
nargs=REMAINDER,
)
super().add_parser_arguments(parser)
async def run_with_client(self, args: Namespace, client: Client) -> None:
async for chunk in client.tail_logs(
stop=signal_handler_event("log"),
arguments=self.normalise_log_arguments(args.log_arguments),
):
print(chunk, end="")
print("")
def normalise_log_arguments(
self, log_arguments: Optional[List[str]]
) -> Optional[List[str]]:
if log_arguments is None:
return None
if len(log_arguments) > 0 and log_arguments[0] == "--":
log_arguments = log_arguments[1:]
return log_arguments
class CompanionLogCommand(ClientCommand):
@property
def description(self) -> str:
return "Obtain logs from the companion"
@property
def name(self) -> str:
return "log"
async def run_with_client(self, args: Namespace, client: Client) -> None:
async for chunk in client.tail_companion_logs(stop=signal_handler_event("log")):
print(chunk, end="")
print("")
|
py | b40c0db703cc0cf7e4f6f8fa3915a534c6602fb3 | #!/usr/bin/env python3
# Copyright (c) 2017-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test various command line arguments and configuration file parameters."""
import os
from test_framework.test_framework import BitcoinTestFramework
class ConfArgsTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def test_config_file_parser(self):
# Assume node is stopped
inc_conf_file_path = os.path.join(self.nodes[0].datadir, 'include.conf')
with open(os.path.join(self.nodes[0].datadir, 'litecoinz.conf'), 'a', encoding='utf-8') as conf:
conf.write('includeconf={}\n'.format(inc_conf_file_path))
self.nodes[0].assert_start_raises_init_error(
expected_msg='Error: Error parsing command line arguments: Invalid parameter -dash_cli',
extra_args=['-dash_cli=1'],
)
with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
conf.write('dash_conf=1\n')
with self.nodes[0].assert_debug_log(expected_msgs=['Ignoring unknown configuration value dash_conf']):
self.start_node(0)
self.stop_node(0)
with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
conf.write('-dash=1\n')
self.nodes[0].assert_start_raises_init_error(expected_msg='Error: Error reading configuration file: parse error on line 1: -dash=1, options in configuration file must be specified without leading -')
if self.is_wallet_compiled():
with open(inc_conf_file_path, 'w', encoding='utf8') as conf:
conf.write("wallet=foo\n")
self.nodes[0].assert_start_raises_init_error(expected_msg='Error: Config setting for -wallet only applied on regtest network when in [regtest] section.')
with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
conf.write('regtest=0\n') # mainnet
conf.write('acceptnonstdtxn=1\n')
self.nodes[0].assert_start_raises_init_error(expected_msg='Error: acceptnonstdtxn is not currently supported for main chain')
with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
conf.write('nono\n')
self.nodes[0].assert_start_raises_init_error(expected_msg='Error: Error reading configuration file: parse error on line 1: nono, if you intended to specify a negated option, use nono=1 instead')
with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
conf.write('server=1\nrpcuser=someuser\nrpcpassword=some#pass')
self.nodes[0].assert_start_raises_init_error(expected_msg='Error: Error reading configuration file: parse error on line 3, using # in rpcpassword can be ambiguous and should be avoided')
with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
conf.write('server=1\nrpcuser=someuser\nmain.rpcpassword=some#pass')
self.nodes[0].assert_start_raises_init_error(expected_msg='Error: Error reading configuration file: parse error on line 3, using # in rpcpassword can be ambiguous and should be avoided')
with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
conf.write('server=1\nrpcuser=someuser\n[main]\nrpcpassword=some#pass')
self.nodes[0].assert_start_raises_init_error(expected_msg='Error: Error reading configuration file: parse error on line 4, using # in rpcpassword can be ambiguous and should be avoided')
inc_conf_file2_path = os.path.join(self.nodes[0].datadir, 'include2.conf')
with open(os.path.join(self.nodes[0].datadir, 'litecoinz.conf'), 'a', encoding='utf-8') as conf:
conf.write('includeconf={}\n'.format(inc_conf_file2_path))
with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
conf.write('testnot.datadir=1\n')
with open(inc_conf_file2_path, 'w', encoding='utf-8') as conf:
conf.write('[testnet]\n')
self.restart_node(0)
self.nodes[0].stop_node(expected_stderr='Warning: ' + inc_conf_file_path + ':1 Section [testnot] is not recognized.' + os.linesep + 'Warning: ' + inc_conf_file2_path + ':1 Section [testnet] is not recognized.')
with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
conf.write('') # clear
with open(inc_conf_file2_path, 'w', encoding='utf-8') as conf:
conf.write('') # clear
def test_log_buffer(self):
with self.nodes[0].assert_debug_log(expected_msgs=['Warning: parsed potentially confusing double-negative -connect=0\n']):
self.start_node(0, extra_args=['-noconnect=0'])
self.stop_node(0)
def run_test(self):
self.stop_node(0)
self.test_log_buffer()
self.test_config_file_parser()
# Remove the -datadir argument so it doesn't override the config file
self.nodes[0].args = [arg for arg in self.nodes[0].args if not arg.startswith("-datadir")]
default_data_dir = self.nodes[0].datadir
new_data_dir = os.path.join(default_data_dir, 'newdatadir')
new_data_dir_2 = os.path.join(default_data_dir, 'newdatadir2')
# Check that using -datadir argument on non-existent directory fails
self.nodes[0].datadir = new_data_dir
self.nodes[0].assert_start_raises_init_error(['-datadir=' + new_data_dir], 'Error: Specified data directory "' + new_data_dir + '" does not exist.')
# Check that using non-existent datadir in conf file fails
conf_file = os.path.join(default_data_dir, "litecoinz.conf")
# datadir needs to be set before [regtest] section
conf_file_contents = open(conf_file, encoding='utf8').read()
with open(conf_file, 'w', encoding='utf8') as f:
f.write("datadir=" + new_data_dir + "\n")
f.write(conf_file_contents)
self.nodes[0].assert_start_raises_init_error(['-conf=' + conf_file], 'Error: Error reading configuration file: specified data directory "' + new_data_dir + '" does not exist.')
# Create the directory and ensure the config file now works
os.mkdir(new_data_dir)
self.start_node(0, ['-conf='+conf_file, '-wallet=w1'])
self.stop_node(0)
assert os.path.exists(os.path.join(new_data_dir, 'regtest', 'blocks'))
if self.is_wallet_compiled():
assert os.path.exists(os.path.join(new_data_dir, 'regtest', 'wallets', 'w1'))
# Ensure command line argument overrides datadir in conf
os.mkdir(new_data_dir_2)
self.nodes[0].datadir = new_data_dir_2
self.start_node(0, ['-datadir='+new_data_dir_2, '-conf='+conf_file, '-wallet=w2'])
assert os.path.exists(os.path.join(new_data_dir_2, 'regtest', 'blocks'))
if self.is_wallet_compiled():
assert os.path.exists(os.path.join(new_data_dir_2, 'regtest', 'wallets', 'w2'))
if __name__ == '__main__':
ConfArgsTest().main()
|
py | b40c0df8c91a0f52b412b094d70135e7dbeb74eb | from BlockDevice import BlockDevice
import os.path
import os
class PartBlockDevice(BlockDevice):
def __init__(self, raw_blkdev, part_blk, auto_close=False):
self.raw_blkdev = raw_blkdev
self.part_blk = part_blk
self.blk_off = 0
self.auto_close = auto_close
def open(self):
# extract geometry from partition block
dos_env = self.part_blk.dos_env
lo_cyl = dos_env.low_cyl
hi_cyl = dos_env.high_cyl
cyls = hi_cyl - lo_cyl + 1
heads = dos_env.surfaces
secs = dos_env.blk_per_trk
block_bytes = dos_env.block_size * 4
reserved = dos_env.reserved
boot_blocks = dos_env.boot_blocks
if boot_blocks == 0:
boot_blocks = 2
self._set_geometry(cyls, heads, secs, block_bytes, reserved, boot_blocks)
# calc block offset of partition
self.blk_off = heads * secs * lo_cyl
return True
def flush(self):
self.raw_blkdev.flush()
def close(self):
# auto close containing rdisk
if self.auto_close:
self.raw_blkdev.close()
def read_block(self, blk_num):
if blk_num >= self.num_blocks:
raise ValueError("Invalid Part block num: got %d but max is %d" % (blk_num, self.num_blocks))
return self.raw_blkdev.read_block(self.blk_off + blk_num)
def write_block(self, blk_num, data):
if blk_num >= self.num_blocks:
raise ValueError("Invalid Part block num: got %d but max is %d" % (blk_num, self.num_blocks))
if len(data) != self.block_bytes:
raise ValueError("Invalid Part block size written: got %d but size is %d" % (len(data), self.block_bytes))
self.raw_blkdev.write_block(self.blk_off + blk_num, data)
|
py | b40c0ead5c221e4b3c98dc0a7f268049b5a224b3 | """
Generate base anchors on index 0
"""
from __future__ import print_function
import sys
import numpy as np
def anchors_plane(height, width, stride, base_anchors):
"""
Parameters
----------
height: height of plane
width: width of plane
stride: stride ot the original image
anchors_base: (A, 4) a base set of anchors
Returns
-------
all_anchors: (height, width, A, 4) ndarray of anchors spreading over the plane
"""
A = base_anchors.shape[0]
all_anchors = np.zeros((height, width, A, 4), dtype=int)
iw = 0
ih = 0
k = 0
sh = 0
sw = 0
for iw in range(width):
sw = iw * stride
for ih in range(height):
sh = ih * stride
for k in range(A):
all_anchors[ih, iw, k, 0] = base_anchors[k, 0] + sw
all_anchors[ih, iw, k, 1] = base_anchors[k, 1] + sh
all_anchors[ih, iw, k, 2] = base_anchors[k, 2] + sw
all_anchors[ih, iw, k, 3] = base_anchors[k, 3] + sh
return all_anchors
def generate_anchors(base_size=16, ratios=[0.5, 1, 2],
scales=2 ** np.arange(3, 6), stride=16, dense_anchor=False):
"""
Generate anchor (reference) windows by enumerating aspect ratios X
scales wrt a reference (0, 0, 15, 15) window.
"""
base_anchor = np.array([1, 1, base_size, base_size]) - 1
ratio_anchors = _ratio_enum(base_anchor, ratios)
anchors = np.vstack([_scale_enum(ratio_anchors[i, :], scales)
for i in range(ratio_anchors.shape[0])])
if dense_anchor:
assert stride%2==0
anchors2 = anchors.copy()
anchors2[:,:] += int(stride/2)
anchors = np.vstack( (anchors, anchors2) )
#print('GA',base_anchor.shape, ratio_anchors.shape, anchors.shape)
return anchors
def generate_anchors_fpn(cfg, dense_anchor=False):
#assert(False)
"""
Generate anchor (reference) windows by enumerating aspect ratios X
scales wrt a reference (0, 0, 15, 15) window.
"""
RPN_FEAT_STRIDE = []
for k in cfg:
RPN_FEAT_STRIDE.append( int(k) )
RPN_FEAT_STRIDE = sorted(RPN_FEAT_STRIDE, reverse=True)
anchors = []
for k in RPN_FEAT_STRIDE:
v = cfg[str(k)]
bs = v['BASE_SIZE']
__ratios = np.array(v['RATIOS'])
__scales = np.array(v['SCALES'])
stride = int(k)
#print('anchors_fpn', bs, __ratios, __scales, file=sys.stderr)
r = generate_anchors(bs, __ratios, __scales, stride, dense_anchor)
#print('anchors_fpn', r.shape, file=sys.stderr)
anchors.append(r)
return anchors
def _whctrs(anchor):
"""
Return width, height, x center, and y center for an anchor (window).
"""
w = anchor[2] - anchor[0] + 1
h = anchor[3] - anchor[1] + 1
x_ctr = anchor[0] + 0.5 * (w - 1)
y_ctr = anchor[1] + 0.5 * (h - 1)
return w, h, x_ctr, y_ctr
def _mkanchors(ws, hs, x_ctr, y_ctr):
"""
Given a vector of widths (ws) and heights (hs) around a center
(x_ctr, y_ctr), output a set of anchors (windows).
"""
ws = ws[:, np.newaxis]
hs = hs[:, np.newaxis]
anchors = np.hstack((x_ctr - 0.5 * (ws - 1),
y_ctr - 0.5 * (hs - 1),
x_ctr + 0.5 * (ws - 1),
y_ctr + 0.5 * (hs - 1)))
return anchors
def _ratio_enum(anchor, ratios):
"""
Enumerate a set of anchors for each aspect ratio wrt an anchor.
"""
w, h, x_ctr, y_ctr = _whctrs(anchor)
size = w * h
size_ratios = size / ratios
ws = np.round(np.sqrt(size_ratios))
hs = np.round(ws * ratios)
anchors = _mkanchors(ws, hs, x_ctr, y_ctr)
return anchors
def _scale_enum(anchor, scales):
"""
Enumerate a set of anchors for each scale wrt an anchor.
"""
w, h, x_ctr, y_ctr = _whctrs(anchor)
ws = w * scales
hs = h * scales
anchors = _mkanchors(ws, hs, x_ctr, y_ctr)
return anchors
|
py | b40c0eb2dc2202c5217ce6382e5450a493f0f648 | from django.apps import apps
from django.db import connection
from django.db.models.signals import post_save, pre_delete
from django_rq import job
import django_rq
from haystack.signals import BaseSignalProcessor
from cache_tools.jobs import auto_invalidate_cache, get_stale_objects
from .search_indexes import get_haystack_index
def auto_update_haystack(action, instance):
for obj in get_stale_objects(instance, all_relations=True):
model = obj.__class__
index = get_haystack_index(model)
if index is None:
continue
if action == 'delete':
index.remove_object(obj)
else:
index.update_object(obj)
@job
def auto_invalidate(action, app_label, model_name, pk):
model = apps.get_model(app_label, model_name)
if action == 'delete':
        # When an object is deleted, the only thing to do is to remove its
        # entry from the search engine. No other object should be affected,
        # because the FKs are protected to avoid cascading deletions.
        # WARNING: still worth keeping an eye on.
index = get_haystack_index(model)
if index is not None:
index.remove_object('%s.%s.%s' % (app_label, model_name, pk))
return
instance = model._default_manager.get(pk=pk)
auto_invalidate_cache(instance)
auto_update_haystack(action, instance)
class AutoInvalidatorSignalProcessor(BaseSignalProcessor):
def setup(self):
post_save.connect(self.enqueue_save)
pre_delete.connect(self.enqueue_delete)
def teardown(self):
post_save.disconnect(self.enqueue_save)
pre_delete.disconnect(self.enqueue_delete)
def enqueue_save(self, sender, instance, created, **kwargs):
def inner():
if created:
return self.enqueue('create', instance, sender, **kwargs)
return self.enqueue('save', instance, sender, **kwargs)
return connection.on_commit(inner)
def enqueue_delete(self, sender, instance, **kwargs):
return self.enqueue('delete', instance, sender, **kwargs)
def enqueue(self, action, instance, sender, **kwargs):
if sender._meta.label in ('admin.LogEntry', 'sessions.Session',
'reversion.Revision', 'reversion.Version'):
return
django_rq.enqueue(
auto_invalidate,
args=(action,
instance._meta.app_label, instance._meta.model_name,
instance.pk),
result_ttl=0, # Doesn't store result
timeout=3600, # Avoids never-ending jobs
)
|
py | b40c0f3868d13c415a030ce5da9ba4ada7c5c4ec | from Objects.Item import Item
from Objects.Projectile import Projectile
from Engine.World import CreateObject, Object
class Taser(Item):
defName = "Taser"
defSprite = "taser"
def InteractWith(self, object):
if object is None:
return False
if not object.position or object.position == self.position:
return False
self.CreateProjectile(object.position - self.position)
return True
def ClickTile(self, tile):
if tile is None:
return False
if not tile.pos:
return False
self.CreateProjectile(tile.pos.xy() - self.position)
return True
def CreateProjectile(self, direction):
projectile = CreateObject("Objects.Projectile.Projectile", self.tile)
projectile.SetShotDirection(direction)
|
py | b40c12136bbe4725b66feb2ed867437b3a16c8e9 | from collections import defaultdict
import json
import os
import logging
logger = logging.getLogger('LoRaPark-rules')
class Rules:
_rules = dict()
def __init__(self, config):
self._config = config
logger.info('Loading rules...')
for file in os.listdir(config['rules_directory']):
if file.endswith('.json'):
with open(os.path.join(config['rules_directory'], file)) as json_file:
rule = json.load(json_file)
id = file.removesuffix('.json')
rule['id'] = id
self._rules[id] = rule
logger.info('Done.')
def get_ids(self):
return list(self._rules.keys())
def get_rules(self, ids=None):
        if ids is None:
ids = self.get_ids()
return [self.get_rule(id) for id in ids]
def get_rule(self, id):
        if id not in self._rules:
return None
return self._rules[id]
def get_rule_description(self, id):
rule = self.get_rule(id)
if not rule:
return None
return {k: rule[k] for k in rule if k in ['id', 'name', 'description']}
def get_rules_description(self, ids=None):
        if ids is None:
ids = self.get_ids()
return [self.get_rule_description(id) for id in ids]
|
py | b40c13e2b185a197d24615cfbe6b3023e7391704 | import pymel.core as pm
import unittest
from luna import static
from luna.test import TestCase
import luna_rig
class CharacterTests(TestCase):
def setUp(self):
pm.newFile(f=1)
def tearDown(self):
super(CharacterTests, self).tearDown()
pm.newFile(f=1)
def test_create_default(self):
instance = luna_rig.components.Character.create()
# Assertions
# Metanode attributes
self.assertTrue(pm.hasAttr(instance.pynode, "rootCtl"))
self.assertTrue(pm.hasAttr(instance.pynode, "characterName"))
self.assertTrue(pm.hasAttr(instance.pynode, "rootCtl"))
self.assertTrue(pm.hasAttr(instance.pynode, "controlRig"))
self.assertTrue(pm.hasAttr(instance.pynode, "deformationRig"))
self.assertTrue(pm.hasAttr(instance.pynode, "geometryGroup"))
self.assertTrue(pm.hasAttr(instance.pynode, "locatorsGroup"))
self.assertTrue(pm.hasAttr(instance.pynode, "worldLocator"))
# Instance members
self.assertEqual(instance.tag, "character")
self.assertEqual(str(instance.control_rig), static.CharacterMembers.control_rig.value)
self.assertEqual(str(instance.deformation_rig), static.CharacterMembers.deformation_rig.value)
self.assertEqual(str(instance.geometry_grp), static.CharacterMembers.geometry.value)
self.assertEqual(str(instance.locators_grp), static.CharacterMembers.locators.value)
self.assertTrue(pm.objExists(instance.world_locator))
# Save test scene
pm.renameFile(self.get_temp_filename("character_component_test_create_default.ma"))
pm.saveFile(f=1)
def test_intance_from_meta(self):
new_character = luna_rig.components.Character.create()
instance = luna_rig.components.Character(new_character.pynode.name())
# Assertions
# Meta node attributes
self.assertTrue(pm.hasAttr(instance.pynode, "rootCtl"))
self.assertTrue(pm.hasAttr(instance.pynode, "characterName"))
self.assertTrue(pm.hasAttr(instance.pynode, "rootCtl"))
self.assertTrue(pm.hasAttr(instance.pynode, "controlRig"))
self.assertTrue(pm.hasAttr(instance.pynode, "deformationRig"))
self.assertTrue(pm.hasAttr(instance.pynode, "geometryGroup"))
self.assertTrue(pm.hasAttr(instance.pynode, "locatorsGroup"))
self.assertTrue(pm.hasAttr(instance.pynode, "worldLocator"))
# Main groups
self.assertEqual(str(instance.control_rig), static.CharacterMembers.control_rig.value)
self.assertEqual(str(instance.deformation_rig), static.CharacterMembers.deformation_rig.value)
self.assertEqual(str(instance.geometry_grp), static.CharacterMembers.geometry.value)
self.assertEqual(str(instance.locators_grp), static.CharacterMembers.locators.value)
self.assertTrue(pm.objExists(instance.world_locator))
# Data struct
self.assertEqual(instance.side, "char")
self.assertEqual(instance.name, "character")
self.assertEqual(instance.tag, "character")
# Save test scene
pm.renameFile(self.get_temp_filename("character_component_test_instance_from_meta.ma"))
pm.saveFile(f=1)
if __name__ == "__main__":
unittest.main(exit=False)
|
py | b40c151d1849e417d1a0d197923225e1394b01ef | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class Ovito(Package):
"""OVITO is a scientific visualization and analysis software for atomistic
and particle simulation data. It helps scientists gain better insights into
materials phenomena and physical processes. OVITO Basic is freely available
for all major platforms under an open source license. It has served in a
growing number of computational simulation studies as a powerful tool to
analyze, understand and illustrate simulation results."""
homepage = "https://www.ovito.org"
url = "https://www.ovito.org/download/master/ovito-basic-3.6.0-x86_64.tar.xz"
version('3.6.0', '6ac43a3a39b1ec3cccab577602756a8b7010cc1f1f046c4f6a939590d12f0339')
def install(self, spec, prefix):
        # Once we've unpacked the tarball, copy its contents to the prefix
copy_tree('.', prefix)
|
py | b40c173c96aee151966dccb45ad06d09a181ad5d | # Authors: Soledad Galli <[email protected]>
# License: BSD 3 clause
from sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier
from sklearn.model_selection import GridSearchCV
from feature_engine.variable_manipulation import _define_variables
from feature_engine.base_transformers import BaseNumericalTransformer
class DecisionTreeDiscretiser(BaseNumericalTransformer):
"""
The DecisionTreeDiscretiser() divides continuous numerical variables into discrete,
finite, values estimated by a decision tree.
The methods is inspired by the following article from the winners of the KDD
2009 competition:
http://www.mtome.com/Publications/CiML/CiML-v3-book.pdf
At the moment, this transformer only works for binary classification or
regression. Multi-class classification is not supported.
The DecisionTreeDiscretiser() works only with numerical variables.
A list of variables can be passed as an argument. Alternatively, the
discretiser will automatically select all numerical variables.
    When fit() is called, the DecisionTreeDiscretiser() trains one decision tree
    per variable.
    When transform() is called, it replaces each variable's values with the
    predictions made by the corresponding trained tree, which yields a finite
    (discrete) set of output values.
Parameters
----------
cv : int, default=3
Desired number of cross-validation fold to be used to fit the decision
tree.
scoring: str, default='neg_mean_squared_error'
Desired metric to optimise the performance for the tree. Comes from
sklearn metrics. See DecisionTreeRegressor or DecisionTreeClassifier
model evaluation documentation for more options:
https://scikit-learn.org/stable/modules/model_evaluation.html
variables : list
The list of numerical variables that will be transformed. If None, the
discretiser will automatically select all numerical type variables.
regression : boolean, default=True
Indicates whether the discretiser should train a regression or a classification
decision tree.
param_grid : dictionary, default=None
The list of parameters over which the decision tree should be optimised
during the grid search. The param_grid can contain any of the permitted
parameters for Scikit-learn's DecisionTreeRegressor() or
DecisionTreeClassifier().
If None, then param_grid = {'max_depth': [1, 2, 3, 4]}
random_state : int, default=None
The random_state to initialise the training of the decision tree. It is one
of the parameters of the Scikit-learn's DecisionTreeRegressor() or
DecisionTreeClassifier(). For reproducibility it is recommended to set
the random_state to an integer.
"""
def __init__(
self,
cv=3,
scoring="neg_mean_squared_error",
variables=None,
param_grid=None,
regression=True,
random_state=None,
):
if param_grid is None:
param_grid = {"max_depth": [1, 2, 3, 4]}
if not isinstance(cv, int) or cv < 0:
raise ValueError("cv can only take only positive integers")
if not isinstance(regression, bool):
raise ValueError("regression can only take True or False")
self.cv = cv
self.scoring = scoring
self.regression = regression
self.variables = _define_variables(variables)
self.param_grid = param_grid
self.random_state = random_state
def fit(self, X, y):
"""
Fits the decision trees. One tree per variable to be transformed.
Parameters
----------
X : pandas dataframe of shape = [n_samples, n_features]
The training input samples.
Can be the entire dataframe, not just the variables to transform.
y : pandas series.
Target variable. Required to train the decision tree.
Attributes
----------
binner_dict_: dictionary
The dictionary containing the {variable: fitted tree} pairs.
scores_dict_ : dictionary
The score of the best decision tree, over the train set.
Provided in case the user wishes to understand the performance of the
decision tree.
"""
# check input dataframe
X = super().fit(X, y)
self.binner_dict_ = {}
self.scores_dict_ = {}
for var in self.variables:
if self.regression:
model = DecisionTreeRegressor(random_state=self.random_state)
else:
model = DecisionTreeClassifier(random_state=self.random_state)
tree_model = GridSearchCV(
model, cv=self.cv, scoring=self.scoring, param_grid=self.param_grid
)
# fit the model to the variable
tree_model.fit(X[var].to_frame(), y)
self.binner_dict_[var] = tree_model
self.scores_dict_[var] = tree_model.score(X[var].to_frame(), y)
self.input_shape_ = X.shape
return self
def transform(self, X):
"""
Returns the predictions of the tree, based of the variable original
values. The tree outcome is finite, aka, discrete.
Parameters
----------
X : pandas dataframe of shape = [n_samples, n_features]
The input samples.
Returns
-------
X_transformed : pandas dataframe of shape = [n_samples, n_features]
The dataframe with transformed variables.
"""
# check input dataframe and if class was fitted
X = super().transform(X)
for feature in self.variables:
if self.regression:
X[feature] = self.binner_dict_[feature].predict(X[feature].to_frame())
else:
tmp = self.binner_dict_[feature].predict_proba(X[feature].to_frame())
X[feature] = tmp[:, 1]
return X
|
py | b40c17d093a863c6056e226d5d31141ef2f3a963 | import datetime
from typing import Dict, Optional
def handle_result(
created_on: datetime.datetime,
category: str,
test_suite: str,
test_name: str,
status: str,
results: Dict,
artifacts: Dict,
last_logs: str,
team: str,
) -> Optional[str]:
assert test_suite == "xgboost_tests"
time_taken = results.get("time_taken", float("inf"))
num_terminated = results.get("trial_states", {}).get("TERMINATED", 0)
if test_name in ["distributed_api_test", "ft_small_elastic", "ft_small_nonelastic"]:
if not status == "finished":
return f"Test script did not finish successfully ({status})."
return None
elif test_name.startswith("tune_"):
msg = ""
if test_name == "tune_small":
target_terminated = 4
target_time = 90
elif test_name == "tune_4x32":
target_terminated = 4
target_time = 120
elif test_name == "tune_32x4":
target_terminated = 32
target_time = 600
else:
return None
if num_terminated < target_terminated:
msg += (
f"Some trials failed "
f"(num_terminated={num_terminated} < {target_terminated}). "
)
if time_taken > target_time:
msg += (
f"Took too long to complete "
f"(time_taken={time_taken} > {target_time}). "
)
return msg or None
else:
# train scripts
if test_name == "train_small":
# Leave a couple of seconds for ray connect setup
# (without connect it should finish in < 30)
target_time = 45
elif test_name == "train_moderate":
target_time = 60
elif test_name == "train_gpu":
target_time = 40
else:
return None
if time_taken > target_time:
return (
f"Took too long to complete "
f"(time_taken={time_taken:.2f} > {target_time}). "
)
return None
|
py | b40c19df5b2707e95a784e1ca7255ed71fcea1e4 | import copy
import datetime
import unittest
from unittest import mock
from unittest.mock import Mock, MagicMock
from freezegun import freeze_time
from airflow.models import TaskInstance
from airflow.models import Connection
from airflow.settings import Session
from airflow.utils import timezone
from airflow.utils.state import State
from sentry_sdk import configure_scope
from sentry_airflow.hooks.sentry_hook import (
SentryHook,
get_task_instances,
add_tagging,
add_breadcrumbs,
get_dsn,
)
EXECUTION_DATE = timezone.utcnow()
DAG_ID = "test_dag"
TASK_ID = "test_task"
OPERATOR = "test_operator"
RUN_ID = "example-run-id"
STATE = State.SUCCESS
DURATION = None
TEST_SCOPE = {
"dag_id": DAG_ID,
"task_id": TASK_ID,
"execution_date": EXECUTION_DATE,
"operator": OPERATOR,
}
TASK_DATA = TEST_SCOPE.copy()
TASK_DATA.update({"state": STATE, "operator": OPERATOR, "duration": DURATION})
CRUMB_DATE = datetime.datetime(2019, 5, 15)
CRUMB = {
"timestamp": CRUMB_DATE,
"type": "default",
"category": "completed_tasks",
"data": TASK_DATA,
"level": "info",
}
class MockQuery:
"""
Mock Query for when session is called.
"""
def __init__(self, task_instance):
task_instance.state = STATE
self.arr = [task_instance]
def filter(self, *args, **kwargs):
return self
def all(self):
return self.arr
def first(self):
return self.arr[0]
def delete(self):
pass
# TODO: Update to use pytest fixtures
class TestSentryHook(unittest.TestCase):
@mock.patch("sentry_airflow.hooks.sentry_hook.SentryHook.get_connection")
def setUp(self, mock_get_connection):
self.assertEqual(TaskInstance._sentry_integration_, True)
mock_get_connection.return_value = Connection(host="https://[email protected]/123")
self.sentry_hook = SentryHook("sentry_default")
self.assertEqual(TaskInstance._sentry_integration_, True)
self.dag = Mock(dag_id=DAG_ID)
self.dag.task_ids = [TASK_ID]
self.task = Mock(dag=self.dag, dag_id=DAG_ID, task_id=TASK_ID)
self.task.__class__.__name__ = OPERATOR
self.session = Session()
self.ti = TaskInstance(self.task, execution_date=EXECUTION_DATE)
self.ti.operator = OPERATOR
self.session.query = MagicMock(return_value=MockQuery(self.ti))
def test_add_tags(self):
"""
Test adding tags.
"""
add_tagging(self.ti, run_id=RUN_ID)
with configure_scope() as scope:
for key, value in scope._tags.items():
                if key == "executor":
                    self.assertEqual(value, "SequentialExecutor")
                elif key == "run_id":
                    self.assertEqual(value, RUN_ID)
else:
self.assertEqual(TEST_SCOPE[key], value)
def test_get_task_instances(self):
"""
Test getting instances that have already completed.
"""
ti = get_task_instances(DAG_ID, [TASK_ID], EXECUTION_DATE, self.session)
self.assertEqual(ti[0], self.ti)
@freeze_time(CRUMB_DATE.isoformat())
def test_add_breadcrumbs(self):
"""
Test adding breadcrumbs.
"""
add_breadcrumbs(self.ti, self.session)
with configure_scope() as scope:
test_crumb = scope._breadcrumbs.pop()
self.assertEqual(CRUMB, test_crumb)
def test_get_dsn_host(self):
"""
Test getting dsn just from host
"""
conn = Connection(host="https://[email protected]/123")
dsn = get_dsn(conn)
self.assertEqual(dsn, "https://[email protected]/123")
def test_get_dsn_env_var(self):
"""
Test getting dsn from host, conn_type, login and schema
"""
conn = Connection(
conn_type="http", login="bar", host="getsentry.io", schema="987"
)
dsn = get_dsn(conn)
self.assertEqual(dsn, "http://[email protected]/987")
def test_get_dsn_from_host_with_none(self):
"""
Test getting dsn from host if other parameters are None
"""
conn = Connection(
conn_type="http", login=None, host="https://[email protected]/123"
)
dsn = get_dsn(conn)
self.assertEqual(dsn, "https://[email protected]/123")
|
py | b40c1ac4d302b2063c07b44e43dca21aed22c531 | from functools import wraps
def exception_resistant(func):
num_fails = 0
max_fails = 6
@wraps(func)
def wrapper(*args, **kwargs):
nonlocal num_fails
func_name = func.__name__
try:
return func(*args, **kwargs)
except Exception as e:
num_fails += 1
if num_fails == 1:
print(('Something went wrong in `{}`. ' +
'The process will continue to ' +
'execute.').format(func_name))
if num_fails <= max_fails:
print('`{}`: {}'.format(func_name, e))
elif num_fails == max_fails + 1:
print(('The rest of the `{}` errors ' +
'are hidden.').format(func_name))
return wrapper
|
py | b40c1b502ebedec37cb317db228aa1da88e2c776 | #
# Copyright (c) 2022 Airbyte, Inc., all rights reserved.
#
import sys
from airbyte_cdk.entrypoint import launch
from source_file import SourceFile
if __name__ == "__main__":
source = SourceFile()
launch(source, sys.argv[1:])
|
py | b40c1c164a052154aff730f1abd73f8c3c082214 | from multiprocessing import Pool
import time
COUNT = 50000000
def countdown(n):
while n>0:
n -= 1
if __name__ == '__main__':
pool = Pool(processes=2)
start = time.time()
r1 = pool.apply_async(countdown, [COUNT//2])
r2 = pool.apply_async(countdown, [COUNT//2])
pool.close()
pool.join()
end = time.time()
print('Time taken in seconds -', end - start)
# output = 'Time taken in seconds - ' 1.8036038875579834 |
py | b40c1d3bf94079d0093ced566458026d28b477ba | from django.contrib import admin
from .base_admin import BaseAttachmentAdmin
from .models import Attachment
class AttachmentAdmin(BaseAttachmentAdmin):
list_display = ('id', 'file', 'name', 'download_link', 'download_key')
admin.site.register(Attachment, AttachmentAdmin)
|
py | b40c1d600ad797037c275aab02b8c35ec1bf77cf | def entity_data_cleanse(entity: str, type: str, term: str, ):
"""
Ignores twitter handles, quantity, date, original search term, links
"""
return "@" not in entity and \
type != "QUANTITY" and \
type != "DATE" and \
entity != term.lower() and \
"http:" not in entity and \
"https:" not in entity
|
py | b40c1d7da44111860df2a8c86b12fa9325373318 | # Generated by Django 4.0.2 on 2022-02-10 08:15
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Account',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('address', models.CharField(max_length=100)),
('balance', models.IntegerField()),
],
),
]
|
py | b40c1daf8bb57831668ff6f18130452e2025bf4d | __author__ = 'admin'
class SessionHelper:
def __init__(self, app):
self.app = app
def login(self, username, password):
wd = self.app.wd
self.app.navigation.open_home_page()
# login
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").clear()
wd.find_element_by_name("user").send_keys(username)
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").clear()
wd.find_element_by_name("pass").send_keys(password)
wd.find_element_by_css_selector("input[type=\"submit\"]").click()
def safe_login(self, username, password):
# logout
wd = self.app.wd
if self.is_logged_in():
if self.is_logged_in_as_user(username):
return
else:
self.logout()
self.login(username, password)
def logout(self):
# logout
wd = self.app.wd
wd.find_element_by_link_text("Logout").click()
def safe_logout(self):
# logout
wd = self.app.wd
if self.is_logged_in():
self.logout()
def is_logged_in(self):
wd = self.app.wd
return len(wd.find_elements_by_link_text("Logout")) > 0
def is_logged_in_as_user(self, username):
wd = self.app.wd
return wd.find_element_by_xpath("//div/div[1]/form/b").text == "("+ username +")"
|
py | b40c1dd2ce522c2f2497e93d9095e9beefcec550 |
class ListSecure(list):
def get(self, index, default=None):
try:
return self.__getitem__(index)
except IndexError:
return default
|
py | b40c1f867009f65f2f54e02d23ab959f2094c4a1 | """
Description: Find peptide which matches across the different conditions
"""
# Import packages
import pandas as pd
from tqdm import tqdm
def find_matching_hits(hits37: pd.DataFrame, hits42: pd.DataFrame, hits42zn: pd.DataFrame, output: str) -> pd.DataFrame:
hit_list: pd.DataFrame = pd.DataFrame(columns=['Sequence', 'Modifications', 'Start', 'End', 'Charge',
'14N m/z', '15N m/z',
'14N Mass (Exp)', '15N Mass (Exp)'])
for _, hit37 in tqdm(hits37.iterrows(), total=hits37.shape[0]):
for _, hit42 in hits42.iterrows():
if hit37['Sequence'] == hit42['Sequence'] and \
hit37['Modifications'] == hit42['Modifications'] and \
hit37['Start'] == hit42['Start']:
for _, hit42zn in hits42zn.iterrows():
if hit37['Sequence'] == hit42['Sequence'] == hit42zn['Sequence'] and \
hit37['Modifications'] == hit42['Modifications'] == hit42zn['Modifications'] and \
hit37['Start'] == hit42['Start'] == hit42zn['Start']:
hit_list = hit_list.append(pd.DataFrame([[hit37['Sequence'], hit37['Modifications'],
hit37['Start'], hit37['End'], hit37['Charge'],
hit37['14N m/z'], hit37['15N m/z'],
hit37['14N Mass (Exp)'], hit37['15N Mass (Exp)']]],
columns=['Sequence', 'Modifications', 'Start', 'End',
'Charge', '14N m/z', '15N m/z',
'14N Mass (Exp)',
'15N Mass (Exp)']),
ignore_index=True)
hit_list = hit_list.drop_duplicates(subset=['Sequence', 'Modifications'])
hit_list = hit_list.sort_values(by=['Start'])
hit_list = hit_list.reset_index(drop=True)
    hit_list.to_excel(output, sheet_name="List")
    # return the matched hits so callers relying on the -> pd.DataFrame annotation get a value
    return hit_list
if __name__ == '__main__':
hits37_df: pd.DataFrame = pd.read_excel(io=r"C:\Users\Mads\Desktop\ISA_Spring_2021\Autodigestion\Hits\hits_37.xlsx",
sheet_name="List")
hits42_df: pd.DataFrame = pd.read_excel(io=r"C:\Users\Mads\Desktop\ISA_Spring_2021\Autodigestion\Hits\hits_42.xlsx",
sheet_name="List")
hits42_zn_df: pd.DataFrame = pd.read_excel(io=r"C:\Users\Mads\Desktop\ISA_Spring_2021\Autodigestion\Hits\hits_42_Zn"
r".xlsx",
sheet_name="List")
output_file: str = r"C:\Users\Mads\Desktop\ISA_Spring_2021\Autodigestion\Hits\MatchingHits.xlsx"
matching_hits: pd.DataFrame = find_matching_hits(hits37=hits37_df, hits42=hits42_df, hits42zn=hits42_zn_df,
output=output_file)
|
py | b40c1fcaa94f3641a2cc32477d9e9b5812113d15 | r"""Functions for the branching ratios and effective lifetimes of the leptonic
decays $B_q \to \ell^+\ell^-$, where $q=d$ or $s$ and $\ell=e$, $\mu$. or
$\tau$."""
from math import pi,sqrt
from flavio.physics import ckm
from flavio.physics.running import running
from flavio.physics.bdecays.common import meson_quark, lambda_K
from flavio.classes import Observable, Prediction
from flavio.config import config
from flavio.physics.bdecays.wilsoncoefficients import wctot_dict
def br_lifetime_corr(y, ADeltaGamma):
r"""Correction factor relating the experimentally measured branching ratio
(time-integrated) to the theoretical one (instantaneous), see e.g. eq. (8)
of arXiv:1204.1735.
Parameters
----------
- `y`: relative decay rate difference, $y_q = \tau_{B_q} \Delta\Gamma_q /2$
- `ADeltaGamma`: $A_{\Delta\Gamma_q}$ as defined, e.g., in arXiv:1204.1735
Returns
-------
$\frac{1-y_q^2}{1+A_{\Delta\Gamma_q} y_q}$
"""
return (1 - y**2)/(1 + ADeltaGamma*y)
def amplitudes(par, wc, B, l1, l2):
r"""Amplitudes P and S entering the $B_q\to\ell_1^+\ell_2^-$ observables.
Parameters
----------
- `par`: parameter dictionary
- `B`: should be `'Bs'` or `'B0'`
- `l1` and `l2`: should be `'e'`, `'mu'`, or `'tau'`
Returns
-------
`(P, S)` where for the special case `l1 == l2` one has
- $P = \frac{2m_\ell}{m_{B_q}} (C_{10}-C_{10}') + m_{B_q} (C_P-C_P')$
- $S = m_{B_q} (C_S-C_S')$
"""
scale = config['renormalization scale']['bll']
# masses
ml1 = par['m_'+l1]
ml2 = par['m_'+l2]
mB = par['m_'+B]
mb = running.get_mb(par, scale, nf_out=5)
# get the mass of the spectator quark
if B=='Bs':
mspec = running.get_ms(par, scale, nf_out=5)
elif B=='B0':
mspec = running.get_md(par, scale, nf_out=5)
# Wilson coefficients
qqll = meson_quark[B] + l1 + l2
# For LFV expressions see arXiv:1602.00881 eq. (5)
C9m = wc['C9_'+qqll] - wc['C9p_'+qqll] # only relevant for l1 != l2!
C10m = wc['C10_'+qqll] - wc['C10p_'+qqll]
CPm = wc['CP_'+qqll] - wc['CPp_'+qqll]
CSm = wc['CS_'+qqll] - wc['CSp_'+qqll]
P = (ml2 + ml1)/mB * C10m + mB * mb/(mb + mspec) * CPm
S = (ml2 - ml1)/mB * C9m + mB * mb/(mb + mspec) * CSm
return P, S
def ADeltaGamma(par, wc, B, lep):
P, S = amplitudes(par, wc, B, lep, lep)
# cf. eq. (17) of arXiv:1204.1737
return ((P**2).real - (S**2).real)/(abs(P)**2 + abs(S)**2)
def br_inst(par, wc, B, l1, l2):
r"""Branching ratio of $B_q\to\ell_1^+\ell_2^-$ in the absence of mixing.
Parameters
----------
- `par`: parameter dictionary
- `B`: should be `'Bs'` or `'B0'`
- `lep`: should be `'e'`, `'mu'`, or `'tau'`
"""
# paramaeters
GF = par['GF']
alphaem = running.get_alpha(par, 4.8)['alpha_e']
ml1 = par['m_'+l1]
ml2 = par['m_'+l2]
mB = par['m_'+B]
tauB = par['tau_'+B]
fB = par['f_'+B]
# appropriate CKM elements
if B == 'Bs':
xi_t = ckm.xi('t','bs')(par)
elif B == 'B0':
xi_t = ckm.xi('t','bd')(par)
N = xi_t * 4*GF/sqrt(2) * alphaem/(4*pi)
beta = sqrt(lambda_K(mB**2,ml1**2,ml2**2))/mB**2
beta_p = sqrt(1 - (ml1 + ml2)**2/mB**2)
beta_m = sqrt(1 - (ml1 - ml2)**2/mB**2)
prefactor = abs(N)**2 / 32. / pi * mB**3 * tauB * beta * fB**2
P, S = amplitudes(par, wc, B, l1, l2)
return prefactor * ( beta_m**2 * abs(P)**2 + beta_p**2 * abs(S)**2 )
def br_timeint(par, wc, B, l1, l2):
r"""Time-integrated branching ratio of $B_q\to\ell^+\ell^-$."""
if l1 != l2:
raise ValueError("Time-integrated branching ratio only defined for equal lepton flavours")
lep = l1
br0 = br_inst(par, wc, B, lep, lep)
y = par['DeltaGamma/Gamma_'+B]/2.
ADG = ADeltaGamma(par, wc, B, lep)
corr = br_lifetime_corr(y, ADG)
return br0 / corr
def bqll_obs(function, wc_obj, par, B, l1, l2):
scale = config['renormalization scale']['bll']
label = meson_quark[B]+l1+l2
if l1 == l2:
# include SM contributions for LF conserving decay
wc = wctot_dict(wc_obj, label, scale, par)
else:
wc = wc_obj.get_wc(label, scale, par)
return function(par, wc, B, l1, l2)
def bqll_obs_lsum(function, wc_obj, par, B, l1, l2):
if l1 == l2:
raise ValueError("This function is defined only for LFV decays")
scale = config['renormalization scale']['bll']
wc12 = wc_obj.get_wc(meson_quark[B]+l1+l2, scale, par)
wc21 = wc_obj.get_wc(meson_quark[B]+l2+l1, scale, par)
return function(par, wc12, B, l1, l2) + function(par, wc21, B, l2, l1)
def bqll_obs_function(function, B, l1, l2):
return lambda wc_obj, par: bqll_obs(function, wc_obj, par, B, l1, l2)
def bqll_obs_function_lsum(function, B, l1, l2):
return lambda wc_obj, par: bqll_obs_lsum(function, wc_obj, par, B, l1, l2)
# Bs -> l+l- effective lifetime
def tau_ll(wc, par, B, lep):
r"""Effective B->l+l- lifetime as defined in eq. (26) of arXiv:1204.1737 .
This formula one either gets by integrating eq. (21) or by inverting eq. (27) of arXiv:1204.1737.
Parameters
----------
- `wc` : dict of Wilson coefficients
- `par` : parameter dictionary
- `B` : should be `'Bs'` or `'B0'`
- `lep` : lepton: 'e', 'mu' or 'tau'
Returns
-------
$-\frac{\tau_{B_s} \left(y_s^2+2 A_{\Delta\Gamma_q} ys+1\right)}{\left(ys^2-1\right) (A_{\Delta\Gamma_q} ys+1)}$
"""
ADG = ADeltaGamma(par, wc, B, lep)
y = .5*par['DeltaGamma/Gamma_'+B]
tauB = par['tau_'+B]
return -(((1 + y**2 + 2*y*ADG)*tauB)/((-1 + y**2)*(1 + y*ADG)))
def tau_ll_func(wc_obj, par, B, lep):
scale = config['renormalization scale']['bll']
label = meson_quark[B]+lep+lep
wc = wctot_dict(wc_obj, label, scale, par)
return tau_ll(wc, par, B, lep)
def ADG_func(wc_obj, par, B, lep):
scale = config['renormalization scale']['bll']
label = meson_quark[B]+lep+lep
wc = wctot_dict(wc_obj, label, scale, par)
return ADeltaGamma(par, wc, B, lep)
def ADeltaGamma_func(B, lep):
def ADG_func(wc_obj, par):
scale = config['renormalization scale']['bll']
label = meson_quark[B]+lep+lep
wc = wctot_dict(wc_obj, label, scale, par)
return ADeltaGamma(par, wc, B, lep)
return ADG_func
# Observable and Prediction instances
_tex = {'e': 'e', 'mu': r'\mu', 'tau': r'\tau'}
for l in ['e', 'mu', 'tau']:
_process_taxonomy = r'Process :: $b$ hadron decays :: FCNC decays :: $B\to\ell^+\ell^-$ :: $'
# For the B^0 decay, we take the time-integrated branching ratio
_obs_name = "BR(Bs->"+l+l+")"
_obs = Observable(_obs_name)
_process_tex = r"B_s\to "+_tex[l]+r"^+"+_tex[l]+r"^-"
_obs.set_description(r"Time-integrated branching ratio of $" + _process_tex + r"$.")
_obs.tex = r"$\overline{\text{BR}}(" + _process_tex + r")$"
_obs.add_taxonomy(_process_taxonomy + _process_tex + r"$")
Prediction(_obs_name, bqll_obs_function(br_timeint, 'Bs', l, l))
# Add the effective lifetimes for Bs
_obs_name = 'tau_'+l+l
_obs = Observable(_obs_name)
_obs.set_description(r"Effective lifetime for $"+ _process_tex + r"$.")
_obs.tex = r"$\tau_{B_s \to " +_tex[l] +_tex[l] + "}$"
_obs.add_taxonomy(_process_taxonomy + _process_tex + r"$")
if l=='e':
Prediction(_obs_name, lambda wc_obj, par: tau_ll_func(wc_obj, par, 'Bs', 'e'))
if l=='mu':
Prediction(_obs_name, lambda wc_obj, par: tau_ll_func(wc_obj, par, 'Bs', 'mu'))
if l=='tau':
Prediction(_obs_name, lambda wc_obj, par: tau_ll_func(wc_obj, par, 'Bs', 'tau'))
_obs_name = 'ADeltaGamma(Bs->'+l+l+')'
_obs = Observable(_obs_name)
_process_tex = r"B_s\to "+_tex[l]+r"^+"+_tex[l]+r"^-"
_obs.set_description(r"Mass-eigenstate rate asymmetry in $" + _process_tex + r"$.")
_obs.tex = r"$A_{\Delta\Gamma}(" + _process_tex + r")$"
_obs.add_taxonomy(_process_taxonomy + _process_tex + r"$")
Prediction(_obs_name, ADeltaGamma_func('Bs', l))
# For the B^0 decay, we take the prompt branching ratio since DeltaGamma is negligible
_obs_name = "BR(B0->"+l+l+")"
_obs = Observable(_obs_name)
_process_tex = r"B^0\to "+_tex[l]+r"^+"+_tex[l]+r"^-"
_obs.set_description(r"Branching ratio of $" + _process_tex + r"$")
_obs.tex = r"$\text{BR}(" + _process_tex + r")$"
_obs.add_taxonomy(_process_taxonomy + _process_tex + r"$")
Prediction(_obs_name, bqll_obs_function(br_inst, 'B0', l, l))
_tex_l = {'e': 'e', 'mu': r'\mu', 'tau': r'\tau'}
_tex_B = {'B0': r'\bar B^0', 'Bs': r'\bar B_s'}
_tex_lfv = {'emu': r'e^+\mu^-', 'mue': r'\mu^+e^-',
'taue': r'\tau^+e^-', 'etau': r'e^+\tau^-',
'taumu': r'\tau^+\mu^-', 'mutau': r'\mu^+\tau^-'}
for ll_1 in [('e','mu'), ('e','tau'), ('mu','tau'),]:
for B in ['Bs', 'B0']:
ll_2 = ll_1[::-1] # now if ll_1 is (e, mu), ll_2 is (mu, e)
for ll in [ll_1, ll_2]:
# the individual BRs
_obs_name = "BR("+B+"->"+''.join(ll)+")"
_obs = Observable(_obs_name)
_process_tex = _tex_B[B]+r"\to "+_tex_lfv[''.join(ll)]
_obs.set_description(r"Branching ratio of $" + _process_tex + r"$")
_obs.tex = r"$\text{BR}(" + _process_tex + r")$"
_obs.add_taxonomy(r'Process :: $b$ hadron decays :: FCNC decays :: $B\to\ell^+\ell^-$ :: $' + _process_tex + r'$')
Prediction(_obs_name, bqll_obs_function(br_inst, B, ll[0], ll[1]))
# the individual BR where ll' and l'l are added
_obs_name = "BR("+B+"->"+''.join(ll_1)+","+''.join(ll_2)+")"
_obs = Observable(_obs_name)
for ll in [ll_1, ll_1]:
_process_tex = _tex_B[B]+r"\to "+_tex_lfv[''.join(ll)]
_obs.add_taxonomy(r'Process :: $b$ hadron decays :: FCNC decays :: $B\to\ell^+\ell^-$ :: $' + _process_tex + r'$')
_process_tex = _tex_B[B]+r"\to "+_tex_l[ll_1[0]]+r"^\pm "+_tex_l[ll_1[1]]+r"^\mp"
_obs.tex = r"$\text{BR}(" + _process_tex + r")$"
_obs.set_description(r"Branching ratio of $" + _process_tex + r"$")
Prediction(_obs_name, bqll_obs_function_lsum(br_inst, B, ll_1[0], ll_1[1]))
|
py | b40c203a3e0b436f61564e616a361f4692e9a98c | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
import sys
from pathlib import Path
from typing import Set
from setuptools import find_packages, setup
def read_reqs(reqs_path: Path) -> Set[str]:
return {
r
for r in re.findall(
r"(^[^#\n-][\w\[,\]]+[-~>=<.\w]*)",
reqs_path.read_text(),
re.MULTILINE,
)
if isinstance(r, str)
}
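# Illustrative example (not part of the original file): for a requirements
# file containing the lines
#   fastapi[all]>=0.68.0
#   # a comment
#   -r _base.in
# read_reqs() would return {"fastapi[all]>=0.68.0"}; comment lines and
# "-r"/"-c" include lines are skipped by the leading [^#\n-] in the regex.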
CURRENT_DIR = Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent
NAME = "simcore-service-api-server"
VERSION = (CURRENT_DIR / "VERSION").read_text().strip()
AUTHORS = "Pedro Crespo-Valero (pcrespov)"
DESCRIPTION = "Platform's API Server for external clients"
README = (CURRENT_DIR / "README.md").read_text()
PROD_REQUIREMENTS = tuple(
read_reqs(CURRENT_DIR / "requirements" / "_base.txt")
| {
"simcore-models-library",
"simcore-postgres-database",
"simcore-sdk>=1.1.0",
"simcore-service-library[fastapi]",
"simcore-settings-library",
}
)
TEST_REQUIREMENTS = tuple(read_reqs(CURRENT_DIR / "requirements" / "_test.txt"))
SETUP = dict(
name=NAME,
version=VERSION,
author=AUTHORS,
description=DESCRIPTION,
long_description=README,
license="MIT license",
python_requires="~=3.8",
packages=find_packages(where="src"),
package_dir={
"": "src",
},
include_package_data=True,
install_requires=PROD_REQUIREMENTS,
test_suite="tests",
tests_require=TEST_REQUIREMENTS,
extras_require={"test": TEST_REQUIREMENTS},
entry_points={
"console_scripts": [
"simcore-service-api-server = simcore_service_api_server.cli:main",
],
},
)
if __name__ == "__main__":
setup(**SETUP)
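# Illustrative note (not part of the original file): with this layout the
# package is typically installed from the repository with, e.g.
#   pip install .            # runtime deps resolved from requirements/_base.txt
#   pip install ".[test]"    # additionally pulls requirements/_test.txt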
|
py | b40c21409340f6e4185715473c03801c8cd77f57 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: list_event.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='list_event.proto',
package='proxy',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x10list_event.proto\x12\x05proxy\x1a\x1cgoogle/protobuf/struct.proto\"f\n\x10ListEventRequest\x12\x11\n\tclusterId\x18\x01 \x01(\t\x12\x11\n\tnamespace\x18\x02 \x01(\t\x12\x15\n\rfieldSelector\x18\x03 \x01(\t\x12\x15\n\rlabelSelector\x18\x04 \x01(\t\"s\n\x18ListEventResponseWrapper\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0b\x63odeExplain\x18\x02 \x01(\t\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12%\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32\x17.google.protobuf.Structb\x06proto3')
,
dependencies=[google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,])
_LISTEVENTREQUEST = _descriptor.Descriptor(
name='ListEventRequest',
full_name='proxy.ListEventRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='clusterId', full_name='proxy.ListEventRequest.clusterId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='namespace', full_name='proxy.ListEventRequest.namespace', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='fieldSelector', full_name='proxy.ListEventRequest.fieldSelector', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='labelSelector', full_name='proxy.ListEventRequest.labelSelector', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=57,
serialized_end=159,
)
_LISTEVENTRESPONSEWRAPPER = _descriptor.Descriptor(
name='ListEventResponseWrapper',
full_name='proxy.ListEventResponseWrapper',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='proxy.ListEventResponseWrapper.code', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='codeExplain', full_name='proxy.ListEventResponseWrapper.codeExplain', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='proxy.ListEventResponseWrapper.error', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='proxy.ListEventResponseWrapper.data', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=161,
serialized_end=276,
)
_LISTEVENTRESPONSEWRAPPER.fields_by_name['data'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
DESCRIPTOR.message_types_by_name['ListEventRequest'] = _LISTEVENTREQUEST
DESCRIPTOR.message_types_by_name['ListEventResponseWrapper'] = _LISTEVENTRESPONSEWRAPPER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ListEventRequest = _reflection.GeneratedProtocolMessageType('ListEventRequest', (_message.Message,), {
'DESCRIPTOR' : _LISTEVENTREQUEST,
'__module__' : 'list_event_pb2'
# @@protoc_insertion_point(class_scope:proxy.ListEventRequest)
})
_sym_db.RegisterMessage(ListEventRequest)
ListEventResponseWrapper = _reflection.GeneratedProtocolMessageType('ListEventResponseWrapper', (_message.Message,), {
'DESCRIPTOR' : _LISTEVENTRESPONSEWRAPPER,
'__module__' : 'list_event_pb2'
# @@protoc_insertion_point(class_scope:proxy.ListEventResponseWrapper)
})
_sym_db.RegisterMessage(ListEventResponseWrapper)
# @@protoc_insertion_point(module_scope)
|
py | b40c2149ca0e86c92ec76209983036720d8d40c8 | # from __future__ import division
import stl_path
from trex.stl.api import *
import json
import argparse
import sys
import time
import math
import imp
from copy import deepcopy
class Rate:
def __init__(self, rate):
"""
        A class representing a rate.
:parameters:
rate: float
Rate in bps
"""
self.rate = float(rate)
def convert_percent_to_rate(self, percent_of_rate):
"""
Converts percent of the rate (defined in init) to an actual rate.
:parameters:
percent_of_rate: float
Percentage of rate that defined when class initialized.
:returns:
            float, rate in bps
"""
return (float(percent_of_rate) / 100.00) * self.rate
def convert_rate_to_percent_of_max_rate(self, rate_portion):
"""
Converts a rate in bps to percentage of rate that was defined in init.
:parameters:
rate_portion: float
Rate in bps
:returns:
float, percentage
"""
return (float(rate_portion) / float(self.rate)) * 100.00
def is_close(self, rate, rel_tol=0.05, abs_tol=1000000):
"""
Returns if a rate is close to the rate that was defined in init.
:parameters:
rate: float
Rate to compare to the rate that was defined upon initialization.
rel_tol: float
is a relative tolerance, it is multiplied by the greater of the magnitudes of the two arguments;
as the values get larger, so does the allowed difference between them while still considering them equal.
Default Value = 5%.
abs_tol: int
is an absolute tolerance that is applied as-is in all cases.
If the difference is less than either of those tolerances, the values are considered equal.
Default value = 1 Mbit
:returns:
            A boolean flag indicating if the given rate is close.
"""
return abs(self.rate-rate) <= max(rel_tol * max(abs(self.rate), abs(rate)), abs_tol)
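# Illustrative usage sketch (not part of the original file): a small, inert
# helper showing how the Rate class converts between bps and percentages,
# assuming a hypothetical 10 Gbps reference rate.
def _example_rate_usage():
    line_rate = Rate(10e9)                                          # 10 Gbps
    half = line_rate.convert_percent_to_rate(50)                    # -> 5e9 bps
    percent = line_rate.convert_rate_to_percent_of_max_rate(half)   # -> 50.0
    # |10e9 - 9.9995e9| = 5e5, well within rel_tol * 10e9 = 5e8, so "close"
    return percent, line_rate.is_close(9.9995e9)                    # -> (50.0, True)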
class NdrBenchConfig:
def __init__(self, ports, title='Title', cores=1, iteration_duration=20.00,
q_full_resolution=2.00, first_run_duration=20.00, pdr=0.1,
pdr_error=1.0, ndr_results=1, max_iterations=10,
max_latency=0, lat_tolerance=0, verbose=False, bi_dir=False,
plugin_file=None, tunables={}, opt_binary_search=False,
opt_binary_search_percentage=5, **kwargs):
"""
Configuration parameters for the benchmark.
:parameters:
ports: list
                List of ports to use. Ports at even positions in the list transmit and those at odd positions receive, unless traffic is bi-directional.
title: string
Title of the benchmark.
cores: int
Number of cores.
iteration_duration: float
Duration of the iteration.
q_full_resolution: float
Percent of queue full allowed.
first_run_duration: float
Duration of the first run.
pdr: float
Percentage of drop rate.
pdr_error: float
Percentage of allowed error in pdr.
ndr_results: int
                Number of points at which to report results, scaled linearly under the NDR. Valid range is [1-10].
max_iterations: int
Max number of iterations allowed.
max_latency: int
Max value of latency allowed in msec.
lat_tolerance: int
Percentage of latency packets allowed above max latency. Default value is 0 %. (Max Latency will be compared against total max)
verbose: boolean
Verbose mode
bi_dir: boolean
Bi-directional traffic if true, else uni-directional.
plugin_file: string
Path to the plugin file.
tunables: dict
Tunables for the plugin file.
opt_binary_search: boolean
Flag to indicate if to search using the optimized binary mode or not.
opt_binary_search_percentage: int
Percentage around assumed ndr allowed to search.
kwargs: dict
"""
self.bi_dir = bi_dir
# The sleep call divides the duration by 2
self.iteration_duration = (iteration_duration * 2)
self.q_full_resolution = q_full_resolution
self.first_run_duration = first_run_duration
self.pdr = pdr # desired percent of drop-rate. pdr = 0 is NO drop-rate
self.pdr_error = pdr_error
self.ndr_results = ndr_results
self.max_iterations = max_iterations
self.verbose = verbose
self.title = title
self.cores = cores
self.ports = list(ports)
self.transmit_ports = [self.ports[i] for i in range(0, len(self.ports), 2)]
self.receive_ports = [self.ports[i] for i in range(1, len(self.ports), 2)]
if self.bi_dir:
self.transmit_ports = self.ports
self.receive_ports = self.ports
self.transmit_core_masks = self.get_optimal_core_mask(num_of_cores=self.cores, num_of_ports=len(self.ports))
self.plugin_file = plugin_file
self.plugin_enabled = False
if self.plugin_file is not None:
self.plugin_enabled = True
self.plugin = self.load_plugin(self.plugin_file)
self.tunables = tunables
self.opt_binary_search = opt_binary_search
self.opt_binary_search_percentage = opt_binary_search_percentage
self.latency = False
self.max_latency = max_latency
self.lat_tolerance = lat_tolerance
self.max_latency_set = True if self.max_latency != 0 else False
def get_optimal_core_mask(self, num_of_cores, num_of_ports):
"""
Optimal core mask in case of bi directional traffic.
:parameters:
num_of_cores: int
Number of cores per dual port
num_of_ports: int
Number of ports
:returns:
List of masks per each port to offer optimal traffic.
"""
if not self.bi_dir:
return None
else:
# Half of the cores are given to the first port and the second half to second port.
# In case of an odd number of cores, one core (the middle one) is shared.
half_of_cores = int(math.ceil(num_of_cores / 2.00))
first_mask = (1 << num_of_cores) - (1 << (num_of_cores - half_of_cores))
second_mask = (1 << half_of_cores) - 1
return [first_mask, second_mask] * int(num_of_ports / 2)
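    # Illustrative example (not in the original): with num_of_cores=5 and
    # bi-directional traffic, half_of_cores = 3, so the first port gets mask
    # 0b11100 (the upper three cores) and the second gets 0b00111 (the lower
    # three), sharing the middle core between the two directions.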
@classmethod
def load_plugin(cls, plugin_file):
"""
Load dynamically a plugin module so that we can provide the user with pre and post iteration API.
:parameters:
plugin_file: string
Path to the plugin file.
"""
# check filename
if not os.path.isfile(plugin_file):
raise TRexError("File '{0}' does not exist".format(plugin_file))
basedir = os.path.dirname(plugin_file)
sys.path.insert(0, basedir)
try:
file = os.path.basename(plugin_file).split('.')[0]
module = __import__(file, globals(), locals(), [], 0)
imp.reload(module) # reload the update
plugin = module.register()
return plugin
except Exception as e:
a, b, tb = sys.exc_info()
x =''.join(traceback.format_list(traceback.extract_tb(tb)[1:])) + a.__name__ + ": " + str(b) + "\n"
summary = "\nPython Traceback follows:\n\n" + x
raise TRexError(summary)
finally:
sys.path.remove(basedir)
def config_to_dict(self):
"""
Create a dictionary of the configuration.
:returns:
Dictionary of configurations
"""
config_dict = {'iteration_duration': self.iteration_duration, 'q_full_resolution': self.q_full_resolution,
'first_run_duration': self.first_run_duration, 'pdr': self.pdr, 'pdr_error': self.pdr_error,
'ndr_results': self.ndr_results, 'max_iterations': self.max_iterations,
'ports': self.ports, 'cores': self.cores, 'verbose': self.verbose, 'title': self.title,
'bi_dir' : self.bi_dir, 'plugin_file': self.plugin_file, 'tunables': self.tunables,
'opt_binary_search': self.opt_binary_search,
'opt_binary_search_percentage': self.opt_binary_search_percentage}
return config_dict
class NdrBenchResults:
def __init__(self, config=None, results={}):
"""
NDR Bench Results
:parameters:
config: :class:`.NdrBenchConfig`
User configuration of parameters. Default value is none.
results: dict
A dictionary containing partial or full results from a run.
"""
self.stats = dict(results)
self.init_time = float(time.time())
self.config = config
self.stats['total_iterations'] = 0
def update(self, updated_dict):
"""
Updates the elapsed time, and the stats of the class with the parameter
:parameters:
updated_dict: dict
                Dictionary that we use as a reference to update our stats.
"""
updated_dict['Elapsed Time'] = (float(time.time()) - self.init_time)
self.stats.update(updated_dict)
def convert_rate(self, rate_bps, packet=False):
"""
Converts rate from bps or pps to a string.
:parameters:
rate_bps: float
                Value of the rate in bps or pps, depending on the packet parameter.
packet: boolean
If true then the rate is PPS, else bps.
:returns:
Rate as a formatted string.
"""
converted = float(rate_bps)
magnitude = 0
while converted > 1000.00:
magnitude += 1
converted /= 1000.00
converted = round(converted, 2)
if packet:
postfix = "PPS"
else:
postfix = "bps"
if magnitude == 0:
return str(converted) + postfix
elif magnitude == 1:
converted = round(converted)
return str(converted) + " K" + postfix
elif magnitude == 2:
return str(converted) + " M" + postfix
elif magnitude == 3:
return str(converted) + " G" + postfix
def print_latency(self):
"""
Prints the latency stats in case there are any.
"""
try:
for k in self.stats['latency'].keys():
print("Latency stats on port/pg_id :%d" % k)
print (" Average :%0.2f" % self.stats['latency'][k]['average'])
print (" Jitter :%0.2f" % self.stats['latency'][k]['jitter'])
print (" Total Max :%0.2f" % self.stats['latency'][k]['total_max'])
print (" Total Min :%0.2f" % self.stats['latency'][k]['total_min'])
print (" Histogram :%s " % self.stats['latency'][k]['histogram'])
except TypeError:
pass
def print_run_stats(self):
"""
Prints the T-REX stats after a run (transmission).
"""
if self.config.bi_dir:
traffic_dir = "bi-directional "
else:
traffic_dir = "uni-directional"
print("Elapsed Time :%0.2f seconds" % self.stats['Elapsed Time'])
print("BW Per Core :%0.2f Gbit/Sec @100%% per core" % float(self.stats['bw_per_core']))
print("TX PPS %s :%s" % (traffic_dir, self.convert_rate(float(self.stats['tx_pps']), True)))
print("RX PPS %s :%s" % (traffic_dir, self.convert_rate(float(self.stats['rx_pps']), True)))
print("TX Utilization :%0.2f %%" % self.stats['tx_util'])
print("TRex CPU :%0.2f %%" % self.stats['cpu_util'])
print("Total TX L1 %s :%s " % (traffic_dir, self.convert_rate(float(self.stats['total_tx_L1']))))
print("Total RX L1 %s :%s " % (traffic_dir, self.convert_rate(float(self.stats['total_rx_L1']))))
print("Total TX L2 %s :%s " % (traffic_dir, self.convert_rate(float(self.stats['tx_bps']))))
print("Total RX L2 %s :%s " % (traffic_dir, self.convert_rate(float(self.stats['rx_bps']))))
if 'rate_difference' in self.stats:
print("Distance from current Optimum :%0.2f %%" % self.stats['rate_difference'])
if self.config.latency:
self.print_latency()
def print_iteration_data(self):
"""
Prints data regarding the current iteration.
"""
if 'title' in self.stats:
print("\nTitle :%s" % self.stats['title'])
if 'iteration' in self.stats:
print("Iteration :{}".format(self.stats['iteration']))
print("Running Rate :%s" % self.convert_rate(float(self.stats['rate_tx_bps'])))
print("Running Rate (%% of max) :%0.2f %%" % self.stats['rate_p'])
print("Max Rate :%s " % self.convert_rate(float(self.stats['max_rate_bps'])))
print("Drop Rate :%0.5f %% of oPackets" % self.stats['drop_rate_percentage'])
print("Queue Full :%0.2f %% of oPackets" % self.stats['queue_full_percentage'])
if self.config.latency and self.config.max_latency_set:
print("Valid Latency :%s" % self.stats['valid_latency'])
self.print_run_stats()
def print_final(self):
"""
Prints the final data regarding where the NDR is found.
"""
if self.config.bi_dir:
traffic_dir = "bi-directional "
else:
traffic_dir = "uni-directional"
print("\nTitle :%s" % self.stats['title'])
if 'iteration' in self.stats:
print("Total Iterations :{}".format(self.stats['total_iterations']))
print("Max Rate :%s " % self.convert_rate(float(self.stats['max_rate_bps'])))
print("Optimal P-Drop Rate :%s" % self.convert_rate(float(self.stats['rate_tx_bps'])))
print("P-Drop Rate (%% of max) :%0.2f %%" % self.stats['rate_p'])
print("Drop Rate at Optimal P-Drop Rate :%0.5f %% of oPackets" % self.stats['drop_rate_percentage'])
print("Queue Full at Optimal P-Drop Rate :%0.2f %% of oPackets" % self.stats['queue_full_percentage'])
if self.config.latency and self.config.max_latency_set:
print("Valid Latency at Opt. P-Drop Rate :%s" % self.stats['valid_latency'])
self.print_run_stats()
for x in self.stats['ndr_points']:
print("NDR(s) %s :%s " % (traffic_dir, self.convert_rate(x)))
def to_json(self):
"""
Output the results to a json.
"""
total_output = {'results': self.stats, 'config': self.config.config_to_dict()}
return json.dumps(total_output)
@staticmethod
def print_state(state, high_bound, low_bound):
print("\n\nStatus :%s" % state)
if high_bound:
print("Interval :[%d,%d]" % (low_bound, high_bound))
def human_readable_dict(self):
"""
Return a human readable dictionary of the results.
"""
hu_dict = {'Queue Full [%]': str(round(self.stats['queue_full_percentage'], 2)) + "%",
'BW per core [Gbit/sec @100% per core]': str(
round(float(self.stats['bw_per_core']), 2)) + 'Gbit/Sec @100% per core',
'RX [MPPS]': self.convert_rate(float(self.stats['rx_pps']), True),
'TX [MPPS]': self.convert_rate(float(self.stats['tx_pps']), True),
'Line Utilization [%]': str(round(self.stats['tx_util'], 2)),
'CPU Utilization [%]': str(round(self.stats['cpu_util'],2)),
'Total TX L1': self.convert_rate(float(self.stats['total_tx_L1'])),
'Total RX L1': self.convert_rate(float(self.stats['total_rx_L1'])),
'TX [bps]': self.convert_rate(float(self.stats['tx_bps'])),
'RX [bps]': self.convert_rate(float(self.stats['rx_bps'])),
'OPT TX Rate [bps]': self.convert_rate(float(self.stats['rate_tx_bps'])),
'OPT RX Rate [bps]': self.convert_rate(float(self.stats['rate_rx_bps'])),
'OPT Rate (Multiplier) [%]': str(self.stats['rate_p']),
'Max Rate [bps]': self.convert_rate(float(self.stats['max_rate_bps'])),
'Drop Rate [%]': str(round(self.stats['drop_rate_percentage'], 2)),
'Elapsed Time [Sec]': str(round(self.stats['Elapsed Time'], 2)),
'NDR points': [self.convert_rate(float(x)) for x in self.stats['ndr_points']],
'Total Iterations': self.stats.get('iteration', None),
'Title': self.stats['title'],
'latency': dict(self.stats['latency']),
'valid_latency': self.stats['valid_latency']}
return hu_dict
class NdrBench:
def __init__(self, stl_client, config):
"""
NDR Bench
:parameters:
stl_client: :class:`.STLClient`
STL Client
config: :class:`.NdrBenchConfig`
Configurations and parameters for the benchmark
"""
self.config = config
self.results = NdrBenchResults(config)
self.stl_client = stl_client
self.results.update({'title': self.config.title})
self.opt_run_stats = {}
self.opt_run_stats['rate_p'] = 0 # percent
def plugin_pre_iteration(self, finding_max_rate, run_results=None, **kwargs):
"""
Plugin pre iteration wrapper in order to pass the plugin a deep copy of the run results,
since the user might change the actual run results. Consult the Plugin API for more information.
"""
self.config.plugin.pre_iteration(finding_max_rate, deepcopy(run_results), **kwargs)
def plugin_post_iteration(self, finding_max_rate, run_results, **kwargs):
"""
        Plugin post iteration wrapper in order to pass the plugin a deep copy of the run results,
since the user might change the actual run results. Consult the Plugin API for more information.
"""
return self.config.plugin.post_iteration(finding_max_rate, deepcopy(run_results), **kwargs)
def max_iterations_reached(self, current_run_stats, high_bound, low_bound):
if current_run_stats['iteration'] == self.config.max_iterations:
self.opt_run_stats.update(current_run_stats)
if self.config.verbose:
self.results.print_state("Max Iterations reached. Results might not be fully accurate", high_bound,
low_bound)
return True
return False
def calculate_max_latency_received(self, latency_data):
"""
Calculates the max latency of a run.
Call this only if latency tolerance is 0%.
:parameters:
latency_data: dict
The latency field of get_stats() of the :class:`.STLClient`
:returns:
The max latency in that run.
"""
max_latency = 0
for pg_id in latency_data.keys():
if type(pg_id) != int:
continue
max_latency = max(latency_data[pg_id]['latency']['total_max'], max_latency)
return max_latency
def calculate_latency_percentage(self, latency_data):
"""
Calculates the percentage of latency packets beyond the max latency parameter.
The percentage is calculated independently for each pg id and the maximal percentage is returned.
Call this only if latency tolerance is more than 0%.
:parameters:
latency_data: dict
The latency field of get_stats() of the :class:`.STLClient`
:returns:
            A float representing the percentage of latency packets above max latency.
"""
latency_percentage = 0
for pg_id in latency_data.keys():
if type(pg_id) != int:
continue
pg_id_histogram = latency_data[pg_id]['latency']['histogram']
total_packets = sum(pg_id_histogram.values())
packets_above_max_latency = sum(pg_id_histogram[index] for index in pg_id_histogram.keys() if index >= self.config.max_latency)
packets_above_max_latency_percentage = float(packets_above_max_latency) / total_packets * 100
latency_percentage = max(latency_percentage, packets_above_max_latency_percentage)
return latency_percentage
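    # Illustrative example (not in the original): with self.config.max_latency
    # set to 300 and a single pg_id whose histogram is {100: 90, 500: 10},
    # only the 10 packets in the 500 bucket are at or above the threshold,
    # so the method returns 10 / 100 * 100 = 10.0 (percent).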
def is_valid_latency(self, latency_data):
"""
Returns a boolean flag indicating if the latency of a run is valid.
In case latency was not set then it returns True.
:parameters:
latency_data: dict
The latency field of get_stats() of the :class:`.STLClient`
:returns:
            A boolean flag indicating whether the latency was valid.
"""
if self.config.latency and self.config.max_latency_set:
if self.config.lat_tolerance == 0:
return self.config.max_latency >= self.calculate_max_latency_received(latency_data)
else:
return self.config.lat_tolerance >= self.calculate_latency_percentage(latency_data)
else:
return True
def calculate_ndr_points(self):
"""
Calculates NDR points based on the ndr_results parameter in the :class:`.NdrBenchConfig` object.
"""
ndr_res = [self.results.stats['tx_bps']]
if self.config.ndr_results > 1:
            ndr_range = list(range(1, self.config.ndr_results + 1, 1))
            ndr_range.reverse()
ndr_res = [float((self.results.stats['tx_bps'])) / float(t) for t in ndr_range]
self.results.update({'ndr_points': ndr_res})
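    # Illustrative example (not in the original): with tx_bps = 9e9 and
    # ndr_results = 3, ndr_range becomes [3, 2, 1] after reversing, so the
    # reported points are [3e9, 4.5e9, 9e9], i.e. equally spaced fractions
    # of the NDR up to the NDR itself.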
def update_opt_stats(self, new_stats):
"""
Updates the optimal stats if the new_stats are better.
:parameters:
new_stats: dict
Statistics of some run.
"""
if new_stats['queue_full_percentage'] <= self.config.q_full_resolution and new_stats['drop_rate_percentage'] <= self.config.pdr \
and new_stats['valid_latency']:
if new_stats['rate_p'] > self.opt_run_stats['rate_p']:
self.opt_run_stats.update(new_stats)
def optimized_binary_search(self, lost_percentage, lost_allowed_percentage, stat_type):
"""
Performs the Optimized Binary search algorithm.
:parameters:
lost_percentage: float
Percentage of drops/queue full in the previous run.
lost_allowed_percentage: float
Percentage/Resolution of allowed drops/queue full.
stat_type: string
Type of statistic that had drops. stat_type should be either "drop_rate_percentage", "queue_full_percentage" or "valid_latency".
                Drops have priority over queue full, which has priority over valid_latency.
:returns:
            Dictionary of the optimal run stats found in the interval, based on the criteria defined in :class:`.NdrBenchConfig`.
"""
max_rate = Rate(self.results.stats['max_rate_bps'])
plugin_enabled = self.config.plugin_enabled
plugin_stop = False
current_run_stats = deepcopy(self.results.stats)
assumed_ndr_rate = Rate(max_rate.convert_percent_to_rate(100 - lost_percentage))
upper_bound = min(max_rate.rate, assumed_ndr_rate.convert_percent_to_rate(100 + self.config.opt_binary_search_percentage))
upper_bound_percentage_of_max_rate = max_rate.convert_rate_to_percent_of_max_rate(upper_bound)
if not max_rate.is_close(upper_bound):
# in case we are not close to the max rate, try with the upper bound of the assumed NDR
current_run_stats['rate_p'] = upper_bound_percentage_of_max_rate
current_run_stats['rate_tx_bps'] = upper_bound
current_run_stats['iteration'] = "Upper bound of assumed NDR"
if plugin_enabled:
self.plugin_pre_iteration(finding_max_rate=False, run_results=current_run_stats, **self.config.tunables)
if self.config.verbose:
self.results.print_state("Trying upper bound of assumed rate!", None, None)
current_run_stats.update(self.perf_run(upper_bound_percentage_of_max_rate))
self.results.update(current_run_stats)
self.update_opt_stats(current_run_stats)
if self.config.verbose:
self.results.print_iteration_data()
if plugin_enabled:
plugin_stop = self.plugin_post_iteration(finding_max_rate=False, run_results=current_run_stats, **self.config.tunables)
if plugin_stop:
if self.config.verbose:
self.results.print_state("Plugin decided to stop trying upper bound of assumed rate!", None, None)
return current_run_stats
if stat_type == "valid_latency":
upper_bound_valid = current_run_stats[stat_type]
else:
upper_bound_valid = current_run_stats[stat_type] <= lost_allowed_percentage
if upper_bound_valid:
if self.config.verbose:
self.results.print_state("Upper bound of assumed NDR drops are below desired rate :)",\
100, upper_bound_percentage_of_max_rate)
return self.perf_run_interval(100, upper_bound_percentage_of_max_rate)
# if you got here -> upper bound of assumed ndr has too many drops
lower_bound = assumed_ndr_rate.convert_percent_to_rate(100 - self.config.opt_binary_search_percentage)
lower_bound_percentage_of_max_rate = max_rate.convert_rate_to_percent_of_max_rate(lower_bound)
current_run_stats['rate_p'] = lower_bound_percentage_of_max_rate
current_run_stats['rate_tx_bps'] = lower_bound
current_run_stats['iteration'] = "Lower bound of assumed NDR"
if plugin_enabled:
self.plugin_pre_iteration(finding_max_rate=False, run_results=current_run_stats, **self.config.tunables)
if self.config.verbose:
self.results.print_state("Trying lower bound of assumed rate!", None, None)
current_run_stats.update(self.perf_run(lower_bound_percentage_of_max_rate))
self.results.update(current_run_stats)
self.update_opt_stats(current_run_stats)
if self.config.verbose:
self.results.print_iteration_data()
if plugin_enabled:
plugin_stop = self.plugin_post_iteration(finding_max_rate=False, run_results=current_run_stats, **self.config.tunables)
if plugin_stop:
if self.config.verbose:
self.results.print_state("Plugin decided to stop trying lower bound of assumed rate!", None, None)
return current_run_stats
if stat_type == "valid_latency":
lower_bound_valid = current_run_stats[stat_type]
else:
lower_bound_valid = current_run_stats[stat_type] <= lost_allowed_percentage
if lower_bound_valid:
self.results.print_state("Lower bound of assumed NDR drops are below desired rate :)",\
upper_bound_percentage_of_max_rate, lower_bound_percentage_of_max_rate)
return self.perf_run_interval(upper_bound_percentage_of_max_rate, lower_bound_percentage_of_max_rate)
# if you got here -> lower bound of assumed ndr has too many drops
else:
self.results.print_state("Lower bound of assumed NDR drops are beyond desired rate :(",\
lower_bound_percentage_of_max_rate, 0)
return self.perf_run_interval(lower_bound_percentage_of_max_rate, 0)
def perf_run(self, rate_mb_percent, run_max=False):
"""
Transmits traffic through the STL client object in the class.
:parameters:
rate_mb_percent: float
Rate of transmit in Mbit.
run_max: boolean
Flag indicating if we are transmitting the maximal rate.
:returns:
Dictionary with the results of the run.
"""
self.stl_client.stop(ports=self.config.ports)
# allow time for counters to settle from previous runs
time.sleep(15)
self.stl_client.clear_stats()
duration = 0
if run_max:
duration = self.config.first_run_duration
self.stl_client.start(ports=self.config.transmit_ports, mult="100%",
duration=duration, core_mask=self.config.transmit_core_masks)
rate_mb_percent = 100
else:
m_rate = Rate(self.results.stats['max_rate_bps'])
if rate_mb_percent == 0:
rate_mb_percent += 1
run_rate = m_rate.convert_percent_to_rate(rate_mb_percent)
duration = self.config.iteration_duration
self.stl_client.start(ports=self.config.transmit_ports, mult=str(run_rate) + "bps",
duration=duration, core_mask=self.config.transmit_core_masks)
time.sleep(duration / 2)
stats = self.stl_client.get_stats()
self.stl_client.stop(ports=self.config.ports)
opackets = stats['total']['opackets']
ipackets = stats['total']['ipackets']
lost_p = opackets - ipackets
lost_p_percentage = (float(lost_p) / float(opackets)) * 100.00
if lost_p_percentage < 0:
lost_p_percentage = 0
q_full_packets = stats['global']['queue_full']
q_full_percentage = float((q_full_packets / float(opackets)) * 100.000)
latency_stats = stats['latency']
if run_max and latency_stats:
# first run & latency -> update that we have latency traffic
self.config.latency = True
self.results.latency = True
latency_groups = {}
if self.config.latency:
for i in latency_stats.keys():
if type(i) != int:
continue
latency_dict = latency_stats[i]['latency']
latency_groups[i] = latency_dict
tx_bps = [stats[x]['tx_bps'] for x in self.config.transmit_ports]
rx_bps = [stats[x]['rx_bps'] for x in self.config.receive_ports]
tx_util_norm = sum([stats[x]['tx_util'] for x in self.config.transmit_ports]) / len(self.config.transmit_ports)
self.results.stats['total_iterations'] = self.results.stats['total_iterations'] + 1 if not run_max else self.results.stats['total_iterations']
run_results = {'queue_full_percentage': q_full_percentage, 'drop_rate_percentage': lost_p_percentage,
'valid_latency': self.is_valid_latency(latency_stats),
'rate_tx_bps': min(tx_bps),
'rate_rx_bps': min(rx_bps),
'tx_util': tx_util_norm, 'latency': latency_groups,
'cpu_util': stats['global']['cpu_util'], 'tx_pps': stats['total']['tx_pps'],
'bw_per_core': stats['global']['bw_per_core'], 'rx_pps': stats['total']['rx_pps'],
'rate_p': float(rate_mb_percent), 'total_tx_L1': stats['total']['tx_bps_L1'],
'total_rx_L1': stats['total']['rx_bps_L1'], 'tx_bps': stats['total']['tx_bps'],
'rx_bps': stats['total']['rx_bps'],
'total_iterations': self.results.stats['total_iterations']}
return run_results
def __find_max_rate(self):
"""
        Finds the maximal rate the hardware can transmit. This rate might exhibit drops or queue-full events.
:returns:
Dictionary with the results of the iteration, it also updates the :class:`.NdrBenchResults` object in the class.
"""
if self.config.verbose:
self.results.print_state("Calculation of max rate for DUT", None, None)
run_results = self.perf_run(100, True)
run_results['max_rate_bps'] = run_results['rate_tx_bps']
self.results.update(run_results)
if self.results.stats['drop_rate_percentage'] < 0:
self.results.stats['drop_rate_percentage'] = 0
if self.config.verbose:
if run_results['rate_tx_bps'] < run_results['rate_rx_bps']:
self.results.print_state("TX rate is slower than RX rate", None, None)
self.results.print_iteration_data()
return run_results
def perf_run_interval(self, high_bound, low_bound):
"""
        Searches for the NDR in a given interval bounded by the two parameters. Based on the number of iterations supplied in the :class:`.NdrBenchConfig`
        object of the class, it performs multiple transmission runs until one of the stopping conditions is met.
:parameters:
high_bound: float
In percents of maximal rate.
low_bound: float
In percents of maximal rate
:returns:
            Dictionary of the optimal run stats found in the interval, based on the criteria defined in :class:`.NdrBenchConfig`.
"""
current_run_results = NdrBenchResults(self.config)
current_run_stats = self.results.stats
max_rate = Rate(self.results.stats['max_rate_bps'])
current_run_stats['max_rate_bps'] = max_rate.rate
current_run_stats['iteration'] = 0
plugin_enabled = self.config.plugin_enabled
plugin_stop = False
while current_run_stats['iteration'] <= self.config.max_iterations:
current_run_stats['rate_p'] = float((high_bound + low_bound)) / 2.00
current_run_stats['rate_tx_bps'] = max_rate.convert_percent_to_rate(current_run_stats['rate_p'])
if plugin_enabled:
self.plugin_pre_iteration(finding_max_rate=False, run_results=current_run_stats, **self.config.tunables)
            current_run_stats.update(self.perf_run(current_run_stats['rate_p']))
if plugin_enabled:
plugin_stop = self.plugin_post_iteration(finding_max_rate=False, run_results=current_run_stats, **self.config.tunables)
lost_p_percentage = current_run_stats['drop_rate_percentage']
q_full_percentage = current_run_stats['queue_full_percentage']
valid_latency = current_run_stats['valid_latency']
current_run_stats['rate_difference'] = abs(current_run_stats['rate_p'] - self.opt_run_stats['rate_p'])
current_run_results.update(current_run_stats)
if self.config.verbose:
if q_full_percentage > self.config.q_full_resolution:
current_run_results.print_state("Queue Full Occurred", high_bound, low_bound)
elif lost_p_percentage > self.config.pdr:
current_run_results.print_state("Drops beyond Desired rate occurred", high_bound, low_bound)
elif not valid_latency:
current_run_results.print_state("Invalid Latency", high_bound, low_bound)
else:
current_run_results.print_state("Looking for NDR", high_bound, low_bound)
current_run_results.print_iteration_data()
if plugin_stop:
if self.config.verbose:
current_run_results.print_state("Plugin decided to stop after the iteration!", high_bound, low_bound)
self.update_opt_stats(current_run_stats)
break
if q_full_percentage <= self.config.q_full_resolution and lost_p_percentage <= self.config.pdr and valid_latency:
if current_run_stats['rate_p'] > self.opt_run_stats['rate_p']:
self.opt_run_stats.update(current_run_stats)
if current_run_stats['rate_difference'] <= self.config.pdr_error:
break
low_bound = current_run_stats['rate_p']
current_run_stats['iteration'] += 1
if self.max_iterations_reached(current_run_stats, high_bound, low_bound):
break
else:
continue
else:
break
else:
if current_run_stats['rate_difference'] <= self.config.pdr_error:
break
high_bound = current_run_stats['rate_p']
current_run_stats['iteration'] += 1
if self.max_iterations_reached(current_run_stats, high_bound, low_bound):
break
self.opt_run_stats['iteration'] = current_run_stats['iteration']
self.opt_run_stats['total_iterations'] = current_run_stats['total_iterations']
self.opt_run_stats['rate_difference'] = 0
return self.opt_run_stats
def find_ndr(self):
"""
Finds the NDR of the STL client that the class received. The function updates the :class:`.NdrBenchResults` object that
        :class:`.NdrBench` contains. The choice of algorithm and whether to apply a plugin are based on the object of type
        :class:`.NdrBenchConfig` that this class contains.
        This is the top-level function that finds the NDR and that the user can call.
"""
plugin_enabled = self.config.plugin_enabled
plugin_stop = False
if plugin_enabled:
self.plugin_pre_iteration(finding_max_rate=True, run_results=None, **self.config.tunables)
first_run_results = self.__find_max_rate()
self.update_opt_stats(first_run_results)
if plugin_enabled:
plugin_stop = self.plugin_post_iteration(finding_max_rate=True, run_results=first_run_results, **self.config.tunables)
if plugin_stop:
if self.config.verbose:
self.results.print_state("Plugin decided to stop after trying to find the max rate!", None, None)
self.calculate_ndr_points()
return
drop_percent = self.results.stats['drop_rate_percentage']
q_full_percent = self.results.stats['queue_full_percentage']
valid_latency = self.results.stats['valid_latency']
if drop_percent > self.config.pdr:
if self.config.opt_binary_search:
if self.config.verbose:
self.results.print_state("Drops happened, searching for NDR with optimized binary search", None, None)
self.results.update(self.optimized_binary_search(drop_percent, self.config.pdr, 'drop_rate_percentage'))
else:
if self.config.verbose:
self.results.print_state("Drops happened, searching for NDR", 100, 0)
self.results.update(self.perf_run_interval(100, 0))
elif q_full_percent >= self.config.q_full_resolution:
if self.config.opt_binary_search:
if self.config.verbose:
self.results.print_state("DUT Queue is Full, Looking for no queue full rate with optimized binary search", None, None)
self.results.update(self.optimized_binary_search(q_full_percent, self.config.q_full_resolution, 'queue_full_percentage'))
else:
if self.config.verbose:
self.results.print_state("DUT Queue is Full, Looking for no queue full rate", 100, 0)
self.results.update(self.perf_run_interval(100.00, 0.00))
elif not valid_latency:
if self.config.opt_binary_search:
if self.config.verbose:
self.results.print_state("Invalid Latency, looking for NDR latency with optimized binary search", None, None)
self.results.update(self.optimized_binary_search(q_full_percent, 0, 'valid_latency'))
else:
if self.config.verbose:
self.results.print_state("Invalid Latency, Looking for latency below tolerance", None, None)
self.results.update(self.perf_run_interval(100.00, 0.00))
else:
if self.config.verbose:
self.results.print_state("NDR found at max rate", None, None)
self.calculate_ndr_points()
if __name__ == '__main__':
print('Designed to be imported, not as stand-alone script.')
|
py | b40c214c00e091a3ba96ebbfcdfe8b88dd3013f7 | import requests
from lxml.html import fromstring
def Get_Headers():
url = 'https://www.whatismybrowser.com/guides/the-latest-user-agent/chrome'
response = requests.get(url)
parser = fromstring(response.text)
Headers = parser.xpath('//tbody//td[2]/span/text()')
return Headers[0]
print(Get_Headers()) |
py | b40c221e063a5f55aed65e3693226dad19d8efa9 | from sqlalchemy import and_
from .db import models
from openstates.metadata import lookup
def jurisdiction_filter(j: str, *, jid_field):
if not j:
# an empty object can't equal anything
return False
# check either by Jurisdiction.name or a specified field's jurisdiction_id
if len(j) == 2:
try:
return jid_field == lookup(abbr=j).jurisdiction_id
except KeyError:
return and_(
models.Jurisdiction.name == j,
models.Jurisdiction.classification == "state",
)
elif j.startswith("ocd-jurisdiction"):
return jid_field == j
else:
return and_(
models.Jurisdiction.name == j, models.Jurisdiction.classification == "state"
)
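# Illustrative examples (not part of the original file); models.Bill is
# assumed here purely for illustration:
#   jurisdiction_filter("nc", jid_field=models.Bill.jurisdiction_id)
#       -> compares jid_field against the looked-up jurisdiction_id,
#          typically "ocd-jurisdiction/country:us/state:nc/government"
#   jurisdiction_filter("ocd-jurisdiction/...", jid_field=...)
#       -> direct equality against jid_field
#   jurisdiction_filter("North Carolina", jid_field=...)
#       -> falls back to Jurisdiction.name + classification == "state"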
|
py | b40c229a64ed32dd0b706bf75cdefa9a4b10414b | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
# Test numpy bindings
import numpy as np
from ....testing import *
from .. import (c_types, fff_type, npy_type, copy_vector,
                pass_vector, pass_matrix, pass_array,
                pass_vector_via_iterator, copy_via_iterators,
                sum_via_iterators)
MAX_TEST_SIZE = 30
def random_shape(size):
"""
Output random dimensions in the range (2, MAX_TEST_SIZE)
"""
aux = np.random.randint(MAX_TEST_SIZE-1, size=size) + 2
if size==1:
return aux
else:
return tuple(aux)
#
# Test type conversions
#
def test_type_conversions_to_fff():
# use np.sctypes for testing numpy types, np.typeDict.values
# contains a lot of duplicates. There are 140 values in
# np.typeDict, but only 21 unique numpy types. But only 11 fff
# types in c_types.
for type_key in np.sctypes:
for npy_t in np.sctypes[type_key]:
t, nbytes = fff_type(np.dtype(npy_t))
if not t == 'unknown type':
yield assert_equal, nbytes, np.dtype(npy_t).itemsize
def test_type_conversions_in_C():
for t in c_types:
npy_t, nbytes = npy_type(t)
yield assert_equal, npy_t, t
#
# Test bindings
#
def _test_copy_vector(x):
# use fff
y0 = copy_vector(x, 0)
# use numpy
y1 = copy_vector(x, 1)
yield assert_equal, y0, x
yield assert_equal, y1, x
def test_copy_vector_contiguous():
    x = (1000*np.random.rand(int(1e6))).astype('int32')
_test_copy_vector(x)
def test_copy_vector_strided():
    x0 = (1000*np.random.rand(int(2e6))).astype('int32')
x = x0[::2]
_test_copy_vector(x)
"""
def test_copy_vector_int32():
x = np.random.rand(1e6).astype('int32')
print('int32 buffer copy')
_test_copy_vector(x)
def test_copy_vector_uint8():
x = np.random.rand(1e6).astype('uint8')
print('uint8 buffer copy')
_test_copy_vector(x)
"""
def _test_pass_vector(x):
y = pass_vector(x)
assert_equal(y, x)
def test_pass_vector():
x = np.random.rand(random_shape(1))-.5
_test_pass_vector(x)
def test_pass_vector_int32():
x = (1000*(np.random.rand(random_shape(1))-.5)).astype('int32')
_test_pass_vector(x)
def test_pass_vector_uint8():
x = (256*(np.random.rand(random_shape(1)))).astype('uint8')
_test_pass_vector(x)
def _test_pass_matrix(x):
y = pass_matrix(x)
yield assert_equal, y, x
y = pass_matrix(x.T)
yield assert_equal, y, x.T
def test_pass_matrix():
d0, d1 = random_shape(2)
x = np.random.rand(d0, d1)-.5
_test_pass_matrix(x)
def test_pass_matrix_int32():
d0, d1 = random_shape(2)
x = (1000*(np.random.rand(d0, d1)-.5)).astype('int32')
_test_pass_matrix(x)
def test_pass_matrix_uint8():
d0, d1 = random_shape(2)
x = (256*(np.random.rand(d0, d1))).astype('uint8')
_test_pass_matrix(x)
def _test_pass_array(x):
y = pass_array(x)
yield assert_equal, y, x
y = pass_array(x.T)
yield assert_equal, y, x.T
def test_pass_array():
d0, d1, d2, d3 = random_shape(4)
x = np.random.rand(d0, d1, d2, d3)-.5
_test_pass_array(x)
def test_pass_array_int32():
d0, d1, d2, d3 = random_shape(4)
x = (1000*(np.random.rand(d0, d1, d2, d3)-.5)).astype('int32')
_test_pass_array(x)
def test_pass_array_uint8():
d0, d1, d2, d3 = random_shape(4)
x = (256*(np.random.rand(d0, d1, d2, d3))).astype('uint8')
_test_pass_array(x)
#
# Multi-iterator testing
#
def _test_pass_vector_via_iterator(X, pos=0):
"""
Assume X.ndim == 2
"""
# axis == 0
x = pass_vector_via_iterator(X, axis=0, niters=pos)
yield assert_equal, x, X[:, pos]
# axis == 1
x = pass_vector_via_iterator(X, axis=1, niters=pos)
yield assert_equal, x, X[pos, :]
def test_pass_vector_via_iterator():
d0, d1 = random_shape(2)
X = np.random.rand(d0, d1)-.5
_test_pass_vector_via_iterator(X)
def test_pass_vector_via_iterator_int32():
d0, d1 = random_shape(2)
X = (1000*(np.random.rand(d0, d1)-.5)).astype('int32')
_test_pass_vector_via_iterator(X)
def test_pass_vector_via_iterator_uint8():
d0, d1 = random_shape(2)
X = (100*(np.random.rand(d0, d1))).astype('uint8')
_test_pass_vector_via_iterator(X)
def test_pass_vector_via_iterator_shift():
d0, d1 = random_shape(2)
X = np.random.rand(d0, d1)-.5
_test_pass_vector_via_iterator(X, pos=1)
def test_pass_vector_via_iterator_shift_int32():
d0, d1 = random_shape(2)
X = (1000*(np.random.rand(d0, d1)-.5)).astype('int32')
_test_pass_vector_via_iterator(X, pos=1)
def test_pass_vector_via_iterator_shift_uint8():
d0, d1 = random_shape(2)
X = (100*(np.random.rand(d0, d1))).astype('uint8')
_test_pass_vector_via_iterator(X, pos=1)
def _test_copy_via_iterators(Y):
for axis in range(4):
Z = copy_via_iterators(Y, axis)
yield assert_equal, Z, Y
ZT = copy_via_iterators(Y.T, axis)
yield assert_equal, ZT, Y.T
def test_copy_via_iterators():
d0, d1, d2, d3 = random_shape(4)
Y = np.random.rand(d0, d1, d2, d3)
_test_copy_via_iterators(Y)
def test_copy_via_iterators_int32():
d0, d1, d2, d3 = random_shape(4)
Y = (1000*(np.random.rand(d0, d1, d2, d3)-.5)).astype('int32')
_test_copy_via_iterators(Y)
def test_copy_via_iterators_uint8():
d0, d1, d2, d3 = random_shape(4)
Y = (256*(np.random.rand(d0, d1, d2, d3))).astype('uint8')
_test_copy_via_iterators(Y)
def _test_sum_via_iterators(Y):
for axis in range(4):
Z = sum_via_iterators(Y, axis)
yield assert_almost_equal, Z, Y.sum(axis)
ZT = sum_via_iterators(Y.T, axis)
yield assert_almost_equal, ZT, Y.T.sum(axis)
def test_sum_via_iterators():
d0, d1, d2, d3 = random_shape(4)
Y = np.random.rand(d0, d1, d2, d3)
_test_sum_via_iterators(Y)
def test_sum_via_iterators_int32():
d0, d1, d2, d3 = random_shape(4)
Y = (1000*(np.random.rand(d0, d1, d2, d3)-.5)).astype('int32')
_test_sum_via_iterators(Y)
def test_sum_via_iterators_uint8():
d0, d1, d2, d3 = random_shape(4)
Y = (256*(np.random.rand(d0, d1, d2, d3))).astype('uint8')
_test_sum_via_iterators(Y)
if __name__ == "__main__":
import nose
nose.run(argv=['', __file__])
|
py | b40c231501f77fb9ca6e4fda135a869e8b74eeef | # Copyright 2021 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""UI Color Styles for ConsoleApp."""
import logging
from dataclasses import dataclass
from prompt_toolkit.styles import Style
from prompt_toolkit.filters import has_focus
_LOG = logging.getLogger(__package__)
@dataclass
class HighContrastDarkColors:
# pylint: disable=too-many-instance-attributes
default_bg = '#100f10'
default_fg = '#ffffff'
dim_bg = '#000000'
dim_fg = '#e0e6f0'
button_active_bg = '#4e4e4e'
button_inactive_bg = '#323232'
active_bg = '#323232'
active_fg = '#f4f4f4'
inactive_bg = '#1e1e1e'
inactive_fg = '#bfc0c4'
line_highlight_bg = '#2f2f2f'
dialog_bg = '#3c3c3c'
red_accent = '#ffc0bf'
orange_accent = '#f5ca80'
yellow_accent = '#d2e580'
green_accent = '#88ef88'
cyan_accent = '#60e7e0'
blue_accent = '#92d9ff'
purple_accent = '#cfcaff'
magenta_accent = '#ffb8ff'
@dataclass
class DarkColors:
# pylint: disable=too-many-instance-attributes
default_bg = '#2e2e2e'
default_fg = '#bbc2cf'
dim_bg = '#262626'
dim_fg = '#dfdfdf'
button_active_bg = '#626262'
button_inactive_bg = '#525252'
active_bg = '#525252'
active_fg = '#dfdfdf'
inactive_bg = '#3f3f3f'
inactive_fg = '#bfbfbf'
line_highlight_bg = '#1e1e1e'
dialog_bg = '#3c3c3c'
red_accent = '#ff6c6b'
orange_accent = '#da8548'
yellow_accent = '#ffcc66'
green_accent = '#98be65'
cyan_accent = '#66cccc'
blue_accent = '#6699cc'
purple_accent = '#a9a1e1'
magenta_accent = '#c678dd'
@dataclass
class NordColors:
# pylint: disable=too-many-instance-attributes
default_bg = '#2e3440'
default_fg = '#eceff4'
dim_bg = '#272c36'
dim_fg = '#e5e9f0'
button_active_bg = '#4c566a'
button_inactive_bg = '#434c5e'
active_bg = '#434c5e'
active_fg = '#eceff4'
inactive_bg = '#373e4c'
inactive_fg = '#d8dee9'
line_highlight_bg = '#191c25'
dialog_bg = '#2c333f'
red_accent = '#bf616a'
orange_accent = '#d08770'
yellow_accent = '#ebcb8b'
green_accent = '#a3be8c'
cyan_accent = '#88c0d0'
blue_accent = '#81a1c1'
purple_accent = '#a9a1e1'
magenta_accent = '#b48ead'
@dataclass
class NordLightColors:
# pylint: disable=too-many-instance-attributes
default_bg = '#e5e9f0'
default_fg = '#3b4252'
dim_bg = '#d8dee9'
dim_fg = '#2e3440'
button_active_bg = '#aebacf'
button_inactive_bg = '#b8c5db'
active_bg = '#b8c5db'
active_fg = '#3b4252'
inactive_bg = '#c2d0e7'
inactive_fg = '#60728c'
line_highlight_bg = '#f0f4fc'
dialog_bg = '#d8dee9'
red_accent = '#99324b'
orange_accent = '#ac4426'
yellow_accent = '#9a7500'
green_accent = '#4f894c'
cyan_accent = '#398eac'
blue_accent = '#3b6ea8'
purple_accent = '#842879'
magenta_accent = '#97365b'
@dataclass
class MoonlightColors:
# pylint: disable=too-many-instance-attributes
default_bg = '#212337'
default_fg = '#c8d3f5'
dim_bg = '#191a2a'
dim_fg = '#b4c2f0'
button_active_bg = '#444a73'
button_inactive_bg = '#2f334d'
active_bg = '#2f334d'
active_fg = '#c8d3f5'
inactive_bg = '#222436'
inactive_fg = '#a9b8e8'
line_highlight_bg = '#383e5c'
dialog_bg = '#1e2030'
red_accent = '#d95468'
orange_accent = '#d98e48'
yellow_accent = '#8bd49c'
green_accent = '#ebbf83'
cyan_accent = '#70e1e8'
blue_accent = '#5ec4ff'
purple_accent = '#b62d65'
magenta_accent = '#e27e8d'
_THEME_NAME_MAPPING = {
'moonlight': MoonlightColors(),
'nord': NordColors(),
'nord-light': NordLightColors(),
'dark': DarkColors(),
'high-contrast-dark': HighContrastDarkColors(),
} # yapf: disable
def get_theme_colors(theme_name=''):
theme = _THEME_NAME_MAPPING.get(theme_name, DarkColors())
return theme
def generate_styles(theme_name='dark'):
"""Return prompt_toolkit styles for the given theme name."""
# Use DarkColors() if name not found.
theme = _THEME_NAME_MAPPING.get(theme_name, DarkColors())
pw_console_styles = {
# Default text and background.
'default': 'bg:{} {}'.format(theme.default_bg, theme.default_fg),
# Dim inactive panes.
'pane_inactive': 'bg:{} {}'.format(theme.dim_bg, theme.dim_fg),
# Use default for active panes.
'pane_active': 'bg:{} {}'.format(theme.default_bg, theme.default_fg),
# Brighten active pane toolbars.
'toolbar_active': 'bg:{} {}'.format(theme.active_bg, theme.active_fg),
'toolbar_inactive': 'bg:{} {}'.format(theme.inactive_bg,
theme.inactive_fg),
# Dimmer toolbar.
'toolbar_dim_active': 'bg:{} {}'.format(theme.active_bg,
theme.active_fg),
'toolbar_dim_inactive': 'bg:{} {}'.format(theme.default_bg,
theme.inactive_fg),
# Used for pane titles
'toolbar_accent': theme.cyan_accent,
'toolbar-button-decoration': '{}'.format(theme.cyan_accent),
'toolbar-setting-active': 'bg:{} {}'.format(
theme.green_accent,
theme.active_bg,
),
'toolbar-button-active': 'bg:{}'.format(theme.button_active_bg),
'toolbar-button-inactive': 'bg:{}'.format(theme.button_inactive_bg),
# prompt_toolkit scrollbar styles:
'scrollbar.background': 'bg:{} {}'.format(theme.default_bg,
theme.default_fg),
# Scrollbar handle, bg is the bar color.
'scrollbar.button': 'bg:{} {}'.format(theme.purple_accent,
theme.default_bg),
'scrollbar.arrow': 'bg:{} {}'.format(theme.default_bg,
theme.blue_accent),
# Unstyled scrollbar classes:
# 'scrollbar.start'
# 'scrollbar.end'
# Top menu bar styles
'menu-bar': 'bg:{} {}'.format(theme.inactive_bg, theme.inactive_fg),
'menu-bar.selected-item': 'bg:{} {}'.format(theme.blue_accent,
theme.inactive_bg),
# Menu background
'menu': 'bg:{} {}'.format(theme.dialog_bg, theme.dim_fg),
# Menu item separator
'menu-border': theme.magenta_accent,
# Top bar logo + keyboard shortcuts
'logo': '{} bold'.format(theme.magenta_accent),
'keybind': '{} bold'.format(theme.purple_accent),
'keyhelp': theme.dim_fg,
# Help window styles
'help_window_content': 'bg:{} {}'.format(theme.dialog_bg, theme.dim_fg),
'frame.border': 'bg:{} {}'.format(theme.dialog_bg, theme.purple_accent),
'pane_indicator_active': 'bg:{}'.format(theme.magenta_accent),
'pane_indicator_inactive': 'bg:{}'.format(theme.inactive_bg),
'pane_title_active': '{} bold'.format(theme.magenta_accent),
'pane_title_inactive': '{}'.format(theme.purple_accent),
'window-tab-active': 'bg:{} {}'.format(theme.active_bg,
theme.cyan_accent),
'window-tab-inactive': 'bg:{} {}'.format(theme.inactive_bg,
theme.inactive_fg),
'pane_separator': 'bg:{} {}'.format(theme.default_bg,
theme.purple_accent),
# Search matches
'search': 'bg:{} {}'.format(theme.cyan_accent, theme.default_bg),
'search.current': 'bg:{} {}'.format(theme.cyan_accent,
theme.default_bg),
# Highlighted line styles
'selected-log-line': 'bg:{}'.format(theme.line_highlight_bg),
'cursor-line': 'bg:{} nounderline'.format(theme.line_highlight_bg),
# Messages like 'Window too small'
'warning-text': 'bg:{} {}'.format(theme.default_bg,
theme.yellow_accent),
'log-time': 'bg:{} {}'.format(theme.default_fg,
theme.default_bg),
# Apply foreground only for level and column values. This way the text
# can inherit the background color of the parent window pane or line
# selection.
'log-level-{}'.format(logging.CRITICAL): '{} bold'.format(
theme.red_accent),
'log-level-{}'.format(logging.ERROR): '{}'.format(theme.red_accent),
'log-level-{}'.format(logging.WARNING): '{}'.format(
theme.yellow_accent),
'log-level-{}'.format(logging.INFO): '{}'.format(theme.purple_accent),
'log-level-{}'.format(logging.DEBUG): '{}'.format(theme.blue_accent),
'log-table-column-0': '{}'.format(theme.cyan_accent),
'log-table-column-1': '{}'.format(theme.green_accent),
'log-table-column-2': '{}'.format(theme.yellow_accent),
'log-table-column-3': '{}'.format(theme.magenta_accent),
'log-table-column-4': '{}'.format(theme.purple_accent),
'log-table-column-5': '{}'.format(theme.blue_accent),
'log-table-column-6': '{}'.format(theme.orange_accent),
'log-table-column-7': '{}'.format(theme.red_accent),
'search-bar-title': 'bg:{} {}'.format(theme.cyan_accent,
theme.default_bg),
'search-bar-setting': '{}'.format(theme.cyan_accent),
'search-bar': 'bg:{}'.format(theme.inactive_bg),
'filter-bar-title': 'bg:{} {}'.format(theme.red_accent,
theme.default_bg),
'filter-bar-setting': '{}'.format(theme.cyan_accent),
'filter-bar-delete': '{}'.format(theme.red_accent),
'filter-bar': 'bg:{}'.format(theme.inactive_bg),
'filter-bar-delimiter': '{}'.format(theme.purple_accent),
# Progress Bar Styles
# Entire set of ProgressBars - no title is used in pw_console
'title': '',
# Actual bar title
'label': 'bold',
'percentage': '{}'.format(theme.green_accent),
'bar': '{}'.format(theme.magenta_accent),
# Filled part of the bar
'bar-a': '{} bold'.format(theme.cyan_accent),
# End of current progress
'bar-b': '{} bold'.format(theme.purple_accent),
# Empty part of the bar
'bar-c': '',
# current/total counts
'current': '{}'.format(theme.cyan_accent),
'total': '{}'.format(theme.cyan_accent),
'time-elapsed': '{}'.format(theme.purple_accent),
'time-left': '{}'.format(theme.magenta_accent),
# Named theme color classes for use in user plugins.
'theme-fg-red': '{}'.format(theme.red_accent),
'theme-fg-orange': '{}'.format(theme.orange_accent),
'theme-fg-yellow': '{}'.format(theme.yellow_accent),
'theme-fg-green': '{}'.format(theme.green_accent),
'theme-fg-cyan': '{}'.format(theme.cyan_accent),
'theme-fg-blue': '{}'.format(theme.blue_accent),
'theme-fg-purple': '{}'.format(theme.purple_accent),
'theme-fg-magenta': '{}'.format(theme.magenta_accent),
'theme-bg-red': 'bg:{}'.format(theme.red_accent),
'theme-bg-orange': 'bg:{}'.format(theme.orange_accent),
'theme-bg-yellow': 'bg:{}'.format(theme.yellow_accent),
'theme-bg-green': 'bg:{}'.format(theme.green_accent),
'theme-bg-cyan': 'bg:{}'.format(theme.cyan_accent),
'theme-bg-blue': 'bg:{}'.format(theme.blue_accent),
'theme-bg-purple': 'bg:{}'.format(theme.purple_accent),
'theme-bg-magenta': 'bg:{}'.format(theme.magenta_accent),
'theme-bg-active': 'bg:{}'.format(theme.active_bg),
'theme-fg-active': '{}'.format(theme.active_fg),
'theme-bg-inactive': 'bg:{}'.format(theme.inactive_bg),
'theme-fg-inactive': '{}'.format(theme.inactive_fg),
'theme-fg-default': '{}'.format(theme.default_fg),
'theme-bg-default': 'bg:{}'.format(theme.default_bg),
'theme-fg-dim': '{}'.format(theme.dim_fg),
'theme-bg-dim': 'bg:{}'.format(theme.dim_bg),
'theme-bg-dialog': 'bg:{}'.format(theme.dialog_bg),
'theme-bg-line-highlight': 'bg:{}'.format(theme.line_highlight_bg),
'theme-bg-button-active': 'bg:{}'.format(theme.button_active_bg),
'theme-bg-button-inactive': 'bg:{}'.format(theme.button_inactive_bg),
} # yapf: disable
return Style.from_dict(pw_console_styles)
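# Example (added for illustration, not part of the original module): the
# 'theme-fg-*' and 'theme-bg-*' classes defined above are intended for user
# plugins. A plugin can reference them in prompt_toolkit formatted text, e.g.:
def _example_plugin_status_fragments(status_ok: bool):
    """Hypothetical helper returning fragments styled with the named theme classes."""
    if status_ok:
        return [('class:theme-fg-green', ' OK ')]
    return [('class:theme-bg-red', ' FAIL ')]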
def get_toolbar_style(pt_container, dim=False) -> str:
"""Return the style class for a toolbar if pt_container is in focus."""
if has_focus(pt_container.__pt_container__())():
return 'class:toolbar_dim_active' if dim else 'class:toolbar_active'
return 'class:toolbar_dim_inactive' if dim else 'class:toolbar_inactive'
def get_button_style(pt_container) -> str:
"""Return the style class for a toolbar if pt_container is in focus."""
if has_focus(pt_container.__pt_container__())():
return 'class:toolbar-button-active'
return 'class:toolbar-button-inactive'
def get_pane_style(pt_container) -> str:
"""Return the style class for a pane title if pt_container is in focus."""
if has_focus(pt_container.__pt_container__())():
return 'class:pane_active'
return 'class:pane_inactive'
def get_pane_indicator(pt_container,
title,
mouse_handler=None,
hide_indicator=False):
"""Return formatted text for a pane indicator and title."""
if mouse_handler:
inactive_indicator = ('class:pane_indicator_inactive', ' ',
mouse_handler)
active_indicator = ('class:pane_indicator_active', ' ', mouse_handler)
inactive_title = ('class:pane_title_inactive', title, mouse_handler)
active_title = ('class:pane_title_active', title, mouse_handler)
else:
inactive_indicator = ('class:pane_indicator_inactive', ' ')
active_indicator = ('class:pane_indicator_active', ' ')
inactive_title = ('class:pane_title_inactive', title)
active_title = ('class:pane_title_active', title)
fragments = []
if has_focus(pt_container.__pt_container__())():
if not hide_indicator:
fragments.append(active_indicator)
fragments.append(active_title)
else:
if not hide_indicator:
fragments.append(inactive_indicator)
fragments.append(inactive_title)
return fragments
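# Illustrative sketch (not part of the original module): a pane implementation
# might combine the helpers above roughly like this. `pane` is assumed to expose
# __pt_container__() the way prompt_toolkit containers do.
def _example_pane_title_bar(pane, title=' Logs '):
    """Hypothetical one-line title bar window for `pane`."""
    from prompt_toolkit.layout.containers import Window
    from prompt_toolkit.layout.controls import FormattedTextControl
    return Window(
        content=FormattedTextControl(lambda: get_pane_indicator(pane, title)),
        style=lambda: get_pane_style(pane),
        height=1)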
|
py | b40c23b5ad8fc07a3e58d9bc78d7d486cea525c1 | #
# This file is part of Evergreen. See the NOTICE for more information.
#
try:
from time import monotonic as _time
except ImportError:
from time import time as _time
import _jega
from jega.event import Event
from jega.locks import Condition, Lock
# from jega.log import log
FIRST_COMPLETED = 'FIRST_COMPLETED'
FIRST_EXCEPTION = 'FIRST_EXCEPTION'
ALL_COMPLETED = 'ALL_COMPLETED'
_AS_COMPLETED = '_AS_COMPLETED'
# Possible future states (for internal use by the futures package).
PENDING = 'PENDING'
RUNNING = 'RUNNING'
# The future was cancelled by the user...
CANCELLED = 'CANCELLED'
# ...and _Waiter.add_cancelled() was called by a worker.
CANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED'
FINISHED = 'FINISHED'
_FUTURE_STATES = [
PENDING,
RUNNING,
CANCELLED,
CANCELLED_AND_NOTIFIED,
FINISHED
]
_STATE_TO_DESCRIPTION_MAP = {
PENDING: "pending",
RUNNING: "running",
CANCELLED: "cancelled",
CANCELLED_AND_NOTIFIED: "cancelled",
FINISHED: "finished"
}
class Error(Exception):
"""Base class for all future-related exceptions."""
pass
class CancelledError(Error):
"""The Future was cancelled."""
pass
class TimeoutError(Error):
"""The operation exceeded the given deadline."""
pass
class _Waiter(object):
"""Provides the event that wait() and as_completed() block on."""
def __init__(self):
self.event = Event()
self.finished_futures = []
def add_result(self, future):
self.finished_futures.append(future)
def add_exception(self, future):
self.finished_futures.append(future)
def add_cancelled(self, future):
self.finished_futures.append(future)
class _AsCompletedWaiter(_Waiter):
"""Used by as_completed()."""
def __init__(self):
super(_AsCompletedWaiter, self).__init__()
self.lock = Lock()
def add_result(self, future):
with self.lock:
super(_AsCompletedWaiter, self).add_result(future)
self.event.set()
def add_exception(self, future):
with self.lock:
super(_AsCompletedWaiter, self).add_exception(future)
self.event.set()
def add_cancelled(self, future):
with self.lock:
super(_AsCompletedWaiter, self).add_cancelled(future)
self.event.set()
class _FirstCompletedWaiter(_Waiter):
"""Used by wait(return_when=FIRST_COMPLETED)."""
def add_result(self, future):
super(_FirstCompletedWaiter, self).add_result(future)
self.event.set()
def add_exception(self, future):
super(_FirstCompletedWaiter, self).add_exception(future)
self.event.set()
def add_cancelled(self, future):
super(_FirstCompletedWaiter, self).add_cancelled(future)
self.event.set()
class _AllCompletedWaiter(_Waiter):
"""Used by wait(return_when=FIRST_EXCEPTION and ALL_COMPLETED)."""
def __init__(self, num_pending_calls, stop_on_exception):
self.num_pending_calls = num_pending_calls
self.stop_on_exception = stop_on_exception
self.lock = Lock()
super(_AllCompletedWaiter, self).__init__()
def _decrement_pending_calls(self):
with self.lock:
self.num_pending_calls -= 1
if not self.num_pending_calls:
self.event.set()
def add_result(self, future):
super(_AllCompletedWaiter, self).add_result(future)
self._decrement_pending_calls()
def add_exception(self, future):
super(_AllCompletedWaiter, self).add_exception(future)
if self.stop_on_exception:
self.event.set()
else:
self._decrement_pending_calls()
def add_cancelled(self, future):
super(_AllCompletedWaiter, self).add_cancelled(future)
self._decrement_pending_calls()
class _AcquireFutures(object):
"""A context manager that does an ordered acquire of Future conditions."""
def __init__(self, futures):
self.futures = sorted(futures, key=id)
def __enter__(self):
for future in self.futures:
future._condition.acquire()
def __exit__(self, *args):
for future in self.futures:
future._condition.release()
def _create_and_install_waiters(fs, return_when):
if return_when == _AS_COMPLETED:
waiter = _AsCompletedWaiter()
elif return_when == FIRST_COMPLETED:
waiter = _FirstCompletedWaiter()
else:
pending_count = sum(f._state not in [CANCELLED_AND_NOTIFIED, FINISHED] for f in fs)
if return_when == FIRST_EXCEPTION:
waiter = _AllCompletedWaiter(pending_count, stop_on_exception=True)
elif return_when == ALL_COMPLETED:
waiter = _AllCompletedWaiter(pending_count, stop_on_exception=False)
else:
raise ValueError("Invalid return condition: %r" % return_when)
for f in fs:
f._waiters.append(waiter)
return waiter
def as_completed(fs, timeout=None):
"""An iterator over the given futures that yields each as it completes.
Args:
fs: The sequence of Futures (possibly created by different Executors) to
iterate over.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
Returns:
An iterator that yields the given Futures as they complete (finished or
cancelled).
Raises:
TimeoutError: If the entire result iterator could not be generated
before the given timeout.
"""
if timeout is not None:
end_time = timeout + _time()
with _AcquireFutures(fs):
finished = set(f for f in fs if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
pending = set(fs) - finished
waiter = _create_and_install_waiters(fs, _AS_COMPLETED)
try:
for future in finished:
yield future
while pending:
if timeout is None:
wait_timeout = None
else:
wait_timeout = end_time - _time()
if wait_timeout < 0:
raise TimeoutError('%d (of %d) futures unfinished' % (len(pending), len(fs)))
waiter.event.wait(wait_timeout)
with waiter.lock:
finished = waiter.finished_futures
waiter.finished_futures = []
waiter.event.clear()
for future in finished:
yield future
pending.remove(future)
finally:
for f in fs:
f._waiters.remove(waiter)
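# Usage sketch (illustrative; this module defines no concrete Executor, so the
# futures are assumed to come from one): drain results as they become ready,
# bounding the total wait.
def _example_drain(futures, timeout=None):
    results = []
    for future in as_completed(futures, timeout=timeout):
        if not future.cancelled:          # `cancelled` is a property on this Future
            results.append(future.result())  # re-raises if the callable failed
    return results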
def wait(fs, timeout=None, return_when=ALL_COMPLETED):
"""Wait for the futures in the given sequence to complete.
Args:
fs: The sequence of Futures (possibly created by different Executors) to
wait upon.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
return_when: Indicates when this function should return. The options
are:
FIRST_COMPLETED - Return when any future finishes or is
cancelled.
FIRST_EXCEPTION - Return when any future finishes by raising an
exception. If no future raises an exception
then it is equivalent to ALL_COMPLETED.
ALL_COMPLETED - Return when all futures finish or are cancelled.
Returns:
        A 2-tuple of sets. The first set contains the futures that completed
        (finished or were cancelled) before the wait completed. The second set
        contains the uncompleted futures.
"""
with _AcquireFutures(fs):
done = set(f for f in fs if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
not_done = set(fs) - done
if (return_when == FIRST_COMPLETED) and done:
return (done, not_done)
elif (return_when == FIRST_EXCEPTION) and done:
            if any(f for f in done if not f.cancelled and f.exception() is not None):
return (done, not_done)
if len(done) == len(fs):
return (done, not_done)
waiter = _create_and_install_waiters(fs, return_when)
waiter.event.wait(timeout)
for f in fs:
f._waiters.remove(waiter)
done.update(waiter.finished_futures)
return (done, set(fs) - done)
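# Usage sketch (illustrative): fail fast on the first exception, then cancel
# whatever has not started yet. Only PENDING futures can actually be cancelled.
def _example_fail_fast(futures, timeout=None):
    done, not_done = wait(futures, timeout=timeout, return_when=FIRST_EXCEPTION)
    for future in not_done:
        future.cancel()
    return done, not_done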
class Future(object):
def __init__(self):
self._condition = Condition()
self._state = PENDING
self._used = False
self._result = None
self._exception = None
self._callbacks = []
self._waiters = []
def __repr__(self):
with self._condition:
if self._state == FINISHED:
if self._exception:
text = 'raised %s' % self._exception.__class__.__name__
else:
text = 'returned %s' % self._result.__class__.__name__
return '<%s at %s state=%s %s>' % (
self.__class__.__name__,
hex(id(self)),
_STATE_TO_DESCRIPTION_MAP[self._state],
text)
return '<%s at %s state=%s>' % (
self.__class__.__name__,
hex(id(self)),
_STATE_TO_DESCRIPTION_MAP[self._state])
def cancel(self):
with self._condition:
if self._state in (RUNNING, FINISHED):
return False
elif self._state in (CANCELLED, CANCELLED_AND_NOTIFIED):
return True
self._state = CANCELLED
self._condition.notify_all()
self._run_callbacks()
return True
@property
def cancelled(self):
with self._condition:
return self._state in (CANCELLED, CANCELLED_AND_NOTIFIED)
@property
def running(self):
with self._condition:
return self._state == RUNNING
@property
def done(self):
with self._condition:
return self._state in (CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED)
    def result(self, timeout=None):
with self._condition:
if self._state in (CANCELLED, CANCELLED_AND_NOTIFIED):
raise CancelledError()
elif self._state == FINISHED:
return self._get_result()
self._condition.wait(timeout)
if self._state in (CANCELLED, CANCELLED_AND_NOTIFIED):
raise CancelledError()
elif self._state == FINISHED:
return self._get_result()
else:
raise TimeoutError()
get = result
def exception(self, timeout=None):
with self._condition:
if self._state in (CANCELLED, CANCELLED_AND_NOTIFIED):
raise CancelledError()
elif self._state == FINISHED:
return self._exception
self._condition.wait(timeout)
if self._state in (CANCELLED, CANCELLED_AND_NOTIFIED):
raise CancelledError()
elif self._state == FINISHED:
return self._exception
else:
raise TimeoutError()
def add_done_callback(self, func):
with self._condition:
if self._state not in (CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED):
self._callbacks.append(func)
return
func(self)
# Internal
def _get_result(self):
if self._used:
            raise RuntimeError('the result of a future can only be fetched once')
try:
if self._exception:
raise self._exception
else:
return self._result
finally:
self._used = True
self._exception = self._result = None
def _run_callbacks(self):
for cb in self._callbacks:
try:
cb(self)
except Exception:
pass
# log.exception('exception calling callback for %r', self)
self._callbacks = []
def set_running_or_notify_cancel(self):
with self._condition:
if self._state == CANCELLED:
self._state = CANCELLED_AND_NOTIFIED
for waiter in self._waiters:
waiter.add_cancelled(self)
# self._condition.notify_all() is not necessary because
# self.cancel() triggers a notification.
return False
elif self._state == PENDING:
self._state = RUNNING
return True
else:
raise RuntimeError('Future in unexpected state: %s' % self._state)
def set_result(self, result):
with self._condition:
self._result = result
self._state = FINISHED
for waiter in self._waiters:
waiter.add_result(self)
self._condition.notify_all()
self._run_callbacks()
def set_exception(self, exception):
with self._condition:
self._exception = exception
self._state = FINISHED
for waiter in self._waiters:
waiter.add_exception(self)
self._condition.notify_all()
self._run_callbacks()
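# Example (illustrative only): the producer side of the Future state machine
# above, driven inline rather than by an executor. Constructing a Future needs
# jega's Condition to be creatable in the current context, which is assumed here.
def _example_future_round_trip():
    future = Future()
    future.add_done_callback(lambda f: None)      # registered before completion
    if future.set_running_or_notify_cancel():     # PENDING -> RUNNING
        future.set_result(42)                     # FINISHED; wakes waiters, runs callbacks
    done, not_done = wait([future])               # returns immediately: already FINISHED
    assert future in done and not not_done
    return future.result()                        # 42; a second fetch raises RuntimeError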
class Executor(object):
def submit(self, fn, *args, **kwargs):
"""Submits a callable to be executed with the given arguments.
Schedules the callable to be executed as fn(*args, **kwargs) and returns
a Future instance representing the execution of the callable.
Returns:
A Future representing the given call.
"""
raise NotImplementedError
def map(self, fn, *iterables, **kwargs):
"""Returns a iterator equivalent to map(fn, iter).
Args:
            fn: A callable that will take as many arguments as there are
passed iterables.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
Returns:
An iterator equivalent to: map(func, *iterables) but the calls may
be evaluated out-of-order.
Raises:
TimeoutError: If the entire result iterator could not be generated
before the given timeout.
Exception: If fn(*args) raises for any values.
"""
timeout = kwargs.get('timeout')
if timeout is not None:
end_time = timeout + _time()
fs = [self.submit(fn, *args) for args in zip(*iterables)]
# Yield must be hidden in closure so that the futures are submitted
# before the first iterator value is required.
def result_iterator():
try:
for future in fs:
if timeout is None:
yield future.get()
else:
yield future.get(end_time - _time())
finally:
for future in fs:
future.cancel()
return result_iterator()
def shutdown(self, wait=True):
"""Clean-up the resources associated with the Executor.
It is safe to call this method several times. Otherwise, no other
methods can be called after this one.
Args:
wait: If True then shutdown will not return until all running
futures have finished executing and the resources used by the
executor have been reclaimed.
"""
pass
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.shutdown(wait=True)
return False
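# Illustrative sketch (not part of the original module): the smallest possible
# Executor subclass, running each callable synchronously inside submit(). A real
# executor would hand the future off to a task or thread pool instead.
class _ExampleImmediateExecutor(Executor):
    def submit(self, fn, *args, **kwargs):
        future = Future()
        if future.set_running_or_notify_cancel():
            try:
                future.set_result(fn(*args, **kwargs))
            except BaseException as exc:
                future.set_exception(exc)
        return future
# e.g. list(_ExampleImmediateExecutor().map(str, range(3))) == ['0', '1', '2']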
class InfiniteHandler(object):
"""Helper class to create a handler that keeps the loop alive until
it's cancelled.
"""
def __init__(self, loop):
self.loop = loop
self._cb()
def _cb(self):
self.handler = self.loop.call_later(24*3600, self._cb)
def cancel(self):
if self.handler:
self.handler.cancel()
self.handler = self.loop = None
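# Usage sketch (illustrative): `loop` is any object exposing
# call_later(delay, callback), e.g. a running jega loop; how that loop is
# obtained is outside the scope of this example.
def _example_keep_loop_alive(loop, do_work):
    keep_alive = InfiniteHandler(loop)    # reschedules itself every 24h
    try:
        do_work()
    finally:
        keep_alive.cancel()               # drop the handler and the loop reference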
|
py | b40c23bf2cb44ec6a28c823a9afdcbae54929668 | '''
<table class="ee-notebook-buttons" align="left">
<td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/Reducer/stats_of_columns.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
<td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/Reducer/stats_of_columns.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
<td><a target="_blank" href="https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=Reducer/stats_of_columns.ipynb"><img width=58px src="https://mybinder.org/static/images/logo_social.png" />Run in binder</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/Reducer/stats_of_columns.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
</table>
'''
# %%
'''
## Install Earth Engine API
Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geehydro](https://github.com/giswqs/geehydro). The **geehydro** Python package builds on the [folium](https://github.com/python-visualization/folium) package and implements several methods for displaying Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, `Map.centerObject()`, and `Map.setOptions()`.
The magic command `%%capture` can be used to hide output from a specific cell. Uncomment these lines if you are running this notebook for the first time.
'''
# %%
# %%capture
# !pip install earthengine-api
# !pip install geehydro
# %%
'''
Import libraries
'''
# %%
import ee
import folium
import geehydro
# %%
'''
Authenticate and initialize Earth Engine API. You only need to authenticate the Earth Engine API once. Uncomment the line `ee.Authenticate()`
if you are running this notebook for the first time or if you are getting an authentication error.
'''
# %%
# ee.Authenticate()
ee.Initialize()
# %%
'''
## Create an interactive map
This step creates an interactive map using [folium](https://github.com/python-visualization/folium). The default basemap is OpenStreetMap. Additional basemaps can be added using the `Map.setOptions()` function.
The optional basemaps can be `ROADMAP`, `SATELLITE`, `HYBRID`, `TERRAIN`, or `ESRI`.
'''
# %%
Map = folium.Map(location=[40, -100], zoom_start=4)
Map.setOptions('HYBRID')
# %%
'''
## Add Earth Engine Python script
'''
# %%
# Load US census data as a FeatureCollection.
census = ee.FeatureCollection('TIGER/2010/Blocks')
# Filter the collection to include only Benton County, OR.
benton = census.filter(
ee.Filter.And(
ee.Filter.eq('statefp10', '41'),
ee.Filter.eq('countyfp10', '003')
)
)
# Display Benton County census blocks.
Map.setCenter(-123.27, 44.57, 13)
Map.addLayer(ee.Image().paint(benton, 1, 3), {}, 'Benton County, OR')
# Compute sums of the specified properties.
properties = ['pop10', 'housing10']
sums = benton \
.filter(ee.Filter.notNull(properties)) \
.reduceColumns(**{
'reducer': ee.Reducer.sum().repeat(2),
'selectors': properties
})
# Print the resultant Dictionary.
print(sums.getInfo())
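# %%
'''
The repeated sum reducer returns a dictionary whose `sum` entry holds one value per
selector, in the same order as `properties` (an assumption worth checking against the
printed output above). A small illustrative sketch of pairing each property with its total:
'''

# %%
totals = ee.List(sums.get('sum'))
print(ee.Dictionary.fromLists(properties, totals).getInfo())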
# %%
'''
## Display Earth Engine data layers
'''
# %%
Map.setControlVisibility(layerControl=True, fullscreenControl=True, latLngPopup=True)
Map |