ext | sha | content
---|---|---|
py | 1a4519e3f231fa81b6f05146214db1788212d7fc | # Copyright 2011 Nicolas Maupu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## Package acm.funcitonal
class curry(object):
    '''Class to currify a function: pre-bind positional and keyword arguments,
    then supply the remaining ones at call time.'''
    def __init__(*args, **kw):
        # 'self' is deliberately taken from args[0] so that a keyword argument
        # named 'self' can still be curried without clashing with the parameter.
        self = args[0]
        self.fn, self.args, self.kw = (args[1], args[2:], kw)
    def __call__(self, *args, **kw):
        if kw and self.kw:
            d = self.kw.copy()
            d.update(kw)
        else:
            d = kw or self.kw
        return self.fn(*(self.args + args), **d)
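# Minimal usage sketch (added for illustration; `double` and `greet` are
# hypothetical names, not part of the original module):
if __name__ == '__main__':
    from operator import mul
    double = curry(mul, 2)
    assert double(21) == 42
    greet = curry('{}, {}!'.format, 'Hello')
    print(greet('world'))  # -> Hello, world!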
|
py | 1a451b07f6aed4d98771d9ddb311250b88daef8a | #!/usr/bin/env python
from collections import defaultdict, namedtuple
import sys
import re
import os
import random
from itertools import chain
import extractor_util as util
import data_util as dutil
import config
# This defines the Row object that we read in to the extractor
parser = util.RowParser([
('doc_id', 'text'),
('section_id', 'text'),
('sent_id', 'int'),
('words', 'text[]'),
('lemmas', 'text[]'),
('poses', 'text[]'),
('ners', 'text[]'),
('pa_abbrevs', 'text[]'),
('pheno_entities', 'text[]'),
('pa_section_ids', 'text[]'),
('pa_sent_ids', 'int[]')])
ExpandedRow = namedtuple('ExpandedRow', [
'doc_id',
'section_id',
'sent_id',
'words',
'lemmas',
'poses',
'ners',
'pa_abbrev',
'pheno_entity',
'pa_section_id',
'pa_sent_id'])
# This defines the output Mention object
Mention = namedtuple('Mention', [
'dd_id',
'doc_id',
'section_id',
'sent_id',
'wordidxs',
'mention_id',
'mention_supertype',
'mention_subtype',
'entity',
'words',
'is_correct'])
def expand_array_rows(array_row):
for i, pa_abbrev in enumerate(array_row.pa_abbrevs):
row = ExpandedRow(doc_id = array_row.doc_id,
section_id = array_row.section_id,
sent_id = array_row.sent_id,
words = array_row.words,
lemmas = array_row.lemmas,
poses = array_row.poses,
ners = array_row.ners,
pa_abbrev = pa_abbrev,
pheno_entity = array_row.pheno_entities[i],
pa_section_id = array_row.pa_section_ids[i],
pa_sent_id = array_row.pa_sent_ids[i])
yield row
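# Illustrative sketch (hypothetical values): an input row with
#   pa_abbrevs     = ['FXS', 'ID']
#   pheno_entities = ['HP:0001249', 'HP:0001250']
#   pa_section_ids = ['s1', 's2']
#   pa_sent_ids    = [3, 7]
# yields two ExpandedRow objects, one per abbreviation, with the sentence-level
# fields (doc_id, words, lemmas, ...) repeated on each.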
### CANDIDATE EXTRACTION ###
SR = config.PHENO_ACRONYMS['SR']
def extract_candidate_mentions(row):
"""Extracts candidate phenotype mentions from an input row object"""
mentions = []
for i, word in enumerate(row.words):
if word == row.pa_abbrev:
mention_id = '%s_%s_%d_%d' % \
(row.doc_id, \
row.section_id, \
row.sent_id, \
i)
subtype = '%s_%s_%d_%s' % (row.doc_id, row.pa_section_id, row.pa_sent_id, row.pa_abbrev)
m = Mention(None, row.doc_id, row.section_id, row.sent_id,
[i], mention_id, "ABBREV", subtype, row.pheno_entity,
[word], True)
mentions.append(m)
return mentions
def generate_rand_negatives(row, pos, neg):
  """Generate random negative mentions from other all-caps tokens in the sentence,
  capped at the number of positive mentions seen so far."""
  mentions = []
  for i, word in enumerate(row.words):
    if neg >= pos:
      break
    if word == row.pa_abbrev:
      continue
    if word.isupper() and word.strip() != '-LRB-' and word.strip() != '-RRB-':
      mention_id = '%s_%s_%d_%d' % (row.doc_id, row.section_id, row.sent_id, i)
      subtype = '%s_%s_%d_%s' % (row.doc_id, row.pa_section_id, row.pa_sent_id, row.pa_abbrev)
      m = Mention(None, row.doc_id, row.section_id, row.sent_id,
                  [i], mention_id, 'ABBREV_RAND_NEG', subtype, None, [word], False)
      mentions.append(m)
      neg += 1
  return mentions
if __name__ == '__main__':
onto_path = lambda p : '%s/onto/%s' % (os.environ['GDD_HOME'], p)
pos = 0
neg = 0
# Read TSV data in as Row objects
for line in sys.stdin:
array_row = parser.parse_tsv_row(line)
abbrevs = set()
for row in expand_array_rows(array_row):
if row.pa_abbrev in abbrevs:
continue
abbrevs.add(row.pa_abbrev)
# Skip row if sentence doesn't contain a verb, contains URL, etc.
if util.skip_row(row):
continue
# find candidate mentions & supervise
mentions = extract_candidate_mentions(row)
pos += len(mentions)
if SR.get('rand-negs'):
negs = generate_rand_negatives(row, pos, neg)
neg += len(negs)
mentions.extend(negs)
# print output
for mention in mentions:
util.print_tsv_output(mention)
|
py | 1a451b8c50443979193370f1e5bd9b62d75fb36f | import tempfile
import mmcv
import pytest
import torch
from mmcv.runner import obj_from_dict
from mmedit.models import build_model
from mmedit.models.backbones import TDANNet
from mmedit.models.losses import MSELoss
def test_tdan_model():
model_cfg = dict(
type='TDAN',
generator=dict(
type='TDANNet',
in_channels=3,
mid_channels=64,
out_channels=3,
num_blocks_before_align=5,
num_blocks_after_align=10),
pixel_loss=dict(type='MSELoss', loss_weight=1.0, reduction='sum'),
lq_pixel_loss=dict(type='MSELoss', loss_weight=1.0, reduction='sum'),
)
train_cfg = None
test_cfg = None
# build restorer
restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
# test attributes
assert restorer.__class__.__name__ == 'TDAN'
assert isinstance(restorer.generator, TDANNet)
assert isinstance(restorer.pixel_loss, MSELoss)
# prepare data
inputs = torch.rand(1, 5, 3, 8, 8)
targets = torch.rand(1, 3, 32, 32)
# test train_step and forward_test (gpu)
if torch.cuda.is_available():
restorer = restorer.cuda()
data_batch = {'lq': inputs.cuda(), 'gt': targets.cuda()}
# prepare optimizer
optim_cfg = dict(type='Adam', lr=2e-4, betas=(0.9, 0.999))
optimizer = {
'generator':
obj_from_dict(
optim_cfg, torch.optim,
dict(params=getattr(restorer, 'generator').parameters()))
}
# train_step
outputs = restorer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
assert isinstance(outputs['log_vars']['loss_pix'], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['lq'], data_batch['lq'].cpu())
assert torch.equal(outputs['results']['gt'], data_batch['gt'].cpu())
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (1, 3, 32, 32)
# test forward_dummy
with torch.no_grad():
output = restorer.forward_dummy(data_batch['lq'])
assert isinstance(output, tuple)
assert torch.is_tensor(output[0])
assert output[0].size() == (1, 3, 32, 32)
assert torch.is_tensor(output[1])
assert output[1].size() == (1, 5, 3, 8, 8)
# forward_test
with torch.no_grad():
outputs = restorer(**data_batch, test_mode=True)
assert torch.equal(outputs['lq'], data_batch['lq'].cpu())
assert torch.equal(outputs['gt'], data_batch['gt'].cpu())
assert torch.is_tensor(outputs['output'])
assert outputs['output'].size() == (1, 3, 32, 32)
with torch.no_grad():
outputs = restorer(inputs.cuda(), test_mode=True)
assert torch.equal(outputs['lq'], data_batch['lq'].cpu())
assert torch.is_tensor(outputs['output'])
assert outputs['output'].size() == (1, 3, 32, 32)
# test with metric and save image
if torch.cuda.is_available():
train_cfg = mmcv.ConfigDict(tsa_iter=1)
test_cfg = dict(metrics=('PSNR', 'SSIM'), crop_border=0)
test_cfg = mmcv.Config(test_cfg)
data_batch = {
'lq': inputs.cuda(),
'gt': targets.cuda(),
'meta': [{
'gt_path': 'fake_path/fake_name.png',
'key': '000/00000000'
}]
}
restorer = build_model(
model_cfg, train_cfg=train_cfg, test_cfg=test_cfg).cuda()
with pytest.raises(AssertionError):
# evaluation with metrics must have gt images
restorer(lq=inputs.cuda(), test_mode=True)
with tempfile.TemporaryDirectory() as tmpdir:
outputs = restorer(
**data_batch,
test_mode=True,
save_image=True,
save_path=tmpdir,
iteration=None)
assert isinstance(outputs, dict)
assert isinstance(outputs['eval_result'], dict)
assert isinstance(outputs['eval_result']['PSNR'], float)
assert isinstance(outputs['eval_result']['SSIM'], float)
outputs = restorer(
**data_batch,
test_mode=True,
save_image=True,
save_path=tmpdir,
iteration=100)
assert isinstance(outputs, dict)
assert isinstance(outputs['eval_result'], dict)
assert isinstance(outputs['eval_result']['PSNR'], float)
assert isinstance(outputs['eval_result']['SSIM'], float)
with pytest.raises(ValueError):
# iteration should be number or None
restorer(
**data_batch,
test_mode=True,
save_image=True,
save_path=tmpdir,
iteration='100')
|
py | 1a451b956c7715175539edaddd879b5a20a1d52e | #!/usr/bin/env python3
import torch
from .kernel import Kernel
from ..lazy import delazify
from ..constraints import Positive
class ScaleKernel(Kernel):
r"""
Decorates an existing kernel object with an output scale, i.e.
.. math::
\begin{equation*}
K_{\text{scaled}} = \theta_\text{scale} K_{\text{orig}}
\end{equation*}
where :math:`\theta_\text{scale}` is the `outputscale` parameter.
In batch-mode (i.e. when :math:`x_1` and :math:`x_2` are batches of input matrices), each
batch of data can have its own `outputscale` parameter by setting the `batch_shape`
keyword argument to the appropriate number of batches.
.. note::
The :attr:`outputscale` parameter is parameterized on a log scale to constrain it to be positive.
You can set a prior on this parameter using the :attr:`outputscale_prior` argument.
Args:
:attr:`base_kernel` (Kernel):
The base kernel to be scaled.
:attr:`batch_shape` (int, optional):
Set this if you want a separate outputscale for each batch of input data. It should be `b`
if :attr:`x1` is a `b x n x d` tensor. Default: `torch.Size([])`
:attr:`outputscale_prior` (Prior, optional): Set this if you want to apply a prior to the outputscale
parameter. Default: `None`
:attr:`outputscale_constraint` (Constraint, optional): Set this if you want to apply a constraint to the
outputscale parameter. Default: `Positive`.
Attributes:
:attr:`base_kernel` (Kernel):
The kernel module to be scaled.
:attr:`outputscale` (Tensor):
The outputscale parameter. Size/shape of parameter depends on the :attr:`batch_shape` arguments.
Example:
>>> x = torch.randn(10, 5)
>>> base_covar_module = gpytorch.kernels.RBFKernel()
>>> scaled_covar_module = gpytorch.kernels.ScaleKernel(base_covar_module)
>>> covar = scaled_covar_module(x) # Output: LazyTensor of size (10 x 10)
"""
def __init__(self, base_kernel, outputscale_prior=None, outputscale_constraint=None, **kwargs):
super(ScaleKernel, self).__init__(has_lengthscale=False, **kwargs)
if outputscale_constraint is None:
outputscale_constraint = Positive()
self.base_kernel = base_kernel
outputscale = torch.zeros(*self.batch_shape) if len(self.batch_shape) else torch.tensor(0.)
self.register_parameter(name="raw_outputscale", parameter=torch.nn.Parameter(outputscale))
if outputscale_prior is not None:
self.register_prior(
"outputscale_prior", outputscale_prior, lambda: self.outputscale, lambda v: self._set_outputscale(v)
)
self.register_constraint("raw_outputscale", outputscale_constraint)
@property
def outputscale(self):
return self.raw_outputscale_constraint.transform(self.raw_outputscale)
@outputscale.setter
def outputscale(self, value):
self._set_outputscale(value)
def _set_outputscale(self, value):
if not torch.is_tensor(value):
value = torch.as_tensor(value).to(self.raw_outputscale)
self.initialize(raw_outputscale=self.raw_outputscale_constraint.inverse_transform(value))
def forward(self, x1, x2, last_dim_is_batch=False, diag=False, **params):
orig_output = self.base_kernel.forward(x1, x2, diag=diag, last_dim_is_batch=last_dim_is_batch, **params)
outputscales = self.outputscale
if last_dim_is_batch:
outputscales = outputscales.unsqueeze(-1)
if diag:
outputscales = outputscales.unsqueeze(-1)
return delazify(orig_output) * outputscales
else:
outputscales = outputscales.view(*outputscales.shape, 1, 1)
return orig_output.mul(outputscales)
def num_outputs_per_input(self, x1, x2):
return self.base_kernel.num_outputs_per_input(x1, x2)
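# Illustrative usage sketch (complements the docstring example; names and values
# below are hypothetical):
# >>> kernel = ScaleKernel(gpytorch.kernels.RBFKernel(), batch_shape=torch.Size([2]))
# >>> kernel.outputscale = torch.tensor([1.5, 0.5])  # one positive scale per batch,
# ...                                                # stored via the Positive constraint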
|
py | 1a451bc77775025a894f3fe041080741a386f2c1 | """
Run FragileX data synapse detections
"""
import os
import sys
import pandas as pd
from at_synapse_detection import dataAccess as da
from at_synapse_detection import SynapseDetection as syn
from at_synapse_detection import antibodyAnalysis as aa
from at_synapse_detection import SynapseAnalysis as sa
import socket
import multiprocessing as mp
import copy
import numpy as np
def run_list_of_queries(mouse_number, mouse_project_str, sheet_name):
"""
run queries in a parallel manner
Parameters
-----------------
mouse_number : int
mouse_project_str : str
sheet_name : str
"""
output_foldername = 'results_' + sheet_name
query_fn = 'queries/' + mouse_project_str + '_queries.json'
data_location = '/Users/anish/Documents/yi_mice/' + \
str(mouse_number) + 'ss_stacks/'
hostname = socket.gethostname()
if hostname == 'Galicia':
data_location = '/data5TB/yi_mice/' + str(mouse_number) + 'ss_stacks'
dapi_mask_str_base = '/data5TB/yi_mice/dapi-masks/' + \
str(mouse_number) + 'ss_stacks'
print('Query Filename: ', query_fn)
print('Data Location: ', data_location)
print('OutputFoldername: ', output_foldername)
print('Sheetname: ', sheet_name)
listOfQueries = syn.loadQueriesJSON(query_fn)
resolution = {'res_xy_nm': 100, 'res_z_nm': 70}
region_name_base = 'F00'
thresh = 0.9
result_list = []
num_workers = mp.cpu_count() - 1
print(num_workers)
pool = mp.Pool(num_workers)
atet_inputs_list = []
mask_location_str = -1
queryID = 0
foldernames = []
for region_num in range(0, 4):
region_name = region_name_base + str(region_num)
data_region_location = os.path.join(data_location, region_name)
dapi_mask_str = os.path.join(dapi_mask_str_base, region_name)
for nQuery, query in enumerate(listOfQueries):
foldername = region_name + '-Q' + str(nQuery)
foldernames.append(foldername)
print(foldername)
mask_location_str = -1
#dapi_mask_str = -1
atet_input = {'query': query, 'queryID': queryID, 'nQuery': nQuery, 'resolution': resolution,
'data_region_location': data_region_location, 'data_location': data_location,
'output_foldername': output_foldername, 'region_name': region_name,
'mask_str': mask_location_str, 'dapi_mask_str': dapi_mask_str, 'mouse_number': mouse_number}
atet_inputs_list.append(atet_input)
queryID = queryID + 1
# Run processes
result_list = pool.map(sa.run_synapse_detection, atet_inputs_list)
pool.close()
pool.join()
print('Get process results from the output queue')
sorted_queryresult = sa.organize_result_lists(result_list)
mouse_df = sa.create_synapse_df(sorted_queryresult, foldernames)
print(mouse_df)
fn = sheet_name + '.xlsx'
df_list = [mouse_df]
aa.write_dfs_to_excel(df_list, sheet_name, fn)
def main():
if len(sys.argv) < 4:
print('Run All Combinations')
print(sys.argv)
# mouse_number = 2
# mouse_project_str = '2ss'
# sheet_name = '2ss_fragX'
# python run_fragX.py 4 '4ss_inhibitory' '4ss_inhibitory_fragX'
# run_list_of_queries(
# mouse_number=1, mouse_project_str='1ss_inhibitory', sheet_name='1ss_inhibitory_fragX')
# run_list_of_queries(
# mouse_number=22, mouse_project_str='22ss_inhibitory', sheet_name='22ss_inhibitory_fragX')
# run_list_of_queries(
# mouse_number=2, mouse_project_str='2ss_inhibitory', sheet_name='2ss_inhibitory_fragX')
# run_list_of_queries(
# mouse_number=3, mouse_project_str='3ss_inhibitory', sheet_name='3ss_inhibitory_fragX')
# run_list_of_queries(
# mouse_number=4, mouse_project_str='4ss_inhibitory', sheet_name='4ss_inhibitory_fragX')
# run_list_of_queries(
# mouse_number=6, mouse_project_str='6ss_inhibitory', sheet_name='6ss_inhibitory_fragX')
# run_list_of_queries(
# mouse_number=5, mouse_project_str='5ss_inhibitory', sheet_name='5ss_inhibitory_fragX')
# run_list_of_queries(
# mouse_number=7, mouse_project_str='7ss_inhibitory', sheet_name='7ss_inhibitory_fragX')
run_list_of_queries(
mouse_number=1, mouse_project_str='1ss', sheet_name='1ss_fragX')
run_list_of_queries(
mouse_number=22, mouse_project_str='22ss', sheet_name='22ss_fragX')
run_list_of_queries(
mouse_number=2, mouse_project_str='2ss', sheet_name='2ss_fragX')
run_list_of_queries(
mouse_number=3, mouse_project_str='3ss', sheet_name='3ss_fragX')
run_list_of_queries(
mouse_number=4, mouse_project_str='4ss', sheet_name='4ss_fragX')
run_list_of_queries(
mouse_number=6, mouse_project_str='6ss', sheet_name='6ss_fragX')
run_list_of_queries(
mouse_number=5, mouse_project_str='5ss', sheet_name='5ss_fragX')
run_list_of_queries(
mouse_number=7, mouse_project_str='7ss', sheet_name='7ss_fragX')
else:
print('we have arguments')
print(sys.argv)
mouse_number = sys.argv[1]
mouse_project_str = sys.argv[2]
sheet_name = sys.argv[3]
run_list_of_queries(mouse_number, mouse_project_str, sheet_name)
if __name__ == '__main__':
main()
|
py | 1a451bcebd6b16e5c91fd23bf151721911db224d | '''
This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
PM4Py is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PM4Py is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PM4Py. If not, see <https://www.gnu.org/licenses/>.
'''
from pm4py.algo.enhancement.sna.variants.pandas import handover, subcontracting, working_together, jointactivities
|
py | 1a451c769571db35f7bcac1c165a96b15b9946c6 | from flask import request, Blueprint, Response
from werkzeug.utils import secure_filename
from models import article
from app import db
from models.article import Article, article_schema, articles_schema
import codecs
articleRoute = Blueprint("articleRoute", __name__)
@articleRoute.route("/article/create", methods=["POST"])
def add_article():
article.Article.title = request.form["title"]
article.Article.body = request.form["body"]
article.Article.author = request.form["author"]
article.Article.categoria = request.form["categoria"]
multimedia = request.files["multimedia"]
if not multimedia:
return "No picture uploaded", 400
filename = secure_filename(multimedia.filename)
article.Article.filename = filename
mimetype = multimedia.mimetype
article.Article.mimetype = mimetype
article.Article.data = multimedia
if not filename:
return "Bad Upload!!", 400
# data = article.Article(article.Article.filename, article.Article.data.read(), article.Article.mimetype)
new_article = article.Article(
article.Article.title,
article.Article.body,
article.Article.author,
article.Article.categoria,
article.Article.filename,
article.Article.data.read(),
article.Article.mimetype
)
db.session.add(new_article)
db.session.commit()
return article.article_schema.jsonify(new_article)
@articleRoute.route("/article/<int:idArticle>")
def get_article(idArticle):
returnable = db.session.query(Article).get(idArticle)
returnable.data = codecs.encode(returnable.data, 'base64').decode('utf-8')
base64 = f"data:{returnable.mimetype};base64,{returnable.data}"
returnable.data = base64
return article_schema.dump(returnable)
@articleRoute.route("/article/all")
def get_all_article():
    returnable = Article.query.all()
    # use a loop variable that does not shadow the imported `article` module
    for item in returnable:
        item.data = codecs.encode(item.data, 'base64').decode('utf-8')
        item.data = f"data:{item.mimetype};base64,{item.data}"
    return articles_schema.jsonify(returnable)
@articleRoute.route("/article/delete/<int:idArticle>", methods=["DELETE"])
def delete_article(idArticle):
returnable = Article.query.get_or_404(idArticle)
db.session.delete(returnable)
db.session.commit()
return "", 204
@articleRoute.route("/article/edit/<int:idArticle>", methods=["POST"])
def edit_article(idArticle):
returnable = Article.query.get_or_404(idArticle)
if "title" in request.form:
returnable.title = request.form["title"]
if "body" in request.form:
returnable.body = request.form["body"]
if "author" in request.form:
returnable.author = request.form["author"]
db.session.commit()
return article_schema.dump(returnable), 200
|
py | 1a451c98fae8e6f8535c15ec30b8d0991031e7cf | # -*- coding: utf-8 -*-
#
# childes_MI_project documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'childes_MI_project'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'childes_mi_projectdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'childes_mi_project.tex',
u'childes_MI_project Documentation',
u"Tim Sainburg", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'childes_mi_project', u'childes_MI_project Documentation',
[u"Tim Sainburg"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'childes_mi_project', u'childes_MI_project Documentation',
u"Tim Sainburg", 'childes_MI_project',
'Information theoretic analysis of child language acquisition', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
|
py | 1a451d873787945c8849043bdb17da11cbb3af33 | # Copyright 2013 Intel Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Example of a PCI alias::
| [pci]
| alias = '{
| "name": "QuickAssist",
| "product_id": "0443",
| "vendor_id": "8086",
| "device_type": "type-PCI",
| "numa_policy": "legacy"
| }'
Aliases with the same name, device_type and numa_policy are ORed::
| [pci]
| alias = '{
| "name": "QuickAssist",
| "product_id": "0442",
| "vendor_id": "8086",
| "device_type": "type-PCI",
| }'
These two aliases define a device request meaning: vendor_id is "8086" and
product_id is "0442" or "0443".
"""
import jsonschema
from oslo_log import log as logging
from oslo_serialization import jsonutils
import nova.conf
from nova import exception
from nova.i18n import _
from nova.network import model as network_model
from nova import objects
from nova.objects import fields as obj_fields
from nova.pci import utils
LOG = logging.getLogger(__name__)
PCI_NET_TAG = 'physical_network'
PCI_TRUSTED_TAG = 'trusted'
PCI_DEVICE_TYPE_TAG = 'dev_type'
DEVICE_TYPE_FOR_VNIC_TYPE = {
network_model.VNIC_TYPE_DIRECT_PHYSICAL: obj_fields.PciDeviceType.SRIOV_PF
}
CONF = nova.conf.CONF
_ALIAS_SCHEMA = {
"type": "object",
"additionalProperties": False,
"properties": {
"name": {
"type": "string",
"minLength": 1,
"maxLength": 256,
},
# TODO(stephenfin): This isn't used anywhere outside of tests and
# should probably be removed.
"capability_type": {
"type": "string",
"enum": ['pci'],
},
"product_id": {
"type": "string",
"pattern": utils.PCI_VENDOR_PATTERN,
},
"vendor_id": {
"type": "string",
"pattern": utils.PCI_VENDOR_PATTERN,
},
"device_type": {
"type": "string",
"enum": list(obj_fields.PciDeviceType.ALL),
},
"numa_policy": {
"type": "string",
"enum": list(obj_fields.PCINUMAAffinityPolicy.ALL),
},
},
"required": ["name"],
}
def _get_alias_from_config():
"""Parse and validate PCI aliases from the nova config.
:returns: A dictionary where the keys are device names and the values are
tuples of form ``(specs, numa_policy)``. ``specs`` is a list of PCI
device specs, while ``numa_policy`` describes the required NUMA
affinity of the device(s).
:raises: exception.PciInvalidAlias if two aliases with the same name have
different device types or different NUMA policies.
"""
jaliases = CONF.pci.alias
aliases = {} # map alias name to alias spec list
try:
for jsonspecs in jaliases:
spec = jsonutils.loads(jsonspecs)
jsonschema.validate(spec, _ALIAS_SCHEMA)
name = spec.pop('name').strip()
numa_policy = spec.pop('numa_policy', None)
if not numa_policy:
numa_policy = obj_fields.PCINUMAAffinityPolicy.LEGACY
dev_type = spec.pop('device_type', None)
if dev_type:
spec['dev_type'] = dev_type
if name not in aliases:
aliases[name] = (numa_policy, [spec])
continue
if aliases[name][0] != numa_policy:
reason = _("NUMA policy mismatch for alias '%s'") % name
raise exception.PciInvalidAlias(reason=reason)
if aliases[name][1][0]['dev_type'] != spec['dev_type']:
reason = _("Device type mismatch for alias '%s'") % name
raise exception.PciInvalidAlias(reason=reason)
aliases[name][1].append(spec)
except exception.PciInvalidAlias:
raise
except jsonschema.exceptions.ValidationError as exc:
raise exception.PciInvalidAlias(reason=exc.message)
except Exception as exc:
raise exception.PciInvalidAlias(reason=str(exc))
return aliases
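# Illustrative sketch of the returned mapping for the "QuickAssist" aliases shown in
# the module docstring (one alias name, two ORed specs, default "legacy" NUMA policy):
# {'QuickAssist': ('legacy',
#                  [{'product_id': '0443', 'vendor_id': '8086', 'dev_type': 'type-PCI'},
#                   {'product_id': '0442', 'vendor_id': '8086', 'dev_type': 'type-PCI'}])}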
def _translate_alias_to_requests(alias_spec, affinity_policy=None):
"""Generate complete pci requests from pci aliases in extra_spec."""
pci_aliases = _get_alias_from_config()
pci_requests = []
for name, count in [spec.split(':') for spec in alias_spec.split(',')]:
name = name.strip()
if name not in pci_aliases:
raise exception.PciRequestAliasNotDefined(alias=name)
count = int(count)
numa_policy, spec = pci_aliases[name]
policy = affinity_policy or numa_policy
# NOTE(gibi): InstancePCIRequest has a requester_id field that could
# be filled with the flavor.flavorid but currently there is no special
# handling for InstancePCIRequests created from the flavor. So it is
# left empty.
pci_requests.append(objects.InstancePCIRequest(
count=count,
spec=spec,
alias_name=name,
numa_policy=policy))
return pci_requests
def get_instance_pci_request_from_vif(context, instance, vif):
"""Given an Instance, return the PCI request associated
to the PCI device related to the given VIF (if any) on the
compute node the instance is currently running.
In this method we assume a VIF is associated with a PCI device
if 'pci_slot' attribute exists in the vif 'profile' dict.
:param context: security context
:param instance: instance object
:param vif: network VIF model object
:raises: raises PciRequestFromVIFNotFound if a pci device is requested
but not found on current host
:return: instance's PCIRequest object associated with the given VIF
or None if no PCI device is requested
"""
# Get PCI device address for VIF if exists
vif_pci_dev_addr = vif['profile'].get('pci_slot') \
if vif['profile'] else None
if not vif_pci_dev_addr:
return None
try:
cn_id = objects.ComputeNode.get_by_host_and_nodename(
context,
instance.host,
instance.node).id
except exception.NotFound:
LOG.warning("expected to find compute node with host %s "
"and node %s when getting instance PCI request "
"from VIF", instance.host, instance.node)
return None
# Find PCIDevice associated with vif_pci_dev_addr on the compute node
# the instance is running on.
found_pci_dev = None
for pci_dev in instance.pci_devices:
if (pci_dev.compute_node_id == cn_id and
pci_dev.address == vif_pci_dev_addr):
found_pci_dev = pci_dev
break
if not found_pci_dev:
return None
# Find PCIRequest associated with the given PCIDevice in instance
for pci_req in instance.pci_requests.requests:
if pci_req.request_id == found_pci_dev.request_id:
return pci_req
raise exception.PciRequestFromVIFNotFound(
pci_slot=vif_pci_dev_addr,
node_id=cn_id)
def get_pci_requests_from_flavor(flavor, affinity_policy=None):
"""Validate and return PCI requests.
The ``pci_passthrough:alias`` extra spec describes the flavor's PCI
requests. The extra spec's value is a comma-separated list of format
``alias_name_x:count, alias_name_y:count, ... ``, where ``alias_name`` is
defined in ``pci.alias`` configurations.
The flavor's requirement is translated into a PCI requests list. Each
entry in the list is an instance of nova.objects.InstancePCIRequests with
four keys/attributes.
- 'spec' states the PCI device properties requirement
- 'count' states the number of devices
- 'alias_name' (optional) is the corresponding alias definition name
- 'numa_policy' (optional) states the required NUMA affinity of the devices
For example, assume alias configuration is::
{
'vendor_id':'8086',
'device_id':'1502',
'name':'alias_1'
}
While flavor extra specs includes::
'pci_passthrough:alias': 'alias_1:2'
The returned ``pci_requests`` are::
[{
'count':2,
'specs': [{'vendor_id':'8086', 'device_id':'1502'}],
'alias_name': 'alias_1'
}]
:param flavor: The flavor to be checked
:param affinity_policy: pci numa affinity policy
:returns: A list of PCI requests
:rtype: nova.objects.InstancePCIRequests
:raises: exception.PciRequestAliasNotDefined if an invalid PCI alias is
provided
:raises: exception.PciInvalidAlias if the configuration contains invalid
aliases.
"""
pci_requests = []
if ('extra_specs' in flavor and
'pci_passthrough:alias' in flavor['extra_specs']):
pci_requests = _translate_alias_to_requests(
flavor['extra_specs']['pci_passthrough:alias'],
affinity_policy=affinity_policy)
return objects.InstancePCIRequests(requests=pci_requests)
|
py | 1a451db61370270cddadbf598dca59a54e01d934 | # Generated by Django 3.2 on 2021-04-08 03:56
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Posts',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(default='Some article', max_length=50, unique=True)),
('text', models.TextField(max_length=255)),
('time_in', models.DateTimeField(auto_now_add=True)),
('rating', models.FloatField(default=0.0)),
('choosing', models.BooleanField(default=False)),
('category', models.CharField(max_length=100, unique=True)),
],
),
migrations.CreateModel(
name='search',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Author', models.CharField(default='Author', max_length=100)),
],
),
]
|
py | 1a451dc25f0cfa4e616cf5ffbe5ab277c7d9e055 | # encoding: utf-8
# Copyright 1999-2017 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from odps.df import DataFrame
from odps.ml import merge_data
from odps.ml.preprocess import *
from odps.ml.tests.base import MLTestBase, tn, ci_skip_case
IONOSPHERE_TABLE = tn('pyodps_test_ml_ionosphere')
IONOSPHERE_RANDOM_SAMPLE_TABLE = tn('pyodps_test_ml_iono_rand_sample')
IONOSPHERE_WEIGHTED_SAMPLE_TABLE = tn('pyodps_test_ml_iono_weight_sample')
IONOSPHERE_APPEND_ID_TABLE = tn('pyodps_test_ml_iono_append_id')
IONOSPHERE_MERGED_TABLE = tn('pyodps_test_ml_iono_merged')
IONOSPHERE_PRINCOMP_TABLE = tn('pyodps_test_ml_iono_princomp')
IONOSPHERE_ABNORMAL_TABLE = tn('pyodps_test_ml_iono_abnormal')
USER_ITEM_TABLE = tn('pyodps_test_ml_user_item')
USER_ITEM_UNPIVOT_TABLE = tn('pyodps_test_ml_unpivot_user_item')
class TestPreprocess(MLTestBase):
def setUp(self):
super(TestPreprocess, self).setUp()
self.create_ionosphere(IONOSPHERE_TABLE)
@ci_skip_case
def test_merge(self):
self.delete_table(IONOSPHERE_MERGED_TABLE)
ds = DataFrame(self.odps.get_table(IONOSPHERE_TABLE))
merged_df = merge_data(ds, ds, auto_rename=True)
merged_df.persist(IONOSPHERE_MERGED_TABLE)
assert self.odps.exist_table(IONOSPHERE_MERGED_TABLE)
@ci_skip_case
def test_sample(self):
self.delete_table(IONOSPHERE_WEIGHTED_SAMPLE_TABLE)
df = DataFrame(self.odps.get_table(IONOSPHERE_TABLE)).label_field('class')
df.sample(0.5, replace=True).persist(IONOSPHERE_RANDOM_SAMPLE_TABLE)
assert self.odps.exist_table(IONOSPHERE_RANDOM_SAMPLE_TABLE)
df['a01', 'a02', ((df.a05 + 1) / 2).rename('a05')].sample(0.5, prob_field='a05', replace=True).persist(
IONOSPHERE_WEIGHTED_SAMPLE_TABLE)
        assert self.odps.exist_table(IONOSPHERE_WEIGHTED_SAMPLE_TABLE)
|
py | 1a451df7df32e32b44fb6d1b4efc904cb56a81c5 | """
Aqualink API documentation
The Aqualink public API documentation # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import aqualink_sdk
from aqualink_sdk.model.update_site_application_dto import UpdateSiteApplicationDto
from aqualink_sdk.model.update_site_with_application_dto import UpdateSiteWithApplicationDto
globals()['UpdateSiteApplicationDto'] = UpdateSiteApplicationDto
globals()['UpdateSiteWithApplicationDto'] = UpdateSiteWithApplicationDto
from aqualink_sdk.model.inline_object import InlineObject
class TestInlineObject(unittest.TestCase):
"""InlineObject unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testInlineObject(self):
"""Test InlineObject"""
# FIXME: construct object with mandatory attributes with example values
# model = InlineObject() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
py | 1a451e5277a8c11e881eda2d1cf7b9055af7b5c4 | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# -*- coding: utf-8 -*-
# File: model_desc.py
from collections import namedtuple
import tensorflow as tf
from ..models.regularize import regularize_cost_from_collection
from ..tfutils.tower import get_current_tower_context
from ..tfutils.common import get_tf_version_tuple
from ..utils import logger
from ..utils.argtools import memoized_method
from ..utils.develop import log_deprecated
if get_tf_version_tuple() >= (1, 7):
from tensorflow.python.framework.tensor_spec import TensorSpec
__all__ = ['InputDesc', 'ModelDesc', 'ModelDescBase']
class InputDesc(
namedtuple('InputDescTuple', ['type', 'shape', 'name'])):
"""
Metadata about an input entry point to the graph.
This metadata can be later used to build placeholders or other types of
input source.
"""
def __new__(cls, type, shape, name):
"""
Args:
type (tf.DType):
shape (tuple):
name (str):
"""
shape = tuple(shape) # has to be tuple for "self" to be hashable
assert isinstance(type, tf.DType), type
if any(k in name for k in [':', '/', ' ']):
raise ValueError("Invalid InputDesc name: '{}'".format(name))
self = super(InputDesc, cls).__new__(cls, type, shape, name)
self._cached_placeholder = {}
return self
def _build_placeholder(self):
"""
Build a tf.placeholder from the metadata.
Returns:
tf.Tensor:
"""
with tf.name_scope(None): # clear any name scope it might get called in
ret = tf.placeholder(
self.type, shape=self.shape, name=self.name)
self._register_cached_placeholder(ret)
return ret
# cannot memoize here, because InputDesc is hashed by its fields.
def build_placeholder_reuse(self):
"""
Build a tf.placeholder from the metadata, or return an old one.
Returns:
tf.Tensor:
"""
g = tf.get_default_graph()
if g in self._cached_placeholder:
return self._cached_placeholder[g]
else:
return self._build_placeholder()
def _register_cached_placeholder(self, placeholder):
graph = placeholder.graph
assert graph not in self._cached_placeholder, \
"Placeholder for this InputDesc had been created before! This is a bug."
self._cached_placeholder[graph] = placeholder
@staticmethod
def _from_placeholder(placeholder):
name = placeholder.op.name
if name.endswith('_1') or name.endswith('_2'):
logger.error("Creating InputDesc from a placeholder named {}.".format(name))
logger.error("You might have mistakenly created this placeholder multiple times!")
ret = InputDesc(
placeholder.dtype,
tuple(placeholder.shape.as_list()),
name)
ret._register_cached_placeholder(placeholder)
return ret
@staticmethod
def _from_tensor_spec(spec):
assert spec.name is not None, "TensorSpec should have a name!"
return InputDesc(spec.dtype, tuple(spec.shape.as_list()), spec.name)
class ModelDescBase(object):
"""
Base class for a model description.
"""
@memoized_method
def get_inputs_desc(self):
"""
Returns:
A list of :class:`InputDesc`, which describes the inputs of this model.
The result is cached for each instance of :class:`ModelDescBase`.
"""
try:
ret = self._get_inputs()
log_deprecated(
"ModelDescBase._get_inputs() interface",
"Use inputs() instead!",
"2019-03-30")
return ret
except NotImplementedError:
with tf.Graph().as_default() as G: # create these placeholder in a temporary graph
inputs = self.inputs()
if isinstance(inputs[0], tf.Tensor):
for p in inputs:
assert p.graph == G, "Placeholders returned by inputs() should be created inside inputs()!"
return [InputDesc._from_placeholder(p) for p in inputs]
else:
for p in inputs:
assert isinstance(p, TensorSpec), type(p)
return [InputDesc._from_tensor_spec(p) for p in inputs]
@property
def input_names(self):
"""
Returns:
[str]: the names of all the inputs.
"""
return [k.name for k in self.get_inputs_desc()]
def _get_inputs(self):
raise NotImplementedError()
def inputs(self):
"""
Returns a list of :class:`tf.TensorSpec` or placeholders.
A subclass is expected to implement this method.
If returning placeholders,
the placeholders __have to__ be created inside this method.
Don't return placeholders created in other places.
Also, you should never call this method by yourself.
Returns:
list[tf.placeholder] or list[tf.TensorSpec], to be converted to :class:`InputDesc`.
"""
raise NotImplementedError()
def build_graph(self, *args):
"""
Build the whole symbolic graph.
This is supposed to be part of the "tower function" when used with :class:`TowerTrainer`.
A subclass is expected to implement this method.
Args:
args ([tf.Tensor]): tensors that matches the list of inputs defined by ``inputs()``.
Returns:
In general it returns nothing, but a subclass
may require it to return necessary information to build the trainer.
For example, `SingleCostTrainer` expect this method to return the cost tensor.
"""
assert len(args) == len(self.get_inputs_desc()), \
"Number of inputs passed to the graph != number of inputs defined " \
"in ModelDesc! ({} != {})".format(len(args), len(self.get_inputs_desc()))
log_deprecated(
"ModelDescBase._build_graph() interface",
"Use build_graph() instead!",
"2019-03-30")
return self._build_graph(args)
def _build_graph(self, inputs):
"""
This is an alternative interface which takes a list of tensors, instead of positional arguments.
By default :meth:`build_graph` will call this method.
"""
pass
class ModelDesc(ModelDescBase):
"""
A ModelDesc with **single cost** and **single optimizer**.
It has the following constraints in addition to :class:`ModelDescBase`:
1. :meth:`build_graph(...)` method should return a cost when called under a training context.
The cost will be the final cost to be optimized by the optimizer.
Therefore it should include necessary regularization.
2. Subclass is expected to implement :meth:`optimizer()` method.
"""
def get_cost(self):
"""
Being deprecated.
You're recommended to return a cost tensor in :meth:`build_graph` method directly.
This function takes the `self.cost` tensor defined by :meth:`build_graph`,
and applies the collection
``tf.GraphKeys.REGULARIZATION_LOSSES`` to the cost automatically.
"""
log_deprecated(
"get_cost() and self.cost",
"Return the cost tensor directly in build_graph() instead!",
"2019-03-30")
cost = self._get_cost()
reg_cost = regularize_cost_from_collection()
if reg_cost.op.type != 'Const':
logger.warn("Regularization losses found in collection, and a 'cost' tensor was "
"not returned by `build_graph`. Therefore applying regularization automatically!")
return tf.add(cost, reg_cost, name='cost_with_regularizer')
else:
return cost
def _get_cost(self, *args):
return self.cost
@memoized_method
def get_optimizer(self):
"""
Return the memoized optimizer returned by `optimizer()`.
Users of :class:`ModelDesc` will need to implement `optimizer()`,
which will only be called once per each model.
Returns:
a :class:`tf.train.Optimizer` instance.
"""
try:
ret = self._get_optimizer()
log_deprecated(
"ModelDescBase._get_optimizer() interface",
"Use optimizer() instead!",
"2019-03-30")
return ret
except NotImplementedError:
pass
return self.optimizer()
def _get_optimizer(self):
raise NotImplementedError()
def optimizer(self):
"""
Returns a `tf.train.Optimizer` instance.
A subclass is expected to implement this method.
"""
raise NotImplementedError()
def _build_graph_get_cost(self, *inputs):
"""
Equivalent to `build_graph`.
Used internally by trainers to get the final cost for optimization in a backward-compatible way.
"""
ret = self.build_graph(*inputs)
if not get_current_tower_context().is_training:
return None # this is the tower function, could be called for inference
if ret is not None:
return ret
else: # the old way, for compatibility
return self.get_cost()
|
py | 1a451e8f82582892b151931dc528ef3561cf594c | ##################################################################
# Copyright 2018 Open Source Geospatial Foundation and others #
# licensed under MIT, Please consult LICENSE.txt for details #
##################################################################
import warnings
try:
from osgeo import gdal, ogr
except ImportError:
warnings.warn('Complex validation requires GDAL/OGR support.')
try:
import netCDF4
except ImportError:
warnings.warn('Complex validation requires netCDF4 support.')
|
py | 1a451eb1dcd4f27ed2f24b36879ca7c62c8a02b3 | # Copyright 2019 The Feast Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
import re
import subprocess
from distutils.cmd import Command
from setuptools import find_packages
try:
from setuptools import setup
from setuptools.command.install import install
from setuptools.command.develop import develop
from setuptools.command.egg_info import egg_info
from setuptools.command.sdist import sdist
from setuptools.command.build_py import build_py
except ImportError:
from distutils.core import setup
from distutils.command.install import install
from distutils.command.build_py import build_py
NAME = "feast"
DESCRIPTION = "Python SDK for Feast"
URL = "https://github.com/feast-dev/feast"
AUTHOR = "Feast"
REQUIRES_PYTHON = ">=3.7.0"
REQUIRED = [
"Click==7.*",
"colorama>=0.3.9",
"fastavro>=1.1.0",
"google-api-core>=1.23.0",
"googleapis-common-protos==1.52.*",
"grpcio>=1.34.0",
"Jinja2>=2.0.0",
"jsonschema",
"mmh3",
"pandas>=1.0.0",
"pandavro==1.5.*",
"protobuf>=3.10",
"pyarrow>=2.0.0",
"pydantic>=1.0.0",
"PyYAML==5.3.*",
"tabulate==0.8.*",
"tenacity>=7.*",
"toml==0.10.*",
"tqdm==4.*",
]
GCP_REQUIRED = [
"google-cloud-bigquery>=2.0.*",
"google-cloud-bigquery-storage >= 2.0.0",
"google-cloud-datastore>=2.1.*",
"google-cloud-storage>=1.20.*",
"google-cloud-core==1.4.*",
]
REDIS_REQUIRED = [
"redis-py-cluster==2.1.2",
]
AWS_REQUIRED = [
"boto3==1.17.*",
]
CI_REQUIRED = [
"cryptography==3.3.2",
"flake8",
"black==19.10b0",
"isort>=5",
"grpcio-tools==1.34.0",
"grpcio-testing==1.34.0",
"mock==2.0.0",
"moto",
"mypy==0.790",
"mypy-protobuf==1.24",
"avro==1.10.0",
"gcsfs",
"urllib3>=1.25.4",
"pytest==6.0.0",
"pytest-cov",
"pytest-xdist",
"pytest-lazy-fixture==0.6.3",
"pytest-timeout==1.4.2",
"pytest-ordering==0.6.*",
"pytest-mock==1.10.4",
"Sphinx!=4.0.0",
"sphinx-rtd-theme",
"adlfs==0.5.9",
"firebase-admin==4.5.2",
"pre-commit",
"assertpy==1.1",
"google-cloud-bigquery>=2.0.*",
"google-cloud-bigquery-storage >= 2.0.0",
"google-cloud-datastore>=2.1.*",
"google-cloud-storage>=1.20.*",
"google-cloud-core==1.4.*",
"redis-py-cluster==2.1.2",
"boto3==1.17.*",
]
# README file from Feast repo root directory
repo_root = (
subprocess.Popen(["git", "rev-parse", "--show-toplevel"], stdout=subprocess.PIPE)
.communicate()[0]
.rstrip()
.decode("utf-8")
)
README_FILE = os.path.join(repo_root, "README.md")
#with open(README_FILE, "r") as f:
LONG_DESCRIPTION = "Feast Feast Feast"
# Add Support for parsing tags that have a prefix containing '/' (ie 'sdk/go') to setuptools_scm.
# Regex modified from default tag regex in:
# https://github.com/pypa/setuptools_scm/blob/2a1b46d38fb2b8aeac09853e660bcd0d7c1bc7be/src/setuptools_scm/config.py#L9
TAG_REGEX = re.compile(
r"^(?:[\/\w-]+)?(?P<version>[vV]?\d+(?:\.\d+){0,2}[^\+]*)(?:\+.*)?$"
)
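# Illustrative sketch: the regex accepts tags with a path-style prefix such as
# "sdk/go/v0.10.0" and captures the trailing version number (here "0.10.0") in the
# "version" group.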
class BuildProtoCommand(Command):
description = "Builds the proto files into python files."
def initialize_options(self):
self.protoc = ["python", "-m", "grpc_tools.protoc"] # find_executable("protoc")
self.proto_folder = os.path.join(repo_root, "protos")
self.this_package = os.path.join(os.path.dirname(__file__) or os.getcwd(), 'feast/protos')
self.sub_folders = ["core", "serving", "types", "storage"]
def finalize_options(self):
pass
def _generate_protos(self, path):
proto_files = glob.glob(os.path.join(self.proto_folder, path))
subprocess.check_call(self.protoc + [
'-I', self.proto_folder,
'--python_out', self.this_package,
'--grpc_python_out', self.this_package,
'--mypy_out', self.this_package] + proto_files)
def run(self):
for sub_folder in self.sub_folders:
self._generate_protos(f'feast/{sub_folder}/*.proto')
from pathlib import Path
for path in Path('feast/protos').rglob('*.py'):
for folder in self.sub_folders:
# Read in the file
with open(path, 'r') as file:
filedata = file.read()
# Replace the target string
filedata = filedata.replace(f'from feast.{folder}', f'from feast.protos.feast.{folder}')
# Write the file out again
with open(path, 'w') as file:
file.write(filedata)
class BuildCommand(build_py):
"""Custom build command."""
def run(self):
self.run_command('build_proto')
build_py.run(self)
class DevelopCommand(develop):
"""Custom develop command."""
def run(self):
self.run_command('build_proto')
develop.run(self)
setup(
name=NAME,
author=AUTHOR,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type="text/markdown",
python_requires=REQUIRES_PYTHON,
url=URL,
packages=find_packages(exclude=("tests",)),
install_requires=REQUIRED,
# https://stackoverflow.com/questions/28509965/setuptools-development-requirements
# Install dev requirements with: pip install -e .[dev]
extras_require={
"dev": ["mypy-protobuf==1.*", "grpcio-testing==1.*"],
"ci": CI_REQUIRED,
"gcp": GCP_REQUIRED,
"aws": AWS_REQUIRED,
"redis": REDIS_REQUIRED,
},
include_package_data=True,
license="Apache",
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
],
entry_points={"console_scripts": ["feast=feast.cli:cli"]},
use_scm_version={"root": "../..", "relative_to": __file__, "tag_regex": TAG_REGEX},
setup_requires=["setuptools_scm", "grpcio", "grpcio-tools==1.34.0", "mypy-protobuf", "sphinx!=4.0.0"],
package_data={
"": [
"protos/feast/**/*.proto",
"protos/feast/third_party/grpc/health/v1/*.proto",
"protos/tensorflow_metadata/proto/v0/*.proto",
"feast/protos/feast/**/*.py",
"tensorflow_metadata/proto/v0/*.py"
],
},
cmdclass={
"build_proto": BuildProtoCommand,
"build_py": BuildCommand,
"develop": DevelopCommand,
},
)
|
py | 1a451f3b04fdb707d793cf791e788c3025dea61b | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 19 14:42:14 2017
This downloads and unzips the wage data by MSA and States from the BLS website
@author: carrie
"""
from bs4 import BeautifulSoup
import requests, urllib.request, shutil, zipfile
import datetime, os, time
#import re, webbrowser
#import schedule
#import datetime
#import time
#
## Obtain current time
#start = datetime.datetime.now()
#
## Simple callable for example
#class DummyClock:
# def __call__(self):
# print datetime.datetime.now()
#
#schedule.every(1).seconds.do(DummyClock())
#
#while True:
# schedule.run_pending()
# # 5 minutes == 300 seconds
# if (datetime.datetime.now() - start).seconds >= 300:
# break
# # And here we halt execution for a second
# time.sleep(1)
class BLSWages:
'''Download the zipped folders from BLS with wage data from Metro Areas and the State'''
#BLS Data Source
BLS_url = 'https://www.bls.gov/oes/tables.htm'
BLS_main_link = 'https://www.bls.gov/'
page = requests.get(BLS_url)
titleShouldBe = "Tables Created by BLS"
#Todays Date
now = datetime.datetime.now()
formatTime = now.strftime("%Y-%m-%d %H:%M")
print("Running BLS Wage Web scraper: {0}".format(formatTime))
#First test is if the page will load
def PageStatus(self):
status = self.page.status_code
soup = ""
if status == 200:
soup = BeautifulSoup(self.page.text, 'html.parser')
self.CheckElementonWebsite(soup, self.titleShouldBe)
print("Downloading...")
self.DownloadStateData(soup)
time.sleep(2)
self.DownloadMetroData(soup)
else:
print("Page will not load")
log = open("Error_Data.txt","a")
log.write("Error on Page Load: Page status is " + " " + str(status) + "\t" + "Date: " + self.formatTime + "\n")
#Check if the page title has changed if so the rest of the page and downloads may have changed so log the issue
def CheckElementonWebsite(self, soup, titletoCheckAgainst ):
title = soup.title.string
if title == titletoCheckAgainst:
print("Title of web page check passed: {0}".format(soup.title.string))
else:
print("Title on BLSWages website changed")
log = open("Error_Data.txt","a")
log.write("Title on Website has changed from '" + str(titletoCheckAgainst) + "' to '" + str(title) + "' \t" + "Date: " + self.formatTime + "\n")
def GetFileNamesfromDirectory(self):
dirpath = os.getcwd()
print(dirpath+"\log")
for file in os.listdir(dirpath+"\log"):
print(file)
if file.endswith(".zip"):
print(os.path.join(dirpath+"\log", file))
return file
#Download BLS Data unzip it and delete the zip container
def DownloadMetroData(self, soup):
body = soup.find("div", {"id": "bodytext"})
links = body.find_all('a', href=True)[6]
href = links['href']
url = self.BLS_main_link+href
print(url)
dir_path = os.path.dirname(os.path.realpath(__file__))
bLS_WageMetro = os.path.join(os.path.sep, dir_path, 'log', 'BLS_WageMetro.zip')
folder = os.path.join(os.path.sep, dir_path, 'log')
with urllib.request.urlopen(url) as response, open(bLS_WageMetro, 'wb') as out_file:
shutil.copyfileobj(response, out_file)
#Extract files from zip
with zipfile.ZipFile(bLS_WageMetro) as zf:
zf.extractall(folder)
        #Remove the zip file and any unnecessary files
os.remove(bLS_WageMetro)
#webbrowser.open(url)
#if href == "/oes/special.requests/oesm16ma.zip":
# print("Data for May 2016 allready downloaded" + href)
    #Download BLS data, unzip it, and delete the zip container
def DownloadStateData(self, soup):
body = soup.find("div", {"id": "bodytext"})
links = body.find_all('a', href=True)[4]
href = links['href']
url = self.BLS_main_link+href
print(url)
dir_path = os.path.dirname(os.path.realpath(__file__))
bLS_WageState = os.path.join(os.path.sep, dir_path, 'log', 'BLS_WageState.zip')
folder = os.path.join(os.path.sep, dir_path, 'log')
with urllib.request.urlopen(url) as response, open(bLS_WageState, 'wb') as out_file:
shutil.copyfileobj(response, out_file)
#Extract files from zip
time.sleep(8)
z = zipfile.ZipFile(bLS_WageState)
z.extractall(folder)
z.close()
del z
os.unlink(bLS_WageState)
##MAIN
#wages = BLSWages()
#wages.PageStatus()
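# Minimal runnable entry point, a sketch based on the commented-out main above.
# Note: the initial BLS page request fires when the class body executes at import time.
if __name__ == '__main__':
    scraper = BLSWages()
    scraper.PageStatus()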
|
py | 1a451fd4f3115de7be6e99dd11645b70d60d703f | """Command line tools to interact with the Insteon devices."""
from .. import devices
from ..constants import RAMP_RATES, ALDBStatus, DeviceCategory
from ..managers.scene_manager import async_add_device_to_scene
from ..utils import seconds_to_ramp_rate
from .advanced import AdvancedTools
from .tools_base import ToolsBase
class ToolsAldb(ToolsBase):
"""Command class to test interactivity."""
async def do_load_aldb(self, *args, **kwargs):
"""Load the All-Link Database of a device.
Usage:
load_aldb <ADDRESS>|all y|n Load one or all devices (can be the modem address)
        To clear the current ALDB and reload from the device, enter `y` as the second argument.
Otherwise enter `n`.
"""
args = args[0].split()
try:
address = args[0]
except IndexError:
address = None
try:
refresh_yn = args[1]
refresh = refresh_yn.lower() == "y"
except IndexError:
refresh_yn = ""
addresses = await self._get_addresses(
address=address, allow_cancel=True, allow_all=True, match_device=True
)
if not addresses:
return
if devices[addresses[0]] != devices.modem or len(addresses) > 1:
if not refresh_yn:
refresh_yn = await self._get_char(
"Clear existing records and reload (y/n)",
default="n",
values=["y", "n"],
)
refresh = refresh_yn.lower() == "y"
battery_devices = []
for address in addresses:
if devices[address].is_battery:
battery_devices.append(address)
# Only load the modem ALDB if explicitly asked
if devices[address] == devices.modem and len(addresses) == 1:
await devices.modem.aldb.async_load()
elif devices[address].cat == 0x03:
pass
else:
# tasks.append(devices[address].aldb.async_load(refresh=refresh))
await devices[address].aldb.async_load(refresh=refresh)
if battery_devices:
self._log_stdout("The following devices are battery operated.")
self._log_stdout("They will load in the background when they wake up.")
for address in battery_devices:
self._log_stdout(f" - {address}")
# if the device did not load the first time, try one more time with refresh
for address in addresses:
if (
devices[address] != devices.modem
and devices[address].aldb.status != ALDBStatus.LOADED
and not devices[address].is_battery
):
await devices[address].aldb.async_load(refresh=refresh)
async def do_print_aldb(self, *args, **kwargs):
"""Print the records in an All-Link Database.
Usage:
print_aldb <ADDRESS>|all
"""
await self._print_aldb(*args)
async def do_add_default_links(self, *args, **kwargs):
"""Add default links between a device and the modem.
Usage:
add_default_links <ADDRESS>
"""
args = args[0].split()
try:
address = args[0]
except IndexError:
address = None
addresses = await self._get_addresses(
address=address, allow_all=False, allow_cancel=True, match_device=True
)
if not addresses:
return
device = devices[addresses[0]]
self._log_command(f"add_default_links {addresses[0]}")
await device.async_add_default_links()
async def do_add_device_to_scene(self, *args, **kwargs):
"""Add a device to a scene.
Usage:
add_device_to_scene <ADDRESS> <SCENE NUMBER> [<ON LEVEL>] [<RAMP RATE>] [<BUTTON>] | [Data1] [Data2] [Data3]
<ADDRESS>: The device address such as 1a.2b.3c
<SCENE NUMBER>: Value from 25 to 255.
For Device type 1:
<ON LEVEL>: (Optional) Value from 0 (off) - 255 (full on).
For dimmable devices any number from 0 to 255 is allowable.
Default is 255.
<RAMP RATE>: 0.1 seconds to 480 seconds (8 minutes)
Default is 0.5 seconds
<BUTTON>: (Optional) The button or group number of the device to change as part of the scene.
                Valid values are device dependent.
Default is 1.
for Device type 2:
<Data1>: (Optional) Value from 0 (off) - 255 (full on).
For on/off devices only 0 and 255 are allowed.
Default is 255.
<Data2>: Data field 2. Default is 0. Typically, this is not used by device type 2.
<BUTTON>: (Optional) The button or group number of the device to change as part of the scene.
                Valid values are device dependent.
Default is 1.
For all other device types:
<Data1>: Data field 1. Any value from 0 - 255 are allowed. Default is 255.
<Data2>: Data field 2 Any value from 0 - 255 are allowed. Default is 0.
<Data3>: Data field 3 Any value from 0 - 255 are allowed. Default is 1.
KeyPadLinc devices will not respond correctly to scenes in this way other than the main power.
"""
args = args[0].split()
try:
address = args[0]
except IndexError:
address = None
try:
scene = int(args[1])
except (IndexError, ValueError):
scene = None
try:
data1 = int(args[2])
except (IndexError, ValueError):
data1 = None
try:
data2 = int(args[3])
except (IndexError, ValueError):
data2 = None
try:
data3 = int(args[4])
except (IndexError, ValueError):
data3 = None
addresses = await self._get_addresses(
address=address, allow_all=False, allow_cancel=True, match_device=True
)
if not addresses:
return
device = devices[addresses[0]]
if not scene:
scene = await self._get_int(
"Scene number or blank to cancel",
values=range(25, 256),
)
if not scene:
return
if data1 is None:
if device.cat == DeviceCategory.DIMMABLE_LIGHTING_CONTROL:
data1 = await self._get_int(
"On level", default=255, values=range(0, 256)
)
elif device.cat == DeviceCategory.SWITCHED_LIGHTING_CONTROL:
data1 = await self._get_int("On level", default=255, values=[0, 255])
else:
data1 = await self._get_int("Data1", default=255, values=range(0, 255))
if device.cat == DeviceCategory.DIMMABLE_LIGHTING_CONTROL:
if data2 is None:
try:
data2_seconds = float(args[3])
except (IndexError, ValueError):
data2_seconds = None
if data2_seconds is None:
data2_seconds = await self._get_float(
"Ramp rate",
default=0.5,
maximum=480,
minimum=0.1,
)
else:
data2_seconds = data2
data2 = seconds_to_ramp_rate(data2_seconds)
if RAMP_RATES[data2] != data2_seconds:
self._log_stdout(
f"Ramp rate rounded to {RAMP_RATES[data2]} to conform to standard values."
)
elif data2 is None:
data2 = await self._get_int("Data2", default=0, values=range(0, 255))
if data3 is None:
if device.cat in [
DeviceCategory.DIMMABLE_LIGHTING_CONTROL,
DeviceCategory.SWITCHED_LIGHTING_CONTROL,
]:
data3 = await self._get_int("Button", default=1, values=range(0, 255))
else:
data3 = await self._get_int("Data3", default=0, values=range(0, 255))
await async_add_device_to_scene(device, scene, data1, data2, data3)
def do_print_aldb_load_status(self, *args, **kwargs):
"""Print the All-Link databbase load status for all devices."""
self._log_stdout("")
self._log_stdout("Device Status")
self._log_stdout("-------- ---------------")
for address in devices:
self._log_stdout(f"{address} {str(devices[address].aldb.status)}")
async def do_advanced(self, *args, **kwargs):
"""Enter advanced ALDB menu."""
self._log_command("advanced")
await self._call_next_menu(AdvancedTools, "advanced")
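# Example interactive commands, a sketch assuming 1a.2b.3c is a device already known to `devices`:
#   load_aldb 1a.2b.3c y            # clear and reload that device's ALDB
#   print_aldb all                  # print every loaded ALDB
#   add_device_to_scene 1a.2b.3c 26 255 0.5 1
#   print_aldb_load_status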
|
py | 1a45203c3628a9210a485d38f4731507420eb6cf | # Copyright 2020-present Kensho Technologies, LLC.
from .registry import REGISTRY # noqa
|
py | 1a45209ac34bd8faa6eb53adfb44f85e261cb243 | from gym.spaces import Discrete, Box
from gym_electric_motor.physical_systems.electric_motors import DcShuntMotor, DcExternallyExcitedMotor, \
DcPermanentlyExcitedMotor, DcSeriesMotor
from gym_electric_motor.physical_systems import SynchronousMotorSystem
import math
import numpy as np
class Controller:
@classmethod
def make(cls, controller_type, environment, **controller_kwargs):
assert controller_type in _controllers.keys(), f'Controller {controller_type} unknown'
controller = _controllers[controller_type](environment, **controller_kwargs)
return controller
def control(self, state, reference):
raise NotImplementedError
def reset(self):
pass
class OnOffController(Controller):
def __init__(self, environment, state_idx=None, reference_idx=0):
action_space = environment.action_space
assert type(action_space) is Discrete, 'Not suitable action space for On off controller'
self._high_action = 1
if action_space.n in [3, 4]:
self._low_action = 2
else:
self._low_action = 0
self._referenced_state = state_idx or np.argmax(environment.reference_generator.referenced_states)
self._ref_idx = reference_idx
def control(self, state, reference):
if state[self._referenced_state] < reference[self._ref_idx]:
return self._high_action
else:
return self._low_action
class ThreePointController(Controller):
def __init__(self, environment, hysteresis=0.01, state_idx=None, reference_idx=0):
action_space = environment.action_space
assert type(action_space) is Discrete, 'Not suitable action space for three point controller'
self._hysteresis = hysteresis
self._high_action = 1
self._idle_action = 0
self._ref_idx = reference_idx
if action_space.n in [3, 4]:
self._low_action = 2
else:
self._low_action = 0
self._referenced_state = state_idx or np.argmax(environment.reference_generator.referenced_states)
def control(self, state, reference):
if state[self._referenced_state] < reference[self._ref_idx] - self._hysteresis:
return self._high_action
elif state[self._referenced_state] > reference[self._ref_idx] + self._hysteresis:
return self._low_action
else:
return self._idle_action
class PController(Controller):
def __init__(self, environment, k_p=10, controller_no=0, state_idx=None, reference_idx=0):
action_space = environment.action_space
assert type(action_space) is Box, 'No suitable action space for P Controller'
self._k_p = k_p
self._controller_no = controller_no
self._action_min = action_space.low[controller_no]
self._action_max = action_space.high[controller_no]
self._ref_idx = reference_idx
self._referenced_state = state_idx or np.argmax(environment.reference_generator.referenced_states)
def control(self, state, reference):
return np.array([
max(
self._action_min,
min(
self._action_max,
self._k_p * (reference[self._ref_idx] - state[self._referenced_state])
)
)
])
class PIController(PController):
def __init__(self, environment, k_p=10, k_i=0.01, controller_no=0, reference_idx=0):
super().__init__(environment, k_p, controller_no, reference_idx)
self._k_i = k_i
self._tau = environment.physical_system.tau
self._integrated_value = 0
def control(self, state, reference):
diff = reference[self._ref_idx] - state[self._referenced_state]
self._integrated_value += diff * self._tau
return np.array([
max(
self._action_min,
min(
self._action_max,
self._k_p * (reference[0] - state[self._referenced_state])
+ self._k_i / self._tau * self._integrated_value
)
)
])
def reset(self, **__):
self._integrated_value = 0
class PmsmOnOffController(Controller):
def __init__(self, environment, state_idx=None, ref_idx=0):
t32 = environment.physical_system.electrical_motor.t_32
q = environment.physical_system.electrical_motor.q
t23 = environment.physical_system.electrical_motor.t_23
q_inv = environment.physical_system.electrical_motor.q_inv
self._forward_transformation = lambda quantities, eps: q_inv(t23(quantities), eps)[::-1]
self._backward_transformation = (
lambda quantities, eps: t32(q(quantities[::-1], eps))
)
self._l_q = environment.physical_system.electrical_motor.motor_parameter['l_q']
self._epsilon_idx = environment.physical_system.EPSILON_IDX
self._currents_idx = environment.physical_system.CURRENTS_IDX
self._ref_idx = ref_idx
self._omega_idx = environment.physical_system.state_positions['omega']
self._u_sup = environment.physical_system.supply.u_nominal
self._referenced_state = state_idx or np.argmax(environment.reference_generator.referenced_states)
self._limits = environment.physical_system.electrical_motor.limits
def control(self, state, reference):
if state[self._referenced_state] < reference[self._ref_idx]:
u_q = 1
else:
u_q = -1
epsilon = np.pi * state[self._epsilon_idx]
u_d = 0
u_a, u_b, u_c = self._backward_transformation((u_q, u_d), epsilon)
return 4 * (u_a > 0) + 2 * (u_b > 0) + (u_c > 0)
class SynRmOnOffController(PmsmOnOffController):
def control(self, state, reference):
if state[self._referenced_state] < reference[self._ref_idx]:
u_q = 1
u_d = 1
else:
u_q = -1
u_d = -1
epsilon = state[self._epsilon_idx]
u_a, u_b, u_c = self._backward_transformation((u_q, u_d), epsilon)
        return 4 * (u_a > 0) + 2 * (u_b > 0) + (u_c > 0)
class CascadedPIController(Controller):
def __init__(self, environment, ref_idx=0):
self._omega_idx = environment.physical_system.OMEGA_IDX
self._currents_idx = environment.physical_system.CURRENTS_IDX
self._voltages_idx = environment.physical_system.VOLTAGES_IDX
self._u_a_idx = self._voltages_idx[0]
self._i_a_idx = self._currents_idx[0]
if len(self._currents_idx) > 1:
self._i_e_idx = environment.physical_system.state_positions['i_e']
else:
self._i_e_idx = environment.physical_system.state_positions['i']
if len(self._voltages_idx) > 1:
self._u_e_idx = environment.physical_system.state_positions['u_e']
else:
self._u_e_idx = None
self._limits = environment.physical_system.limits
self._ref_idx = ref_idx
self._tau = environment.physical_system.tau
mp = environment.physical_system.electrical_motor.motor_parameter
t_motor = mp['l_a'] / mp['r_a']
t_t = 3 / 2 * self._tau
r_motor = mp['r_a']
self._i_a_max = 0
self._i_a_min = 0
self._u_a_max = 0
self._u_a_min = 0
self._integrated_values = [0, 0]
self._converter_voltages = environment.physical_system.converter.voltages
self._i_a_max = self._limits[self._i_a_idx] * environment.physical_system.state_space.high[self._i_a_idx]
self._i_a_min = self._limits[self._i_a_idx] * environment.physical_system.state_space.low[self._i_a_idx]
if 'psi_e' in mp.keys():
self._psi_e = mp['psi_e']
self._i_e_max_prime = None
elif 'l_e_prime' in mp.keys():
self._psi_e = None
self._i_e_max_prime = self._limits[self._currents_idx[-1]] * mp['l_e_prime']
else:
raise Exception('Motor Parameter Error. No psi_e and no l_e_prime entry found in motor parameters')
self._u_a_max = self._limits[self._u_a_idx] * environment.physical_system.state_space.high[self._u_a_idx]
self._u_a_min = self._limits[self._u_a_idx] * environment.physical_system.state_space.low[self._u_a_idx]
# compute motor type specific parameter
# use inner_ and outer_gain_adjustment to adjust the integral part gains for better control behaviour
# Gains chosen as given in "Elektrische Antriebe - Regelung von Antriebssystemen", D. Schröder, 2009
if type(environment.physical_system.electrical_motor) == DcPermanentlyExcitedMotor:
inner_gain_adjustment = 1e-3
outer_gain_adjustment = 1e-3
elif type(environment.physical_system.electrical_motor) == DcSeriesMotor:
t_motor = (mp['l_a'] + mp['l_e']) / (mp['r_a'] + mp['r_e'])
r_motor = (mp['r_a'] + mp['r_e'])
inner_gain_adjustment = 1
outer_gain_adjustment = 1
elif type(environment.physical_system.electrical_motor) == DcExternallyExcitedMotor:
inner_gain_adjustment = 1E-4
outer_gain_adjustment = 1E-3
elif type(environment.physical_system.electrical_motor) == DcShuntMotor:
inner_gain_adjustment = 1E-2
outer_gain_adjustment = 1
else:
raise Exception('Unknown Motor')
# set up gains for the controller
# Integral gains are multiplied by the sampling time to simplify the computation during control
t_sigma = min(t_motor, t_t)
t_1 = max(t_motor, t_t)
v_s = 1 / r_motor
# Integral Inner loop
self._k_i_i = 1 / (2 * t_sigma * v_s) * self._tau * inner_gain_adjustment
# Proportional Inner loop
self._k_p_i = t_1 / (2 * t_sigma * v_s)
# Integral Outer loop
j = environment.physical_system.mechanical_load.j_total
self._k_i_o = (
j / (32 * t_sigma ** 2)
* self._tau * outer_gain_adjustment
)
# Proportional Outer loop
self._k_p_o = j / (4 * t_sigma)
def control(self, state, reference):
# denormalize quantities
omega = state[self._omega_idx] * self._limits[self._omega_idx]
omega_ref = reference[self._ref_idx] * self._limits[self._omega_idx]
i_a = state[self._i_a_idx] * self._limits[self._i_a_idx]
psi_e = self._psi_e or state[self._i_e_idx] * self._i_e_max_prime
# outer control loop
d_omega = omega_ref - omega
if psi_e != 0:
temp = self._integrated_values[0] + d_omega * self._k_i_o / psi_e # integral part
i_a_des = temp + d_omega * self._k_p_o / psi_e
else:
i_a_des = math.copysign(1, d_omega) * self._i_a_max
temp = self._integrated_values[0]
# hold current constraints, anti wind-up
if i_a_des > self._i_a_max or i_a_des < self._i_a_min:
i_a_des = min(max(i_a_des, self._i_a_min), self._i_a_max)
else:
self._integrated_values[0] = temp
d_i_a = i_a_des - i_a
# inner control loop
temp = self._integrated_values[1] + d_i_a * self._k_i_i # integral part
d_u_a = temp + d_i_a * self._k_p_i
u_a_0 = omega * psi_e
u_a = d_u_a + u_a_0
# hold voltage limits, anti wind-up
if u_a > self._u_a_max or u_a < self._u_a_min:
u_a = min(max(u_a, self._u_a_min), self._u_a_max)
else:
self._integrated_values[1] = temp
# normalize the desired output voltage to a duty cycle referring to the supply voltage
# Assumption: u_sup = u_N is made
des_duty_cycle = u_a / self._limits[self._u_a_idx]
duty_cycle = min(
max(des_duty_cycle, self._u_a_min / self._limits[self._u_a_idx]),
self._u_a_max / self._limits[self._u_a_idx])
return np.array([duty_cycle])
class FOCController(Controller):
def __init__(self, environment, ref_idx=0, weight=1):
assert type(environment.physical_system) is SynchronousMotorSystem
self._ref_idx = ref_idx
self._weight = weight
self._omega_idx = environment.physical_system.OMEGA_IDX
self._currents_idx = environment.physical_system.CURRENTS_IDX
self._voltages_idx = environment.physical_system.VOLTAGES_IDX
self._epsilon_idx = environment.physical_system.EPSILON_IDX
self._limits = environment.physical_system.limits
self._tau = environment.physical_system.tau
t32 = environment.physical_system.electrical_motor.t_32
q = environment.physical_system.electrical_motor.q
t23 = environment.physical_system.electrical_motor.t_23
q_inv = environment.physical_system.electrical_motor.q_inv
self._forward_transformation = lambda quantities, eps: q_inv(t23(quantities), eps)[::-1]
self._backward_transformation = (
lambda quantities, eps: t32(q(quantities[::-1], eps))
)
self._motor_parameter = environment.physical_system.electrical_motor.motor_parameter
mp = self._motor_parameter
# current controller i_d
t_motor_d = mp['l_d'] / mp['r_s']
tau = environment.physical_system.tau
t_t = 3 / 2 * tau
t_1_d = max(t_motor_d, t_t)
t_sigma_d = min(t_motor_d, t_t)
v_s_d = 1 / mp['r_s']
# current controller i_q
t_motor_q = mp['l_q'] / mp['r_s']
t_1_q = max(t_motor_q, t_t)
t_sigma_q = min(t_motor_q, t_t)
v_s_q = 1 / mp['r_s']
# outer speed controller
t_2 = 2 * t_sigma_q
t_1_s = environment.physical_system.mechanical_load.j_total
v_s_s = 3 / 2 * mp['p'] * mp['psi_p']
self._k_i_t = 2 * t_1_s / v_s_s * tau # integral gain speed controller.
self._k_p_t = t_1_s / (2 * t_2 * v_s_s) # prop. gain speed controller
self._k_i_d = 1 / (2 * t_sigma_d * v_s_d) * tau # integral gain i_sd controller.
self._k_p_d = t_1_d / (2 * t_sigma_d * v_s_d) # prop. gain i_sd controller
self._k_i_q = 1 / (2 * t_sigma_q * v_s_q) * tau # integral gain i_sq controller.
self._k_p_q = t_1_q / (2 * t_sigma_q * v_s_q) # prop. gain i_sq controller
# specify max values for normalisation and anti wind up
# an anti wind up scheme is necessary for good control behaviour to limit the integral parts in case of
# limit violations of the desired input voltage
# maximum speed without flux weakening
self._omega_1 = (
self._limits[self._voltages_idx][0] / mp['l_q'] / np.sqrt(self._limits[self._currents_idx][0]) ** 2
+ mp['psi_p'] ** 2 / mp['l_q'] ** 2
)
self._integrated_values = [0, 0, 0]
def reset(self):
self._integrated_values = [0, 0, 0]
def control(self, state, reference):
"""
Field oriented control from the lecture "controlled three phase drives, chapter 5"
"""
# extract quantities from state
mp = self._motor_parameter
omega = state[self._omega_idx] * self._limits[self._omega_idx]
omega_ref = reference[self._ref_idx] * self._limits[self._omega_idx]
u = state[self._voltages_idx] * self._limits[self._voltages_idx]
epsilon = state[self._epsilon_idx] * self._limits[self._epsilon_idx]
i = state[self._currents_idx] * self._limits[self._currents_idx]
# transformation from a/b/c to alpha/beta and d/q
i_qd = self._forward_transformation(i, epsilon)
# compute u_d_0 and u_q_0
u_d_0 = omega * mp['l_q'] * i_qd[0]
u_q_0 = omega * (mp['psi_p'] + mp['l_d'] * i_qd[1])
d_omega = omega_ref - omega
# compute T* (Torque reference) and i*_sq (q-axis current reference)
temp = self._integrated_values[0] + d_omega * self._k_i_t # integral part
t_des = temp + d_omega * self._k_p_t # proportional part
i_sq_des = 2 * t_des / (3 * mp['p'] * mp['psi_p'])
# anti wind-up
if i_sq_des > self._limits[self._currents_idx[0]] * self._weight\
or i_sq_des < -self._limits[self._currents_idx[0]] * self._weight:
i_sq_des = min(
max(i_sq_des, -self._limits[self._currents_idx[0]] * self._weight),
self._limits[self._currents_idx[0]] * self._weight
)
else:
self._integrated_values[0] = temp
if abs(omega_ref) < self._omega_1:
i_sd_des = 0
else:
i_sd_des = (
(self._limits[self._voltages_idx[0]] / omega_ref) ** 2
- (mp['l_q'] * self._limits[self._currents_idx[0]]) ** 2 - mp['psi_p'] ** 2
/ (2 * mp['psi_p'] * mp['l_d']))
# transform back to abc-domain
currents = self._backward_transformation((i_sq_des, i_sd_des), epsilon)
# test if current limits are violated
if np.max(np.abs(currents)) > self._limits[self._currents_idx[0]]:
clipping = self._limits[self._currents_idx]
currents = np.clip(currents, -clipping, clipping)
array = self._forward_transformation(currents, epsilon)
i_sd_des = array[1]
i_sq_des = array[0]
# compute du*_sq, du*_sd
d_i_sd = i_sd_des - i_qd[1]
d_i_sq = i_sq_des - i_qd[0]
temp_u_sd = self._integrated_values[1] + d_i_sd * self._k_i_d # integral part
temp_u_sq = self._integrated_values[2] + d_i_sq * self._k_i_q # integral part
d_u_sd_des = temp_u_sd + d_i_sd * self._k_p_d
d_u_sq_des = temp_u_sq + d_i_sq * self._k_p_q
# anti-wind-up u_sd
if d_u_sd_des > self._limits[self._voltages_idx[0]] * self._weight - u_d_0 or \
d_u_sd_des < -self._limits[self._voltages_idx[0]] * self._weight - u_d_0:
d_u_sd_des = np.clip(d_u_sd_des, -self._limits[self._voltages_idx[0]] * self._weight - u_d_0,
self._limits[self._voltages_idx[0]] * self._weight - u_d_0)
else:
self._integrated_values[1] = temp_u_sd
# anti-wind-up u_sq
if d_u_sq_des > self._limits[self._voltages_idx[0]] * self._weight - u_q_0 or \
d_u_sq_des < -self._limits[self._voltages_idx[0]] * self._weight - u_q_0:
d_u_sq_des = np.clip(d_u_sq_des, -self._limits[self._voltages_idx[0]] * self._weight - u_q_0,
self._limits[self._voltages_idx[0]] * self._weight - u_q_0)
else:
self._integrated_values[2] = temp_u_sq
# compute u*_sq, u*_sd, epsilon + d_epsilon due to delay of the controller
u_sd_des = u_d_0 + d_u_sd_des
u_sq_des = d_u_sq_des + u_q_0
epsilon_shift = epsilon + 3 / 2 * self._tau * omega
# from d/q to alpha/beta and a/b/c
u_qd_des = np.array([u_sq_des, u_sd_des])
voltages = self._backward_transformation(u_qd_des, epsilon_shift)
# normalise inputs
result = np.clip(voltages / self._limits[self._voltages_idx[0]], -1, 1)
return result
class PmsmPController(Controller):
def __init__(self, environment, state_idx=None, ref_idx=0, k_p=1):
self._k_p = k_p
t32 = environment.physical_system.electrical_motor.t_32
q = environment.physical_system.electrical_motor.q
t23 = environment.physical_system.electrical_motor.t_23
q_inv = environment.physical_system.electrical_motor.q_inv
self._forward_transformation = lambda quantities, eps: q_inv(t23(quantities), eps)[::-1]
self._backward_transformation = (
lambda quantities, eps: t32(q(quantities[::-1], eps))
)
self._epsilon_idx = environment.physical_system.EPSILON_IDX
self._currents_idx = environment.physical_system.CURRENTS_IDX
self._ref_idx = ref_idx
self._referenced_state = state_idx or np.argmax(environment.reference_generator.referenced_states)
self._phase = 0
def control(self, state, reference):
        u_q = min(1, max(-1, self._k_p * (reference[self._ref_idx] - state[self._referenced_state])))
epsilon = np.pi * state[self._epsilon_idx]
u_d = 0
u_a, u_b, u_c = self._backward_transformation((u_q, u_d), epsilon)
return [u_a, u_b, u_c]
def reset(self):
self._phase = 0
class ThreePhaseSteadyState(Controller):
def __init__(self, environment, omega_el=15):
self._omega_el = omega_el
self._tau = environment.physical_system.tau
self._k = 0
        t = np.linspace(0, 2 * np.pi / abs(omega_el), int(1 / abs(omega_el * self._tau)))
self._u_a = np.sin(omega_el * t)
self._u_b = np.sin(omega_el * t - 2/3 * np.pi)
self._u_c = np.sin(omega_el * t + 2/3 * np.pi)
def reset(self):
self._k = -1
def control(self, state, reference):
self._k += 1
length = len(self._u_a)
return self._u_a[self._k % length], self._u_b[self._k % length], self._u_c[self._k % length],
_controllers = {
'on_off': OnOffController,
'three_point': ThreePointController,
'p_controller': PController,
'pi_controller': PIController,
'pmsm_on_off': PmsmOnOffController,
'synrm_on_off': SynRmOnOffController,
'cascaded_pi': CascadedPIController,
'foc_controller': FOCController,
'pmsm_p_controller': PmsmPController,
'three_phase_steadystate': ThreePhaseSteadyState
}
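# Usage sketch (assumptions: the environment id and the (state, reference) observation layout
# follow the gym-electric-motor examples):
#
#   import gym_electric_motor as gem
#   env = gem.make('DcSeriesCont-v1')
#   controller = Controller.make('three_point', env, hysteresis=0.02)
#   (state, reference), done = env.reset(), False
#   while not done:
#       (state, reference), reward, done, _ = env.step(controller.control(state, reference))
#   controller.reset()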
|
py | 1a4520cf2f47f945263e27c6e2a2639ed2be8ae2 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_overlay_global
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Configures anycast gateway MAC of the switch.
description:
- Configures anycast gateway MAC of the switch.
author: Gabriele Gerbino (@GGabriele)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- Default restores params default value
    - Supported MAC address formats are "E.E.E", "EE-EE-EE-EE-EE-EE",
"EE:EE:EE:EE:EE:EE" and "EEEE.EEEE.EEEE"
options:
anycast_gateway_mac:
description:
- Anycast gateway mac of the switch.
required: true
'''
EXAMPLES = '''
- nxos_overlay_global:
anycast_gateway_mac: "b.b.b"
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample: ["fabric forwarding anycast-gateway-mac 000B.000B.000B"]
'''
import re
from ansible.module_utils.network.nxos.nxos import get_config, load_config
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.config import CustomNetworkConfig
PARAM_TO_COMMAND_KEYMAP = {
'anycast_gateway_mac': 'fabric forwarding anycast-gateway-mac',
}
def get_existing(module, args):
existing = {}
config = str(get_config(module))
for arg in args:
command = PARAM_TO_COMMAND_KEYMAP[arg]
has_command = re.findall(r'(?:{0}\s)(?P<value>.*)$'.format(command), config, re.M)
value = ''
if has_command:
value = has_command[0]
existing[arg] = value
return existing
def apply_key_map(key_map, table):
new_dict = {}
for key, value in table.items():
new_key = key_map.get(key)
if value:
new_dict[new_key] = value
return new_dict
def get_commands(module, existing, proposed, candidate):
commands = list()
proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed)
existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)
for key, proposed in proposed_commands.items():
existing_value = existing_commands.get(key)
if proposed == 'default' and existing_value:
commands.append('no {0} {1}'.format(key, existing_value))
elif 'anycast-gateway-mac' in key and proposed != 'default':
proposed = normalize_mac(proposed, module)
existing_value = normalize_mac(existing_value, module)
if proposed != existing_value:
command = '{0} {1}'.format(key, proposed)
commands.append(command)
if commands:
candidate.add(commands, parents=[])
def normalize_mac(proposed_mac, module):
if proposed_mac is None:
return ''
try:
if '-' in proposed_mac:
splitted_mac = proposed_mac.split('-')
if len(splitted_mac) != 6:
raise ValueError
for octect in splitted_mac:
if len(octect) != 2:
raise ValueError
elif '.' in proposed_mac:
splitted_mac = []
splitted_dot_mac = proposed_mac.split('.')
if len(splitted_dot_mac) != 3:
raise ValueError
for octect in splitted_dot_mac:
if len(octect) > 4:
raise ValueError
else:
octect_len = len(octect)
padding = 4 - octect_len
splitted_mac.append(octect.zfill(padding + 1))
elif ':' in proposed_mac:
splitted_mac = proposed_mac.split(':')
if len(splitted_mac) != 6:
raise ValueError
for octect in splitted_mac:
if len(octect) != 2:
raise ValueError
else:
raise ValueError
except ValueError:
module.fail_json(msg='Invalid MAC address format', proposed_mac=proposed_mac)
joined_mac = ''.join(splitted_mac)
mac = [joined_mac[i:i + 4] for i in range(0, len(joined_mac), 4)]
return '.'.join(mac).upper()
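# For illustration, the normalization above maps 'AA-BB-CC-DD-EE-FF' and
# 'aa:bb:cc:dd:ee:ff' to 'AABB.CCDD.EEFF', and 'b.b.b' to '000B.000B.000B'.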
def main():
argument_spec = dict(
anycast_gateway_mac=dict(required=True, type='str'),
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
result = {'changed': False, 'commands': [], 'warnings': warnings}
args = PARAM_TO_COMMAND_KEYMAP.keys()
existing = get_existing(module, args)
proposed = dict((k, v) for k, v in module.params.items()
if v is not None and k in args)
candidate = CustomNetworkConfig(indent=3)
get_commands(module, existing, proposed, candidate)
if candidate:
candidate = candidate.items_text()
result['commands'] = candidate
if not module.check_mode:
load_config(module, candidate)
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
|
py | 1a45210986e9d2229a6712c8d94a696c1950f890 | import networkx as nx
from networkx.readwrite import json_graph
import pylab as plt
import json
import sys
import os
from c_aws import *
import urllib3
import concurrent.futures
import time
def carve_results():
# call subnet lambdas to collect their results from their beacons
# get all registered beacons from SSM
print('getting latest test results')
# get a list of subnets, accounts, regions, and beacons
subnets = get_subnet_beacons()
# use threading for speed, get all beacon reports
results = {}
with concurrent.futures.ThreadPoolExecutor() as executor:
futures = []
p = os.environ['Prefix']
for beacon, data in subnets.items():
print(f"getting results from {beacon}")
payload = {
'action': 'results',
'beacon': beacon
}
futures.append(executor.submit(
aws_invoke_lambda,
arn=f"arn:aws:lambda:{data['region']}:{data['account']}:function:{p}carve-{data['subnet']}",
payload=payload,
region=data['region'],
credentials=None))
for future in concurrent.futures.as_completed(futures):
result = future.result()
results[result['subnet']] = {
'beacon': result['beacon'],
'status': result['status'],
'fping': result['fping'],
'health': result['health'],
'ts': result['ts']
}
# push subnet beacons data to S3
log = json.dumps(results, ensure_ascii=True, indent=2, sort_keys=True)
aws_put_direct(log, f"logs/verification-{int(time.time())}")
return
def process_test_results(results):
# determine verification beacons here
G = load_graph(aws_newest_s3('deployed_graph/'), local=False)
subnet_beacons = get_subnet_beacons()
verify_beacons = []
for edge in G.edges:
if vpc not in edge:
G.remove_edge(edge[0], edge[1])
# def get_asgs(G=None):
# if G is None:
# G = load_graph(aws_newest_s3('deployed_graph/'), local=False)
# # determine all deployed ASGs
# asgs = {}
# for subnet in list(G.nodes):
# asg = f"{os.environ['Prefix']}carve-beacon-asg-{G.nodes().data()[subnet]['VpcId']}"
# if asg not in asgs:
# asgs[asg] = {
# 'account': G.nodes().data()[subnet]['Account'],
# 'region': G.nodes().data()[subnet]['Region'],
# }
# for asg, values in asgs.items():
# return asgs
def scale_beacons(scale):
'''
discover all beacon IP address
add the beacons to the carve-config cloudformation snippet
push the snipped to regional s3 buckets to be used as a cloudformation include
'''
G = load_graph(aws_newest_s3('deployed_graph/'), local=False)
vpcs = {}
payload = []
for subnet in list(G.nodes):
# determine VPCs and regions
a = G.nodes().data()[subnet]['Account']
r = G.nodes().data()[subnet]['Region']
vpcs[G.nodes().data()[subnet]['VpcId']] = (a, r)
# add an ssm path to store tokens for each subnet
payload.append({
'parameter': f"/{os.environ['Prefix']}carve-resources/tokens/{subnet}",
'task': 'scale',
'scale': scale
})
# start a step function to generate tokens to track scaling each subnet
name = f"scale-{scale}-{int(time.time())}"
print('starting token step function')
aws_start_stepfunction(os.environ['TokenStateMachine'], payload, name)
# generate a list of autoscaling groups to scale
asgs = []
for vpc, ar in vpcs.items():
vpc_subnets = [x for x,y in G.nodes(data=True) if y['VpcId'] == vpc]
asgs.append({
'asg': f"{os.environ['Prefix']}carve-beacon-asg-{vpc}",
'account': ar[0],
'region': ar[1],
'subnets': vpc_subnets
})
# wait for tokens to appear before scaling
i = 0
while True:
tokens = aws_ssm_get_parameters(f"/{os.environ['Prefix']}carve-resources/tokens/")
if len(payload) == len(tokens):
print('tokens are ready')
break
else:
if i > 30:
                print('timed out waiting for tokens')
                break
else:
i = i + 1
print('waiting 1s for tokens...')
time.sleep(1)
print(f'scaling asgs: {asgs}')
# using threading, set all ASGs to correct scale for all beacons
with concurrent.futures.ThreadPoolExecutor() as executor:
futures = []
for asg in asgs:
if scale == 'none':
desired = 0
elif scale == 'subnet':
desired = len(asg['subnets'])
elif scale == 'vpc':
desired = 1
futures.append(executor.submit(
update_asg_size,
account=asg['account'],
asg=asg['asg'],
minsize=0,
maxsize=len(asg['subnets']),
desired=desired,
region=asg['region']
))
for future in concurrent.futures.as_completed(futures):
result = future.result()
def update_asg_size(account, asg, minsize, maxsize, desired, region):
credentials=aws_assume_role(carve_role_arn(account), f"lookup-{asg}")
asg_info = aws_describe_asg(asg, region, credentials)
print(f'scaling asg: {asg}')
# only update ASG if min/max/desired is different
update = False
if int(asg_info['MinSize']) != int(minsize):
print('scale due to MinSize')
update = True
elif int(asg_info['MaxSize']) != int(maxsize):
print('scale due to MaxSize')
update = True
elif int(asg_info['DesiredCapacity']) != int(desired):
print('scale due to DesiredCapacity')
update = True
else:
print('no scaling update to ASG')
if update:
aws_update_asg_size(asg, minsize, maxsize, desired, region, credentials)
else:
        # if no updates, return success for the task tokens
subnets = asg_info['VPCZoneIdentifier'].split(',')
print(f'clearing tokens for subnets: {subnets}')
for subnet in subnets:
ssm_param = f"/{os.environ['Prefix']}carve-resources/tokens/{subnet}"
token = aws_ssm_get_parameter(ssm_param)
aws_ssm_delete_parameter(ssm_param)
if token is not None:
aws_send_task_success(token, {"action": "scale", "result": "none"})
else:
print(f'taskToken was None for {subnet}')
def get_subnet_beacons():
# return dict containing all subnets with their beacon ip, account, and region
# load latest graph
G = load_graph(aws_newest_s3('deployed_graph/'), local=False)
subnet_beacons = json.loads(aws_read_s3_direct('managed_deployment/subnet-beacons.json', current_region))
subnets = {}
# for vpc in list(G.nodes):
for subnet, data in G.nodes().data():
# only get results if there is an active beacon in the subnet
if subnet in subnet_beacons:
subnets[subnet_beacons[subnet]] = {
'subnet': subnet,
'account': data['Account'],
'region': data['Region']
}
else:
            # this condition needs to be handled if there is no beacon
pass
return subnets
def update_carve_beacons():
'''
discover all beacon IP address
add the beacons to the carve-config cloudformation snippet
push the snipped to regional s3 buckets to be used as a cloudformation include
'''
print('updating carve beacons')
G = load_graph(aws_newest_s3('deployed_graph/'), local=False)
# determine all deployed ASGs
asgs = {}
for subnet in list(G.nodes):
asg = f"{os.environ['Prefix']}carve-beacon-asg-{G.nodes().data()[subnet]['VpcId']}"
if asg not in asgs:
asgs[asg] = {
'account': G.nodes().data()[subnet]['Account'],
'region': G.nodes().data()[subnet]['Region']
}
# threaded look up the IP address of all beacons in all ASGs
subnet_beacons = {}
all_beacons = []
with concurrent.futures.ThreadPoolExecutor() as executor:
futures = []
for asg, value in asgs.items():
futures.append(executor.submit(
get_beacons_thread, asg=asg, account=value['account'], region=value['region']))
for future in concurrent.futures.as_completed(futures):
result = future.result()
subnet_beacons.update(result)
for subnet, beacon in result.items():
all_beacons.append(beacon)
# push subnet beacons data to S3
data = json.dumps(subnet_beacons, ensure_ascii=True, indent=2, sort_keys=True)
aws_put_direct(data, 'managed_deployment/subnet-beacons.json')
# # create an updated config file with all the beacons
# config_path = "managed_deployment/carve-config.json"
# with open(config_path) as f:
# config = json.load(f)
# config['/root/carve.cfg']['content'] = '\n'.join(beacons)
# # push carve config file to S3
# data = json.dumps(config, ensure_ascii=True, indent=2, sort_keys=True)
# aws_put_direct(data, config_path)
# get a list of subnets, accounts, regions, and beacons
subnets = get_subnet_beacons()
# use threading to update all beacons with new beacon lists
results = []
with concurrent.futures.ThreadPoolExecutor() as executor:
futures = []
p = os.environ['Prefix']
for beacon, data in subnets.items():
futures.append(executor.submit(
aws_invoke_lambda,
arn=f"arn:aws:lambda:{data['region']}:{data['account']}:function:{p}carve-{data['subnet']}",
payload={
'action': 'update',
'beacon': beacon,
'beacons': ','.join(all_beacons)
},
region=data['region'],
credentials=None))
for future in concurrent.futures.as_completed(futures):
results.append(future.result())
print(results)
# # copy config file to all required regions for CloudFormation includes
# prefix = os.environ['Prefix']
# org = os.environ['OrgId']
# for r in regions:
# aws_copy_s3_object(
# key=config_path,
# target_key=config_path,
# source_bucket=os.environ['CarveS3Bucket'],
# target_bucket=f"{prefix}carve-managed-bucket-{org}-{r}")
# # update all VPC stacks
# deploy_key = get_deploy_key(last=True)
# if deploy_key is not None:
# start_carve_deployment(event, context, key=deploy_key)
# else:
# print('No previous deploy key to run updates with')
def get_beacons_thread(asg, account, region):
# threaded lookup of all beacon IP addresses in an ASG
credentials = aws_assume_role(carve_role_arn(account), f"lookup-{asg}")
instance_ids = []
asg_info = aws_describe_asg(asg, region, credentials)
for instance in asg_info['Instances']:
if instance['LifecycleState'] == "InService":
instance_ids.append(instance['InstanceId'])
instances = aws_describe_instances(instance_ids, region, credentials)
beacons = {}
for instance in instances:
beacons[instance['SubnetId']] = instance['PrivateIpAddress']
return beacons
def ssm_event(event, context):
ssm_param = event['detail']['name']
ssm_value = aws_ssm_get_parameter(ssm_param)
if ssm_param.split('/')[-1] == 'scale':
scale_beacons(ssm_value)
elif ssm_param.split('/')[-1] == 'status':
# should enable/disable continuous verification
pass
def cleanup_ssm():
# make function to clean up SSM tokens
# move function to cleanup workflow
pass
def asg_event(event):
# should only be one item, but treat as a list
for record in event['Records']:
message = json.loads(record['Sns']['Message'])
print(f"TRIGGERED by ASG: {message['detail']['AutoScalingGroupName']}")
        # get instances from event data
instance_id = ""
for resource in message['resources']:
if resource.startswith("arn:aws:ec2"):
instance_id = resource.split('/')[1]
vpc = message['detail']['AutoScalingGroupName'].split(f"{os.environ['Prefix']}carve-beacon-asg-")[-1]
credentials = aws_assume_role(carve_role_arn(message['account']), f"event-{message['detail']['AutoScalingGroupName']}")
# get instance metadata from account and update SSM
ec2 = aws_describe_instances([instance_id], message['region'], credentials)[0]
# print(ec2)
# parameter = f"/{os.environ['Prefix']}carve-resources/vpc-beacons/{vpc}/{ec2['InstanceId']}"
if 'EC2 Instance Launch Successful' == message['detail-type']:
# # add to SSM
# print(f"adding beacon to ssm: {instance_id} - {ec2['PrivateIpAddress']} - {ec2['SubnetId']}")
# beacon = {ec2['PrivateIpAddress']: ec2['SubnetId']}
# aws_ssm_put_parameter(parameter, json.dumps(beacon))
### need to update this code to grab subnet ssm param instead of ASG
# append azid code to end of instance name
subnet = aws_describe_subnets(message['region'], credentials, message['account'], ec2['SubnetId'])[0]
az = subnet['AvailabilityZoneId'].split('-')[-1]
name = f"{os.environ['Prefix']}carve-beacon-{ec2['SubnetId']}-{az}"
tags = [{'Key': 'Name', 'Value': name}]
aws_create_ec2_tag(ec2['InstanceId'], tags, message['region'], credentials)
function = f"arn:aws:lambda:{message['region']}:{message['account']}:function:{os.environ['Prefix']}carve-{ec2['SubnetId']}"
beacon = ec2['PrivateIpAddress']
        ## will need to update SSM logic for tokens to be 1 token per subnet that will come back up?
## or do we check the whole ASG for health?
i = 0
while True:
result = beacon_results(function, beacon)
print(result)
if result['health'] == 'up':
# ssm_param = f"/{os.environ['Prefix']}carve-resources/tokens/{asg}",
ssm_param = f"/{os.environ['Prefix']}carve-resources/tokens/{ec2['SubnetId']}"
token = aws_ssm_get_parameter(ssm_param)
aws_ssm_delete_parameter(ssm_param)
if token is not None:
aws_send_task_success(token, {"action": "scale", "result": "success"})
else:
print(f"taskToken was None for {ec2['SubnetId']}")
break
else:
if i > 30:
                    print(f'timed out waiting for beacon {beacon}')
                    break
else:
print(f'waiting for beacon {beacon} - {i}')
i = i + 1
time.sleep(1)
elif 'EC2 Instance Terminate Successful' == message['detail-type']:
# ssm_param = f"/{os.environ['Prefix']}carve-resources/tokens/{asg}",
subnet = message['detail']['Details']['Subnet ID']
ssm_param = f"/{os.environ['Prefix']}carve-resources/tokens/{subnet}"
token = aws_ssm_get_parameter(ssm_param)
aws_ssm_delete_parameter(ssm_param)
if token is not None:
aws_send_task_success(token, {"action": "scale", "result": "success"})
else:
print(f'taskToken was None for {subnet}')
print(f"beacon terminated {message}")
def beacon_results(function, beacon):
region = function.split(':')[3]
subnet = function.split(':')[-1]
print(f"getting beacon results from {subnet}")
payload = {
'action': 'results',
'beacon': beacon
}
result = aws_invoke_lambda(
arn=function,
payload=payload,
region=region,
credentials=None
)
return result
def carve_role_arn(account):
# return the carve IAM role ARN for any account number
role_name = f"{os.environ['Prefix']}carve-core"
role = f"arn:aws:iam::{account}:role/{role_name}"
return role
def network_diff(A, B):
# compare peering both directions
diff_peering(A, B)
diff_vpcs(A, B)
def diff_peering(A, B, repeat=True):
for edge in A.edges() - B.edges():
print(f"DIFFERENCE DETECTED! \'{B.graph['Name']}\' contains a PEERING CONNECTION that \'{A.graph['Name']}\' does not:")
print(f"#######################")
print(A.nodes().data()[edge[0]])
print(f"-------peered to-------")
print(A.nodes().data()[edge[1]])
print(f"#######################")
if repeat:
diff_peering(B, A, repeat=False)
def diff_vpcs(A, B, repeat=True):
for node in A.nodes() - B.nodes():
print(f"DIFF DETECTED! \'{B.graph['Name']}\' contains a VPC that \'{A.graph['Name']}\' does not:")
print(f"#######################")
print(A.nodes().data()[node])
print(f"#######################")
if repeat:
        diff_vpcs(B, A, repeat=False)
def export_visual(Graph, c_context):
G = Graph
# remove isolated nodes from graph
if 'peers_only' in c_context:
if c_context['peers_only'] == 'true':
G.remove_nodes_from(list(nx.isolates(G)))
print('drawing graph diagram')
# print(f"/src/c_graphic_{G.graph['Name']}.png")
options = {
'node_color': 'blue',
'node_size': 100,
'font_size': 14,
'width': 3,
'with_labels': True,
}
plt.figure(G.graph['Name'],figsize=(24,24))
nx.draw_circular(G, **options)
# G = nx.cycle_graph(80)
# pos = nx.circular_layout(G)
# # default
# plt.figure(1)
# nx.draw(G,pos)
# # smaller nodes and fonts
# plt.figure(2)
# nx.draw(G,pos,node_size=60,font_size=8)
# # larger figure size
# plt.figure(3,figsize=(12,12))
# nx.draw(G,pos)
plt.savefig(f"/src/c_graphic_{G.graph['Name']}.png")
def draw_vpc(Graph, vpc):
G = Graph
print('drawing graph diagram')
print(f"/src/c_graphic_{vpc}.png")
# remove all edges without vpc
for edge in G.edges:
if vpc not in edge:
G.remove_edge(edge[0], edge[1])
# remove all nodes left without edges
G.remove_nodes_from(list(nx.isolates(G)))
options = {
'node_color': 'blue',
'node_size': 100,
'font_size': 14,
'width': 3,
'with_labels': True,
}
plt.figure(vpc,figsize=(24,24))
# nx.draw_circular(G, **options)
# nx.draw_networkx(G, **options) # good for single
# nx.draw_spectral(G, **options)
# nx.draw_spring(G, **options) # similar to netoworkx also good
nx.draw_shell(G, **options)
plt.savefig(f"/src/c_graphic_{vpc}.png")
def load_graph(graph, local=True):
try:
if local:
with open(graph) as f:
G = json_graph.node_link_graph(json.load(f))
G.graph['Name'] = graph.split('/')[-1].split('.')[0]
return G
else:
graph_data = aws_read_s3_direct(graph, current_region)
G = json_graph.node_link_graph(json.loads(graph_data))
return G
except Exception as e:
        print(f'error opening graph {graph}: {e}')
sys.exit()
def save_graph(G, file_path):
# save json data
try:
os.remove(file_path)
except:
pass
with open(file_path, 'a') as f:
json.dump(json_graph.node_link_data(G), f)
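# Round-trip sketch for the two helpers above (the node attributes here are illustrative only):
#   G = nx.Graph(Name='example')
#   G.add_node('subnet-0abc', VpcId='vpc-0123', Account='111111111111', Region='us-east-1')
#   save_graph(G, '/tmp/example.json')
#   H = load_graph('/tmp/example.json', local=True)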
# def main(c_context):
# # either load graph data for G from json, or generate dynamically
# if 'json_graph' in c_context:
# G = load_graph(c_context['json_graph'])
# else:
# G = False
# if not G:
# G = discovery(c_context)
# if 'export_visual' in c_context:
# if c_context['export_visual'] == 'true':
# export_visual(G, c_context)
# if 'diff_graph' in c_context:
# D = load_graph(c_context['diff_graph'])
# if D:
# network_diff(G, D)
# else:
# print(f'cannot compare: diff_graph did not load')
# draw_vpc(G, c_context['VpcId'])
|
py | 1a4521445216d925928da3d3743566d95610dabf | import warnings
warnings.filterwarnings('ignore')
import os
import sys
import argparse
import random
import joblib
import json
import tqdm
import torch
import numpy as np
import pandas as pd
import sklearn
from types import SimpleNamespace
from collections import Counter
def simulate(xp, tps, celltype_annotations, w, model, config, num_sims, num_cells, num_steps, device, tp_subset, celltype_subset):
"""
Use trained PRESCIENT model to simulate cell trajectories with arbitrary initializations.
"""
# load data
xp = torch.from_numpy(xp)
# make meta dataframe
# TO-DO implement weight sampling strategy
dict = {"tp": tps, "celltype": celltype_annotations}
meta = pd.DataFrame(dict)
all_sims = []
pbar = tqdm.tqdm(range(num_sims))
for s in pbar:
# sample cells based on timepoint or celltype or both
if tp_subset != None and celltype_subset != None:
idx = pd.DataFrame(meta[(meta["tp"]==tp_subset) & (meta["celltype"]==celltype_subset)]).sample(num_cells).index
elif tp_subset != None:
idx = pd.DataFrame(meta[meta["tp"]==tp_subset]).sample(num_cells).index
elif celltype_subset != None:
idx = pd.DataFrame(meta[meta["celltype"]==celltype_subset]).sample(num_cells).index
else:
idx = meta.sample(num_cells).index
# map tensor to device
xp_i = xp[idx].to(device)
# store inital value
xp_i_ = xp_i.detach().cpu().numpy()
xps_i = [xp_i_] # n
# simulate all cells forward through time
for _ in range(num_steps):
# initialize latent vector
z = torch.randn(xp_i.shape[0], xp_i.shape[1]) * config.train_sd
z = z.to(device)
# step forward with trained model
xp_i = model._step(xp_i.float(), dt=config.train_dt, z=z)
# store next step
xp_i_ = xp_i.detach().cpu().numpy()
xps_i.append(xp_i_)
# group timepoints
xps = np.stack(xps_i) #[n_cells x n_steps]
all_sims.append(xps) #[n_sims x n_cells x n_steps]
pbar.set_description('[simulate] {}'.format(s))
return all_sims
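# Usage sketch with synthetic inputs (shapes, labels, and the trained model/config objects
# below are assumptions for illustration only):
#   xp = np.random.randn(500, 30).astype(np.float32)        # 500 cells x 30 features
#   tps = np.random.choice([0, 1, 2], size=500)
#   celltypes = np.random.choice(['HSC', 'Neutrophil'], size=500)
#   sims = simulate(xp, tps, celltypes, w=None, model=trained_model, config=train_config,
#                   num_sims=3, num_cells=50, num_steps=10, device='cpu',
#                   tp_subset=None, celltype_subset='HSC')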
|
py | 1a4521d8a0db02e3727eb4c8585161aad675ea52 | import sys, csv, os, string, re, shutil
# @function DATE FUNCTIONS
# @version v0.18.04.30
##################################
def dtos(dt=''):
if (len(dt) == 10):
ano = dt[6]+dt[7]+dt[8]+dt[9]
mes = dt[3]+dt[4]
dia = dt[0]+dt[1]
data = ano+"-"+mes+"-"+dia
sr = data
else:
sr = '0000-00-00'
return sr
# @function NAME STANDARDIZATION
# @version v0.18.04.30
##################################
##################################
def nbr_title(title=''):
sr = ''
uc = 1
title = title.lower()
for x in range(0, len(title)):
if len(title) > 0:
t = title[x]
if uc == 1:
if not t.isupper():
sr = sr + t.upper()
else:
sr = sr + t
else:
sr = sr + t
uc = 0
if t == '.':
uc = 1
return sr
def nbr_name(name=''):
uc = 1
sr = ''
name = name.replace('.', '. ')
for x in range(0, len(name)):
        # Character ############################
s = name[x]
if uc == 1:
if not s.isupper():
sr = sr + s.upper()
else:
sr = sr + s
uc = 0
else:
sr = sr + s.lower()
uc = 0
if s == ' ' or s == '.':
uc = 1
    # Rules ##################################
sr = sr.replace(' ', ' ')
sr = sr.replace(' E ', ' e ')
sr = sr.replace(' De ', ' de ')
sr = sr.replace(' Do ', ' do ')
sr = sr.replace(' Dos ', ' dos ')
sr = sr.replace(' Da ', ' da ')
sr = sr.replace(' Das ', ' das ')
sr = sr.replace(' Em ', ' em ')
sr = sr.replace(' O ', ' o ')
return sr
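# For illustration, the helpers above behave as follows:
#   dtos('30/04/2018')                   -> '2018-04-30' (any other length returns '0000-00-00')
#   nbr_name('maria das dores e silva')  -> 'Maria das Dores e Silva'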
with open('U:/Excel-Metadados/pilla_acervodoc.csv', newline='') as csvfile:
handle = '2050011959'
license = 'license.txt'
ast = ''
spamreader = csv.reader(csvfile, delimiter=';')
for row in spamreader:
hd = row[2]
while len(hd) < 5:
hd = '0' + hd
hd = '300' + hd
directory = 'pilla_raul/' + hd
########################### HANDLE
handle_nr = handle + '/' + hd
########################### ID
id = row[2]
idf = id
while (len(idf) < 4):
idf = '0' + idf
########################### ABSTRACT
abstract = row[11]
abstract = re.sub('\r\n', '; ', abstract)
title = nbr_name(abstract)
abstract = 'De: '+row[7]+'\r\rPara: '+row[8]+'\n\rData: '+row[6]+'\n\rDescrição: '+abstract
tl = title.split('.')
if len(tl) > 0:
title = tl[0]
title = title + ';'
tl = title.split(';')
if len(tl) > 0:
title = tl[0]
        ########################### SUBJECT
t = row[10]
t = re.sub('\r\n', ';', t)
t = re.sub('; ', ';', t)
t = t.split(';')
subj = '';
for tt in t:
tt.split()
if len(tt) > 1:
tt.strip()
tt.rstrip();
tt.lstrip();
if len(tt) > 2:
subj = subj + tt + ';'
ast = ast + 'insert into pilla (r_arq, r_nrdoc, r_nrdoc2, r_doc, r_local, r_dtdoc, r_remetente, r_destinatario, r_descricao, r_assunto, r_n1, r_data2, r_isd, r_rmes) '
ast = ast + ' values '
row7 = ''
row8 = ''
if len(row[7]) > 3:
row7 = "" + nbr_name(row[7]) + ""
if len(row[8]) > 3:
row8 = "" + nbr_name(row[8]) + ""
ast = ast + "("
ast = ast + "'"+row[1]+"', "
ast = ast + "'"+row[2]+"', "
ast = ast + "'"+row[3]+"', "
ast = ast + "'"+row[4]+"', "
ast = ast + "'"+row[5]+"', "
#ast = ast + "'"+row[6]+"', "
ast = ast + "'"+row8+"', "
ast = ast + "'"+row7+"', "
ast = ast + "'"+row[9]+"', "
ast = ast + "'"+row[10]+"', "
ast = ast + "'"+row[11]+"', "
ast = ast + "'"+row[12]+"', "
ast = ast + "'"+dtos(row[13])+"', "
ast = ast + "'"+row[14]+"', "
ast = ast + "'"+row[15]+"' "
ast = ast + ");"
ast = ast + '\r\n'
######################### Bundle
arq = 'd:/lixo/n.sql'
ok = 0;
        ######################### DUBLIN CORE
fdc = open(arq, 'w')
fdc.write(ast)
fdc.close()
|
py | 1a45225b02ba38b10ccc71b37b6cf341de3f9d3a | # -*- coding: utf-8 -*-
"""Repair command tests"""
from __future__ import unicode_literals
from django.core import management
from modoboa.lib.permissions import ObjectAccess, get_object_owner
from modoboa.lib.tests import ModoTestCase
from .. import factories, models
class RepairTestCase(ModoTestCase):
"""TestCase for repair command."""
@classmethod
def setUpTestData(cls): # NOQA:N802
"""Create some data."""
super(RepairTestCase, cls).setUpTestData()
factories.populate_database()
def test_management_command(self):
"""Check that command works fine."""
ObjectAccess.objects.all().delete()
mbox = models.Mailbox.objects.first()
alias = models.Alias.objects.first()
# assert mbox has no owner
self.assertIs(get_object_owner(mbox), None)
        # fix it. run in quiet mode because we don't want output in tests
ret = management.call_command("modo", "repair", "--quiet")
assert ret is None
# assert it's fixed
self.assertIsNot(get_object_owner(mbox), None)
self.assertIsNot(get_object_owner(alias), None)
def test_management_command_with_dry_run(self):
"""Check that command works fine."""
ObjectAccess.objects.all().delete()
mbox = models.Mailbox.objects.first()
# assert mbox has no owner
self.assertIs(get_object_owner(mbox), None)
        # show problems. run in quiet mode because we don't want output in tests
ret = management.call_command("modo", "repair", "--quiet", "--dry-run")
assert ret is None
# assert its not fixed
self.assertIs(get_object_owner(mbox), None)
def test_management_command_with_nul_domain(self):
"""Just assume nothing raise when an alias has no domain."""
models.Alias.objects.create(address="@modoboa.xxx")
ret = management.call_command("modo", "repair", "--quiet")
assert ret is None
def test_management_command_with_no_alias(self):
"""Check that problem is fixed."""
count, detail = models.Alias.objects.filter(
address="[email protected]", internal=True).delete()
self.assertEqual(count, 3)
ret = management.call_command("modo", "repair", "--quiet")
assert ret is None
self.assertTrue(
models.Alias.objects.filter(
address="[email protected]", internal=True).exists())
|
py | 1a4522b36859c341c6c66cb482bd451b3a6ebe20 | from lambdas.nop import _nop, _cma_output
def test_nop():
expected = None
actual = _nop()
assert expected == actual
def test_cma_output():
expected = {"NOP": None}
actual = _cma_output(None)
assert expected == actual
|
py | 1a45235f15a84ba183b30794d7373430a858a367 | """
Support for Modbus Coil sensors.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/binary_sensor.modbus/
"""
import logging
import voluptuous as vol
from homeassistant.components import modbus
from homeassistant.const import CONF_NAME, CONF_SLAVE
from homeassistant.components.binary_sensor import BinarySensorDevice
from homeassistant.helpers import config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['modbus']
CONF_COIL = 'coil'
CONF_COILS = 'coils'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_COILS): [{
vol.Required(CONF_COIL): cv.positive_int,
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_SLAVE): cv.positive_int
}]
})
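# Example configuration.yaml entry (a sketch; the sensor name and register addresses are assumptions):
#
# binary_sensor:
#   - platform: modbus
#     coils:
#       - name: Pump running
#         slave: 1
#         coil: 110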
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Modbus binary sensors."""
sensors = []
for coil in config.get(CONF_COILS):
sensors.append(ModbusCoilSensor(
coil.get(CONF_NAME),
coil.get(CONF_SLAVE),
coil.get(CONF_COIL)))
add_devices(sensors)
class ModbusCoilSensor(BinarySensorDevice):
"""Modbus coil sensor."""
def __init__(self, name, slave, coil):
"""Initialize the modbus coil sensor."""
self._name = name
self._slave = int(slave) if slave else None
self._coil = int(coil)
self._value = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def is_on(self):
"""Return the state of the sensor."""
return self._value
def update(self):
"""Update the state of the sensor."""
result = modbus.HUB.read_coils(self._slave, self._coil, 1)
try:
self._value = result.bits[0]
except AttributeError:
_LOGGER.error(
'No response from modbus slave %s coil %s',
self._slave,
self._coil)
|
py | 1a452372dbac04f7f541c5a75f6ca12e1d10c8fb | #!/usr/bin/env python3
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""
Start apollo data recorder.
It lists all available disks mounted under /media and prioritizes them in order:
- Disk#1. Largest NVME disk
- Disk#2. Smaller NVME disk
- ...
- Disk#x. Largest Non-NVME disk
- Disk#y. Smaller Non-NVME disk
- ...
Run with '--help' to see more options.
"""
import argparse
import datetime
import os
import subprocess
import sys
import psutil
MAP_COLLECTION_DATA_TOPICS = [
'/apollo/monitor/system_status',
'/apollo/sensor/gnss/best_pose',
'/apollo/sensor/gnss/gnss_status',
'/apollo/sensor/gnss/imu',
'/apollo/sensor/gnss/ins_stat',
'/apollo/sensor/gnss/odometry',
'/apollo/sensor/gnss/raw_data',
'/tf',
'/tf_static',
'/apollo/sensor/camera/front_12mm/image/compressed',
'/apollo/sensor/camera/front_6mm/image/compressed',
'/apollo/sensor/lidar16/front/up/Scan',
'/apollo/sensor/lidar16/front/up/compensator/PointCloud2',
'/apollo/sensor/lidar128/Scan',
'/apollo/sensor/lidar128/compensator/PointCloud2',
]
def shell_cmd(cmd, alert_on_failure=True):
"""Execute shell command and return (ret-code, stdout, stderr)."""
print('SHELL > {}'.format(cmd))
proc = subprocess.Popen(cmd, shell=True, close_fds=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
ret = proc.wait()
stdout = proc.stdout.read().decode('utf-8') if proc.stdout else None
stderr = proc.stderr.read().decode('utf-8') if proc.stderr else None
if alert_on_failure and stderr and ret != 0:
sys.stderr.write('{}\n'.format(stderr))
return (ret, stdout, stderr)
class ArgManager(object):
"""Arguments manager."""
def __init__(self):
self.parser = argparse.ArgumentParser(
description="Manage apollo data recording.")
self.parser.add_argument('--start', default=False, action="store_true",
help='Start recorder. It is the default '
'action if no other actions are triggered. In '
'that case, the False value is ignored.')
self.parser.add_argument('--stop', default=False, action="store_true",
help='Stop recorder.')
self.parser.add_argument('--split_duration', default="1m",
help='Duration to split bags, will be applied '
'as parameter to "rosbag record --duration".')
self._args = None
def args(self):
"""Get parsed args."""
if self._args is None:
self._args = self.parser.parse_args()
return self._args
class DiskManager(object):
"""Disk manager."""
def __init__(self):
"""Manage disks."""
disks = []
for disk in psutil.disk_partitions():
if not disk.mountpoint.startswith('/media/'):
continue
disks.append({
'mountpoint': disk.mountpoint,
'available_size': DiskManager.disk_avail_size(disk.mountpoint),
'is_nvme': disk.mountpoint.startswith('/media/apollo/internal_nvme'),
})
# Prefer NVME disks and then larger disks.
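        # With reverse=True the (is_nvme, available_size) key puts NVME disks first
        # and, within each group, larger disks first; e.g. a 200GB NVME disk still
        # outranks a 2TB SATA disk because True sorts above False.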
self.disks = sorted(
disks, reverse=True,
key=lambda disk: (disk['is_nvme'], disk['available_size']))
@staticmethod
def disk_avail_size(disk_path):
"""Get disk available size."""
statvfs = os.statvfs(disk_path)
return statvfs.f_frsize * statvfs.f_bavail
class Recorder(object):
"""Data recorder."""
def __init__(self, args):
self.args = args
self.disk_manager = DiskManager()
def start(self):
"""Start recording."""
if Recorder.is_running():
print('Another data recorder is running, skip.')
return
disks = self.disk_manager.disks
# Use the best disk, or fallback '/apollo' if none available.
disk_to_use = disks[0]['mountpoint'] if len(disks) > 0 else '/apollo'
topics = list(MAP_COLLECTION_DATA_TOPICS)
self.record_task(disk_to_use, topics)
def stop(self):
"""Stop recording."""
shell_cmd('pkill -f "cyber_recorder record"')
def record_task(self, disk, topics):
"""Record tasks into the <disk>/data/bag/<task_id> directory."""
task_id = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
task_dir = os.path.join(disk, 'data/bag', task_id)
print('Recording bag to {}'.format(task_dir))
log_file = '/apollo/data/log/apollo_record.out'
topics_str = ' -c '.join(topics)
os.makedirs(task_dir)
cmd = '''
cd "{}"
source /apollo/scripts/apollo_base.sh
source /apollo/framework/install/setup.bash
nohup cyber_recorder record -c {} >{} 2>&1 &
'''.format(task_dir, topics_str, log_file)
shell_cmd(cmd)
@staticmethod
def is_running():
"""Test if the given process running."""
_, stdout, _ = shell_cmd('pgrep -c -f "cyber_recorder record"', False)
# If stdout is the pgrep command itself, no such process is running.
return stdout.strip() != '1' if stdout else False
def main():
"""Main entry."""
arg_manager = ArgManager()
args = arg_manager.args()
recorder = Recorder(args)
if args.stop:
recorder.stop()
else:
recorder.start()
if __name__ == '__main__':
main()
|
py | 1a4523c14fac39bd0877c9781baf5e1a333a4c0c | import moviepy.editor as mpy
import argparse
import os
def parseArgs():
parser = argparse.ArgumentParser(
description='Edit video by picking intervals and highlight danmu')
parser.add_argument('vid_id', type=str,
help='the id for the video')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parseArgs()
vid_id = args.vid_id
print('Start preprocessing {}'.format(vid_id))
with open('cache/{}_intervals.txt'.format(vid_id)) as f:
intervals = eval(f.readline())
intervals_danmu = eval(f.readline())
ori_clip = mpy.VideoFileClip('cache/{}.mp4'.format(vid_id))
for i in intervals:
direc = os.listdir('cache/')
if '{}_clip_{}.mp4'.format(vid_id, i) in direc:
continue
ori_clip.subclip(i[0], i[1]).write_videofile(
'cache/{}_clip_{}.mp4'.format(vid_id, i))
    print('Video clips split, start interactive editing')
clips = []
for i, d in list(zip(intervals,intervals_danmu)):
print('interval: {}\ndanmu:'.format(i))
print(d)
clip = mpy.VideoFileClip('cache/{}_clip_{}.mp4'.format(vid_id, i))
os.system('mpv \"cache/{}_clip_{}.mp4\" --loop'.format(vid_id, i))
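        # The two numbers are seconds trimmed from the head and tail of the clip,
        # e.g. "3 5" keeps clip[3 : duration - 5].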
        shear = input('Give two time lengths to cut from head and tail> ')
if shear != '':
shear = list(map(int, shear.split()))
clip = clip.subclip(shear[0], clip.duration - shear[1])
picked = input('Pick danmu or press ENTER to abort this clip> ')
if picked == '':
continue
picked = picked
subtitle = (mpy.TextClip(picked, fontsize=40,
font='Microsoft-YaHei-UI-Bold',
color='white',
stroke_color='black',
stroke_width=2)
.margin(top=15, left=45, opacity=0)
.set_position(('left', 'top')))
clips.append(mpy.CompositeVideoClip([clip, subtitle])
.fadein(.3).set_duration(clip.duration).fadeout(.3))
out_clip = mpy.concatenate_videoclips(clips)
out_clip.write_videofile('output/{}_fin.mp4'.format(vid_id))
print('Edit done!')
|
py | 1a4524edb52e2f01ab4776940958856a346ca663 | import gym
import numpy as np
from gym.envs.registration import register
# Refer https://github.com/openai/gym/issues/565
register(
id='FrozenLakeNotSlippery-v0',
entry_point='gym.envs.toy_text:FrozenLakeEnv',
kwargs={'map_name' : '4x4', 'is_slippery': False},
max_episode_steps=2000,
reward_threshold=0.78, # optimum = .8196
)
#env = gym.make("FrozenLakeNotSlippery-v0")
env = gym.make("FrozenLakeNotSlippery-v0")
env.seed(0)
np.random.seed(56776)
# Test how the game works.
print("-------------Test game--------------")
ql_table = np.zeros([env.observation_space.n, env.action_space.n])
print(ql_table)
env.render()
env.reset()
hardcore_steps = [1, 1, 2, 2, 1, 2]
for step in hardcore_steps:
env.step(step)
env.render()
# Let the machine learn the steps.
print("-------------Let the machine learn the steps--------------")
env.reset()
env.render()
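# Adding Gaussian noise to the zero table gives every state a random initial
# action preference, so early argmax calls do not always pick action 0.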
ql_table = np.zeros([env.observation_space.n, env.action_space.n]) + np.random.randn(16, 4)
print(ql_table)
"""
Hyper parameters:
"""
n_round = 5000
n_steps = 2000
lr = 0.3
discount = 0.8
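# Each update below applies the tabular Q-learning rule
#   Q(s, a) <- (1 - lr) * Q(s, a) + lr * (reward + discount * max_a' Q(s', a'))
# and the per-step np.random.randn(1, 4) noise in the argmax acts as a simple
# exploration mechanism.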
for round in range(n_round):
state = env.reset()
for step in range(n_steps):
action = np.argmax(ql_table[state, :] + np.random.randn(1, 4))
new_state, reward, done, _ = env.step(action)
ql_table[state, action] = (1 - lr) * ql_table[state, action] + \
lr * (reward + discount * np.max(ql_table[new_state, :]))
state = new_state
if done is True:
break
print(np.argmax(ql_table, axis=1))
print(np.around(ql_table, 6))
env.reset()
for step in np.argmax(ql_table, axis=1):
state_new, reward, done, _ = env.step(step)
env.render() |
py | 1a452647cdf3134b2cf31294edc28f7ec2bdaa7e | """
Write a function that moves the first two elements of a list to the end of the list.
"""
numbers = [1, 2, 3, 4, 5]
def rotate(numbers):
numbers = [*numbers[2:], *numbers[0:2]]
return numbers
print(rotate(numbers)) |
py | 1a452706d054f564fc79570c5a89dc413565ac3b | '''
This file is a part of Test Mile Arjuna
Copyright 2018 Test Mile Software Testing Pvt Ltd
Website: www.TestMile.com
Email: support [at] testmile.com
Creator: Rahul Verma
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from arjuna.tpi.markup import *
from arjuna.tpi.markup_helpers import *
@test_function
def assert_nothingness(my):
my.steps.assert_none("Business Purpose", None)
my.steps.assert_not_none("Business Purpose", 1)
@test_function
def assert_none_fails_for_notnone(my):
my.steps.assert_none("Should fail for not None value", 1)
@test_function
def assert_notnone_fails_for_none(my):
my.steps.assert_not_none("Should fail for None value.", None)
|
py | 1a4527b12cef28c86e6438fd471fe702f355b3ea | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import namedtuple
from pyspark import SparkContext
from pyspark.rdd import RDD
from pyspark.mllib.common import JavaModelWrapper, callMLlibFunc, inherit_doc
from pyspark.mllib.util import JavaLoader, JavaSaveable
__all__ = ['MatrixFactorizationModel', 'ALS', 'Rating']
class Rating(namedtuple("Rating", ["user", "product", "rating"])):
"""
Represents a (user, product, rating) tuple.
>>> r = Rating(1, 2, 5.0)
>>> (r.user, r.product, r.rating)
(1, 2, 5.0)
>>> (r[0], r[1], r[2])
(1, 2, 5.0)
"""
def __reduce__(self):
return Rating, (int(self.user), int(self.product), float(self.rating))
@inherit_doc
class MatrixFactorizationModel(JavaModelWrapper, JavaSaveable, JavaLoader):
"""A matrix factorisation model trained by regularized alternating
least-squares.
>>> r1 = (1, 1, 1.0)
>>> r2 = (1, 2, 2.0)
>>> r3 = (2, 1, 2.0)
>>> ratings = sc.parallelize([r1, r2, r3])
>>> model = ALS.trainImplicit(ratings, 1, seed=10)
>>> model.predict(2, 2)
0.43...
>>> testset = sc.parallelize([(1, 2), (1, 1)])
>>> model = ALS.train(ratings, 2, seed=0)
>>> model.predictAll(testset).collect()
[Rating(user=1, product=1, rating=1.0...), Rating(user=1, product=2, rating=1.9...)]
>>> model = ALS.train(ratings, 4, seed=10)
>>> model.userFeatures().collect()
[(1, array('d', [...])), (2, array('d', [...]))]
>>> first_user = model.userFeatures().take(1)[0]
>>> latents = first_user[1]
>>> len(latents) == 4
True
>>> model.productFeatures().collect()
[(1, array('d', [...])), (2, array('d', [...]))]
>>> first_product = model.productFeatures().take(1)[0]
>>> latents = first_product[1]
>>> len(latents) == 4
True
>>> model = ALS.train(ratings, 1, nonnegative=True, seed=10)
>>> model.predict(2,2)
3.8...
>>> model = ALS.trainImplicit(ratings, 1, nonnegative=True, seed=10)
>>> model.predict(2,2)
0.43...
>>> import os, tempfile
>>> path = tempfile.mkdtemp()
>>> model.save(sc, path)
>>> sameModel = MatrixFactorizationModel.load(sc, path)
>>> sameModel.predict(2,2)
0.43...
>>> try:
... os.removedirs(path)
... except OSError:
... pass
"""
def predict(self, user, product):
return self._java_model.predict(int(user), int(product))
def predictAll(self, user_product):
assert isinstance(user_product, RDD), "user_product should be RDD of (user, product)"
first = user_product.first()
assert len(first) == 2, "user_product should be RDD of (user, product)"
user_product = user_product.map(lambda (u, p): (int(u), int(p)))
return self.call("predict", user_product)
def userFeatures(self):
return self.call("getUserFeatures")
def productFeatures(self):
return self.call("getProductFeatures")
class ALS(object):
@classmethod
def _prepare(cls, ratings):
assert isinstance(ratings, RDD), "ratings should be RDD"
first = ratings.first()
if not isinstance(first, Rating):
if isinstance(first, (tuple, list)):
ratings = ratings.map(lambda x: Rating(*x))
else:
raise ValueError("rating should be RDD of Rating or tuple/list")
return ratings
@classmethod
def train(cls, ratings, rank, iterations=5, lambda_=0.01, blocks=-1, nonnegative=False,
seed=None):
model = callMLlibFunc("trainALSModel", cls._prepare(ratings), rank, iterations,
lambda_, blocks, nonnegative, seed)
return MatrixFactorizationModel(model)
@classmethod
def trainImplicit(cls, ratings, rank, iterations=5, lambda_=0.01, blocks=-1, alpha=0.01,
nonnegative=False, seed=None):
model = callMLlibFunc("trainImplicitALSModel", cls._prepare(ratings), rank,
iterations, lambda_, blocks, alpha, nonnegative, seed)
return MatrixFactorizationModel(model)
def _test():
import doctest
import pyspark.mllib.recommendation
globs = pyspark.mllib.recommendation.__dict__.copy()
globs['sc'] = SparkContext('local[4]', 'PythonTest')
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
|
py | 1a4527e3cec0ce3e5469a934de3df305c72f0c2b | ''' Defines files and directories used in stage 1 of the IMB calculation'''
def filepaths():
output = {
'source_dir' : # This should be set to the main
# directory where the IMB data is held.
# Subdirectories should have names
# equal to the IMB labels, e.g. '1997D',
# '2012L', with all files for each IMB
# placed under its directory
'series_dir' : # Directory where all IMB data series (raw and derived) can be saved in ASCII form
'output_dir' : # Directory where all IMB data series, regularised to temperature measurements points, can be saved in netCDF format, for reading by second stage of the code
'mday_file' : # File containing a list of all IMBs for which dates are in UK format (most are in US format). Provided with the code.
'temp_masks_file' : # File containing a list of blocks of IMB temperature data known to be spurious. Provided with the code
'translation_file': # File containing a list of IMB elevation data series thought be in error by a fixed displacement. Provided with the code
'temp_translations_file': # File containing a list of IMB temperature data blocks thought to be in error by a fixed displacement. Provided with the code
'temp_ref_elev_file' : # File with details of where the top temperature measurement point is located relative to the initial snow-ice interface, for each IMB (if not given, the point is assumed to be 60cm above the interface). Provided with the code
}
return output
|
py | 1a4529aac2725f19a94411b0e898b525ec4785e8 | # Scrapy settings for crawl_data project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'crawl_data'
SPIDER_MODULES = ['crawl_data.spiders']
NEWSPIDER_MODULE = 'crawl_data.spiders'
FEED_EXPORT_ENCODING = 'utf-8'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'crawl_data (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'crawl_data.middlewares.CrawlDataSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'crawl_data.middlewares.CrawlDataDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'crawl_data.pipelines.CrawlDataPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
py | 1a452a1c865083856b5d753f61d121a8f46ee856 | print("Hello World")
print("Hello Again")
print("I like typing this")
print("This is fun")
print("Yay! printing")
print("I'd much rather not 'not'.")
print('I "said" do not touch this.')
|
py | 1a452c2745e6dc10ceac8c49c341395e5b06c5cc | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
from google import auth # type: ignore
from google.api_core import exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.cloud.dialogflow_v2beta1.types import environment
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-dialogflow",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class EnvironmentsTransport(abc.ABC):
"""Abstract transport class for Environments."""
AUTH_SCOPES = (
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
)
def __init__(
self,
*,
host: str = "dialogflow.googleapis.com",
credentials: credentials.Credentials = None,
credentials_file: typing.Optional[str] = None,
scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES,
quota_project_id: typing.Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]): The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scope (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = auth.load_credentials_from_file(
credentials_file, scopes=scopes, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = auth.default(
scopes=scopes, quota_project_id=quota_project_id
)
# Save the credentials.
self._credentials = credentials
# Lifted into its own function so it can be stubbed out during tests.
self._prep_wrapped_messages(client_info)
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.list_environments: gapic_v1.method.wrap_method(
self.list_environments, default_timeout=None, client_info=client_info,
),
}
@property
def list_environments(
self,
) -> typing.Callable[
[environment.ListEnvironmentsRequest],
typing.Union[
environment.ListEnvironmentsResponse,
typing.Awaitable[environment.ListEnvironmentsResponse],
],
]:
raise NotImplementedError()
__all__ = ("EnvironmentsTransport",)
|
py | 1a452cd618b5b58931a071cb43d49805c0ac7f3c | # Copyright 2020 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Utilities for creating EstimatorSpecs for Onsets and Frames models."""
import functools
from magenta.models.onsets_frames_transcription import constants
from magenta.models.onsets_frames_transcription import drum_mappings
from magenta.models.onsets_frames_transcription import infer_util
from magenta.models.onsets_frames_transcription import metrics
import tensorflow.compat.v1 as tf
from tensorflow.contrib import layers as contrib_layers
from tensorflow.contrib import tpu as contrib_tpu
def _drums_only_metric_ops(features, labels, frame_probs, onset_probs,
frame_predictions, onset_predictions,
offset_predictions, velocity_values, hparams):
"""Generate drum metrics: offsets/frames are ignored."""
del frame_predictions, offset_predictions # unused
metric_ops = metrics.define_metrics(
frame_probs=frame_probs,
onset_probs=onset_probs,
frame_predictions=onset_predictions,
onset_predictions=onset_predictions,
offset_predictions=onset_predictions,
velocity_values=velocity_values,
length=features.length,
sequence_label=labels.note_sequence,
frame_labels=labels.labels,
sequence_id=features.sequence_id,
hparams=hparams,
min_pitch=constants.MIN_MIDI_PITCH,
max_pitch=constants.MAX_MIDI_PITCH,
prefix='drums/',
onsets_only=True,
pitch_map=drum_mappings.GROOVE_PITCH_NAMES)
return metric_ops
def get_metrics(features, labels, frame_probs, onset_probs, frame_predictions,
onset_predictions, offset_predictions, velocity_values,
hparams):
"""Return metrics values ops."""
if hparams.drums_only:
return _drums_only_metric_ops(
features=features,
labels=labels,
frame_probs=frame_probs,
onset_probs=onset_probs,
frame_predictions=frame_predictions,
onset_predictions=onset_predictions,
offset_predictions=offset_predictions,
velocity_values=velocity_values,
hparams=hparams)
else:
return metrics.define_metrics(
frame_probs=frame_probs,
onset_probs=onset_probs,
frame_predictions=frame_predictions,
onset_predictions=onset_predictions,
offset_predictions=offset_predictions,
velocity_values=velocity_values,
length=features.length,
sequence_label=labels.note_sequence,
frame_labels=labels.labels,
sequence_id=features.sequence_id,
hparams=hparams)
def _predict_sequences(frame_probs, onset_probs, frame_predictions,
onset_predictions, offset_predictions, velocity_values,
hparams):
"""Predict a batch of sequences."""
def predict_sequence(frame_probs, onset_probs, frame_predictions,
onset_predictions, offset_predictions, velocity_values,
hparams):
"""Predict a single sequence."""
if hparams.drums_only:
sequence_prediction = infer_util.predict_sequence(
frame_probs=frame_probs,
onset_probs=onset_probs,
frame_predictions=onset_predictions,
onset_predictions=onset_predictions,
offset_predictions=onset_predictions,
velocity_values=velocity_values,
min_pitch=constants.MIN_MIDI_PITCH,
hparams=hparams,
onsets_only=True)
for note in sequence_prediction.notes:
note.is_drum = True
else:
sequence_prediction = infer_util.predict_sequence(
frame_probs=frame_probs,
onset_probs=onset_probs,
frame_predictions=frame_predictions,
onset_predictions=onset_predictions,
offset_predictions=offset_predictions,
velocity_values=velocity_values,
min_pitch=constants.MIN_MIDI_PITCH,
hparams=hparams)
return sequence_prediction.SerializeToString()
sequences = []
for i in range(frame_predictions.shape[0]):
sequence = tf.py_func(
functools.partial(predict_sequence, hparams=hparams),
inp=[
frame_probs[i],
onset_probs[i],
frame_predictions[i],
onset_predictions[i],
offset_predictions[i],
velocity_values[i],
],
Tout=tf.string,
stateful=False)
sequence.set_shape([])
sequences.append(sequence)
return tf.stack(sequences)
def get_estimator_spec(hparams, mode, features, labels, frame_logits,
onset_logits, offset_logits, velocity_values,
offset_network=True):
"""Create TPUEstimatorSpec."""
loss_metrics = {}
loss = None
if (mode == tf.estimator.ModeKeys.TRAIN or
mode == tf.estimator.ModeKeys.EVAL):
onset_losses = tf.losses.sigmoid_cross_entropy(
labels.onsets[:, :, :constants.MIDI_PITCHES],
onset_logits[:, :, :constants.MIDI_PITCHES],
weights=tf.expand_dims(
tf.sequence_mask(
features.length, maxlen=tf.shape(labels.onsets)[1]),
axis=2))
loss_metrics['onset'] = onset_losses
if offset_network and not hparams.drums_only:
offset_losses = tf.losses.sigmoid_cross_entropy(
labels.offsets[:, :, :constants.MIDI_PITCHES],
offset_logits[:, :, :constants.MIDI_PITCHES],
weights=tf.expand_dims(
tf.sequence_mask(
features.length, maxlen=tf.shape(labels.offsets)[1]),
axis=2))
loss_metrics['offset'] = offset_losses
velocity_losses = tf.losses.mean_squared_error(
labels.velocities, velocity_values,
weights=labels.onsets * hparams.velocity_loss_weight)
loss_metrics['velocity'] = velocity_losses
if not hparams.drums_only:
frame_losses = tf.losses.sigmoid_cross_entropy(
labels.labels[:, :, :constants.MIDI_PITCHES],
frame_logits[:, :, :constants.MIDI_PITCHES],
weights=tf.expand_dims(
tf.sequence_mask(
features.length, maxlen=tf.shape(labels.labels)[1]),
axis=2))
loss_metrics['frame'] = frame_losses
loss = tf.losses.get_total_loss()
if (mode == tf.estimator.ModeKeys.EVAL or
mode == tf.estimator.ModeKeys.PREDICT):
frame_probs = tf.sigmoid(frame_logits)
onset_probs = tf.sigmoid(onset_logits)
if offset_network:
offset_probs = tf.sigmoid(offset_logits)
else:
offset_probs = tf.zeros_like(onset_probs)
frame_predictions = frame_probs > hparams.predict_frame_threshold
onset_predictions = onset_probs > hparams.predict_onset_threshold
offset_predictions = offset_probs > hparams.predict_offset_threshold
if hparams.drum_prediction_map:
map_predictions = functools.partial(
drum_mappings.map_pianoroll,
mapping_name=hparams.drum_prediction_map,
reduce_mode='any',
min_pitch=constants.MIN_MIDI_PITCH)
frame_predictions = tf.map_fn(map_predictions, frame_predictions)
onset_predictions = tf.map_fn(map_predictions, onset_predictions)
offset_predictions = tf.map_fn(map_predictions, offset_predictions)
map_values = functools.partial(
drum_mappings.map_pianoroll,
mapping_name=hparams.drum_prediction_map,
reduce_mode='max',
min_pitch=constants.MIN_MIDI_PITCH)
velocity_values = tf.map_fn(map_values, velocity_values)
metrics_values = get_metrics(features, labels, frame_probs, onset_probs,
frame_predictions, onset_predictions,
offset_predictions, velocity_values, hparams)
for label, loss_collection in loss_metrics.items():
loss_label = 'losses/' + label
metrics_values[loss_label] = loss_collection
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = contrib_layers.optimize_loss(
name='training',
loss=loss,
global_step=tf.train.get_or_create_global_step(),
learning_rate=hparams.learning_rate,
learning_rate_decay_fn=functools.partial(
tf.train.exponential_decay,
decay_steps=hparams.decay_steps,
decay_rate=hparams.decay_rate,
staircase=True),
clip_gradients=hparams.clip_norm,
summaries=[],
optimizer=lambda lr: contrib_tpu.CrossShardOptimizer( # pylint:disable=g-long-lambda
tf.train.AdamOptimizer(lr)))
return contrib_tpu.TPUEstimatorSpec(mode=mode, loss=loss, train_op=train_op)
elif mode == tf.estimator.ModeKeys.EVAL:
metric_ops = {k: tf.metrics.mean(v) for k, v in metrics_values.items()}
return tf.estimator.EstimatorSpec(
mode=mode, loss=loss, eval_metric_ops=metric_ops)
elif mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
'frame_probs':
frame_probs,
'onset_probs':
onset_probs,
'frame_predictions':
frame_predictions,
'onset_predictions':
onset_predictions,
'offset_predictions':
offset_predictions,
'velocity_values':
velocity_values,
'sequence_predictions':
_predict_sequences(
frame_probs=frame_probs,
onset_probs=onset_probs,
frame_predictions=frame_predictions,
onset_predictions=onset_predictions,
offset_predictions=offset_predictions,
velocity_values=velocity_values,
hparams=hparams),
# Include some features and labels in output because Estimator 'predict'
# API does not give access to them.
'sequence_ids':
features.sequence_id,
'sequence_labels':
labels.note_sequence,
'frame_labels':
labels.labels,
'onset_labels':
labels.onsets,
}
for k, v in metrics_values.items():
predictions[k] = tf.stack(v)
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
else:
raise ValueError('Unsupported mode: %s' % mode)
|
py | 1a452e0ca68d62d4517ca442947160866f4c4b1d | """
Metadata for morphology experiments.
"""
# Copyright 2018-2020 CNRS
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import inspect
from datetime import datetime
from .base import KGObject, KGQuery, cache, Field, Distribution
from .commons import QuantitativeValue, MorphologyType, BrainRegion, SomaType, ObjectiveType
from .core import Subject, Person, Protocol
from .minds import Dataset
from .utility import compact_uri, standard_context, as_list
from .experiment import Slice
from .electrophysiology import PatchedCell, PatchedSlice
from .optophysiology import Position
DEFAULT_NAMESPACE = "neuralactivity"
class LabeledCell(KGObject):
"""A labeled cell used in a morphology study."""
namespace = DEFAULT_NAMESPACE
_path = "/morphology/labeledcell/v0.1.1"
type = ["nsg:LabeledCell", "prov:Entity"]
query_id = "fgModified"
query_id_resolved = "fgResolvedModified"
collection_class = "LabeledCellCollection"
experiment_class = "PatchClampExperiment"
context = {
"schema": "http://schema.org/",
"prov": "http://www.w3.org/ns/prov#",
"rdfs": "http://www.w3.org/2000/01/rdf-schema#",
"nsg": "https://bbp-nexus.epfl.ch/vocabs/bbp/neurosciencegraph/core/v0.1.0/",
"name": "schema:name",
"brainRegion": "nsg:brainRegion",
"mType": "nsg:mType",
"position": "nsg:position",
"spatialCellName": "nsg:spatialCellName",
"reconstructionRequested": "nsg:reconstructionRequested",
"reconstructable": "nsg:reconstructable"
}
fields = (
Field("name", str, "name", required=True),
Field("project_name", str, "projectName"),
Field("brain_location", BrainRegion, "brainRegion", multiple=True),
Field("morphology_type", MorphologyType, "mType"), # specifies the coordinates of the location of the cell in the slice
Field("location_in_slice", Position, "position"), #change to 3Dvector
Field("spatial_cell_name", str, "spatialCellName"), # spatial cell name given during the annotation process
Field("reconstruction_requested", bool, "reconstructionRequested"), # indicates if reconstruction the cell has been requested or not
Field("reconstructable", bool, "reconstructable"), #indicates if the cell can be reconstructed or not
Field("patched_cell", PatchedCell, "wasRevisionOf"),
Field("collection", "morphology.LabeledCellCollection", "^prov:hadMember",
reverse="labeled_cell") #chance reverse when labeledcellcollationmade
)
def __init__(self, name, project_name, brain_location, morphology_type=None,
location_in_slice=None, spatial_cell_name=None, reconstruction_requested=None,
reconstructable=None, patched_cell=None, collection=None, id=None, instance=None):
args = locals()
args.pop("self")
KGObject.__init__(self, **args)
class LabeledCellCollection(KGObject):
"""A collection of labeled cells."""
namespace = DEFAULT_NAMESPACE
_path = "/morphology/labeledcellcollection/v0.1.1"
type = ["nsg:Collection"]
context = {
"schema": "http://schema.org/",
"prov": "http://www.w3.org/ns/prov#",
"nsg": "https://bbp-nexus.epfl.ch/vocabs/bbp/neurosciencegraph/core/v0.1.0/",
"name": "schema:name",
"size": "schema:size",
"hadMember": "prov:hadMember"
}
fields = (
Field("name", str, "name", required=True),
Field("cells", LabeledCell, "hadMember", required=True, multiple=True),
Field("slice", "morphology.AnnotatedSlice", "^nsg:hasPart", reverse="recorded_cells") # chcek reverse
)
def __init__(self, name,cells, slice=None, id=None, instance=None):
args = locals()
args.pop("self")
KGObject.__init__(self, **args)
@property
def size(self):
return len(self.cells)
class FixedStainedSlice(KGObject):
"""An fixed, stained slice from a morphology experiment."""
namespace = DEFAULT_NAMESPACE
_path = "/morphology/fixedstainedslice/v0.1.1/"
type = ["nsg:FixedStainedSlice", "prov:Entity"]
context = {
"schema": "http://schema.org/",
"prov": "http://www.w3.org/ns/prov#",
"nsg": "https://bbp-nexus.epfl.ch/vocabs/bbp/neurosciencegraph/core/v0.1.0/",
"dcterms": "http://purl.org/dc/terms/",
"name": "schema:name",
"wasRevisionOf": "prov:wasRevisionOf"
}
fields = (
Field("name", str, "name", required=True),
Field("patched_slice", PatchedSlice, "wasRevisionOf")
)
def __init__(self, name, patched_slice=None, id=None, instance=None):
args = locals()
args.pop("self")
KGObject.__init__(self, **args)
class AnnotatedSlice(KGObject):
"""An annotated slice from a morphology experiment."""
namespace = DEFAULT_NAMESPACE
_path = "/morphology/annotatedslice/v0.1.1/"
type = ["nsg:AnnotatedSlice", "prov:Entity"]
context = {
"schema": "http://schema.org/",
"prov": "http://www.w3.org/ns/prov#",
"nsg": "https://bbp-nexus.epfl.ch/vocabs/bbp/neurosciencegraph/core/v0.1.0/",
"dcterms": "http://purl.org/dc/terms/",
"name": "schema:name",
"annotationAngle": "nsg:annotationAngle",
"annotatorComment": "nsg:annotatorComment",
"hasPart": "schema:hasPart",
"wasRevisionOf": "prov:wasRevisionOf"
}
fields = (
Field("name", str, "name", required=True),
Field("annotation_angle", QuantitativeValue, "annotationAngle"),
Field("annotator_comment", str, "annotatorComment"),
Field("cell_collection", LabeledCellCollection, "hasPart"),
Field("fixed_stained_slice", FixedStainedSlice, "wasRevisionOf")
)
def __init__(self, name, annotation_angle=None, annotator_comment=None,
cell_collection=None, fixed_stained_slice=None, id=None, instance=None):
args = locals()
args.pop("self")
KGObject.__init__(self, **args)
class ReconstructedCell(KGObject):
"""A reconstructed cell."""
namespace = DEFAULT_NAMESPACE
_path = "/morphology/reconstructedcell/v0.1.4"
type = ["nsg:ReconstructedCell", "prov:Entity"]
context = {
"schema": "http://schema.org/",
"prov": "http://www.w3.org/ns/prov#",
"rdfs": "http://www.w3.org/2000/01/rdf-schema#",
"nsg": "https://bbp-nexus.epfl.ch/vocabs/bbp/neurosciencegraph/core/v0.1.0/",
"name": "schema:name",
"brainLocation": "nsg:brainLocation",
"mType": "nsg:mType",
"somaType": "nsg:somaType"
}
fields = (
Field("name", str, "name", required=True),
Field("soma_brain_location", BrainRegion, "brainLocation", multiple=True),
Field("axon_projection", BrainRegion, "brainRegion", multiple=True),
Field("morphology_type", MorphologyType, "mType"),
Field("soma_type", SomaType, "somaType")
)
def __init__(self, name, soma_brain_location=None, axon_projection=None, morphology_type=None,
soma_type=None, id=None, instance=None):
args = locals()
args.pop("self")
KGObject.__init__(self, **args)
class FixationStainingMounting(KGObject):
"""Fixing, Staining and Mounting activities description"""
namespace = DEFAULT_NAMESPACE
_path = "/morphology/fixationstainingmounting/v0.1.1"
type = ["nsg:FixationStainingMounting", "prov:Activity"]
context = {
"schema": "http://schema.org/",
"prov": "http://www.w3.org/ns/prov#",
"rdfs": "http://www.w3.org/2000/01/rdf-schema#",
"nsg": "https://bbp-nexus.epfl.ch/vocabs/bbp/neurosciencegraph/core/v0.1.0/",
"name": "schema:name",
"fixationMethod": "nsg:fixationMethod",
"stain": "nsg:stain",
"mountingMedia": "nsg:mountingMedia",
"used": "prov:used",
"generated": "prov:generated"
}
fields = (
Field("name", str, "name", required=True),
Field("fixation_method", str, "fixationMethod"),
Field("stain", str, "stain"),
Field("mounting_media", str, "mountingMedia"),
Field("slice_used", Slice, "used"),
Field("slice_generated", FixedStainedSlice, "generated")
)
def __init__(self, name, fixation_method=None, stain=None, mounting_media=None,
slice_used=None, slice_generated=None, id=None, instance=None):
args = locals()
args.pop("self")
KGObject.__init__(self, **args)
class AcquisitionAnnotation(KGObject):
"""Acquisition and annotation activity"""
namespace = DEFAULT_NAMESPACE
_path = "/morphology/acquisitionannotation/v0.1.1"
type = ["nsg:AcquisitionAnnotation", "prov:Activity"]
context = {
"schema": "http://schema.org/",
"prov": "http://www.w3.org/ns/prov#",
"rdfs": "http://www.w3.org/2000/01/rdf-schema#",
"nsg": "https://bbp-nexus.epfl.ch/vocabs/bbp/neurosciencegraph/core/v0.1.0/",
"name": "schema:name",
"objectiveMagnification": "nsg:objectiveMagnification",
"used": "prov:used",
"generated": "prov:generated",
}
fields = (
Field("name", str, "name", required=True),
Field("objective_magnification", str, "objectiveMagnification"),
Field("fixed_stained_slice", FixedStainedSlice, "used"),
Field("annotated_slice", AnnotatedSlice, "generated")
)
def __init__(self, name, objective_magnification=None, fixed_stained_slice=None,
annotated_slice=None, id=None, instance=None):
args = locals()
args.pop("self")
KGObject.__init__(self, **args)
class Reconstruction(KGObject):
"""Reconstruction activity"""
namespace = DEFAULT_NAMESPACE
_path = "/morphology/reconstruction/v0.1.2"
type = ["nsg:Reconstruction", "prov:Activity"]
context = {
"schema": "http://schema.org/",
"prov": "http://www.w3.org/ns/prov#",
"rdfs": "http://www.w3.org/2000/01/rdf-schema#",
"nsg": "https://bbp-nexus.epfl.ch/vocabs/bbp/neurosciencegraph/core/v0.1.0/",
"name": "schema:name",
"objectiveType": "nsg:objectiveType",
"objectiveMagnification": "nsg:objectiveMagnification",
"compressionCorrection": "nsg:compressionCorrection",
"used": "prov:used",
"generated": "prov:generated",
}
fields = (
Field("name", str, "name", required=True),
Field("objective_type", ObjectiveType, "objectiveType"),
Field("objective_magnification", str, "objectiveMagnification"),
Field("compression_correction", str, "compressionCorrection"),
Field("labeled_cell", LabeledCell, "used"),
Field("reconstructed_cell", ReconstructedCell, "generated")
)
def __init__(self, name, objective_type=None, compression_correction=None, labeled_cell=None,
reconstructed_cell=None, id=None, instance=None):
args = locals()
args.pop("self")
KGObject.__init__(self, **args)
|
py | 1a452e0fea9a041166b3c29a5d8965fc5fdfe3f0 | import pytest
from facegram.posts.tests.factories import (
PostFactory, PostCommentFactory, CommentVoteFactory
)
from facegram.posts.models import Post
@pytest.fixture
def post() -> Post:
return PostFactory()
@pytest.fixture
def public_post() -> Post:
return PostFactory(privacy="EO")
@pytest.fixture
def comment():
return PostCommentFactory()
@pytest.fixture
def comment_vote():
return CommentVoteFactory() |
py | 1a452e803a23f1603cd5c516904f7084e2bd043b | # Copyright 2019 SCHUFA Holding AG
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
class TreeNode:
"""
A helper class to store the tree structure of a model tree.
Do not instantiate this class directly, but used the model tree classes
Parameters
----------
depth : int, (default=0)
Zero-based depth of the node in the tree
estimator : object
Base estimator of the node.
This estimator is used in leaf nodes for predictions, but can also be stored in other nodes.
children : list or None
List of child nodes. Should have 2 or 0 elements or be None.
split : Split
Defines, how samples are split (and mapped) to the child nodes.
Attributes
----------
depth : int, (default=0)
Zero-based depth of the node in the tree
estimator : object
Base estimator of the node.
This estimator is used in leaf nodes for predictions, but can also be stored in other nodes.
children : list or None
List of child nodes. Should have 2 or 0 elements or be None.
split : Split
Defines, how samples are split (and mapped) to the child nodes.
See Also
--------
modeltrees.tree.BaseModelTree : Base Model Tree implementation
Split : Class that defines how split / mapping to the child nodes
Notes
-----
This is not a sklearn estimator class, but a helper class
"""
def __init__(self, depth=0, estimator=None, children=None, split=None):
self.depth = depth
self.estimator = estimator
self.children = children
self.split = split
def is_leaf(self):
"""
Checks, if the node is a leaf node, i.e. no split is set.
Returns
-------
True, if the node is a leaf node.
"""
return self.split is None
def map_to_leaf(self, X):
"""
Maps input samples to leaf nodes by using split rules and the subtree structure
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input Features of the samples
Returns
-------
leaf_idx: array-like, shape = [n_samples]
For each sample an index of the corresponding leaf node.
leafs: list
A list of leaf nodes. Positions correspond to the indices in `leaf_idx`
"""
if self.is_leaf():
return np.zeros(np.shape(X)[0], dtype=int), [self]
else:
child_idx = self.split.map_to_children(X)
leaf_idx = -np.ones(child_idx.shape, dtype=int)
leafs = []
# Iterate over children
for c in range(len(self.children)):
# Get sample subset for child c
idx = child_idx == c
if np.any(idx):
# Recursively map to leafs
leaf_idx_, leafs_ = self.children[c].map_to_leaf(X[idx])
# Include results into output leaf_idx
# Note that we need to shift the index to avoid return the same leaf index for different leafs.
shift = len(leafs)
leaf_idx[idx] = leaf_idx_ + shift
# Append the new found leafs
leafs = leafs + leafs_
# Return results
return leaf_idx, leafs
class Split:
"""
Defines a splitting of a decision / model tree node, i.e. the mapping of samples to the child node.
This class supports splits based on one feature and threshold.
All samples with a feature value (in the given feature) less or equal to the threshold are mapped to child 0.
All others are mapped to child 1.
Parameters
----------
split_feature : int
Index of the feature that is used for the split
split_threshold : int
Threshold for the split.
Attributes
----------
split_feature : int
Index of the feature that is used for the split
split_threshold : int
Threshold for the split.
"""
def __init__(self, split_feature, split_threshold):
self.split_feature = split_feature
self.split_threshold = split_threshold
def _apply_split(self, X, y = None):
"""
Splits a set samples according to the defines split rule in split.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input Features of the samples
y : array-like, shape = [n_samples] or [n_samples, n_outputs], optional
Target variable.
Returns
-------
subsets: list
A list of Subsets. If `y` is `None`, each element `i` is an array with [n_samples[i], n_features].
Otherwise each element is a pair of input features and target variable.
"""
# Check for left subtree
split_filter = X[:, self.split_feature] <= self.split_threshold
# Output depending in input
if y is None:
return [X[split_filter], X[~split_filter]]
else:
return [
(X[split_filter], y[split_filter]), # Samples for the left subtree
(X[~split_filter], y[~split_filter]) # Samples for the right subtree
]
def map_to_children(self, X):
"""
Maps samples to child nodes. This is done based on the split feature and threshold
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input Features of the samples
Returns
-------
child_idx: array-like, shape = [n_samples]
For each sample an index (0 for left child, 1 for right child).
"""
child_idx = 1 - (X[:, self.split_feature] <= self.split_threshold)
return child_idx
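# Usage sketch (hypothetical wiring; the model tree estimators normally build
# this structure themselves):
#
#   import numpy as np
#   split = Split(split_feature=0, split_threshold=0.5)
#   root = TreeNode(split=split,
#                   children=[TreeNode(depth=1), TreeNode(depth=1)])
#   X = np.array([[0.2], [0.9]])
#   leaf_idx, leafs = root.map_to_leaf(X)   # leaf_idx == array([0, 1])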
|
py | 1a452ef05c53e4affb527abfdc4204df674c48e3 | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import base64
import hashlib
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AddressPending
from ccxt.base.errors import NotSupported
class buda(Exchange):
def describe(self):
return self.deep_extend(super(buda, self).describe(), {
'id': 'buda',
'name': 'Buda',
'countries': ['AR', 'CL', 'CO', 'PE'],
'rateLimit': 1000,
'version': 'v2',
'has': {
'CORS': False,
'createDepositAddress': True,
'fetchClosedOrders': True,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchFundingFees': True,
'fetchMyTrades': False,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrders': True,
'fetchTrades': True,
'fetchWithdrawals': True,
'withdraw': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/47380619-8a029200-d706-11e8-91e0-8a391fe48de3.jpg',
'api': 'https://www.buda.com/api',
'www': 'https://www.buda.com',
'doc': 'https://api.buda.com',
'fees': 'https://www.buda.com/comisiones',
},
'status': {
'status': 'error',
'updated': None,
'eta': None,
'url': None,
},
'api': {
'public': {
'get': [
'pairs',
'markets',
'currencies',
'markets/{market}',
'markets/{market}/ticker',
'markets/{market}/volume',
'markets/{market}/order_book',
'markets/{market}/trades',
'currencies/{currency}/fees/deposit',
'currencies/{currency}/fees/withdrawal',
'tv/history',
],
'post': [
'markets/{market}/quotations',
],
},
'private': {
'get': [
'balances',
'balances/{currency}',
'currencies/{currency}/balances',
'orders',
'orders/{id}',
'markets/{market}/orders',
'deposits',
'currencies/{currency}/deposits',
'withdrawals',
'currencies/{currency}/withdrawals',
'currencies/{currency}/receive_addresses',
'currencies/{currency}/receive_addresses/{id}',
],
'post': [
'markets/{market}/orders',
'currencies/{currency}/deposits',
'currencies/{currency}/withdrawals',
'currencies/{currency}/simulated_withdrawals',
'currencies/{currency}/receive_addresses',
],
'put': [
'orders/{id}',
],
},
},
'timeframes': {
'1m': '1',
'5m': '5',
'30m': '30',
'1h': '60',
'2h': '120',
'1d': 'D',
'1w': 'W',
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': 0.008, # 0.8%
'maker': 0.004, # 0.4%
'tiers': {
'taker': [
[0, 0.008], # 0.8%
[2000, 0.007], # 0.7%
[20000, 0.006], # 0.6%
[100000, 0.005], # 0.5%
[500000, 0.004], # 0.4%
[2500000, 0.003], # 0.3%
[12500000, 0.002], # 0.2%
],
'maker': [
[0, 0.004], # 0.4%
[2000, 0.0035], # 0.35%
[20000, 0.003], # 0.3%
[100000, 0.0025], # 0.25%
[500000, 0.002], # 0.2%
[2500000, 0.0015], # 0.15%
[12500000, 0.001], # 0.1%
],
},
},
},
'exceptions': {
'not_authorized': AuthenticationError, # {message: 'Invalid credentials', code: 'not_authorized'}
'forbidden': PermissionDenied, # {message: 'You dont have access to self resource', code: 'forbidden'}
'invalid_record': ExchangeError, # {message: 'Validation Failed', code: 'invalid_record', errors: []}
'not_found': ExchangeError, # {message: 'Not found', code: 'not_found'}
'parameter_missing': ExchangeError, # {message: 'Parameter missing', code: 'parameter_missing'}
'bad_parameter': ExchangeError, # {message: 'Bad Parameter format', code: 'bad_parameter'}
},
})
def fetch_currency_info(self, currency, currencies=None):
if not currencies:
response = self.publicGetCurrencies()
currencies = self.safe_value(response, 'currencies')
for i in range(0, len(currencies)):
currencyInfo = currencies[i]
if currencyInfo['id'] == currency:
return currencyInfo
return None
def fetch_markets(self, params={}):
marketsResponse = self.publicGetMarkets(params)
markets = self.safe_value(marketsResponse, 'markets')
currenciesResponse = self.publicGetCurrencies()
currencies = self.safe_value(currenciesResponse, 'currencies')
result = []
for i in range(0, len(markets)):
market = markets[i]
id = self.safe_string(market, 'id')
baseId = self.safe_string(market, 'base_currency')
quoteId = self.safe_string(market, 'quote_currency')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
baseInfo = self.fetch_currency_info(baseId, currencies)
quoteInfo = self.fetch_currency_info(quoteId, currencies)
symbol = base + '/' + quote
precision = {
'amount': baseInfo['input_decimals'],
'price': quoteInfo['input_decimals'],
}
limits = {
'amount': {
'min': float(market['minimum_order_amount'][0]),
'max': None,
},
'price': {
'min': math.pow(10, -precision['price']),
'max': None,
},
}
limits['cost'] = {
'min': limits['amount']['min'] * limits['price']['min'],
'max': None,
}
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'active': True,
'precision': precision,
'limits': limits,
'info': market,
})
return result
def fetch_currencies(self, params={}):
response = self.publicGetCurrencies()
currencies = response['currencies']
result = {}
for i in range(0, len(currencies)):
currency = currencies[i]
if not currency['managed']:
continue
id = self.safe_string(currency, 'id')
code = self.safe_currency_code(id)
precision = self.safe_float(currency, 'input_decimals')
minimum = math.pow(10, -precision)
result[code] = {
'id': id,
'code': code,
'info': currency,
'name': None,
'active': True,
'fee': None,
'precision': precision,
'limits': {
'amount': {
'min': minimum,
'max': None,
},
'price': {
'min': minimum,
'max': None,
},
'cost': {
'min': None,
'max': None,
},
'deposit': {
'min': float(currency['deposit_minimum'][0]),
'max': None,
},
'withdraw': {
'min': float(currency['withdrawal_minimum'][0]),
},
},
}
return result
def fetch_funding_fees(self, codes=None, params={}):
# by default it will try load withdrawal fees of all currencies(with separate requests)
# however if you define codes = ['ETH', 'BTC'] in args it will only load those
self.load_markets()
withdrawFees = {}
depositFees = {}
info = {}
if codes is None:
codes = list(self.currencies.keys())
for i in range(0, len(codes)):
code = codes[i]
currency = self.currency(code)
request = {'currency': currency['id']}
withdrawResponse = self.publicGetCurrenciesCurrencyFeesWithdrawal(request)
depositResponse = self.publicGetCurrenciesCurrencyFeesDeposit(request)
withdrawFees[code] = self.parse_funding_fee(withdrawResponse['fee'])
depositFees[code] = self.parse_funding_fee(depositResponse['fee'])
info[code] = {
'withdraw': withdrawResponse,
'deposit': depositResponse,
}
return {
'withdraw': withdrawFees,
'deposit': depositFees,
'info': info,
}
def parse_funding_fee(self, fee, type=None):
if type is None:
type = fee['name']
if type == 'withdrawal':
type = 'withdraw'
return {
'type': type,
'currency': fee['base'][1],
'rate': fee['percent'],
'cost': float(fee['base'][0]),
}
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = self.publicGetMarketsMarketTicker(self.extend(request, params))
ticker = self.safe_value(response, 'ticker')
return self.parse_ticker(ticker, market)
def parse_ticker(self, ticker, market=None):
timestamp = self.milliseconds()
symbol = None
if market is not None:
symbol = market['symbol']
last = float(ticker['last_price'][0])
percentage = float(ticker['price_variation_24h'])
open = float(self.price_to_precision(symbol, last / (percentage + 1)))
change = last - open
average = self.sum(last, open) / 2
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': None,
'low': None,
'bid': float(ticker['max_bid'][0]),
'bidVolume': None,
'ask': float(ticker['min_ask'][0]),
'askVolume': None,
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': open,
'change': change,
'percentage': percentage * 100,
'average': average,
'baseVolume': float(ticker['volume'][0]),
'quoteVolume': None,
'info': ticker,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
# the since argument works backwards – returns trades up to the specified timestamp
# therefore not implemented here
# the method is still available for users to be able to traverse backwards in time
# by using the timestamp from the first received trade upon each iteration
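        # illustrative backward-traversal sketch (added note; the 'timestamp' parameter
        # name is an assumption based on the comment above, not confirmed API docs):
        #   first = self.fetch_trades(symbol, limit=50)
        #   older = self.fetch_trades(symbol, limit=50, params={'timestamp': first[0]['timestamp']})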
if limit is not None:
request['limit'] = limit # 50 max
response = self.publicGetMarketsMarketTrades(self.extend(request, params))
#
# {trades: { market_id: "ETH-BTC",
# timestamp: null,
# last_timestamp: "1536901277302",
# entries: [["1540077456791", "0.0063767", "0.03", "sell", 479842],
# ["1539916642772", "0.01888263", "0.03019563", "sell", 479438],
# ["1539834081787", "0.023718648", "0.031001", "sell", 479069],
# ...]
#
return self.parse_trades(response['trades']['entries'], market, since, limit)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
# ["1540077456791", "0.0063767", "0.03", "sell", 479842]
#
timestamp = None
side = None
type = None
price = None
amount = None
id = None
order = None
fee = None
symbol = None
cost = None
if market:
symbol = market['symbol']
if isinstance(trade, list):
timestamp = int(trade[0])
price = float(trade[1])
amount = float(trade[2])
cost = price * amount
side = trade[3]
id = str(trade[4])
return {
'id': id,
'order': order,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': type,
'side': side,
'takerOrMaker': None,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = self.publicGetMarketsMarketOrderBook(self.extend(request, params))
orderbook = self.safe_value(response, 'order_book')
return self.parse_order_book(orderbook)
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
if since is None:
since = self.milliseconds() - 86400000
request = {
'symbol': market['id'],
'resolution': self.timeframes[timeframe],
'from': since / 1000,
'to': self.seconds(),
}
response = self.publicGetTvHistory(self.extend(request, params))
return self.parse_trading_view_ohlcv(response, market, timeframe, since, limit)
def fetch_balance(self, params={}):
self.load_markets()
response = self.privateGetBalances(params)
result = {'info': response}
balances = self.safe_value(response, 'balances')
for i in range(0, len(balances)):
balance = balances[i]
currencyId = self.safe_string(balance, 'id')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = float(balance['available_amount'][0])
account['total'] = float(balance['amount'][0])
result[code] = account
return self.parse_balance(result)
def fetch_order(self, id, symbol=None, params={}):
self.load_markets()
request = {
'id': int(id),
}
response = self.privateGetOrdersId(self.extend(request, params))
order = self.safe_value(response, 'order')
return self.parse_order(order)
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = None
if symbol is not None:
market = self.market(symbol)
request = {
'market': market['id'],
'per': limit,
}
response = self.privateGetMarketsMarketOrders(self.extend(request, params))
orders = self.safe_value(response, 'orders')
return self.parse_orders(orders, market, since, limit)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
request = {
'state': 'pending',
}
return self.fetch_orders(symbol, since, limit, self.extend(request, params))
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
request = {
'state': 'traded',
}
return self.fetch_orders(symbol, since, limit, self.extend(request, params))
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
side = 'Bid' if (side == 'buy') else 'Ask'
request = {
'market': self.market_id(symbol),
'price_type': type,
'type': side,
'amount': self.amount_to_precision(symbol, amount),
}
if type == 'limit':
request['limit'] = self.price_to_precision(symbol, price)
response = self.privatePostMarketsMarketOrders(self.extend(request, params))
order = self.safe_value(response, 'order')
return self.parse_order(order)
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
request = {
'id': int(id),
'state': 'canceling',
}
response = self.privatePutOrdersId(self.extend(request, params))
order = self.safe_value(response, 'order')
return self.parse_order(order)
def parse_order_status(self, status):
statuses = {
'traded': 'closed',
'received': 'open',
'canceling': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
id = self.safe_string(order, 'id')
timestamp = self.parse8601(self.safe_string(order, 'created_at'))
symbol = None
if market is None:
marketId = order['market_id']
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
if market is not None:
symbol = market['symbol']
type = self.safe_string(order, 'price_type')
side = self.safe_string_lower(order, 'type')
status = self.parse_order_status(self.safe_string(order, 'state'))
amount = float(order['original_amount'][0])
remaining = float(order['amount'][0])
filled = float(order['traded_amount'][0])
cost = float(order['total_exchanged'][0])
        price = self.safe_value(order, 'limit')  # 'limit' is a [value, currency] pair
        if price is not None:
            price = float(price[0])
if cost > 0 and filled > 0:
price = self.price_to_precision(symbol, cost / filled)
fee = {
'cost': float(order['paid_fee'][0]),
'currency': order['paid_fee'][1],
}
return {
'id': id,
'datetime': self.iso8601(timestamp),
'timestamp': timestamp,
'lastTradeTimestamp': None,
'status': status,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'cost': cost,
'amount': amount,
'filled': filled,
'remaining': remaining,
'trades': None,
'fee': fee,
'info': order,
}
def is_fiat(self, code):
fiats = {
'ARS': True,
'CLP': True,
'COP': True,
'PEN': True,
}
return self.safe_value(fiats, code, False)
def fetch_deposit_address(self, code, params={}):
self.load_markets()
currency = self.currency(code)
if self.is_fiat(code):
raise NotSupported(self.id + ' fetchDepositAddress() for fiat ' + code + ' is not supported')
request = {
'currency': currency['id'],
}
response = self.privateGetCurrenciesCurrencyReceiveAddresses(self.extend(request, params))
receiveAddresses = self.safe_value(response, 'receive_addresses')
addressPool = []
for i in range(1, len(receiveAddresses)):
receiveAddress = receiveAddresses[i]
if receiveAddress['ready']:
address = receiveAddress['address']
self.check_address(address)
addressPool.append(address)
addressPoolLength = len(addressPool)
if addressPoolLength < 1:
            raise AddressPending(self.id + ': there are no addresses ready for receiving ' + code + ', retry again later')
address = addressPool[0]
return {
'currency': code,
'address': address,
'tag': None,
'info': receiveAddresses,
}
def create_deposit_address(self, code, params={}):
self.load_markets()
currency = self.currency(code)
if self.is_fiat(code):
raise NotSupported(self.id + ': fiat fetchDepositAddress() for ' + code + ' is not supported')
request = {
'currency': currency['id'],
}
response = self.privatePostCurrenciesCurrencyReceiveAddresses(self.extend(request, params))
address = self.safe_string(response['receive_address'], 'address') # the creation is async and returns a null address, returns only the id
return {
'currency': code,
'address': address,
'tag': None,
'info': response,
}
def parse_transaction_status(self, status):
statuses = {
'rejected': 'failed',
'confirmed': 'ok',
'anulled': 'canceled',
'retained': 'canceled',
'pending_confirmation': 'pending',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
id = self.safe_string(transaction, 'id')
timestamp = self.parse8601(self.safe_string(transaction, 'created_at'))
currencyId = self.safe_string(transaction, 'currency')
code = self.safe_currency_code(currencyId, currency)
amount = float(transaction['amount'][0])
fee = float(transaction['fee'][0])
feeCurrency = transaction['fee'][1]
status = self.parse_transaction_status(self.safe_string(transaction, 'state'))
type = 'deposit' if ('deposit_data' in transaction) else 'withdrawal'
data = self.safe_value(transaction, type + '_data', {})
address = self.safe_value(data, 'target_address')
txid = self.safe_string(data, 'tx_hash')
updated = self.parse8601(self.safe_string(data, 'updated_at'))
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'address': address,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': updated,
'fee': {
'cost': fee,
'rate': feeCurrency,
},
}
def fetch_deposits(self, code=None, since=None, limit=None, params={}):
self.load_markets()
if code is None:
raise ExchangeError(self.id + ': fetchDeposits() requires a currency code argument')
currency = self.currency(code)
request = {
'currency': currency['id'],
'per': limit,
}
response = self.privateGetCurrenciesCurrencyDeposits(self.extend(request, params))
deposits = self.safe_value(response, 'deposits')
return self.parse_transactions(deposits, currency, since, limit)
def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
self.load_markets()
if code is None:
            raise ExchangeError(self.id + ': fetchWithdrawals() requires a currency code argument')
currency = self.currency(code)
request = {
'currency': currency['id'],
'per': limit,
}
response = self.privateGetCurrenciesCurrencyWithdrawals(self.extend(request, params))
withdrawals = self.safe_value(response, 'withdrawals')
return self.parse_transactions(withdrawals, currency, since, limit)
def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
'amount': amount,
'withdrawal_data': {
'target_address': address,
},
}
response = self.privatePostCurrenciesCurrencyWithdrawals(self.extend(request, params))
withdrawal = self.safe_value(response, 'withdrawal')
return self.parse_transaction(withdrawal)
def nonce(self):
return self.microseconds()
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
request = self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
if query:
if method == 'GET':
request += '?' + self.urlencode(query)
else:
body = self.json(query)
url = self.urls['api'] + '/' + self.version + '/' + request
if api == 'private':
self.check_required_credentials()
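            # descriptive note: the private-API signature computed below is an HMAC-SHA384
            # over the space-joined string '<METHOD> /api/<version>/<path> [base64(body)] <nonce>'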
nonce = str(self.nonce())
components = [method, '/api/' + self.version + '/' + request]
if body:
base64Body = base64.b64encode(self.encode(body))
components.append(self.decode(base64Body))
components.append(nonce)
message = ' '.join(components)
signature = self.hmac(self.encode(message), self.encode(self.secret), hashlib.sha384)
headers = {
'X-SBTC-APIKEY': self.apiKey,
'X-SBTC-SIGNATURE': signature,
'X-SBTC-NONCE': nonce,
'Content-Type': 'application/json',
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
if code >= 400:
errorCode = self.safe_string(response, 'code')
message = self.safe_string(response, 'message', body)
feedback = self.id + ' ' + message
exceptions = self.exceptions
if errorCode is not None:
if errorCode in exceptions:
raise exceptions[errorCode](feedback)
else:
raise ExchangeError(feedback)
|
py | 1a452fd85cebdea5b13e62f14b9aa7a9d498b3c4 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Indic Language Character Map
# Copyright 2008 Santhosh Thottingal <[email protected]>
# http://www.smc.org.in
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# If you find any bugs or have any suggestions
# email: [email protected]
# URL: http://www.smc.org.in
__all__ = ['charmap', 'get_language', 'char_compare']
import sys
if sys.version_info.major == 3:
from functools import lru_cache
else:
from repoze.lru import lru_cache
charmap = {
"hi_IN": [u"ँ", u"ं", u"ः", u"ऄ", u"अ", u"आ", u"इ", u"ई", u"उ", u"ऊ", u"ऋ",
u"ऌ", u"ऍ", u"ऎ", u"ए", u"ऐ", u"ऑ", u"ऒ", u"ओ", u"औ", u"क", u"ख",
u"ग", u"घ", u"ङ", u"च", u"छ", u"ज", u"झ", u"ञ", u"ट", u"ठ", u"ड",
u"ढ", u"ण", u"त", u"थ", u"द", u"ध", u"न", u"ऩ", u"प", u"फ", u"ब",
u"भ", u"म", u"य", u"र", u"ऱ", u"ल", u"ळ", u"ऴ", u"व", u"श", u"ष",
u"स", u"ह", u"ऺ", u"ऻ", u"़", u"ऽ", u"ा", u"ि", u"ी", u"ु", u"ू",
u"ृ", u"ॄ", u"ॅ", u"ॆ", u"े", u"ै", u"ॉ", u"ॊ", u"ो", u"ौ", u"्",
u"ॎ", u"ॏ", u"ॐ", u"॑", u"॒", u"॓", u"॔", u"ॕ", u"ॖ", u"ॗ", u"क़",
u"ख़", u"ग़", u"ज़", u"ड़", u"ढ़", u"फ़", u"य़", u"ॠ", u"ॡ", u"ॢ", u"ॣ",
u"।", u"॥", u"०", u"१", u"२", u"३", u"४", u"५", u"६", u"७", u"८",
u"९", u"॰", u"ॱ", u"ॲ", u"ॳ", u"ॴ", u"ॵ", u"ॶ", u"ॷ", u"ॸ", u"ॹ",
u"ॺ", u"ॻ", u"ॼ", u"ॽ", u"ॾ", u"ॿ"],
"bn_IN": [u"ঁ", u"ং", u"ঃ", u"", u"অ", u"আ", u"ই", u"ঈ", u"উ", u"ঊ", u"ঋ",
u"ঌ", u"", u"", u"এ", u"ঐ", u"", u"", u"ও", u"ঔ", u"ক", u"খ",
u"গ", u"ঘ", u"ঙ", u"চ", u"ছ", u"জ", u"ঝ", u"ঞ", u"ট", u"ঠ", u"ড",
u"ঢ", u"ণ", u"ত", u"থ", u"দ", u"ধ", u"ন", u"", u"প", u"ফ", u"ব",
u"ভ", u"ম", u"য", u"র", u"", u"ল", u"", u"", u"", u"শ", u"ষ",
u"স", u"হ", u"", u"", u"়", u"ঽ", u"া", u"ি", u"ী", u"ু", u"ূ",
u"ৃ", u"ৄ", u"", u"", u"ে", u"ৈ", u"", u"", u"ো", u"ৌ",
u"্", u"ৎ", u"", u"", u"", u"", u"", u"", u"", u"", u"ৗ",
u"", u"", u"", u"", u"ড়", u"ঢ়", u"", u"য়", u"ৠ", u"ৡ", u"ৢ",
u"ৣ", u"", u"", u"০", u"১", u"২", u"৩", u"৪", u"৫", u"৬", u"৭",
u"৮", u"৯", u"ৰ", u"ৱ", u"৲", u"৳", u"৴", u"৵", u"৶", u"৷", u"৸",
u"৹", u"৺", u"৻", u"ৼ", u"৽", u"৾", u""],
"pa_IN": [u"ਁ", u"ਂ", u"ਃ", u"", u"ਅ", u"ਆ", u"ਇ", u"ਈ", u"ਉ", u"ਊ", u"",
u"", u"", u"", u"ਏ", u"ਐ", u"", u"", u"ਓ", u"ਔ", u"ਕ", u"ਖ",
u"ਗ", u"ਘ", u"ਙ", u"ਚ", u"ਛ", u"ਜ", u"ਝ", u"ਞ", u"ਟ", u"ਠ", u"ਡ",
u"ਢ", u"ਣ", u"ਤ", u"ਥ", u"ਦ", u"ਧ", u"ਨ", u"", u"ਪ", u"ਫ", u"ਬ",
u"ਭ", u"ਮ", u"ਯ", u"ਰ", u"", u"ਲ", u"ਲ਼", u"", u"ਵ", u"ਸ਼", u"",
u"ਸ", u"ਹ", u"", u"", u"਼", u"", u"ਾ", u"ਿ", u"ੀ", u"ੁ", u"ੂ",
u"", u"", u"", u"", u"ੇ", u"ੈ", u"", u"", u"ੋ", u"ੌ", u"੍",
u"", u"", u"", u"ੑ", u"", u"", u"", u"", u"", u"", u"",
u"ਖ਼", u"ਗ਼", u"ਜ਼", u"ੜ", u"", u"ਫ਼", u"", u"", u"", u"", u"",
u"", u"", u"੦", u"੧", u"੨", u"੩", u"੪", u"੫", u"੬", u"੭", u"੮",
u"੯", u"ੰ", u"ੱ", u"ੲ", u"ੳ", u"ੴ", u"ੵ", u"੶", u"", u"", u"",
u"", u"", u"", u"", u"", u""],
"gu_IN": [u"ઁ", u"ં", u"ઃ", u"", u"અ", u"આ", u"ઇ", u"ઈ", u"ઉ", u"ઊ", u"ઋ",
u"ઌ", u"ઍ", u"", u"એ", u"ઐ", u"ઑ", u"", u"ઓ", u"ઔ", u"ક", u"ખ",
u"ગ", u"ઘ", u"ઙ", u"ચ", u"છ", u"જ", u"ઝ", u"ઞ", u"ટ", u"ઠ", u"ડ",
u"ઢ", u"ણ", u"ત", u"થ", u"દ", u"ધ", u"ન", u"", u"પ", u"ફ", u"બ",
u"ભ", u"મ", u"ય", u"ર", u"", u"લ", u"ળ", u"", u"વ", u"શ", u"ષ",
u"સ", u"હ", u"", u"", u"઼", u"ઽ", u"ા", u"િ", u"ી", u"ુ", u"ૂ",
u"ૃ", u"ૄ", u"ૅ", u"", u"ે", u"ૈ", u"ૉ", u"", u"ો", u"ૌ", u"્",
u"", u"", u"ૐ", u"", u"", u"", u"", u"", u"", u"", u"",
u"", u"", u"", u"", u"", u"", u"", u"ૠ", u"ૡ", u"ૢ", u"ૣ",
u"", u"", u"૦", u"૧", u"૨", u"૩", u"૪", u"૫", u"૬", u"૭", u"૮",
u"૯", u"૰", u"૱", u"", u"", u"", u"", u"", u"", u"", u"ૹ",
u"ૺ", u"ૻ", u"ૼ", u"૽", u"૾", u"૿"],
"or_IN": [u"ଁ", u"ଂ", u"ଃ", u"", u"ଅ", u"ଆ", u"ଇ", u"ଈ", u"ଉ", u"ଊ", u"ଋ",
u"ଌ", u"", u"", u"ଏ", u"ଐ", u"", u"", u"ଓ", u"ଔ", u"କ", u"ଖ",
u"ଗ", u"ଘ", u"ଙ", u"ଚ", u"ଛ", u"ଜ", u"ଝ", u"ଞ", u"ଟ", u"ଠ", u"ଡ",
u"ଢ", u"ଣ", u"ତ", u"ଥ", u"ଦ", u"ଧ", u"ନ", u"", u"ପ", u"ଫ", u"ବ",
u"ଭ", u"ମ", u"ଯ", u"ର", u"", u"ଲ", u"ଳ", u"", u"ଵ", u"ଶ", u"ଷ",
u"ସ", u"ହ", u"", u"", u"଼", u"ଽ", u"ା", u"ି", u"ୀ", u"ୁ", u"ୂ",
u"ୃ", u"ୄ", u"", u"", u"େ", u"ୈ", u"", u"", u"ୋ", u"ୌ", u"୍",
u"", u"", u"", u"", u"", u"", u"", u"୕", u"ୖ", u"ୗ", u"",
u"", u"", u"", u"ଡ଼", u"ଢ଼", u"", u"ୟ", u"ୠ", u"ୡ", u"ୢ", u"ୣ",
u"", u"", u"୦", u"୧", u"୨", u"୩", u"୪", u"୫", u"୬", u"୭", u"୮",
u"୯", u"୰", u"ୱ", u"୲", u"୳", u"୴", u"୵", u"୶", u"୷", u"", u"",
u"", u"", u"", u"", u"", u""],
"ta_IN": [u"", u"ஂ", u"ஃ", u"", u"அ", u"ஆ", u"இ", u"ஈ", u"உ", u"ஊ", u"",
u"", u"", u"எ", u"ஏ", u"ஐ", u"", u"ஒ", u"ஓ", u"ஔ", u"க", u"",
u"", u"", u"ங", u"ச", u"", u"ஜ", u"", u"ஞ", u"ட", u"", u"",
u"", u"ண", u"த", u"", u"", u"", u"ந", u"ன", u"ப", u"", u"",
u"", u"ம", u"ய", u"ர", u"ற", u"ல", u"ள", u"ழ", u"வ", u"ஶ", u"ஷ",
u"ஸ", u"ஹ", u"", u"", u"", u"", u"ா", u"ி", u"ீ", u"ு", u"ூ",
u"", u"", u"", u"ெ", u"ே", u"ை", u"", u"ொ", u"ோ", u"ௌ", u"்",
u"", u"", u"ௐ", u"", u"", u"", u"", u"", u"", u"ௗ", u"",
u"", u"", u"", u"", u"", u"", u"", u"", u"", u"", u"",
u"", u"", u"௦", u"௧", u"௨", u"௩", u"௪", u"௫", u"௬", u"௭", u"௮",
u"௯", u"௰", u"௱", u"௲", u"௳", u"௴", u"௵", u"௶", u"௷", u"௸", u"௹",
u"௺", u"", u"", u"", u"", u""],
"te_IN": [u"ఁ", u"ం", u"ః", u"ఄ", u"అ", u"ఆ", u"ఇ", u"ఈ", u"ఉ", u"ఊ", u"ఋ",
u"ఌ", u"", u"ఎ", u"ఏ", u"ఐ", u"", u"ఒ", u"ఓ", u"ఔ", u"క", u"ఖ",
u"గ", u"ఘ", u"ఙ", u"చ", u"ఛ", u"జ", u"ఝ", u"ఞ", u"ట", u"ఠ", u"డ",
u"ఢ", u"ణ", u"త", u"థ", u"ద", u"ధ", u"న", u"", u"ప", u"ఫ", u"బ",
u"భ", u"మ", u"య", u"ర", u"ఱ", u"ల", u"ళ", u"ఴ", u"వ", u"శ", u"ష",
u"స", u"హ", u"", u"", u"఼", u"ఽ", u"ా", u"ి", u"ీ", u"ు", u"ూ",
u"ృ", u"ౄ", u"", u"ె", u"ే", u"ై", u"", u"ొ", u"ో", u"ౌ", u"్",
u"", u"", u"", u"", u"", u"", u"", u"ౕ", u"ౖ", u"", u"ౘ",
u"ౙ", u"ౚ", u"", u"", u"ౝ", u"", u"", u"ౠ", u"ౡ", u"ౢ", u"ౣ",
u"", u"", u"౦", u"౧", u"౨", u"౩", u"౪", u"౫", u"౬", u"౭", u"౮",
u"౯", u"", u"", u"", u"", u"", u"", u"", u"౷", u"౸", u"౹",
u"౺", u"౻", u"౼", u"౽", u"౾", u"౿"],
"kn_IN": [u"ಁ", u"ಂ", u"ಃ", u"಄", u"ಅ", u"ಆ", u"ಇ", u"ಈ", u"ಉ", u"ಊ", u"ಋ",
u"ಌ", u"", u"ಎ", u"ಏ", u"ಐ", u"", u"ಒ", u"ಓ", u"ಔ", u"ಕ", u"ಖ",
u"ಗ", u"ಘ", u"ಙ", u"ಚ", u"ಛ", u"ಜ", u"ಝ", u"ಞ", u"ಟ", u"ಠ", u"ಡ",
u"ಢ", u"ಣ", u"ತ", u"ಥ", u"ದ", u"ಧ", u"ನ", u"", u"ಪ", u"ಫ", u"ಬ",
u"ಭ", u"ಮ", u"ಯ", u"ರ", u"ಱ", u"ಲ", u"ಳ", u"", u"ವ", u"ಶ", u"ಷ",
u"ಸ", u"ಹ", u"", u"", u"಼", u"ಽ", u"ಾ", u"ಿ", u"ೀ", u"ು", u"ೂ",
u"ೃ", u"ೄ", u"", u"ೆ", u"ೇ", u"ೈ", u"", u"ೊ", u"ೋ", u"ೌ", u"್",
u"", u"", u"", u"", u"", u"", u"", u"ೕ", u"ೖ", u"", u"",
u"", u"", u"", u"", u"ೝ", u"ೞ", u"", u"ೠ", u"ೡ", u"ೢ", u"ೣ",
u"", u"", u"೦", u"೧", u"೨", u"೩", u"೪", u"೫", u"೬", u"೭", u"೮",
u"೯", u"", u"ೱ", u"ೲ", u"ೳ", u"", u"", u"", u"", u"", u"",
u"", u"", u"", u"", u"", u""],
"ml_IN": [u"ഁ", u"ം", u"ഃ", u"ഄ", u"അ", u"ആ", u"ഇ", u"ഈ", u"ഉ", u"ഊ", u"ഋ",
u"ഌ", u"", u"എ", u"ഏ", u"ഐ", u"", u"ഒ", u"ഓ", u"ഔ", u"ക", u"ഖ",
u"ഗ", u"ഘ", u"ങ", u"ച", u"ഛ", u"ജ", u"ഝ", u"ഞ", u"ട", u"ഠ", u"ഡ",
u"ഢ", u"ണ", u"ത", u"ഥ", u"ദ", u"ധ", u"ന", u"ഩ", u"പ", u"ഫ", u"ബ",
u"ഭ", u"മ", u"യ", u"ര", u"റ", u"ല", u"ള", u"ഴ", u"വ", u"ശ", u"ഷ",
u"സ", u"ഹ", u"ഺ", u"഻", u"഼", u"ഽ", u"ാ", u"ി", u"ീ", u"ു", u"ൂ",
u"ൃ", u"ൄ", u"", u"െ", u"േ", u"ൈ", u"", u"ൊ", u"ോ", u"ൌ", u"്",
u"ൎ", u"൏", u"", u"", u"", u"", u"ൔ", u"ൕ", u"ൖ", u"ൗ", u"൘",
u"൙", u"൚", u"൛", u"൜", u"൝", u"൞", u"ൟ", u"ൠ", u"ൡ", u"ൢ", u"ൣ",
u"", u"", u"൦", u"൧", u"൨", u"൩", u"൪", u"൫", u"൬", u"൭", u"൮",
u"൯", u"൰", u"൱", u"൲", u"൳", u"൴", u"൵", u"൶", u"൷", u"൸", u"൹",
u"ൺ", u"ൻ", u"ർ", u"ൽ", u"ൾ", u"ൿ"],
"en_US": [u"a", u"b", u"c", u"d", u"e", u"f", u"g", u"h", u"i", u"j", u"k",
u"l", u"m", u"n", u"o", u"p", u"q", u"r", u"s", u"t", u"u", u"v",
u"w", u"x", u"y", u"z"],
}
charmap_transphon = {
"ISO15919": ["m̐", "ṁ", "ḥ", "", "a", "ā", "i", "ī", "u", "ū", "ṛ", "ḷ",
"ê", "e", "ē", "ai", "ô", "o", "ō", "au", "ka", "kha", "ga",
"gha", "ṅa", "ca", "cha", "ja", "jha", "ña", "ṭa", "ṭha",
"ḍa", "ḍha", "ṇa", "ta", "tha", "da", "dha", "na", "ṉa",
"pa", "pha", "ba", "bha", "ma", "ya", "ra", "ṟa", "la", "ḷa",
"ḻa", "va", "śa", "ṣa", "sa", "ha", "", "", "", "'", "ā", "i",
"ī", "u", "ū", "ṛ", "ṝ", "ê", "e", "ē", "ai", "ô", "o", "ō",
"au", "", "", "", "oṃ", "", "", "", "", "", "", "", "qa",
"ḵẖa", "ġ", "za", "ṛa", "ṛha", "fa", "ẏa", "ṝ", "ḹ", "ḷ",
"ḹ", ".", "..", "0", "1", "2", "3", "4", "5", "6", "7", "8",
"9", "…", "", "", "", "", "", "", "", "", "", "", "", "", "",
"", "", "", ""],
"IPA": ["m", "m", "", "", "ə", "aː", "i", "iː", "u", "uː", "r̩", "l̩", "æ",
"e", "eː", "ɛː", "ɔ", "o", "oː", "ow", "kə", "kʰə", "gə", "gʱə",
"ŋə", "ʧə", "ʧʰə", "ʤə", "ʤʱə", "ɲə", "ʈə", "ʈʰə", "ɖə", "ɖʱə",
"ɳə", "t̪ə", "t̪ʰə", "d̪ə", "d̪ʱə", "n̪ə", "nə", "pə", "pʰə", "bə",
"bʱə", "mə", "jə", "ɾə", "rə", "lə", "ɭə", "ɻə", "ʋə", "ɕə", "ʂə",
"sə", "ɦə", "", "", "", "ഽ", "aː", "i", "iː", "u", "uː", "r̩",
"l̩", "e", "eː", "ɛː", "ɔ", "o", "oː", "ow", "", "", "", "", "",
"", "", "", "", "", "ow", "", "", "", "", "", "", "", "", "r̩ː",
"l̩ː", "", "", "", "", "0", "1", "2", "3", "4", "5", "6", "7", "8",
"9", "൰", "", "", "", "", "", "", "", "", "", "", "", "", "", "",
""]
}
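# Note: the Indic script lists above are index-aligned, so the character at a given
# position in one script maps to the character at the same position in the other
# scripts; char_compare() below relies on this when deciding whether two characters
# are "similar".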
@lru_cache(maxsize=1024)
def char_compare(char1, char2):
''' Check if 2 characters are similar
This function checks if given 2 characters are similar but are
from 2 different languages.
:param char1: First character for comparison
:param char2: Second character for comparison
:return: 0 if both characters are same, 1 if both characters
are similar but from different language and -1 if any
one or both characters are not found
'''
if char1 == char2:
return 0
char1_index = -1
char2_index = -1
char1_lang = get_language(char1)
char2_lang = get_language(char2)
if char1_lang is not None and char2_lang is not None:
# Is this IPA or ISO15919 char?
if char1_lang in ["ISO15919", "IPA"]:
char1_index = charmap_transphon[char1_lang].index(char1)
if char2_lang in ["ISO15919", "IPA"]:
char2_index = charmap_transphon[char2_lang].index(char2)
# Still index not found?
if char1_index == -1:
char1_index = charmap[char1_lang].index(char1)
if char2_index == -1:
char2_index = charmap[char2_lang].index(char2)
# is char index similar?
if char1_index == char2_index:
return 1
# char's are not similar
return -1
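# Illustrative examples (added for clarity, not part of the original module):
#   char_compare(u"अ", u"അ")  -> 1   (same index in the hi_IN and ml_IN tables)
#   char_compare(u"अ", u"अ")  -> 0   (identical characters)
#   char_compare(u"अ", u"आ")  -> -1  (different table positions, so not similar)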
@lru_cache(maxsize=1024)
def get_language(char):
''' Get the language of given `char'
Return the language of given character, if character language
is not found `None' is returned.
:param char:
The char whose language is to be detected
:return: string representing language or None if char not found
in our mapping.
'''
if sys.version_info.major == 2:
tmpchr = char.decode('utf-8') if type(char).__name__ == 'str' else char
else:
tmpchr = char
for lang in charmap:
if tmpchr.lower() in charmap[lang]:
return lang
if sys.version_info.major == 2:
tmpchr = char.encode('utf-8') if type(char).__name__ == 'unicode'\
else char
else:
tmpchr = char
# Reached here means no language is found check in ISO and IPA set
for lang in charmap_transphon:
if tmpchr in charmap_transphon[lang]:
return lang
# Nothing found!
return None
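# Illustrative examples (added for clarity, not part of the original module):
#   get_language(u"ক")  -> 'bn_IN'
#   get_language(u"z")  -> 'en_US'
#   get_language(u"?")  -> None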
|
py | 1a4530c84d723f8d8b5568591074d4d9302cbb27 | # -*- coding: utf-8 -*-
"""
pytest_instafail
~~~~~~~~~~~~~~~~
py.test plugin to show failures instantly.
:copyright: (c) 2013-2016 by Janne Vanhala.
:license: BSD, see LICENSE for more details.
"""
import pytest
from _pytest.terminal import TerminalReporter
def pytest_addoption(parser):
group = parser.getgroup("terminal reporting", "reporting", after="general")
group._addoption(
'--instafail', action="store_true", dest="instafail", default=False,
help=(
"show failures and errors instantly as they occur (disabled by "
"default)."
)
)
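# Usage note (added for clarity): once the plugin is installed, instant failure
# reporting is enabled by running pytest with the flag above, e.g. `pytest --instafail`.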
@pytest.mark.trylast
def pytest_configure(config):
if hasattr(config, 'slaveinput'):
return # xdist slave, we are already active on the master
if config.option.instafail and config.pluginmanager.hasplugin('terminalreporter'):
# Get the standard terminal reporter plugin...
standard_reporter = config.pluginmanager.getplugin('terminalreporter')
instafail_reporter = InstafailingTerminalReporter(standard_reporter)
# ...and replace it with our own instafailing reporter.
config.pluginmanager.unregister(standard_reporter)
config.pluginmanager.register(instafail_reporter, 'terminalreporter')
class InstafailingTerminalReporter(TerminalReporter):
def __init__(self, reporter):
TerminalReporter.__init__(self, reporter.config)
self._tw = reporter._tw
def pytest_collectreport(self, report):
# Show errors occurred during the collection instantly.
TerminalReporter.pytest_collectreport(self, report)
if report.failed:
if self.isatty:
self.rewrite('') # erase the "collecting"/"collected" message
self.print_failure(report)
def pytest_runtest_logreport(self, report):
        # Show failures and errors occurring while running a test
# instantly.
TerminalReporter.pytest_runtest_logreport(self, report)
if report.failed and not hasattr(report, 'wasxfail'):
if self.verbosity <= 0:
self._tw.line()
self.print_failure(report)
def summary_failures(self):
# Prevent failure summary from being shown since we already
        # show the failure instantly after the failure has occurred.
pass
def summary_errors(self):
# Prevent error summary from being shown since we already
        # show the error instantly after the error has occurred.
pass
def print_failure(self, report):
if self.config.option.tbstyle != "no":
if self.config.option.tbstyle == "line":
line = self._getcrashline(report)
self.write_line(line)
else:
msg = self._getfailureheadline(report)
# "when" was unset before pytest 4.2 for collection errors.
when = getattr(report, "when", "collect")
if when == "collect":
msg = "ERROR collecting " + msg
elif when == "setup":
msg = "ERROR at setup of " + msg
elif when == "teardown":
msg = "ERROR at teardown of " + msg
self.write_sep("_", msg)
if not self.config.getvalue("usepdb"):
self._outrep_summary(report)
|
py | 1a45319aaaa36400adf48feb56076be2eb304454 | """Auto-generated file, do not edit by hand. UZ metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_UZ = PhoneMetadata(id='UZ', country_code=998, international_prefix='810',
general_desc=PhoneNumberDesc(national_number_pattern='[679]\\d{8}', possible_length=(9,), possible_length_local_only=(7,)),
fixed_line=PhoneNumberDesc(national_number_pattern='(?:6(?:1(?:22|3[124]|4[1-4]|5[123578]|64)|2(?:22|3[0-57-9]|41)|5(?:22|3[3-7]|5[024-8])|6\\d{2}|7(?:[23]\\d|7[69])|9(?:22|4[1-8]|6[135]))|7(?:0(?:5[4-9]|6[0146]|7[12456]|9[135-8])|1[12]\\d|2(?:22|3[1345789]|4[123579]|5[14])|3(?:2\\d|3[1578]|4[1-35-7]|5[1-57]|61)|4(?:2\\d|3[1-579]|7[1-79])|5(?:22|5[1-9]|6[1457])|6(?:22|3[12457]|4[13-8])|9(?:22|5[1-9])))\\d{5}', example_number='669050123', possible_length=(9,), possible_length_local_only=(7,)),
mobile=PhoneNumberDesc(national_number_pattern='6(?:1(?:2(?:98|2[01])|35[0-4]|50\\d|61[23]|7(?:[01][017]|4\\d|55|9[5-9]))|2(?:11\\d|2(?:[12]1|9[01379])|5(?:[126]\\d|3[0-4])|7\\d{2})|5(?:19[01]|2(?:27|9[26])|30\\d|59\\d|7\\d{2})|6(?:2(?:1[5-9]|2[0367]|38|41|52|60)|3[79]\\d|4(?:56|83)|7(?:[07]\\d|1[017]|3[07]|4[047]|5[057]|67|8[0178]|9[79])|9[0-3]\\d)|7(?:2(?:24|3[237]|4[5-9]|7[15-8])|5(?:7[12]|8[0589])|7(?:0\\d|[39][07])|9(?:0\\d|7[079]))|9(?:2(?:1[1267]|5\\d|3[01]|7[0-4])|5[67]\\d|6(?:2[0-26]|8\\d)|7\\d{2}))\\d{4}|7(?:0\\d{3}|1(?:13[01]|6(?:0[47]|1[67]|66)|71[3-69]|98\\d)|2(?:2(?:2[79]|95)|3(?:2[5-9]|6[0-6])|57\\d|7(?:0\\d|1[17]|2[27]|3[37]|44|5[057]|66|88))|3(?:2(?:1[0-6]|21|3[469]|7[159])|33\\d|5(?:0[0-4]|5[579]|9\\d)|7(?:[0-3579]\\d|4[0467]|6[67]|8[078])|9[4-6]\\d)|4(?:2(?:29|5[0257]|6[0-7]|7[1-57])|5(?:1[0-4]|8\\d|9[5-9])|7(?:0\\d|1[024589]|2[0127]|3[0137]|[46][07]|5[01]|7[5-9]|9[079])|9(?:7[015-9]|[89]\\d))|5(?:112|2(?:0\\d|2[29]|[49]4)|3[1568]\\d|52[6-9]|7(?:0[01578]|1[017]|[23]7|4[047]|[5-7]\\d|8[78]|9[079]))|6(?:2(?:2[1245]|4[2-4])|39\\d|41[179]|5(?:[349]\\d|5[0-2])|7(?:0[017]|[13]\\d|22|44|55|67|88))|9(?:22[128]|3(?:2[0-4]|7\\d)|57[05629]|7(?:2[05-9]|3[37]|4\\d|60|7[2579]|87|9[07])))\\d{4}|9[0-57-9]\\d{7}', example_number='912345678', possible_length=(9,)),
preferred_international_prefix='8~10',
national_prefix='8',
national_prefix_for_parsing='8',
number_format=[NumberFormat(pattern='([679]\\d)(\\d{3})(\\d{2})(\\d{2})', format='\\1 \\2 \\3 \\4', leading_digits_pattern=['[679]'], national_prefix_formatting_rule='8 \\1')])
|
py | 1a453238cc274593b6b62a070873c0afdedaeca7 | import logging
from guitester import GuiTester
from pages.basepage import BasePage
from pages.dashboard import Dashboard
from pages.elastic_ip.elastic_ip_lp import EipLanding
from pages.elastic_ip.elastic_ip_detail import EipDetailPage
from pages.keypair.keypairdetail import KeypairDetailPage
from pages.keypair.keypair_lp import KeypairLanding
from pages.instance.instance_lp import InstanceLanding
from pages.volume.volume_view import VolumeLanding
from pages.volume.volume_detail import VolumeDetailPage
from pages.snapshot.snapshot_detail import SnapshotDetailPage
from pages.snapshot.snapshot_lp import SnapshotLanding
from pages.snapshot.create_snapshot import CreateSnapshotPage
from pages.instance.instancedetail import InstanceDetailPage
from pages.image.image_lp import ImageLanding
from pages.image.image_detail import ImageDetailPage
from pages.security_group.security_group_lp import SecurityGroupLanding
from pages.security_group.security_group_detail import SecurityGroupDetailPage
from dialogs.security_group_dialogs import CreateScurityGroupDialog, DeleteScurityGroupDialog
from dialogs.keypair_dialogs import CreateKeypairDialog, DeleteKeypairModal, ImportKeypairDialog
from dialogs.instance_dialogs import (
LaunchInstanceWizard, LaunchMoreLikeThisDialog, TerminateInstanceModal, TerminateAllInstancesModal)
from dialogs.volume_dialogs import (
CreateVolumeDialog, DeleteVolumeModal, AttachVolumeModalSelectInstance,
AttachVolumeModalSelectVolume, DetachVolumeModal)
from dialogs.snapshot_dialogs import CreateSnapshotModal, DeleteSnapshotModal, RegisterSnapshotAsImageModal
from dialogs.image_dialogs import RemoveImageFromCloudDialog
from dialogs.eip_dialogs import AllocateEipDialog, ReleaseEipDialog,AssociateEipDialog, DisassociateEipDialog
from selenium.common.exceptions import NoSuchElementException, ElementNotVisibleException, TimeoutException
logger = logging.getLogger('testlogger')
hdlr = logging.FileHandler('/tmp/testlog.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
logger.setLevel(logging.WARNING)
class GuiEC2(GuiTester):
def __init__(self, console_url, sauce=False, webdriver_url=None, browser=None, version=None, platform=None):
super(GuiEC2, self).__init__(console_url, webdriver_url=webdriver_url, sauce=sauce,
browser=browser, version=version, platform=platform)
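    # Illustrative usage sketch (added note, not part of the original framework):
    #   gui = GuiEC2("https://console.example.com", browser="firefox")
    #   gui.create_keypair_from_dashboard("gui-test-keypair")
    #   gui.delete_keypair_from_view_page("gui-test-keypair")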
def set_implicit_wait(self, time_to_wait):
"""
Sets implicit wait to time_to_wait
:param time_to_wait:
"""
self.driver.implicitly_wait(time_to_wait)
def set_all_pages_to_list_view(self):
pass
def set_all_pages_to_tile_view(self):
pass
def goto_images_page_via_nav(self):
BasePage(self).goto_images_view_via_menu()
ImageLanding(self)
def get_region_list(self):
return BasePage(self).get_region_list()
def change_region(self, region=None):
"""
If region is passed, change to that region, otherwise select 1st from menu.
"""
if region is None:
region = BasePage(self).get_region_list()[0]
BasePage(self).select_region(region)
def create_keypair_from_dashboard(self, keypair_name):
"""
Navigates to Dashboard via menu, creates keypair. Verifies keypair visible on Keypair View page.
:param keypair_name:
"""
BasePage(self).goto_dashboard_via_menu()
Dashboard(self).click_create_keypair_link_from_dashboard()
CreateKeypairDialog(self).create_keypair(keypair_name)
KeypairDetailPage(self, keypair_name)
def create_keypair_from_keypair_view_page(self, keypair_name):
"""
Navigates from Dashboard to keypair landing page via menu. Creates keypair, verifies keypair detail page is loaded after keypair creation.
:param keypair_name:
"""
BasePage(self).goto_keypair_view_page_via_menu()
KeypairLanding(self).click_create_keypair_button_on_view_page()
CreateKeypairDialog(self).create_keypair(keypair_name)
KeypairDetailPage(self, keypair_name)
def import_keypair(self, keypair, keypair_name):
"""
Navigates to Keypair View via menu. Imports keypair. Verifies keypair visible on Keypair View page.
:param keypair_name:
"""
BasePage(self).goto_keypair_view_page_via_menu()
KeypairLanding(self).click_import_keypair_button()
ImportKeypairDialog(self).import_keypair(keypair, keypair_name)
KeypairDetailPage(self, keypair_name)
def delete_keypair_from_detail_page(self, keypair_name):
"""
Navigates to Keypair View via menu, finds keypair, goes to keypair detail page via keypair name link. Deletes keypair.
:param keypair_name:
"""
BasePage(self).goto_keypair_view_page_via_menu()
KeypairLanding(self).click_keypair_link_on_view_page(keypair_name)
KeypairDetailPage(self, keypair_name).click_action_delete_keypair_on_detail_page()
DeleteKeypairModal(self).click_delete_keypair_submit_button()
BasePage(self).goto_keypair_view_page_via_menu()
KeypairLanding(self).verify_keypair_not_present_on_view_page(keypair_name)
def delete_keypair_from_view_page(self, keypair_name):
"""
Navigates to Keypair View via menu. Deletes keypair from view page. Verifies keypair was removed from view page.
:param keypair_name:
"""
BasePage(self).goto_keypair_view_page_via_menu()
KeypairLanding(self).click_action_delete_keypair_on_view_page(keypair_name)
DeleteKeypairModal(self).click_delete_keypair_submit_button()
BasePage(self).goto_keypair_view_page_via_menu()
KeypairLanding(self).verify_keypair_not_present_on_view_page(keypair_name)
def create_security_group_from_dashboard(self, s_group_name, s_group_description):
"""
Creates security group from dashboard without adding rules or tags.
:param s_group_name:
:param s_group_description:
"""
BasePage(self).goto_dashboard_via_menu()
Dashboard(self).click_create_s_group_link_from_dashboard()
CreateScurityGroupDialog(self).create_s_group(s_group_name, s_group_description)
s_group_id = SecurityGroupDetailPage(self, s_group_name).get_s_group_id()
return {'s_group_name': s_group_name, 's_group_id':s_group_id}
def add_tcp_22_rule_to_s_group(self, s_group_name, s_group_id):
"""
Navigates to security group detail page. Opens TCP 22 port to user's IP.
:param s_group_name:
:param s_group_id:
"""
BasePage(self).goto_security_groups_view_via_menu()
SecurityGroupLanding(self).click_action_view_s_group_details_on_view_page(s_group_id)
SecurityGroupDetailPage(self, s_group_name).add_rule_to_s_group_open_to_my_ip("TCP port 22")
def add_ldap_rule_to_s_group(self, s_group_name, s_group_id):
"""
Navigates to security group detail page. Opens TCP 389 port to all addresses.
:param s_group_name:
:param s_group_id:
"""
BasePage(self).goto_security_groups_view_via_menu()
SecurityGroupLanding(self).click_action_view_s_group_details_on_view_page(s_group_id)
SecurityGroupDetailPage(self, s_group_name).add_rule_to_s_group_open_to_all_addresses("TCP port 389")
def add_custom_tcp_rule_to_s_group(self, s_group_name, s_group_id):
"""
Navigates to security group detail page. Opens TCP port 22-3389 to default group.
:param s_group_name:
:param s_group_id:
"""
BasePage(self).goto_security_groups_view_via_menu()
SecurityGroupLanding(self).click_action_view_s_group_details_on_view_page(s_group_id)
SecurityGroupDetailPage(self, s_group_name).add_custom_tcp_rule_open_to_default_group("22","3389")
def create_security_group_from_view_page(self, s_group_name, s_group_description):
"""
Creates security group from S. groups view page without adding rules or tags.
:param s_group_name:
:param s_group_description:
"""
BasePage(self).goto_security_groups_view_via_menu()
SecurityGroupLanding(self).click_create_new_s_group_button()
CreateScurityGroupDialog(self).create_s_group(s_group_name, s_group_description)
s_group_id = SecurityGroupDetailPage(self, s_group_name).get_s_group_id()
return {'s_group_name': s_group_name, 's_group_id':s_group_id}
def create_sesecurity_group_with_rules(self, s_group_name, s_group_description, rule_open_to_all, rule_open_to_default_group, rule_open_to_default_group_port_begin, rule_open_to_default_group_port_end):
BasePage(self).goto_dashboard_via_menu()
Dashboard(self).click_create_s_group_link_from_dashboard()
CreateScurityGroupDialog(self).create_s_group_with_rules(s_group_name, s_group_description, rule_open_to_all, rule_open_to_default_group, rule_open_to_default_group_port_begin, rule_open_to_default_group_port_end)
s_group_id = SecurityGroupDetailPage(self, s_group_name).get_s_group_id()
return {'s_group_name': s_group_name, 's_group_id':s_group_id}
def delete_security_group_from_view_page(self, sgroup_name, s_group_id):
"""
Navigates to security group view page. Deletes security group from view page.
:param sgroup_name:
:param s_group_id:
"""
BasePage(self).goto_security_groups_view_via_menu()
SecurityGroupLanding(self).click_action_delete_s_group_on_view_page(s_group_id)
DeleteScurityGroupDialog(self).delete_s_group()
SecurityGroupLanding(self).verify_s_group_not_present(sgroup_name)
def delete_security_group_from_detail_page(self, sgroup_name, s_group_id):
"""
Navigates to security group detail page. Deletes security group.
:param sgroup_name:
:param s_group_id:
"""
BasePage(self).goto_security_groups_view_via_menu()
SecurityGroupLanding(self).click_action_view_s_group_details_on_view_page(s_group_id)
SecurityGroupDetailPage(self, sgroup_name).click_action_delete_s_group_on_detail_page()
DeleteScurityGroupDialog(self).delete_s_group()
SecurityGroupLanding(self).verify_s_group_not_present(sgroup_name)
def launch_instance_from_dashboard(self, image="centos", availability_zone=None, instance_type="t1.micro",
number_of_of_instances=None, instance_name=None, key_name="None (advanced option)",
security_group="default", user_data=None, monitoring=False, private_addressing=False, timeout_in_seconds=480):
"""
Navigates to dashboard via menu. Launches instance.
:param image:
:param availability_zone:
:param instance_type:
:param number_of_of_instances:
:param instance_name:
:param key_name:
:param security_group:
:param user_data:
:param monitoring:
:param private_addressing:
"""
BasePage(self).goto_dashboard_via_menu()
Dashboard(self).click_launch_instance_button_from_dashboard()
LaunchInstanceWizard(self).launch_instance(image=image, availability_zone=availability_zone, instance_type=instance_type,
number_of_of_instances=number_of_of_instances, instance_name=instance_name, key_name=key_name,
security_group=security_group, user_data=user_data, monitoring=monitoring, private_addressing=private_addressing)
instance_id = InstanceLanding(self).get_id_of_newly_launched_instance()
InstanceLanding(self).goto_instance_detail_page_via_link(instance_id)
InstanceDetailPage(self, instance_id, instance_name).verify_instance_is_in_running_state(timeout_in_seconds=timeout_in_seconds)
return {'instance_name': instance_name, 'instance_id':instance_id}
def launch_instance_from_instance_view_page(self, image = "centos",availability_zone = None,
instance_type = "t1.micro",
number_of_of_instances = None, instance_name = None, key_name = "None (advanced option)",
security_group = "default", user_data=None, monitoring=False, private_addressing=False, timeout_in_seconds=480):
"""
Navigates to instance view page via menu. Launches instance.
:param image:
:param availability_zone:
:param instance_type:
:param number_of_of_instances:
:param instance_name:
:param key_name:
:param security_group:
:param user_data:
:param monitoring:
:param private_addressing:
"""
BasePage(self).goto_instances_via_menu()
InstanceLanding(self).click_action_launch_instance_on_landing_page()
LaunchInstanceWizard(self).launch_instance(image, availability_zone, instance_type,
number_of_of_instances, instance_name, key_name,
security_group, user_data, monitoring, private_addressing)
instance_id = InstanceLanding(self).get_id_of_newly_launched_instance()
InstanceLanding(self).goto_instance_detail_page_via_link(instance_id)
InstanceDetailPage(self, instance_id, instance_name).verify_instance_is_in_running_state(timeout_in_seconds=timeout_in_seconds)
return {'instance_name': instance_name, 'instance_id':instance_id}
def launch_instance_from_image_view_page(self, image_id_or_type, availability_zone = None,
instance_type="t1.micro",
number_of_of_instances = None, instance_name = None, key_name = "None (advanced option)",
security_group = "default", user_data=None, monitoring=False, private_addressing=False, timeout_in_seconds=480):
"""
Navigates to image view page via menu. Launches instance from given image.
:param image_id_or_type:
:param availability_zone:
:param instance_type:
:param number_of_of_instances:
:param instance_name:
:param key_name:
:param security_group:
:param user_data:
:param monitoring:
:param private_addressing:
"""
BasePage(self).goto_images_view_via_menu()
ImageLanding(self).click_action_launch_instance(image_id_or_type)
LaunchInstanceWizard(self).launch_instance_step2(availability_zone, instance_type,
number_of_of_instances, instance_name, key_name,
security_group, user_data, monitoring, private_addressing)
instance_id = InstanceLanding(self).get_id_of_newly_launched_instance()
InstanceLanding(self).goto_instance_detail_page_via_link(instance_id)
InstanceDetailPage(self, instance_id, instance_name).verify_instance_is_in_running_state(timeout_in_seconds=timeout_in_seconds)
return {'instance_name': instance_name, 'instance_id':instance_id}
def launch_more_like_this_from_view_page(self, instance_id, instance_name=None, user_data=None, monitoring=False,
private_addressing=False, timeout_in_seconds=480):
"""
Navigates to instances view page. Launches an instance like given instance.
:param instance_id:
:param instance_name:
:param user_data:
:param monitoring:
:param private_addressing:
:param timeout_in_seconds:
"""
BasePage(self).goto_instances_via_menu()
InstanceLanding(self).click_action_launch_more_like_this(instance_id)
LaunchMoreLikeThisDialog(self).launch_more_like_this(instance_name, user_data, monitoring, private_addressing)
instance_id = InstanceLanding(self).get_id_of_newly_launched_instance()
InstanceLanding(self).goto_instance_detail_page_via_link(instance_id)
InstanceDetailPage(self, instance_id, instance_name).verify_instance_is_in_running_state(
timeout_in_seconds=timeout_in_seconds)
return {'instance_name': instance_name, 'instance_id':instance_id}
def launch_more_like_this_from_detail_page(self, base_instance_id, instance_name=None, user_data=None, monitoring=False, private_addressing=False, timeout_in_seconds=240):
"""
Navigates to instance detail page. Launches an instance like given instance.
        :param base_instance_id:
:param instance_name:
:param user_data:
:param monitoring:
:param private_addressing:
"""
BasePage(self).goto_instances_via_menu()
base_instance_name=InstanceLanding(self).get_instance_name(base_instance_id)
InstanceLanding(self).goto_instance_detail_page_via_actions(base_instance_id)
InstanceDetailPage(self, base_instance_id, base_instance_name).click_action_launch_more_like_this()
LaunchMoreLikeThisDialog(self).launch_more_like_this(instance_name, user_data, monitoring, private_addressing)
instance_id = InstanceLanding(self).get_id_of_newly_launched_instance()
InstanceLanding(self).goto_instance_detail_page_via_link(instance_id)
InstanceDetailPage(self, instance_id, instance_name).verify_instance_is_in_running_state(timeout_in_seconds=timeout_in_seconds)
return {'instance_name': instance_name, 'instance_id':instance_id}
def terminate_instance_from_view_page(self, instance_id, instance_name=None,timeout_in_seconds=480):
"""
Navigates to view page, terminates instance.
:param instance_name:
:param instance_id:
"""
BasePage(self).goto_instances_via_menu()
InstanceLanding(self).click_action_terminate_instance_on_view_page(instance_id)
TerminateInstanceModal(self).click_terminate_instance_submit_button(instance_id)
InstanceLanding(self).goto_instance_detail_page_via_link(instance_id)
InstanceDetailPage(self, instance_id, instance_name).verify_instance_is_terminated(timeout_in_seconds=timeout_in_seconds)
def terminate_instance_from_detail_page(self, instance_id, timeout_in_seconds=480):
"""
Navigates to detail page, terminates instance.
:param instance_id:
"""
BasePage(self).goto_instances_via_menu()
instance_name=InstanceLanding(self).get_instance_name(instance_id)
InstanceLanding(self).goto_instance_detail_page_via_actions(instance_id)
InstanceDetailPage(self, instance_id, instance_name).click_terminate_instance_action_item_from_detail_page()
TerminateInstanceModal(self).click_terminate_instance_submit_button(instance_id)
InstanceDetailPage(self, instance_id, instance_name).verify_instance_is_terminated(timeout_in_seconds=timeout_in_seconds)
def batch_terminate_all_instances(self):
"""
Navigates to instances view page and terminates all instances
"""
BasePage(self).goto_instances_via_menu()
InstanceLanding(self).click_terminate_all_instances_button()
TerminateAllInstancesModal(self).click_terminate_all_instances_submit_button()
InstanceLanding(self).verify_there_are_no_running_instances()
def create_volume_from_view_page(self, volume_name=None, create_from_snapshot=False, snapshot_id = None, volume_size=None, availability_zone=None, timeout_in_seconds=240):
"""
Navigates to volumes view page and creates volume.
:param volume_name:
:param create_from_snapshot:
:param snapshot_id:
:param volume_size:
:param availability_zone:
"""
BasePage(self).goto_volumes_view_via_menu()
VolumeLanding(self).click_create_volume_btn_on_landing_page()
CreateVolumeDialog(self).create_volume(volume_name, create_from_snapshot, snapshot_id, volume_size, availability_zone)
VolumeDetailPage(self).verify_volume_status_is_available(timeout_in_seconds=timeout_in_seconds)
volume = VolumeDetailPage(self).get_volume_name_and_id()
print volume
return volume
def create_volume_from_dashboard(self, volume_name=None, create_from_snapshot=False,snapshot_id=None, volume_size=None, availability_zone=None, timeout_in_seconds=240 ):
"""
Navigates to dashboard and creates volume.
:param volume_name:
:param create_from_snapshot:
:param snapshot_id:
:param volume_size:
:param availability_zone:
:param timeout_in_seconds:
"""
BasePage(self).goto_dashboard_via_menu()
Dashboard(self).click_create_volume_link()
CreateVolumeDialog(self).create_volume(volume_name=volume_name, create_from_snapshot=create_from_snapshot, snapshot_id=snapshot_id, volume_size=volume_size, availability_zone=None)
VolumeDetailPage(self).verify_volume_status_is_available(timeout_in_seconds=timeout_in_seconds)
volume = VolumeDetailPage(self).get_volume_name_and_id()
print volume
return volume
def delete_volume_from_view_page(self, volume_id, timeout_in_seconds=240):
"""
Navigates to volumes view page and deletes volume.
:param timeout_in_seconds:
:param volume_id:
"""
BasePage(self).goto_volumes_view_via_menu()
VolumeLanding(self).click_action_delete_volume_on_view_page(volume_id)
DeleteVolumeModal(self).delete_volume()
VolumeLanding(self).verify_volume_status_is_deleted(volume_id, timeout_in_seconds)
def delete_volume_from_detail_page(self, volume_id, volume_name=None, timeout_in_seconds=240):
"""
Navigates to volume detail page and deletes volume. Waits for volume state to become 'deleted' on landing page.
:param timeout_in_seconds:
:param volume_id:
:param volume_name:
"""
print ""
print "====== Running delete_volume_from_detail_page ======"
print ""
BasePage(self).goto_volumes_view_via_menu()
VolumeLanding(self).goto_volume_detail_page_via_actions(volume_id)
VolumeDetailPage(self).verify_volume_detail_page_loaded(volume_id, volume_name)
VolumeDetailPage(self).click_action_delete_volume_on_detail_page()
DeleteVolumeModal(self).delete_volume()
VolumeLanding(self).verify_volume_status_is_deleted(volume_id, timeout_in_seconds)
def attach_volume_from_volume_lp(self, instance_id, volume_id, device=None, timeout_in_seconds=240):
"""
Navigates to volumes landing page, attaches a given volume to a given instance.
:param device:
:param timeout_in_seconds:
:param instance_id:
:param volume_id:
"""
print ""
print "====== Running attach_volume_from_volume_lp ======"
print ""
BasePage(self).goto_volumes_view_via_menu()
VolumeLanding(self).click_action_attach_to_instance(volume_id)
AttachVolumeModalSelectInstance(self).attach_volume(instance_id, device)
VolumeLanding(self).verify_volume_status_is_attached(volume_id, timeout_in_seconds)
def attach_volume_from_volume_detail_page(self, instance_id, volume_id, device=None, timeout_in_seconds=240):
"""
Navigates to volume detail page, attaches volume to instance.
:param instance_id:
:param volume_id:
:param device:
:param timeout_in_seconds:
"""
print ""
print "====== Running attach_volume_from_volume_detail_page ======"
print ""
BasePage(self).goto_volumes_view_via_menu()
VolumeLanding(self).goto_volume_detail_page_via_link(volume_id)
VolumeDetailPage(self).click_action_attach_volume_on_detail_page()
AttachVolumeModalSelectInstance(self).attach_volume(instance_id, device=device)
VolumeDetailPage(self).verify_volume_status_is_attached(timeout_in_seconds)
def attach_volume_from_instance_detail_page(self, volume_id, instance_id, instance_name=None, device=None, timeout_in_seconds=240):
"""
Navigates to instance detail page and attaches volume.
:param instance_id:
:param volume_id:
:param device:
:param timeout_in_seconds:
"""
print ""
print "====== Running attach_volume_from_instance_detail_page ======"
print ""
BasePage(self).goto_instances_via_menu()
InstanceLanding(self).goto_instance_detail_page_via_link(instance_id)
InstanceDetailPage(self, instance_id, instance_name).click_action_attach_volume()
AttachVolumeModalSelectVolume(self).attach_volume(volume_id, device)
InstanceDetailPage(self, instance_id).verify_volume_is_attached(volume_id, timeout_in_seconds)
def attach_volume_from_instance_lp(self, volume_id, instance_id, instance_name=None, device=None, timeout_in_seconds=240):
"""
Navigates to instance landing page. Attaches volume.
:param volume_id:
:param instance_id:
:param instance_name:
:param device:
:param timeout_in_seconds:
"""
print ""
print "====== Running attach_volume_from_instance_lp ======"
print ""
BasePage(self).goto_instances_via_menu()
InstanceLanding(self).click_action_manage_volumes_on_view_page(instance_id)
InstanceDetailPage(self, instance_id, instance_name).click_action_attach_volume()
AttachVolumeModalSelectVolume(self).attach_volume(volume_id, device)
InstanceDetailPage(self, instance_id).verify_volume_is_attached(volume_id, timeout_in_seconds)
def detach_volume_from_volumes_lp(self, volume_id, timeout_in_seconds=240):
"""
Navigate to volumes landing page. Detach volume.
        :param volume_id:
        :param timeout_in_seconds:
"""
BasePage(self).goto_volumes_view_via_menu()
VolumeLanding(self).click_action_detach_volume_on_view_page(volume_id)
DetachVolumeModal(self).detach_volume(volume_id)
VolumeLanding(self).verify_volume_status_is_available(volume_id, timeout_in_seconds)
def detach_volume_from_volume_detail_page(self, volume_id, timeout_in_seconds):
"""
Navigates to volume detail page. Detaches volume from instance. Verifies volume is in available state.
:param volume_id:
:param timeout_in_seconds:
"""
        raise NotImplementedError
def detach_volume_from_instance_detail_page(self, volume_id, instance_id, timeout_in_seconds):
"""
Navigates to instance detail page. Detaches a given volume. Verifies volume is in available state.
:param volume_id:
:param instance_id:
:param timeout_in_seconds:
"""
        raise NotImplementedError
def detach_volume_from_instance_lp(self, volume_id, instance_id, timeout_in_seconds):
"""
Navigates to instance landing page. Goes to volumes tab by "Manage Volumes" action. Detaches given volume. Verifies volume is in available state.
:param volume_id:
:param instance_id:
:param timeout_in_seconds:
"""
        raise NotImplementedError
def click_sortable_column_header_on_volumes_landing_page(self, column_name='name'):
"""
Sort volumes table by a given column (see <th> element's st-sort attr for possible column_name values)
:param column_name: header column name
:type column_name: str
"""
BasePage(self).goto_volumes_view_via_menu()
VolumeLanding(self).click_sortable_column_header(column_name=column_name)
def verify_sort_position_for_volume(self, volume_id, position=1):
"""
:param volume_id:
:param position: sorting position. Note: not zero-based (e.g. use 1 for first row)
:type position: int
"""
VolumeLanding(self).verify_volume_id_by_sort_position(volume_id, position=position)
def verify_charts_on_volume_monitoring_page(self, volume_id):
"""
Volume Monitoring page should display charts when attached to an instance
:param volume_id:
"""
BasePage(self).goto_volumes_view_via_menu()
VolumeLanding(self).goto_volume_detail_page_via_actions(volume_id)
VolumeDetailPage(self).goto_monitoring_tab(volume_id)
VolumeDetailPage(self).verify_charts_on_volume_monitoring_page(volume_id)
def verify_attach_notice_on_volume_monitoring_page(self, volume_id):
"""
Volume Monitoring page should display notice to attach volume to instance when unattached
:param volume_id:
"""
BasePage(self).goto_volumes_view_via_menu()
VolumeLanding(self).goto_volume_detail_page_via_actions(volume_id)
VolumeDetailPage(self).goto_monitoring_tab(volume_id)
VolumeDetailPage(self).verify_attach_notice_on_volume_monitoring_page(volume_id)
def create_snapshot_on_volumes_view_page(self, volume_id, snapshot_name=None, snapshot_description=None, timeout_in_seconds=240):
"""
Navigates to volumes view page and creates a snapshot of a volume.
:param snapshot_name:
:param snapshot_description:
:param timeout_in_seconds:
:param volume_id:
"""
BasePage(self).goto_volumes_view_via_menu()
VolumeLanding(self).click_action_manage_snaspshots(volume_id)
VolumeDetailPage(self).click_create_snapshot_from_volume_tile(volume_id)
CreateSnapshotModal(self).create_snapshot(snapshot_name, snapshot_description)
VolumeDetailPage(self).goto_detail_page_of_newly_created_snapshot(volume_id)
snapshot=SnapshotDetailPage(self).get_snapshot_name_and_id(snapshot_name)
SnapshotDetailPage(self).verify_snapshot_status_is_completed(timeout_in_seconds)
print snapshot
return snapshot
def create_snapshot_on_volume_detail_page(self, volume_id, snapshot_name=None, snapshot_description=None, timeout_in_seconds=240):
"""
Navigates to volume detail page and creates a snapshot.
:param timeout_in_seconds:
:param volume_id:
:param snapshot_name:
:param snapshot_description:
"""
BasePage(self).goto_volumes_view_via_menu()
VolumeLanding(self).goto_volume_detail_page_via_actions(volume_id)
VolumeDetailPage(self).click_create_snapshot_from_volume_tile(volume_id)
CreateSnapshotModal(self).create_snapshot(snapshot_name, snapshot_description)
VolumeDetailPage(self).goto_detail_page_of_newly_created_snapshot(volume_id)
snapshot=SnapshotDetailPage(self).get_snapshot_name_and_id(snapshot_name)
SnapshotDetailPage(self).verify_snapshot_status_is_completed(timeout_in_seconds)
print snapshot
return snapshot
def create_snapshot_on_snapshot_view_page(self, volume_id, snapshot_name=None, snapshot_description=None, timeout_in_seconds=240):
"""
Navigates to snapshot landing page, creates snapshot.
:param volume_id:
:param snapshot_name:
:param snapshot_description:
:param timeout_in_seconds:
"""
BasePage(self).goto_snapshots_view_via_menu()
SnapshotLanding(self).click_create_snapshot_btn_on_view_page()
CreateSnapshotPage(self).create_snapshot(volume_id=volume_id, snapshot_name=snapshot_name, snapshot_description=snapshot_description)
snapshot = SnapshotDetailPage(self).get_snapshot_name_and_id(snapshot_name)
SnapshotDetailPage(self).verify_snapshot_status_is_completed(timeout_in_seconds)
print snapshot
return snapshot
def create_snapshot_from_dashboard(self, volume_id, snapshot_name=None, snapshot_description=None, timeout_in_seconds=240):
"""
Navigates to snapshot landing page, creates snapshot.
:param volume_id:
:param snapshot_name:
:param snapshot_description:
:param timeout_in_seconds:
"""
BasePage(self).goto_dashboard_via_menu()
Dashboard(self).click_create_snapshot_link()
CreateSnapshotPage(self).create_snapshot(volume_id=volume_id, snapshot_name=snapshot_name, snapshot_description=snapshot_description)
snapshot = SnapshotDetailPage(self).get_snapshot_name_and_id(snapshot_name)
SnapshotDetailPage(self).verify_snapshot_status_is_completed(timeout_in_seconds)
print snapshot
return snapshot
def delete_snapshot_from_landing_page(self, snapshot_id):
"""
Navigates to landing page, deletes snapshot, verifies snapshot is gone from landing page.
:param snapshot_id:
"""
BasePage(self).goto_snapshots_view_via_menu()
SnapshotLanding(self).click_action_delete_snapshot_on_view_page(snapshot_id)
DeleteSnapshotModal(self).delete_snapshot()
SnapshotLanding(self).verify_snapshot_not_present(snapshot_id)
def delete_snapshot_from_detail_page(self, snapshot_id):
"""
Navigates to detail page, deletes snapshot, verifies snapshot is gone from landing page.
:param snapshot_id:
"""
BasePage(self).goto_snapshots_view_via_menu()
SnapshotLanding(self).goto_snapshot_detail_page_via_link(snapshot_id)
SnapshotDetailPage(self).click_action_delete_snapshot_on_detail_page()
DeleteSnapshotModal(self).delete_snapshot()
SnapshotLanding(self).verify_snapshot_not_present(snapshot_id)
def verify_snapshot_not_present_on_lp(self, snapshot_id):
"""
Navigates to snapshot landing page. Verifies snapshot not on landing page
:param snapshot_id:
"""
BasePage(self).goto_snapshots_view_via_menu()
SnapshotLanding(self).verify_snapshot_not_present(snapshot_id)
def create_volume_from_snapshot_on_snapshot_lp(self, snapshot_id, volume_name=None, availability_zone=None, volume_size=None, timeout_in_seconds=240):
"""
Navigates to snapshot landing page. Goes to "create volume from snapshot" in the actions menu. Creates volume from snapshot.
:param snapshot_id:
:param volume_name:
:param availability_zone:
:param volume_size:
:param timeout_in_seconds:
"""
BasePage(self).goto_snapshots_view_via_menu()
SnapshotLanding(self).click_action_create_volume_from_snapshot(snapshot_id)
CreateVolumeDialog(self).create_volume(volume_name, volume_size=volume_size, availability_zone=availability_zone, timeout_in_seconds=timeout_in_seconds)
VolumeDetailPage(self).verify_volume_status_is_available(timeout_in_seconds=timeout_in_seconds)
volume = VolumeDetailPage(self).get_volume_name_and_id()
print volume
return volume
def create_volume_from_snapshot_on_snapshot_detail_page(self, snapshot_id, volume_name=None, availability_zone=None, volume_size=None, timeout_in_seconds=240):
"""
Navigates to snapshot detail page. Goes to "create volume from snapshot" in the actions menu. Creates volume from snapshot.
:param snapshot_id:
:param volume_name:
:param availability_zone:
:param volume_size:
:param timeout_in_seconds:
"""
BasePage(self).goto_snapshots_view_via_menu()
SnapshotLanding(self).goto_snapshot_detail_page_via_link(snapshot_id)
SnapshotDetailPage(self).click_action_create_volume_from_snapshot_on_detail_page()
CreateVolumeDialog(self).create_volume(volume_name, volume_size=volume_size, availability_zone=availability_zone, timeout_in_seconds=timeout_in_seconds)
VolumeDetailPage(self).verify_volume_status_is_available(timeout_in_seconds=timeout_in_seconds)
volume = VolumeDetailPage(self).get_volume_name_and_id()
print volume
return volume
def delete_snapshot_from_tab_on_volume_detail_page(self):
raise NotImplementedError
def delete_snapshot_from_lp(self):
raise NotImplementedError
def register_snapshot_as_an_image_from_snapshot_landing_page(self, snapshot_id, image_name, description=None, delete_on_terminate=True, register_as_windows_image=False):
BasePage(self).goto_snapshots_view_via_menu()
SnapshotLanding(self).click_action_register_as_image(snapshot_id)
RegisterSnapshotAsImageModal(self).register_as_image(name=image_name, description=description, delete_on_terminate=delete_on_terminate, register_as_windows_image=register_as_windows_image)
image_id = ImageDetailPage(self).get_image_id()
image = {'image_name': image_name, 'image_id': image_id}
print image
return image
def remove_image_from_cloud_on_images_lp(self, image_id, delete_associated_snapshot=False):
BasePage(self).goto_images_view_via_menu()
ImageLanding(self).click_action_remove_image_from_cloud(image_id)
RemoveImageFromCloudDialog(self).remove_image(delete_associated_snapshot)
def register_snapshot_as_an_image_from_snapshot_detail_page(self, snapshot_id, image_name, description=None,
delete_on_terminate=True,
register_as_windows_image=False
):
BasePage(self).goto_snapshots_view_via_menu()
SnapshotLanding(self).goto_snapshot_detail_page_via_link(snapshot_id)
SnapshotDetailPage(self).click_action_register_as_image_on_detail_page()
RegisterSnapshotAsImageModal(self).register_as_image(name=image_name, description=description,
delete_on_terminate=delete_on_terminate,
register_as_windows_image=register_as_windows_image)
if ImageDetailPage(self).is_image_detail_page_loaded():
image_id = ImageDetailPage(self).get_image_id()
image = {'image_name': image_name, 'image_id': image_id}
else:
BasePage(self).goto_images_view_via_menu()
image_id = ImageLanding(self).get_image_id_by_name(image_name)
image = {'image_name': image_name, 'image_id': image_id}
print image
return image
def allocate_eip_from_lp(self, number=1):
"""
:param number: how many IPs to allocate
:return: allocated IPs as a list of strings
"""
BasePage(self).goto_elastic_ip_view_via_menu()
EipLanding(self).click_allocate_elastic_ips_button()
return AllocateEipDialog(self).allocate_elastic_ips(number=number)
def allocate_eip_from_dashboard(self, number=1):
"""
:param number: how many IPs to allocate
:return: allocated IPs as a list of strings
"""
BasePage(self).goto_dashboard_via_menu()
Dashboard(self).click_allocate_elastic_ips_link()
return AllocateEipDialog(self).allocate_elastic_ips(number=number)
def release_eip_from_eip_lp(self, elastic_ip):
"""
Release a single Elastic IP via the item row's actions menu
:param elastic_ip: IP address to release
"""
BasePage(self).goto_elastic_ip_view_via_menu()
EipLanding(self).select_release_ip_actions_menu_item(elastic_ip)
ReleaseEipDialog(self).release_elastic_ips()
EipLanding(self).verify_elastic_ip_is_released(elastic_ip)
def release_eips_from_eip_lp(self, elastic_ips):
"""
Batch-release Elastic IPs from landing page via More Actions button
:param elastic_ips: List of Elastic IPs to be released
:return: released Elastic IPs as a list of strings
"""
BasePage(self).goto_elastic_ip_view_via_menu()
EipLanding(self).click_elastic_ips_checkboxes(elastic_ips)
EipLanding(self).select_release_ips_more_actions_item()
return ReleaseEipDialog(self).release_elastic_ips()
def release_eip_from_eip_detail_page(self, elastic_ip):
"""
Release a single Elastic IP from the EIP detail page
:param elastic_ip: Elastic IP to be released
"""
BasePage(self).goto_elastic_ip_view_via_menu()
EipLanding(self).click_elastic_ip(elastic_ip)
EipDetailPage(self, elastic_ip)
EipDetailPage(self, elastic_ip).click_action_release_ip_address_on_detail_page()
ReleaseEipDialog(self).release_elastic_ips()
EipLanding(self).verify_elastic_ip_is_released(elastic_ip)
def associate_eip_from_eip_lp(self, elastic_ip, instance_id):
BasePage(self).goto_elastic_ip_view_via_menu()
EipLanding(self).associate_with_instance_actions_menu_item(elastic_ip)
AssociateEipDialog(self).associate_eip_with_instance(instance_id)
EipLanding(self).verify_elastic_ip_associate_instance(instance_id, elastic_ip)
def associate_eip_from_instances_lp(self, elastic_ip, instance_id):
BasePage(self).goto_instances_via_menu()
InstanceLanding(self).click_action_associate_ip_address_from_landing_page(instance_id)
AssociateEipDialog(self).associate_eip_from_instance(elastic_ip)
InstanceLanding(self).verify_elastic_ip_address_on_instance_lp(elastic_ip)
def associate_eip_from_instance_detail_page(self, elastic_ip, instance_id):
BasePage(self).goto_instances_via_menu()
InstanceLanding(self).goto_instance_detail_page_via_link(instance_id)
InstanceDetailPage(self, instance_id).click_action_associate_ip_address()
AssociateEipDialog(self).associate_eip_from_instance(elastic_ip)
InstanceDetailPage(self, instance_id).verify_eip_address_associated_to_instance(elastic_ip)
def associate_eip_from_eip_detail_page(self, elastic_ip, instance_id):
EipLanding(self).click_elastic_ip(elastic_ip)
EipDetailPage(self, elastic_ip)
EipDetailPage(self, elastic_ip).click_action_associate_ip_address_on_detail_page()
AssociateEipDialog(self).associate_eip_with_instance(instance_id)
EipLanding(self).click_elastic_ip(elastic_ip)
EipDetailPage(self, elastic_ip).verify_instance_id_on_detail_page(instance_id)
def disassociate_eip_from_eip_lp(self, elastic_ip, instance_id):
BasePage(self).goto_elastic_ip_view_via_menu()
EipLanding(self).disassociate_with_instance_actions_menu_item(elastic_ip, instance_id)
DisassociateEipDialog(self).disassociate_eip()
EipLanding(self).verify_disassociate_eip_from_lp(instance_id)
def disassociate_eip_from_eip_detail_page(self, elastic_ip, instance_id):
BasePage(self).goto_elastic_ip_view_via_menu()
EipLanding(self).click_elastic_ip(elastic_ip)
EipDetailPage(self, elastic_ip)
EipDetailPage(self, elastic_ip).click_action_disassociate_ip_address_on_detail_page()
DisassociateEipDialog(self).disassociate_eip()
EipLanding(self).click_elastic_ip(elastic_ip)
EipDetailPage(self, elastic_ip).verify_instance_id_off_detail_page(instance_id)
def disassociate_eip_from_instances_lp(self, elastic_ip, instance_id):
BasePage(self).goto_instances_via_menu()
InstanceLanding(self).click_action_disassociate_ip_address_from_landing_page(instance_id)
DisassociateEipDialog(self).disassociate_eip_from_instance()
InstanceLanding(self).verify_elastic_ip_address_off_instance_lp(elastic_ip)
def disassociate_eip_from_instance_detail_page(self, elastic_ip, instance_id):
BasePage(self).goto_instances_via_menu()
InstanceLanding(self).goto_instance_detail_page_via_link(instance_id)
InstanceDetailPage(self, instance_id).click_action_disassociate_ip_address()
DisassociateEipDialog(self).disassociate_eip_from_instance()
InstanceDetailPage(self, instance_id).verify_eip_address_disassociated_to_instance(elastic_ip)
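# --- Illustrative usage sketch (not part of the original module) ---
# The helpers above are meant to be called from a Selenium test case that has
# access to this tester object. The fixture name "tester" and the key
# 'snapshot_id' in the returned dict are assumptions for illustration only;
# see get_snapshot_name_and_id() for the actual structure.
#
#   snapshot = tester.create_snapshot_on_volume_detail_page(volume_id, snapshot_name='snap-test-1')
#   volume = tester.create_volume_from_snapshot_on_snapshot_lp(snapshot['snapshot_id'])
#   eips = tester.allocate_eip_from_lp(number=2)
#   tester.associate_eip_from_eip_lp(eips[0], instance_id)
#   tester.release_eips_from_eip_lp(eips)
#   tester.delete_snapshot_from_landing_page(snapshot['snapshot_id'])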
|
py | 1a4532540d1644ad48f005c62604ad8e9ca37d59 | # -*- python -*-
# This software was produced by NIST, an agency of the U.S. government,
# and by statute is not subject to copyright in the United States.
# Recipients of this software assume all responsibilities associated
# with its operation, modification and maintenance. However, to
# facilitate maintenance we ask that before distributing modified
# versions of this software, you first contact the authors at
# [email protected].
# A container class to record "before"/"after" state of a skeleton
# subjected to a series of NodeMove operations.
class SkeletonDiff:
def __init__(self):
self.changes = {}
def addChanges(self, node, before, after):
if node in self.changes:
self.changes[node][1] = after
else:
self.changes[node] = [before, after]
def undoChanges(self):
for node in self.changes:
node.moveTo(self.changes[node][0])
def redoChanges(self):
for node in self.changes:
node.moveTo(self.changes[node][1])
# A utility class for book-keeping a series of NodeMoves for a skeleton
class NodeMoveHistory:
def __init__(self):
self._data = []
self.ndata = 0
self.current = -1
self.skeldiff = None
def undo(self):
self._data[self.current].undoChanges()
self.current -= 1
def redo(self):
self.current += 1
self._data[self.current].redoChanges()
def undoable(self):
return self.current>=0
def redoable(self):
return self.ndata>(self.current+1)
def update(self, node, before, after):
if not self.skeldiff:
self.skeldiff = SkeletonDiff()
self.skeldiff.addChanges(node, before, after)
def finish(self):
self.append()
self.skeldiff = None
def append(self):
if self.ndata!=(self.current+1):
del self._data[(self.current+1):]
self._data.append(self.skeldiff)
self.ndata = len(self._data)
self.current += 1
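# Illustrative usage sketch (not part of the original module). Real nodes come
# from the OOF skeleton machinery; _FakeNode below is a stand-in that only
# provides the moveTo() interface these classes rely on.
#
#   class _FakeNode:
#       def __init__(self, pos): self.pos = pos
#       def moveTo(self, pos): self.pos = pos
#
#   node = _FakeNode((0.0, 0.0))
#   history = NodeMoveHistory()
#   history.update(node, before=(0.0, 0.0), after=(1.0, 2.0))
#   history.finish()      # closes the SkeletonDiff for this move
#   history.undo()        # node.pos is (0.0, 0.0) again
#   history.redo()        # node.pos is (1.0, 2.0)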
|
py | 1a45326720b16c2af69d18465b3b98b47482317d | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-01-18 15:35
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('aggregator', '0032_auto_20190118_1720'),
]
operations = [
# migrations.RenameField(
# model_name='dataset',
# old_name='spatiaNorth',
# new_name='spatialNorth',
# ),
]
|
py | 1a4532e38e4d1c389eb8f3b0becfc414b07588d0 | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
#
#from
# class Followers(models.Model):
# uuid=models.UUIDField(primary_key=True)
#
# class Users(models.Model):
# uuid=models.UUIDField(primary_key=True)
# username=models.CharField(max_length=20,unique=True)
# email=models.EmailField()
# password=models.CharField(max_length=20)
# followers=models.ManyToManyField(Followers)
#
# class Meta:
# db_table='users'
#
class Uploads(models.Model):
images=models.ImageField(blank=True)
videos=models.FileField(blank=True)
class Profile(models.Model):
user=models.OneToOneField(User,related_name='user',on_delete=models.CASCADE)
follows=models.ManyToManyField(User,related_name='followers',symmetrical=False,blank=True,null=True)  # symmetrical=False: if user 1 follows user 2, the reverse is not automatically true unless user 2 follows user 1 back
uploads=models.ManyToManyField(Uploads,related_name='uploads',blank=True)
profile_pic = models.ImageField(upload_to='ProfilePicture/',default='/pics/GURMUKH DECORATIVE.jpg')
def __str__(self):
return f'{self.user.username} Profile'
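# Illustrative usage sketch (not part of the original module), showing that the
# follow relation is asymmetric (symmetrical=False). Run in a Django shell or
# test with migrations applied; the user names are examples only.
#
#   alice = User.objects.create_user('alice')
#   bob = User.objects.create_user('bob')
#   alice_profile = Profile.objects.create(user=alice)
#   bob_profile = Profile.objects.create(user=bob)
#   alice_profile.follows.add(bob)                 # alice follows bob
#   assert bob in alice_profile.follows.all()
#   assert alice not in bob_profile.follows.all()  # not mutual automatically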
|
py | 1a453329f9032d47e1a15ea0c07532719a1876c8 | """Fake VOIP call animation
Available Command:
./call
Edits the outgoing "./call" message through a staged "calling Telegram HQ" prank sequence."""
from telethon import events
import asyncio
@borg.on(events.NewMessage(pattern=r"\.(.*)", outgoing=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 5
animation_ttl = range(0, 10)
input_str = event.pattern_match.group(1)
if input_str == "/call":
await event.edit(input_str)
animation_chars = [
"`Connecting To Telegram Headquarters...`",
"`User Authorised.`",
"`Private VOIP Call Connected...`",
"`Me Calling Pavel Durov Shukla....`",
"`Me: Hello Sir, Please Ban This Guys Telegram Account.`",
"`Durov: May I Know Who Is This?`",
"`Me: Yo Brah, I Am` @r4v4n4",
"`Durov: OMG!!! I Am FAN Of You Sir...\nI'll Make Sure That Guy Account Will Get Blocked Within 24Hrs.`",
"`Me: See You Later Brah.`",
"`Private VOIP Call Disconnected.`",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 10])
|
py | 1a4533f7238ae3a3fd3e975f66dbcb9ef9144a80 | """
Memento is a behavioral design pattern that
allows making snapshots of an object's state
and restoring it in future.
The Memento doesn't compromise the internal structure
of the object it works with, as well as data kept
inside the snapshots
"""
import string
import random
from datetime import datetime
from typing import List
class MementoInterface:
"""
The Memento interface provides a way to retrieve the memento's metadata,
such as creation date or name. However, it doesn't expose the Originator's state.
"""
def get_name(self) -> str: raise NotImplementedError()
def get_date(self) -> datetime: raise NotImplementedError()
class ConcreteMemento(MementoInterface):
"""
The Concrete Memento contains the infrastructure for storing the Originator's state.
"""
def __init__(self, state: str) -> None:
self._state = state
self._date = datetime.now()
def get_state(self) -> str:
""" The Originator uses this method when restoring its state. """
return self._state
# The rest of the methods are used by caretaker to display metadata.
def get_name(self) -> str:
return f"{self._date}/{self._state[:9]}..."
def get_date(self) -> datetime:
return self._date
class Originator:
"""
The Originator holds some important state that may change over time.
It also defines a method for saving the state inside a memento and
another method for restoring the state from it.
"""
def __init__(self, state: str) -> None:
self._state = state
print(f"Originator: My initial state is: {self._state}")
def do_something(self) -> None:
print("Originator: I'm doing something important.")
self._state = self.generate_random_string()
print(f"Originator: And my state has changed to {self._state}")
def generate_random_string(self) -> str:
random_string: str = ''.join(random.choices(string.ascii_uppercase + string.digits, k=30))
return random_string
def save(self) -> MementoInterface:
""" Saves the current state inside a memento. """
return ConcreteMemento(self._state)
def restore(self, memento: MementoInterface) -> None:
""" Restores the Originator's state from a memento object. """
self._state = memento.get_state()
print(f"Originator: My state has changed to: {self._state}")
class Caretaker:
"""
The Caretaker doesn't depend on the Concrete Memento class.
Therefore, it doesn't have access to the originator's state,
stored inside the memento. It works with all mementos via
the base Memento interface.
"""
def __init__(self, originator: Originator) -> None:
self._originator = originator
self._mementos: List[MementoInterface] = []
def backup(self) -> None:
print("Caretaker: Saving Originator's state...")
self._mementos.append(self._originator.save())
def undo(self) -> None:
if not self._mementos:
return
_memento = self._mementos.pop()
print(f"Caretaker: Restoring state to: {_memento.get_name()}")
self._originator.restore(_memento)
def show_history(self) -> None:
print("Caretaker: Here's the list of mementos:")
for _memento in self._mementos:
print(_memento.get_name())
class Demo:
def run(self) -> None:
originator: Originator = Originator("Super-duper-super-puper-super.")
caretaker: Caretaker = Caretaker(originator)
caretaker.backup()
originator.do_something()
caretaker.backup()
originator.do_something()
caretaker.backup()
originator.do_something()
print("Show history")
caretaker.show_history()
print("Client: Now, let's rollback!")
caretaker.undo()
print("Client: Once more!")
caretaker.undo()
demo: Demo = Demo()
demo.run()
|
py | 1a4535d2888c58bef15dc4ed981b15c50c30464e | #!/usr/bin/env python
"""
Determines the frequencies of residue pair contacts in molecular
dynamics simulations. Given one or more MDContact outputs, this
script determines the frequency of each unique interaction of the
form (itype, residue 1, residue2), weighted by number of frames,
across all inputs.
The inputs are one or more MDContact output file paths as well as an
output path. The user may also specify a subset of interaction types
to compute frequencies for. The user may additionally provide a label
file to convert residue labellings (typically for the use of aligning
sequences for performing frequency comparisons with other
trajectories).
The output is a single tsv file with each row indicating residue
id 1, residue id 2, and contact frequency.
"""
from __future__ import division
from collections import defaultdict
import sys
import argparse
def atomid_to_resid(atom):
return atom[0:atom.rfind(":")]
# return ':'.join(atom.split(':')[1:3])
def gen_counts(input_lines, interaction_types, residuelabels=None):
"""
Parse each line in `input_lines` as a line from MDContacts and return interaction-counts for each residue pair. If
`residuelabels` is defined it is used to modify residue identifiers and to filter out residues not indicated.
For example:
inputs = [
"# total_frames: 3",
"\t".join(["0", "hbbb", "A:ALA:1:N", "A:ARG:4:O"]),
"\t".join(["0", "vdw", "A:ALA:1:CB", "A:ARG:4:CA"]),
"\t".join(["1", "vdw", "A:ALA:1:N", "A:CYS:5:CA"]),
"\t".join(["2", "hbbb", "A:THR:2:N", "A:CYS:5:O"]),
"\t".join(["2", "hbss", "A:ALA:1:N", "A:CYS:5:O"])
]
labels = {"A:ALA:1": "A1", "A:ARG:4": "R4", "A:CYS:5": "C5"}
# Only consider hbbb and vdw, filter away THR, and map to single-letter labels
gen_counts(inputs, ["hbbb", "vdw"], labels)
# Returns: { ("A1", "R4"): 1, ("A1", "C5"): 1 }
Parameters
----------
input_lines: Iterable[str]
Interactions formatted as MDContacts output, e.g. ["0\thbbb\tA:ALA:1:N\tA:ARG:4:H", ...]
interaction_types: list of str
Which interaction types to consider
residuelabels: dict of (str: str)
Remaps and filters residuelabels, e.g. {"A:ARG:4": "R4"}
Returns
-------
(int, dict of (str, str): int)
Total frame-count and mapping of residue-residue interactions to frame-count
"""
# Maps residue pairs to set of frames in which they're present
rescontact_frames = defaultdict(set)
total_frames = 0
for line in input_lines:
line = line.strip()
if "total_frames" in line:
tokens = line.split(" ")
total_frames = int(tokens[1][tokens[1].find(":")+1:])
if len(line) == 0 or line[0] == "#":
continue
tokens = line.split("\t")
# Check that the interaction type is specified
itype = tokens[1]
if itype not in interaction_types:
continue
frame = int(tokens[0])
if frame + 1 > total_frames:
total_frames = frame + 1
res1 = atomid_to_resid(tokens[2])
res2 = atomid_to_resid(tokens[3])
# Change residue id according to `residuelabels` or skip if any of the residues are not present
if residuelabels is not None:
if res1 not in residuelabels or res2 not in residuelabels:
continue
res1 = residuelabels[res1]
res2 = residuelabels[res2]
# Ensure lexicographical order of residue names
if res2 < res1:
res1, res2 = res2, res1
rescontact_frames[(res1, res2)].add(frame)
# Instead of returning the list of frames for each interaction, only return the number of frames
rescontact_counts = {(res1, res2): len(frames) for (res1, res2), frames in rescontact_frames.items()}
return total_frames, rescontact_counts
def parse_labelfile(label_file):
"""
Parses a label-file and returns a dictionary with the residue label mappings. Unless prepended with a comment-
indicator (#), each line is assumed to have a valid residue identifier (e.g. "A:ALA:1") and a label which the
residue should be mapped to (e.g. "A1").
Example:
parse_labelfile(["A:ALA:1\tA1")
# Returns {"A:ALA:1": "A1"}
Parameters
----------
label_file: Iterable[str]
Lines with tab-separated residue identifier and label
Returns
-------
dict of str: str
Mapping from residue-id in contact-file to label of any format
"""
ret = {}
for line in label_file:
line = line.strip()
# Ignore line if empty or comment
if len(line) == 0 or line[0] == "#":
continue
tokens = line.split("\t")
ret[tokens[0]] = tokens[1]
return ret
def gen_frequencies(count_list):
"""
Take a list of residue contact counts (see output of `gen_counts`) and compute total counts and frequencies.
Example:
clist = [
(4, {("A1", "R4"): 4, ("A1", "C5"): 3}), # First simulation has 4 frames and two contacts
(3, {("A1", "R4"): 2}) # Second simulation has 3 frames and one contact
]
gen_frequencies(clist)
# Returns: (7, {("A1", "R4"): (6, 0.857), ("A1", "C5"): (3, 0.429)})
Parameters
----------
count_list: list of (int, dict of (str, str): int)
List with individual frame counts and dictionaries mapping residue pairs to frame-counts
Return
------
(int, dict of (str, str): (int, float))
Total framecount and mapping of residue ID pairs to the number of frames in which they contact and the frequency
"""
rescontact_count = defaultdict(int)
total_frames = 0
for frames, rescount_dict in count_list:
total_frames += frames
for (res1, res2), count in rescount_dict.items():
rescontact_count[(res1, res2)] += count
respair_freqs = {respair: (count, float(count) / total_frames) for respair, count in rescontact_count.items()}
return total_frames, respair_freqs
def main():
# Parse command line arguments
class MyParser(argparse.ArgumentParser):
def error(self, message):
# Prints full program help when error occurs
self.print_help(sys.stderr)
sys.stderr.write('\nError: %s\n' % message)
sys.exit(2)
parser = MyParser(description=__doc__,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--input_files',
type=argparse.FileType('r'),
required=True,
nargs='+',
metavar='FILE.tsv',
help="Path to one or more contact-file outputs")
parser.add_argument('--label_file',
type=argparse.FileType('r'),
required=False,
metavar='FILE.tsv',
help="A label file for standardizing residue names between different proteins")
parser.add_argument('--output_file',
type=argparse.FileType('w'),
required=True,
metavar='FILE.tsv',
help="Path to output file")
parser.add_argument('--itypes',
required=False,
default="all",
type=str,
nargs="+",
metavar="ITYPE",
help='Include only these interaction types in frequency computation. Valid choices are: \n'
'* all (default), \n'
'* sb (salt-bridges), \n'
'* pc (pi-cation), \n'
'* ps (pi-stacking), \n'
'* ts (t-stacking), \n'
'* vdw (van der Waals), \n'
'* hbbb, hbsb, hbss, (hydrogen bonds with specific backbone/side-chain profile)\n'
'* wb, wb2 (water-bridges and extended water-bridges) \n'
'* hls, hlb (ligand-sidechain and ligand-backbone hydrogen bonds), \n'
'* lwb, lwb2 (ligand water-bridges and extended water-bridges)')
# results, unknown = parser.parse_known_args()
args = parser.parse_args()
# Update itypes if "all" is specified
if "all" in args.itypes:
args.itypes = ["sb", "pc", "ps", "ts", "vdw", "hb", "lhb", "hbbb", "hbsb",
"hbss", "wb", "wb2", "hls", "hlb", "lwb", "lwb2"]
output_file = args.output_file
input_files = args.input_files
itypes = args.itypes
labels = parse_labelfile(args.label_file) if args.label_file else None
counts = [gen_counts(input_file, itypes, labels) for input_file in input_files]
total_frames, frequencies = gen_frequencies(counts)
output_file.write('#\ttotal_frames:%d\tinteraction_types:%s\n' % (total_frames, ','.join(itypes)))
output_file.write('#\tColumns:\tresidue_1,\tresidue_2\tframe_count\tcontact_frequency\n')
for (res1, res2), (count, frequency) in frequencies.items():
output_file.write('\t'.join([res1, res2, "%.3f" % frequency]) + "\n")
if __name__ == '__main__':
main()
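# Example invocation (illustrative only; the script and file names below are
# placeholders for your own paths):
#
#   python get_contact_frequencies.py \
#       --input_files sim1_contacts.tsv sim2_contacts.tsv \
#       --label_file residue_labels.tsv \
#       --itypes hbbb vdw \
#       --output_file frequencies.tsv
#
# Each output row holds residue_1, residue_2 and the contact frequency across
# all input trajectories, weighted by their frame counts.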
|
py | 1a453652f9b128945d1ad0f31ef707e99d495aa0 | """
Makes a chromosome or plasmid item
Example mouse chromosome 5
https://www.wikidata.org/wiki/Q15304656
Example yeast chromosome XII
https://www.wikidata.org/wiki/Q27525657
"""
import os
from datetime import datetime
from io import StringIO
from urllib import request
import pandas as pd
from scheduled_bots import get_default_core_props
from wikidataintegrator import wdi_core, wdi_helpers
core_props = get_default_core_props()
class ChromosomeBot:
chr_type_map = {'Chromosome': 'Q37748',
'Mitochondrion': 'Q18694495',
'Chloroplast': 'Q22329079'}
def __init__(self):
self.retrieved = None
self.login = None
self.ass_sum = None
self.chr_df = dict()
def get_assembly_summaries(self):
table = request.urlopen(request.Request('ftp://ftp.ncbi.nlm.nih.gov/genomes/refseq/assembly_summary_refseq.txt')).read().decode()
names = table.split("\n")[1].replace("# ", "").split("\t")
self.ass_sum = pd.read_csv(StringIO(table), sep="\t", comment="#", names=names, low_memory=False)
def get_assembly_report(self, taxid):
if self.ass_sum is None:
self.get_assembly_summaries()
df = self.ass_sum.query("taxid == {} & refseq_category == 'reference genome'".format(taxid))
if len(df) == 0:
# try "representative genome" (needed for mouse and rat)
df = self.ass_sum.query("taxid == {} & refseq_category == 'representative genome'".format(taxid))
if len(df) != 1:
raise ValueError("unknown reference: {}".format(df))
print(df)
ftp_path = list(df.ftp_path)[0]
assembly = os.path.split(ftp_path)[1]
url = os.path.join(ftp_path, assembly + "_assembly_report.txt")
print(url)
# read the column names from the file
table = request.urlopen(request.Request(url)).read().decode()
names = [x for x in table.split("\n") if x.startswith("#")][-1].strip().replace("# ", "").split("\t")
self.chr_df[taxid] = pd.read_csv(StringIO(table), sep="\t", names=names, comment='#')
self.chr_df[taxid] = self.chr_df[taxid].rename(columns={'Sequence-Name': 'SequenceName', 'Sequence-Role': 'SequenceRole',
'Assigned-Molecule': 'AssignedMolecule',
'Assigned-Molecule-Location/Type': 'AssignedMoleculeLocationType',
'GenBank-Accn': 'GenBankAccn', 'RefSeq-Accn': 'RefSeqAccn',
'UCSC-style-name': 'UCSCstylename'})
#print(self.chr_df[taxid].query("SequenceRole == 'assembled-molecule'"))
def get_chrom_info(self, chr_name, taxid):
""" result looks like:
{'Assembly-Unit': 'C57BL/6J',
'Assigned-Molecule': '1',
'Assigned-Molecule-Location/Type': 'Chromosome',
'GenBank-Accn': 'CM000994.2',
'RefSeq-Accn': 'NC_000067.6',
'Relationship': '=',
'Sequence-Length': 195471971,
'Sequence-Name': '1',
'Sequence-Role': 'assembled-molecule',
'UCSC-style-name': 'chr1'}
"""
if taxid not in self.chr_df:
self.get_assembly_report(taxid)
df = self.chr_df[taxid].query("SequenceRole == 'assembled-molecule'")
d_list = df[(df.SequenceName == chr_name) | (df.AssignedMolecule == chr_name) | (df.UCSCstylename == chr_name)].to_dict('records')
if len(d_list) == 1:
return d_list[0]
def get_or_create(self, organism_info, retrieved=None, login=None):
"""
Make sure all chromosome items exist
return a map of chr num to wdid. looks like:
{'1': 'Q28114580', '2': 'Q28114581', ..., 'MT': 'Q28114585'}
:param organism_info: {'name': name, 'taxid': taxid, 'wdid': wdid, 'type': type}
:type organism_info: dict
:param retrieved: for reference statement
:type retrieved: datetime
:param login:
:return:
"""
self.login = login
self.retrieved = datetime.now() if retrieved is None else retrieved
taxid = int(organism_info['taxid'])
if taxid not in self.chr_df:
self.get_assembly_report(taxid)
# map of chr_num to wdid for this taxon ("1" -> "Q1234")
chr_num_wdid = dict()
# get assembled chromosomes, which we will create items for
chrdf = self.chr_df[taxid][self.chr_df[taxid]['SequenceRole'] == 'assembled-molecule']
existing_chr = wdi_helpers.id_mapper("P2249")
existing_chr = {k.split(".")[0]: v for k, v in existing_chr.items()}
for record in chrdf.to_dict("records"):
chrom_num = record['SequenceName']
# if a field has "chr" in it, remove it
chrom_num = chrom_num.replace("chr", "").replace("Chr", "").replace("CHR", "")
genome_id = record['RefSeqAccn']
genome_id = genome_id.split(".")[0]
chr_type = record['AssignedMoleculeLocationType']
# {'Chromosome','Mitochondrion'}
# chrom_type = record['Assigned-Molecule-Location/Type']
if genome_id in existing_chr:
chr_num_wdid[chrom_num] = existing_chr[genome_id]
else:
# chromosome doesn't exist in wikidata. create it
print("chromosome being created: {}, {}".format(chrom_num, genome_id))
chr_num_wdid[chrom_num] = self.create_chrom(organism_info, chrom_num, genome_id, chr_type, login)
return chr_num_wdid
def create_chrom(self, organism_info, chrom_num, genome_id, chr_type, login):
def make_ref(retrieved, genome_id):
"""
Create reference statement for chromosomes
:param retrieved: datetime
:type retrieved: datetime
:param genome_id: refseq genome id
:type genome_id: str
:return:
"""
refs = [
wdi_core.WDItemID(value='Q20641742', prop_nr='P248', is_reference=True), # stated in ncbi gene
wdi_core.WDString(value=genome_id, prop_nr='P2249', is_reference=True), # Link to Refseq Genome ID
wdi_core.WDTime(retrieved.strftime('+%Y-%m-%dT00:00:00Z'), prop_nr='P813', is_reference=True)
]
return refs
item_name = '{} chromosome {}'.format(organism_info['name'], chrom_num)
item_description = '{} chromosome'.format(organism_info['type']) if organism_info['type'] else "chromosome"
print(genome_id)
reference = make_ref(self.retrieved, genome_id)
# instance of chr_type
if chr_type not in ChromosomeBot.chr_type_map:
raise ValueError("unknown chromosome type: {}".format(chr_type))
statements = [wdi_core.WDItemID(value=ChromosomeBot.chr_type_map[chr_type], prop_nr='P31', references=[reference])]
# found in taxon
statements.append(wdi_core.WDItemID(value=organism_info['wdid'], prop_nr='P703', references=[reference]))
# genome id
statements.append(wdi_core.WDString(value=genome_id, prop_nr='P2249', references=[reference]))
wd_item = wdi_core.WDItemEngine(data=statements,
append_value=['P31'], fast_run=True,
fast_run_base_filter={'P703': organism_info['wdid'], 'P2249': ''},
core_props=core_props)
if wd_item.wd_item_id:
return wd_item.wd_item_id
wd_item.set_label(item_name)
wd_item.set_description(item_description, lang='en')
wdi_helpers.try_write(wd_item, genome_id, 'P2249', login)
return wd_item.wd_item_id
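# Illustrative usage sketch (not part of the original module). `login` must be
# a wikidataintegrator login object with write access; the organism values
# below (human) are examples only.
#
#   bot = ChromosomeBot()
#   organism_info = {'name': 'Homo sapiens', 'taxid': '9606',
#                    'wdid': 'Q15978631', 'type': 'mammalian'}
#   chr_num_wdid = bot.get_or_create(organism_info, retrieved=datetime.now(), login=login)
#   # e.g. {'1': 'Q...', '2': 'Q...', ..., 'MT': 'Q...'}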
|
py | 1a4536f138361cf3a7c104e25ac606a63386bc9c | import math
# ZTest
def startZTest(populationAverage, sampleAverage, populationStrdDeviation, sampleSize):
standardError = populationStrdDeviation / math.sqrt(sampleSize)
observedValue = (sampleAverage - populationAverage) / standardError
print("ZTest: " + str(observedValue))
return observedValue
# Euclidean Distance
def startED(p, q, length):
sum = 0.0
for i in range(0, length):
sum = math.pow(p[i] - q[i], 2) + sum
euclideanDistance = math.sqrt(sum)
print("ED: " + str(euclideanDistance))
return euclideanDistance
# Piecewise Aggregate Approximation
def startPAA(pieceCount, dataList):
count = 0
remainderCount = 1
sum = 0.0
i = 0
interval = len(dataList) // pieceCount  # integer length of each segment
remainder = len(dataList) % pieceCount
paaList = [0 for _ in range(pieceCount)]
for data in dataList:
sum = sum + float(data)
count = count + 1
if remainderCount <= remainder:
if count == (interval + 1):
average = sum / (interval + 1)
paaList[i] = average
remainderCount = remainderCount + 1
i = i + 1
sum = 0.0
count = 0
else:
if count == interval:
average = sum / interval
paaList[i] = average
i = i + 1
sum = 0.0
count = 0
return paaList
# Jensen Shannon Divergence
def startJSD(p, q):
middle = [0 for _ in range(len(p))]
for i in range(0, len(p)):
middle[i] = (p[i] + q[i]) / 2
divergence = (startKLD(p, middle) + startKLD(q, middle)) / 2
print("JSD: " + str(divergence))
return divergence
# Kullback Leibler Divergence
def startKLD(p, q):
divergence = 0.0
for i in range(0, len(p)):
tmp = 0.0
if p[i] != 0.0:
tmp = p[i] * (math.log10(p[i]) - math.log10(q[i]))
divergence = divergence + tmp
return divergence
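if __name__ == "__main__":
    # Illustrative usage (not part of the original module): reduce two series
    # with PAA, compare them, and run the other helpers on toy numbers. The
    # JSD inputs are assumed to behave like probability distributions.
    series_a = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]
    series_b = [2.0, 2.0, 4.0, 4.0, 6.0, 6.0, 8.0, 8.0]
    paa_a = startPAA(4, series_a)   # four averaged segments per series
    paa_b = startPAA(4, series_b)
    startED(paa_a, paa_b, len(paa_a))
    startZTest(populationAverage=4.5, sampleAverage=5.0,
               populationStrdDeviation=2.0, sampleSize=8)
    startJSD([0.1, 0.4, 0.5], [0.2, 0.3, 0.5])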
|
py | 1a4538040f42d6ad7a28e63eb2362649d15106ee | # coding: utf8
from __future__ import unicode_literals
SPACY_MODELS = {}
VECTORS = {}
def get_spacy(lang, **kwargs):
global SPACY_MODELS
import spacy
if lang not in SPACY_MODELS:
SPACY_MODELS[lang] = spacy.load(lang, **kwargs)
return SPACY_MODELS[lang]
def register_vectors(ops, lang, data):
key = (ops.device, lang)
VECTORS[key] = data
def get_vectors(ops, lang):
global VECTORS
key = (ops.device, lang)
if key not in VECTORS:
nlp = get_spacy(lang)
VECTORS[key] = nlp.vocab.vectors.data
return VECTORS[key]
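# Illustrative usage sketch (not part of the original module). It assumes a
# thinc-era ops object exposing a `device` attribute and an installed spaCy
# model; the import path and names below are assumptions, not guarantees.
#
#   from thinc.neural.ops import NumpyOps   # assumed import path (thinc 6/7)
#   nlp = get_spacy("en_core_web_sm")       # loaded once, cached afterwards
#   ops = NumpyOps()
#   vectors = get_vectors(ops, "en_core_web_sm")
#   register_vectors(ops, "xx_custom", my_vector_array)  # my_vector_array: your own ndarray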
|
py | 1a453882a53ffb4bfd50a75c4b2d096d2bf138ec | # Generated by Django 4.0 on 2021-12-25 02:37
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Jobs',
fields=[
('id', models.CharField(max_length=128, primary_key=True, serialize=False)),
('name', models.CharField(max_length=128)),
('company', models.CharField(max_length=200)),
('salary', models.CharField(max_length=64)),
('requires', models.TextField()),
('issue', models.DateTimeField(null=True)),
('education', models.CharField(max_length=64)),
('position', models.CharField(max_length=20)),
('platform', models.CharField(max_length=20)),
('get_data', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Label',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('value', models.CharField(max_length=64)),
('job', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='job.jobs')),
],
),
]
|
py | 1a4538862fe360f93aaa86539980825befbb7a34 | #!/usr/bin/python3
# Adapted from https://github.com/openai/mujoco-py/blob/master/vendor/Xdummy-entrypoint
# Copyright OpenAI; MIT License
import argparse
import os
import sys
import subprocess
if __name__ == "__main__":
parser = argparse.ArgumentParser()
args, extra_args = parser.parse_known_args()
subprocess.Popen(
[
"nohup",
"Xorg",
"-noreset",
"+extension",
"GLX",
"+extension",
"RANDR",
"+extension",
"RENDER",
"-logfile",
"/tmp/xdummy.log",
"-config",
"/etc/dummy_xorg.conf",
":0",
]
)
subprocess.Popen(
["nohup", "Xdummy"],
stdout=open("/dev/null", "w"),
stderr=open("/dev/null", "w"),
)
os.environ["DISPLAY"] = ":0"
if not extra_args:
argv = ["/bin/bash"]
else:
argv = extra_args
# Explicitly flush right before the exec since otherwise things might get
# lost in Python's buffers around stdout/stderr (!).
sys.stdout.flush()
sys.stderr.flush()
os.execvpe(argv[0], argv, os.environ)
|
py | 1a4538cf74176cda8675dd6922f843e6052330b8 | import click
import pytest
from click_rich_help import StyledCommand, StyledGroup
from rich.theme import Theme
def test_styles(runner):
@click.group(cls=StyledGroup, headers_style="yellow", options_style="green")
def cli():
pass
@cli.command(cls=StyledCommand, styles={"headers": "red", "options": "blue"})
@click.option("--name", help="The person to greet.")
def command(name):
pass
result = runner.invoke(cli, ["command", "--help"], color=True)
assert not result.exception
assert result.output.splitlines() == [
"\x1b[31mUsage\x1b[0m: cli command [OPTIONS]",
"",
"\x1b[31mOptions\x1b[0m:",
" \x1b[34m--name \x1b[0m\x1b[34mTEXT\x1b[0m The person to greet.",
" \x1b[34m--help\x1b[0m Show this message and exit.",
]
def test_theme(runner):
@click.group(
cls=StyledGroup,
headers_style="red",
options_style="green",
options_custom_styles={"command1": "red"},
styles={'headers':'bold'},
theme=Theme({"headers": "yellow"}),
)
def cli():
pass
@cli.command()
def command1(name):
pass
@cli.command()
def command2(name):
pass
result = runner.invoke(cli, ["--help"], color=True)
assert not result.exception
assert result.output.splitlines() == [
"\x1b[33mUsage\x1b[0m: cli [OPTIONS] COMMAND [ARGS]...",
"",
"\x1b[33mOptions\x1b[0m:",
" \x1b[32m--help\x1b[0m Show this message and exit.",
"",
"\x1b[33mCommands\x1b[0m:",
" \x1b[31mcommand1\x1b[0m",
" \x1b[32mcommand2\x1b[0m",
]
|
py | 1a4539231036b2e649619161b88cf827fdfdd890 | from __future__ import division
import math
import time
import matplotlib.pyplot as plt
from itertools import count
from os.path import join, exists
from os import makedirs
from IPython.display import clear_output, display, HTML
def simulate(simulation,
controller=None,
fps=60,
visualize_every=1,
action_every=1,
simulation_resolution=None,
wait=False,
disable_training=False,
save_path=None):
"""Start the simulation. Performs three tasks
- visualizes simulation in iPython notebook
- advances simulator state
- reports state to controller and chooses actions
to be performed.
Parameters
-------
simulation: tr_lr.simulation
simulation that will be simulated ;-)
controller: tr_lr.controller
controller used
fps: int
frames per seconds
visualize_every: int
visualize every `visualize_every`-th frame.
action_every: int
take action every `action_every`-th frame
simulation_resolution: float
simulate at most 'simulation_resolution' seconds at a time.
If None, the it is set to 1/FPS (default).
wait: boolean
whether to intentionally slow down the simulation
to appear real time.
disable_training: bool
if true training_step is never called.
save_path: str
save svg visualization (only tl_rl.utils.svg
supported for the moment)
"""
# prepare path to save simulation images
if save_path is not None:
if not exists(save_path):
makedirs(save_path)
last_image = 0
# calculate simulation times
chunks_per_frame = 1
chunk_length_s = 1.0 / fps
if simulation_resolution is not None:
frame_length_s = 1.0 / fps
chunks_per_frame = int(math.ceil(frame_length_s / simulation_resolution))
chunks_per_frame = max(chunks_per_frame, 1)
chunk_length_s = frame_length_s / chunks_per_frame
# state transition bookkeeping
last_observation = None
last_action = None
simulation_started_time = time.time()
# setup rendering handles for reuse
if hasattr(simulation, 'setup_draw'):
simulation.setup_draw()
for frame_no in count():
for _ in range(chunks_per_frame):
simulation.step(chunk_length_s)
if frame_no % action_every == 0:
new_observation = simulation.observe()
reward = simulation.collect_reward()
# store last transition
if last_observation is not None:
controller.store(last_observation, last_action, reward, new_observation)
# act
new_action = controller.action(new_observation)
simulation.perform_action(new_action)
#train
if not disable_training:
controller.training_step()
# update current state as last state.
last_action = new_action
last_observation = new_observation
# adding 1 to make it less likely to happen at the same time as
# action taking.
if (frame_no + 1) % visualize_every == 0:
fps_estimate = frame_no / (time.time() - simulation_started_time)
# draw simulated environment all the rendering is handled within the simulation object
stats = ["fps = %.1f" % (fps_estimate, )]
if hasattr(simulation, 'draw'): # render with the draw function
simulation.draw(stats)
elif hasattr(simulation, 'to_html'): # in case some class only support svg rendering
clear_output(wait=True)
svg_html = simulation.to_html(stats)
display(svg_html)
if save_path is not None:
img_path = join(save_path, "%d.svg" % (last_image,))
with open(img_path, "w") as f:
svg_html.write_svg(f)
last_image += 1
time_should_have_passed = frame_no / fps
time_passed = (time.time() - simulation_started_time)
if wait and (time_should_have_passed > time_passed):
time.sleep(time_should_have_passed - time_passed)
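# Minimal interface sketch (not part of the original module): the stubs below
# list the methods simulate() calls on `simulation` and `controller`. They are
# placeholders, not a working environment or learner.
#
#   class StubSimulation(object):
#       def setup_draw(self): pass            # optional
#       def step(self, dt): pass              # advance the world by dt seconds
#       def observe(self): return None        # current state
#       def collect_reward(self): return 0.0  # reward accumulated since last call
#       def perform_action(self, a): pass
#       def draw(self, stats): pass           # or to_html(stats) for svg output
#
#   class StubController(object):
#       def store(self, s, a, r, s_next): pass  # remember a transition
#       def action(self, s): return 0           # choose an action for state s
#       def training_step(self): pass
#
#   simulate(StubSimulation(), StubController(), fps=30, disable_training=True)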
|
py | 1a4539cacf94bc201cbcb0813b86248e9cc8970b | from panda3d.core import *
from direct.directnotify import DirectNotifyGlobal
from direct.interval.IntervalGlobal import *
from pirates.util.PythonUtil import reduceAngle, fitSrcAngle2Dest
from pirates.util.PythonUtilPOD import clampScalar, getSetter, ParamObj
from direct.task import Task
from otp.otpbase import OTPGlobals
from pirates.pirate import CameraMode
from pirates.piratesbase import PiratesGlobals
import math
class OrbitCamera(CameraMode.CameraMode, NodePath, ParamObj):
notify = DirectNotifyGlobal.directNotify.newCategory('OrbitCamera')
class ParamSet(ParamObj.ParamSet):
Params = {
'lookAtOffset': Vec3(0, 0, 0),
'escapement': 10.0,
'rotation': 0.0,
'fadeGeom': False,
'idealDistance': 25.0,
'minDistance': 3.0,
'maxDistance': 40.0,
'minEsc': -20.0,
'maxEsc': 25.0,
'minDomeEsc': 0.0,
'maxCamtiltEsc': 0.0,
'autoFaceForward': True,
'autoFaceForwardMaxDur': 14.0 }
UpdateTaskName = 'OrbitCamUpdateTask'
CollisionCheckTaskName = 'OrbitCamCollisionTask'
GeomFadeLerpDur = 1.0
PullFwdDist = 2.0
def __init__(self, subject, params = None):
ParamObj.__init__(self)
NodePath.__init__(self, self._getTopNodeName())
CameraMode.CameraMode.__init__(self)
self.setSubject(subject)
self.lookAtNode = NodePath('orbitCamLookAt')
self.escapementNode = self.attachNewNode('orbitCamEscapement')
self.camParent = self.escapementNode.attachNewNode('orbitCamParent')
self._paramStack = []
if params is None:
self.setDefaultParams()
else:
params.applyTo(self)
self._isAtRear = True
self._rotateToRearIval = None
self._lockAtRear = False
def destroy(self):
self._paramStack = None
self.escapementNode = None
self.camParent = None
self.lookAtNode.removeNode()
del self.subject
CameraMode.CameraMode.destroy(self)
NodePath.removeNode(self)
ParamObj.destroy(self)
def getName(self):
return 'Orbit'
def _getTopNodeName(self):
return 'OrbitCam'
def setSubject(self, subject = None):
self.subject = subject
def getSubject(self):
return self.subject
def pushParams(self):
self._paramStack.append(self.ParamSet(self))
def popParams(self):
curParams = self.ParamSet(self)
if len(self._paramStack):
self._paramStack.pop().applyTo(self)
else:
OrbitCamera.notify.warning('param stack underflow')
return curParams
def getLookAtOffset(self):
return self.lookAtOffset
def setLookAtOffset(self, lookAtOffset):
self.lookAtOffset = Vec3(lookAtOffset)
def applyLookAtOffset(self):
if self.isActive():
self.lookAtNode.setPos(self.lookAtOffset)
self.setFluidPos(render, self.lookAtNode.getPos(render))
camera.lookAt(self.lookAtNode)
def getEscapement(self):
return self.escapement
def setEscapement(self, escapement):
self.escapement = escapement
def applyEscapement(self):
if self.isActive():
if self.escapement >= self._minDomeEsc:
domeEsc = self.escapement
camEsc = 0.0
elif self.escapement <= self._maxCamtiltEsc:
domeEsc = self._minDomeEsc
camEsc = self._maxCamtiltEsc - self.escapement
else:
domeEsc = self._minDomeEsc
camEsc = 0.0
self.escapementNode.setP(-domeEsc)
self.camParent.setP(camEsc)
def _lerpEscapement(self, escapement, duration = None):
curEsc = self.getEscapement()
escapement = clampScalar(escapement, self._minEsc, self._maxEsc)
if duration is None:
diff = abs(curEsc - escapement)
speed = (max(curEsc, self._maxEsc) - min(curEsc, self._minEsc)) * 0.025
duration = diff / speed
self._stopEscapementLerp()
self._escLerpIval = LerpFunctionInterval(self.setEscapement, fromData = curEsc, toData = escapement, duration = duration, blendType = 'easeOut', name = 'OrbitCamera.escapementLerp')
self._escLerpIval.start()
def _stopEscapementLerp(self):
if self._escLerpIval is not None and self._escLerpIval.isPlaying():
self._escLerpIval.pause()
self._escLerpIval = None
def getRotation(self):
return self.getH(self.subject)
def setRotation(self, rotation):
self._rotation = rotation
if self.subject:
self.setH(self.subject, rotation)
def getFadeGeom(self):
return self._fadeGeom
def setFadeGeom(self, fadeGeom):
self._fadeGeom = fadeGeom
def applyFadeGeom(self):
if self.isActive():
if not (self._fadeGeom) and self.getPriorValue():
if hasattr(self, '_hiddenGeoms'):
for np in self._hiddenGeoms.keys():
self._unfadeGeom(np)
self._hiddenGeoms = { }
def getIdealDistance(self):
return self.idealDistance
def setIdealDistance(self, idealDistance):
self.idealDistance = idealDistance
def applyIdealDistance(self):
if self.isActive():
self.idealDistance = clampScalar(self.idealDistance, self._minDistance, self._maxDistance)
if self._practicalDistance is None:
self._zoomToDistance(self.idealDistance)
def popToIdealDistance(self):
self._setCurDistance(self.idealDistance)
def setPracticalDistance(self, practicalDistance):
if practicalDistance is not None and practicalDistance > self.idealDistance:
practicalDistance = None
if self._practicalDistance is None:
if practicalDistance is None:
return None
self._stopZoomIval()
self._setCurDistance(practicalDistance)
else:
self._stopZoomIval()
if practicalDistance is None:
self._zoomToDistance(self.idealDistance)
else:
self._setCurDistance(practicalDistance)
self._practicalDistance = practicalDistance
def getMinDistance(self):
return self._minDistance
def setMinDistance(self, minDistance):
self._minDistance = minDistance
def applyMinDistance(self):
if self.isActive():
self.setIdealDistance(self.idealDistance)
def getMaxDistance(self):
return self._maxDistance
def setMaxDistance(self, maxDistance):
self._maxDistance = maxDistance
def applyMaxDistance(self):
if self.isActive():
self.setIdealDistance(self.idealDistance)
if hasattr(self, '_collSolid'):
self._collSolid.setPointB(0, -(self._maxDistance + OrbitCamera.PullFwdDist), 0)
def getMinEsc(self):
return self._minEsc
def getMaxEsc(self):
return self._maxEsc
def getMinDomeEsc(self):
return self._minDomeEsc
def getMaxCamtiltEsc(self):
return self._maxCamtiltEsc
def setMinEsc(self, minEsc):
self._minEsc = minEsc
def setMaxEsc(self, maxEsc):
self._maxEsc = maxEsc
def setMinDomeEsc(self, minDomeEsc):
self._minDomeEsc = minDomeEsc
def setMaxCamtiltEsc(self, maxCamtiltEsc):
self._maxCamtiltEsc = maxCamtiltEsc
def enterActive(self):
CameraMode.CameraMode.enterActive(self)
self.reparentTo(render)
self.clearTransform()
self.setH(self.subject, self._rotation)
self.setP(0)
self.setR(0)
self.camParent.clearTransform()
camera.reparentTo(self.camParent)
camera.clearTransform()
base.camNode.setLodCenter(self.subject)
if base.wantEnviroDR:
base.enviroCamNode.setLodCenter(self.subject)
self.lookAtNode.reparentTo(self.subject)
self.lookAtNode.clearTransform()
self.lookAtNode.setPos(self.lookAtOffset)
self.setFluidPos(render, self.lookAtNode.getPos(render))
self.escapementNode.setP(-(self.escapement))
self._setCurDistance(self.idealDistance)
camera.lookAt(self.lookAtNode)
self._disableRotateToRear()
self._isAtRear = True
self._rotateToRearIval = None
self._lockAtRear = False
self._zoomIval = None
self._escLerpIval = None
self._practicalDistance = None
self._startUpdateTask()
self._startCollisionCheck()
def exitActive(self):
taskMgr.remove(OrbitCamera.UpdateTaskName)
self.ignoreAll()
self._stopZoomIval()
self._stopEscapementLerp()
self._stopRotateToRearIval()
self._stopCollisionCheck()
self._stopUpdateTask()
self.lookAtNode.detachNode()
self.detachNode()
base.camNode.setLodCenter(NodePath())
if base.wantEnviroDR:
base.enviroCamNode.setLodCenter(NodePath())
CameraMode.CameraMode.exitActive(self)
def _startUpdateTask(self):
self.lastSubjectH = self.subject.getH(render)
taskMgr.add(self._updateTask, OrbitCamera.UpdateTaskName, priority = 40)
self._updateTask()
def _updateTask(self, task = None):
self.setFluidPos(render, self.lookAtNode.getPos(render))
curSubjectH = self.subject.getH(render)
if self._lockAtRear:
self.setRotation(0.0)
elif self._rotateToRearEnabled and self.getAutoFaceForward():
relH = reduceAngle(self.getH(self.subject))
absRelH = abs(relH)
if absRelH < 0.1:
self.setRotation(0.0)
self._stopRotateToRearIval()
self._lockAtRear = True
else:
ivalPlaying = self._rotateToRearIvalIsPlaying()
if ivalPlaying and curSubjectH == self.lastSubjectH:
pass
else:
self._stopRotateToRearIval()
duration = self._autoFaceForwardMaxDur * absRelH / 180.0
targetH = curSubjectH
startH = fitSrcAngle2Dest(self.getH(render), targetH)
self._rotateToRearIval = LerpHprInterval(self, duration, Point3(targetH, 0, 0), startHpr = Point3(startH, 0, 0), other = render, blendType = 'easeOut')
self._rotateToRearIval.start()
self.lastSubjectH = curSubjectH
self.setP(0)
self.setR(0)
camera.clearMat()
return Task.cont
def _stopUpdateTask(self):
taskMgr.remove(OrbitCamera.UpdateTaskName)
def setAutoFaceForward(self, autoFaceForward):
if not autoFaceForward:
self._stopRotateToRearIval()
self._autoFaceForward = autoFaceForward
def getAutoFaceForward(self):
return self._autoFaceForward
def setAutoFaceForwardMaxDur(self, autoFaceForwardMaxDur):
self._autoFaceForwardMaxDur = autoFaceForwardMaxDur
def getAutoFaceForwardMaxDur(self):
return self._autoFaceForwardMaxDur
def _enableRotateToRear(self):
self._rotateToRearEnabled = True
def _disableRotateToRear(self):
self._stopRotateToRearIval()
self._rotateToRearEnabled = False
def _rotateToRearIvalIsPlaying(self):
return self._rotateToRearIval is not None and self._rotateToRearIval.isPlaying()
def _stopRotateToRearIval(self):
if self._rotateToRearIval is not None and self._rotateToRearIval.isPlaying():
self._rotateToRearIval.pause()
self._rotateToRearIval = None
def _getCurDistance(self):
return -self.camParent.getY()
def _setCurDistance(self, distance):
self.camParent.setY(-distance)
def _zoomToDistance(self, distance):
curDistance = self._getCurDistance()
diff = abs(curDistance - distance)
if diff < 0.01:
self._setCurDistance(distance)
return None
speed = (max(curDistance, self._maxDistance) - min(curDistance, self._minDistance)) * 0.5
duration = diff / speed
self._stopZoomIval()
self._zoomIval = LerpPosInterval(self.camParent, duration, Point3(0, -distance, 0), blendType = 'easeOut', name = 'orbitCamZoom', fluid = 1)
self._zoomIval.start()
def _stopZoomIval(self):
if self._zoomIval is not None and self._zoomIval.isPlaying():
self._zoomIval.pause()
self._zoomIval = None
def _startCollisionCheck(self, shipBarrier = 0):
self._collSolid = CollisionSegment(0, 0, 0, 0, -(self._maxDistance + OrbitCamera.PullFwdDist), 0)
collSolidNode = CollisionNode('OrbitCam.CollSolid')
collSolidNode.addSolid(self._collSolid)
if shipBarrier:
collSolidNode.setFromCollideMask(PiratesGlobals.ShipCameraBarrierBitmask)
else:
collSolidNode.setFromCollideMask(OTPGlobals.CameraBitmask | OTPGlobals.CameraTransparentBitmask | OTPGlobals.FloorBitmask)
collSolidNode.setIntoCollideMask(BitMask32.allOff())
self._collSolidNp = self.escapementNode.attachNewNode(collSolidNode)
self._cHandlerQueue = CollisionHandlerQueue()
self._cTrav = CollisionTraverser('OrbitCam.cTrav')
self._cTrav.addCollider(self._collSolidNp, self._cHandlerQueue)
self._hiddenGeoms = { }
self._fadeOutIvals = { }
self._fadeInIvals = { }
taskMgr.add(self._collisionCheckTask, OrbitCamera.CollisionCheckTaskName, priority = 45)
def _collisionCheckTask(self, task = None):
self._cTrav.traverse(render)
self.cTravOnFloor.traverse(render)
if self._fadeGeom:
nonObstrGeoms = dict(self._hiddenGeoms)
numEntries = self._cHandlerQueue.getNumEntries()
if numEntries > 0:
self._cHandlerQueue.sortEntries()
i = 0
while i < numEntries:
collEntry = self._cHandlerQueue.getEntry(i)
intoNode = collEntry.getIntoNodePath()
cMask = intoNode.node().getIntoCollideMask()
if not (cMask & OTPGlobals.CameraTransparentBitmask).isZero():
if intoNode in nonObstrGeoms:
del nonObstrGeoms[intoNode]
self._fadeGeom(intoNode)
else:
cPoint = collEntry.getSurfacePoint(self.escapementNode)
distance = Vec3(cPoint).length()
self.setPracticalDistance(distance - OrbitCamera.PullFwdDist)
break
i += 1
else:
self.setPracticalDistance(None)
for np in nonObstrGeoms.keys():
self._unfadeGeom(np)
elif self._cHandlerQueue.getNumEntries() > 0:
self._cHandlerQueue.sortEntries()
collEntry = self._cHandlerQueue.getEntry(0)
cPoint = collEntry.getSurfacePoint(self.escapementNode)
distance = Vec3(cPoint).length()
self.setPracticalDistance(distance - OrbitCamera.PullFwdDist)
else:
self.setPracticalDistance(None)
distance = self._getCurDistance()
return Task.cont
def _stopCollisionCheck(self):
while len(self._hiddenGeoms):
self._unfadeGeom(self._hiddenGeoms.keys()[0])
del self._hiddenGeoms
del self._fadeOutIvals
del self._fadeInIvals
taskMgr.remove(OrbitCamera.CollisionCheckTaskName)
self._cTrav.removeCollider(self._collSolidNp)
del self._cHandlerQueue
del self._cTrav
self._collSolidNp.detachNode()
del self._collSolidNp
def _fadeGeom(self, np):
if np in self._fadeInIvals:
self._fadeInIvals[np].finish()
del self._fadeInIvals[np]
if np not in self._hiddenGeoms:
hadTransparency = np.getTransparency()
fadeIval = Sequence(Func(np.setTransparency, 1), LerpColorScaleInterval(np, OrbitCamera.GeomFadeLerpDur, VBase4(1, 1, 1, 0), blendType = 'easeInOut'), name = 'OrbitCamFadeGeomOut')
self._hiddenGeoms[np] = hadTransparency
self._fadeOutIvals[np] = fadeIval
fadeIval.start()
def _unfadeGeom(self, np):
if np in self._hiddenGeoms:
if np in self._fadeOutIvals:
self._fadeOutIvals[np].pause()
del self._fadeOutIvals[np]
fadeIval = Sequence(LerpColorScaleInterval(np, OrbitCamera.GeomFadeLerpDur, VBase4(1, 1, 1, 1), blendType = 'easeInOut'), Func(np.setTransparency, self._hiddenGeoms[np]), name = 'OrbitCamFadeGeomIn')
del self._hiddenGeoms[np]
self._fadeInIvals[np] = fadeIval
fadeIval.start()
|
py | 1a4539d58eb87a3caacea0136907c2a799339f3f | # coding: utf-8
# Copyright 2018 Hiroshi Seki
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import argparse
import numpy
import pytest
import torch
import espnet.lm.pytorch_backend.extlm as extlm_pytorch
import espnet.nets.pytorch_backend.lm.default as lm_pytorch
from espnet.nets.pytorch_backend import e2e_asr
def make_arg(**kwargs):
defaults = dict(
elayers=4,
subsample="1_2_2_1_1",
etype="blstmp",
eunits=100,
eprojs=100,
dtype="lstm",
dlayers=1,
dunits=300,
atype="location",
aconv_chans=10,
aconv_filts=100,
mtlalpha=0.5,
lsm_type="",
lsm_weight=0.0,
sampling_probability=0.0,
adim=320,
dropout_rate=0.0,
dropout_rate_decoder=0.0,
nbest=5,
beam_size=3,
penalty=0.5,
maxlenratio=1.0,
minlenratio=0.0,
ctc_weight=0.2,
ctc_window_margin=0,
verbose=2,
char_list=["a", "i", "u", "e", "o"],
word_list=["<blank>", "<unk>", "ai", "iu", "ue", "eo", "oa", "<eos>"],
outdir=None,
ctc_type="warpctc",
report_cer=False,
report_wer=False,
sym_space="<space>",
sym_blank="<blank>",
context_residual=False,
use_frontend=False,
replace_sos=False,
tgt_lang=False,
)
defaults.update(kwargs)
return argparse.Namespace(**defaults)
def init_torch_weight_const(m, val):
for p in m.parameters():
p.data.fill_(val)
def init_torch_weight_random(m, rand_range):
for name, p in m.named_parameters():
p.data.uniform_(rand_range[0], rand_range[1])
# set small bias for <blank> output
if "wordlm.lo.bias" in name or "dec.output.bias" in name:
p.data[0] = -10.0
def init_chainer_weight_const(m, val):
for p in m.params():
p.data[:] = val
def make_small_arg(**kwargs):
return make_arg(
elayers=1,
subsample="1_1",
etype="lstm",
eunits=2,
eprojs=2,
dtype="lstm",
dlayers=1,
dunits=2,
atype="dot",
adim=2,
rnnlm="dummy",
lm_weight=0.3,
**kwargs
)
# ctc_weight: 0.0 (attention), 0.5 (hybrid CTC/attention), 1.0 (CTC)
@pytest.mark.parametrize("ctc_weight", [0.0, 0.5, 1.0])
def test_batch_beam_search(ctc_weight):
numpy.random.seed(1)
idim = 10
args = make_small_arg(ctc_weight=ctc_weight)
model = e2e_asr.E2E(idim, 5, args)
torch.manual_seed(1)
rnnlm = lm_pytorch.ClassifierWithState(lm_pytorch.RNNLM(len(args.char_list), 2, 2))
init_torch_weight_random(model, (-0.1, 0.1))
init_torch_weight_random(rnnlm, (-0.1, 0.1))
model.eval()
rnnlm.eval()
data = [("aaa", dict(feat=numpy.random.randn(10, idim).astype(numpy.float32)))]
in_data = data[0][1]["feat"]
s_nbest_hyps = model.recognize(in_data, args, args.char_list)
b_nbest_hyps = model.recognize_batch([in_data], args, args.char_list)
assert s_nbest_hyps[0]["yseq"] == b_nbest_hyps[0][0]["yseq"]
s_nbest_hyps = model.recognize(in_data, args, args.char_list, rnnlm)
b_nbest_hyps = model.recognize_batch([in_data], args, args.char_list, rnnlm)
assert s_nbest_hyps[0]["yseq"] == b_nbest_hyps[0][0]["yseq"]
if ctc_weight > 0.0:
args.ctc_window_margin = 10
s_nbest_hyps = model.recognize(in_data, args, args.char_list, rnnlm)
b_nbest_hyps = model.recognize_batch([in_data], args, args.char_list, rnnlm)
assert s_nbest_hyps[0]["yseq"] == b_nbest_hyps[0][0]["yseq"]
# Test word LM in batch decoding
rand_range = (-0.01, 0.01)
torch.manual_seed(1)
char_list = ["<blank>", "<space>"] + args.char_list + ["<eos>"]
args = make_small_arg(
ctc_weight=ctc_weight,
ctc_window_margin=10,
beam_size=5,
)
model = e2e_asr.E2E(idim, len(char_list), args)
char_dict = {x: i for i, x in enumerate(char_list)}
word_dict = {x: i for i, x in enumerate(args.word_list)}
word_rnnlm = lm_pytorch.ClassifierWithState(
lm_pytorch.RNNLM(len(args.word_list), 2, 2)
)
rnnlm = lm_pytorch.ClassifierWithState(
extlm_pytorch.LookAheadWordLM(word_rnnlm.predictor, word_dict, char_dict)
)
init_torch_weight_random(model, rand_range)
init_torch_weight_random(rnnlm, rand_range)
model.eval()
rnnlm.eval()
s_nbest_hyps = model.recognize(in_data, args, char_list, rnnlm)
b_nbest_hyps = model.recognize_batch([in_data], args, char_list, rnnlm)
assert s_nbest_hyps[0]["yseq"] == b_nbest_hyps[0][0]["yseq"]
|
py | 1a453a39e36358fb989fce747b343eb3b98c85d6 | import os
import sys
seed_data = 7
lunarc = int(sys.argv[1])
nbr_params = int(sys.argv[2])
data_set = str(sys.argv[3])
seed = int(sys.argv[4])
# remove disp setting
if lunarc == 1 and 'DISPLAY' in os.environ:
del os.environ['DISPLAY']
if lunarc == 1:
os.chdir('/home/samwiq/snpla/seq-posterior-approx-w-nf-dev/hodgkin_huxley')
else:
os.chdir('/home/samuel/Documents/projects/seq posterior approx w nf/seq posterior approx w nf dev/hodgkin_huxley')
import torch
import HodgkinHuxley
import numpy as np
import functions as func
import time
import sys
if lunarc == 1:
sys.path.append('/home/samwiq/snpla/seq-posterior-approx-w-nf-dev/algorithms')
else:
sys.path.append(
'/home/samuel/Documents/projects/seq posterior approx w nf/seq posterior approx w nf dev/algorithms')
import snpla as snpla
nbr_samples = int(len(HodgkinHuxley.h.t_vec) * HodgkinHuxley.h.dt)
job = str(data_set) + "_" + str(nbr_params) + "_" + str(nbr_samples) + "_" + str(seed) # + "extended"
# Gen sbi data
model = HodgkinHuxley.HodgkinHuxley(data_set, nbr_params, "snpla")
v_true, Iinj = model.simulator(model.log_theta_true, seed_data, True)
summary_stats_obs = model.calculate_summary_statistics(v_true)
# set up model simulator
def simulator_wrapper(params):
# return tensor
return model.calculate_summary_statistics(model.simulator(params, None))
# run pilot to calc mean and std of summary stats
whiteness_params = func.pilot_run(model, simulator_wrapper, summary_stats_obs)
summary_stats_obs_w = func.whiten(summary_stats_obs, whiteness_params)
def simulator(params):
N = params.shape[0]
data = torch.zeros(params.shape[0], 19)
for i in range(N):
data[i, :] = torch.as_tensor(func.whiten(simulator_wrapper(params[i, :]), whiteness_params))
return data
flow_lik, flow_post = func.set_up_networks(model.prior.low,
model.prior.high,
dim_post=model.nbr_params)
# setting for not exteded:
# decay_rate_post = 0.95
# prob_prior_decay_rate = 0.9
# 1000, 10000
# setting for exteded:
# decay_rate_post = 0.9
# prob_prior_decay_rate = 0.9
# 2000, 10000
optimizer_lik = torch.optim.Adam(flow_lik.parameters())
optimizer_post = torch.optim.Adam(flow_post.parameters(), lr=0.001, weight_decay=0.0) # used def value before
decay_rate_post = 0.95 # was 0.95
s_x_o = torch.from_numpy(summary_stats_obs_w).to(dtype=torch.float32).reshape(1, 19)
nbr_rounds = 12
prob_prior_decay_rate = 0.8 # was 0.95
prob_prior = snpla.calc_prob_prior(nbr_rounds, prob_prior_decay_rate)
print(prob_prior)
nbr_lik = [2000 for _ in range(nbr_rounds)] # [1000, 1000, 1000, 1000, 1000] # , 2000, 2000]
nbr_epochs_lik = [100 for _ in range(nbr_rounds)] # [100, 100, 100, 100, 100]
batch_size = 50
batch_size_post = 2000
nbr_post = [10000 for _ in range(nbr_rounds)] # [10000, 10000, 10000, 10000, 10000] # , 10000, 10000]
nbr_epochs_post = [50 for _ in range(nbr_rounds)] # [50, 50, 50, 50, 50, 50]
#print("----------------")
#print(model.prior.low)
#print(flow_post.sample(1000, context=s_x_o).min(dim=1))
#print("---")
#print(model.prior.high)
#print(flow_post.sample(1000, context=s_x_o).max(dim=1))
torch.manual_seed(seed)
np.random.seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
prior_samples = model.prior.sample(sample_shape=(1,))
print(prior_samples)
print(prior_samples.shape)
data_sets = simulator(prior_samples)
print(prior_samples)
print(data_sets)
print(data_sets.shape)
s_x_o = data_sets
x_o_batch_post = torch.zeros(batch_size_post, 19)
for i in range(batch_size_post):
x_o_batch_post[i, :] = s_x_o
dim_post = nbr_params
start = time.time()
models_lik, models_post = snpla.inference_snpla(flow_lik,
flow_post,
model.prior,
simulator,
optimizer_lik,
optimizer_post,
decay_rate_post,
s_x_o,
x_o_batch_post,
dim_post,
prob_prior,
nbr_lik,
nbr_epochs_lik,
nbr_post,
nbr_epochs_post,
batch_size,
batch_size_post)
end = time.time()
run_time = end - start
L = 5
M = L
K = nbr_params
indications = torch.zeros(K)
post_samples = models_post[-1].sample(M, context=s_x_o)
post_samples = post_samples.reshape((M, K))
for k in range(K):
indications[k] = (post_samples[:, k] < prior_samples[0, k]).sum()
np.savetxt('sbc/ranks_snpla_' + job + '.csv', indications.numpy(), delimiter=",")
|
py | 1a453a3db1e06ac9f136801e735b3cf1964d7225 | # -*- coding: utf-8 -*-
"""
gishaku.models
~~~~~~~~~~~~~~
Functions for modifying or interfacing with guilded.py models.
:copyright: (c) 2021 Devon (Gorialis) R
:license: MIT, see LICENSE for more details.
"""
import copy
import guilded
from guilded.ext import commands
async def copy_context_with(ctx, *, author=None, channel=None, **kwargs):
"""
Makes a new :class:`Context` with changed message properties.
"""
# copy the message and update the attributes
alt_message: guilded.Message = copy.copy(ctx.message)
alt_message._update(kwargs) # pylint: disable=protected-access
if author is not None:
alt_message.author = author
if channel is not None:
alt_message.channel = channel
# obtain and return a context of the same type
return await ctx.bot.get_context(alt_message, cls=type(ctx))
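
# Usage sketch (hypothetical command, not part of the original module): re-run the
# current invocation as if another member had sent it. The `bot`, the `sudo` command
# and its body below are assumptions for illustration only.
#
#     @bot.command()
#     async def sudo(ctx, member: guilded.Member, *, content: str):
#         alt_ctx = await copy_context_with(ctx, author=member, content=ctx.prefix + content)
#         await alt_ctx.command.invoke(alt_ctx)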
|
py | 1a453d7ab48275d1e9afd11d10db6cc633303bdf | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from databox import Client
from db import *
from time import sleep
import itertools
from sys import stderr
import argparse
class Streamer(object):
databox_client = Client
delay = 10
def __init__(self, db_name, db_user, databox_push_token, delay=10):
self.delay = delay
db_proxy.initialize(MySQLDatabase(db_name, user=db_user))
self.databox_client = Client(token=databox_push_token)
    def observe_and_stream(self):
        """Starts an infinite loop that retrieves the biggest id from the
        stocks table. It compares that id with the current biggest id; if it is
        bigger, it streams the new records from the old biggest id onward."""
max_id = Stock.max_id()
while True:
new_max_id = Stock.max_id()
if new_max_id > max_id:
if self.stream_records_from(max_id):
print "Inserted new records from %d,..." % max_id
max_id = new_max_id
else:
print >>stderr, "Error inserting batch!"
else:
print "Nothing to do. Sleeping,..."
sleep(self.delay)
def stream_records_from(self, min_id):
"""Selects IDs bigger than min_id and converts them into KPIs.
        Converted KPIs are then pushed to Databox with help of the Databox
        Python SDK."""
print "Streaming from %d" % min_id
kpis = [stock.kpis for stock in Stock.select() \
.where(Stock.id > min_id) \
.order_by(Stock.id.asc())]
flat_kpis = list(itertools.chain.from_iterable(kpis))
return self.databox_client.insert_all(flat_kpis)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Streaming MySQL → Databox')
parser.add_argument('database', metavar='database', type=str)
parser.add_argument('-u', required=True, metavar='mysql_user', type=str, help="MySQL user")
parser.add_argument('-t', required=True, metavar='push_token', type=str, help="Databox push token")
args = parser.parse_args()
options = vars(args)
Streamer(options['database'], options['u'], options['t']).observe_and_stream()
|
py | 1a453ff63701420c48ce5303c7521399ec330fa0 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkopenanalytics_open.endpoint import endpoint_data
class CancelQueryByIdRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'openanalytics-open', '2018-06-19', 'CancelQueryById','openanalytics-cap')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_QueryId(self):
return self.get_body_params().get('QueryId')
def set_QueryId(self,QueryId):
self.add_body_params('QueryId', QueryId) |
py | 1a453ffdbca06e0522be923f8509304e7cc1b9f3 | from ai_harness import xml2object
from ai_harness import harnessutils as utils
log = utils.getLogger('test')
def test_xml2object():
obj = xml2object.parse('configuration.xml')
assert len(obj.configuration.group) == 2
assert obj.configuration.group[0]['name'] == 'model'
assert obj.configuration.group[0].arg[0]['name'] == 'test1'
assert hasattr(obj.configuration, 'group')
assert isinstance(obj.configuration.group, list)
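
# A configuration.xml shaped roughly like the sketch below would satisfy the
# assertions above (illustrative only; the real fixture may contain more groups/args):
#
#     <configuration>
#         <group name="model">
#             <arg name="test1"/>
#             <arg name="test2"/>
#         </group>
#         <group name="other"/>
#     </configuration>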
|
py | 1a4541a4d7f04281726566bc6250836734540f5e | # coding: utf-8
"""
Cloud Manager API
This API allows access to Cloud Manager programs, pipelines, and environments by an authorized technical account created through the Adobe I/O Console. The base url for this API is https://cloudmanager.adobe.io, e.g. to get the list of programs for an organization, you would make a GET request to https://cloudmanager.adobe.io/api/programs (with the correct set of headers as described below). This swagger file can be downloaded from https://raw.githubusercontent.com/AdobeDocs/cloudmanager-api-docs/master/swagger-specs/api.yaml. # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import pyaemcloudmanagerapi
from pyaemcloudmanagerapi.models.requested_page_details import RequestedPageDetails # noqa: E501
from pyaemcloudmanagerapi.rest import ApiException
class TestRequestedPageDetails(unittest.TestCase):
"""RequestedPageDetails unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test RequestedPageDetails
        include_optional is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = pyaemcloudmanagerapi.models.requested_page_details.RequestedPageDetails() # noqa: E501
if include_optional :
return RequestedPageDetails(
start = 56,
limit = 56,
order_by = '0',
_property = [
'0'
],
type = '0',
next = 56,
prev = 56
)
else :
return RequestedPageDetails(
)
def testRequestedPageDetails(self):
"""Test RequestedPageDetails"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
|
py | 1a45453826462458869e06f4cdaf3fc575a40812 | # -*- coding: utf-8 -*-
"""
模型任务插件 torch 实现
"""
from . import distill_classification
from . import simple_classification
from . import sequence_label
from . import task_output
from . import task_base
from . import yolo_head
|
py | 1a454594e295bd34da9c2aad5211e333f2d9ada3 | #### Training agent in Pusher7Dof gym env using a single real-world env
## Written by : leopauly | [email protected]
## Courtesy for DDPG implementation : Steven Spielberg Pon Kumar (github.com/stevenpjg)
####
##Imports
import gym
from gym.spaces import Box, Discrete
import numpy as np
np.set_printoptions(suppress=True)
import cv2
from ddpg import DDPG
from ou_noise import OUNoise
import matplotlib.pyplot as plt
import scipy.misc as misc
## Imports for DNN
import os
from threading import Thread, Lock
import sys
from six.moves import xrange # pylint: disable=redefined-builtin
import PIL.Image as Image
import random
import numpy as np
import cv2
import time
import math
import matplotlib.pyplot as plt
import tensorflow as tf
import keras
from keras import backend as K
## Custom scripts
import lscript as lsp
import modelling as md
## Defining env
env = gym.make('Pusher7DOF-v1')
assert isinstance(env.observation_space, Box), "observation space must be continuous"
assert isinstance(env.action_space, Box), "action space must be continuous"
## Defining vars for reinfrocement learning algo
num_episodes=200
num_rollouts=20 # Each rollout represents a complete activity: an activity could be pushing an object, reaching a point, or similar
steps=16 # No. of actions taken in a rollout
is_batch_norm = False #batch normalization switch
xrange=range # For python3
start_training=64 # Buffer size, before starting to train the RL algorithm
## vars for feature extraction
height=112
width=112
channel=3
crop_size=112
cluster_length=16 # Length of one activity
nb_classes=2
feature_size=4608 #8192 #16384 #487
#frame_feature_size=
saved_path='/home/ironman/trained_activity_nets/'
demo_folder='./Demo_reach_1/'
#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#
## FRAME FEATURE EXTRACTION
def frame_feature_extractor(frame_):
frame= preprocess(frame_)
frame=frame.reshape(-1,height,width,channel)
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,log_device_placement=False)) as sess:
with tf.device('/cpu:0'):
base_model=keras.applications.vgg16.VGG16(include_top=False, weights='imagenet', input_tensor=None, input_shape=(height,width,channel), pooling=None, classes=1000)
#base_model=md.get_vgg16_imagenet(summary=True,include_fc=False)
frame_features=base_model.predict(frame)
return frame_features
def preprocess(im):
im = np.float32(im)
im[:,:,2] -= 103.939
im[:,:,1] -= 116.779
im[:,:,0] -= 123.68
im = im[:, :, ::-1] # change to BGR
return im
#-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#
### DEMO FEATURE EXTRACTION
def get_compress_frames_data(filename, num_frames_per_clip=cluster_length):
ret_arr = []
for parent, dirnames, filenames in os.walk(filename):
filenames = sorted(filenames)
jump=math.floor((len(filenames)/num_frames_per_clip))
loop=0
for i in range(0,len(filenames),jump):
if (loop>15):
break
if (filenames[i].endswith('.png')):
image_name = str(filename) + '/' + str(filenames[i])
img = Image.open(image_name)
img_data = np.array(img)
ret_arr.append(img_data)
loop=loop+1
ret_arr=np.array(ret_arr)
#ret_arr=ret_arr/255
return ret_arr
def demo_feature_extractor(demo_vid_path):
demo_vid_array=get_compress_frames_data(demo_vid_path)
return feature_extractor(demo_vid_array)
#-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#
### VIDEO FEATURE EXTRACTION
## Defining placeholders in tf for images and targets
x_image = tf.placeholder(tf.float32, [None, 16,height,width,channel],name='x')
y_true = tf.placeholder(tf.float32, [None, nb_classes],name='y_true')
y_true_cls = tf.placeholder(tf.int64, [None],name='y_true_cls')
model_keras = md.C3D_ucf101_training_model_tf(summary=True)
out=model_keras(x_image)
y_pred = tf.nn.softmax(out)
y_pred_cls = tf.argmax(out, dimension=1)
correct_prediction = tf.equal(y_pred_cls, y_true_cls)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print('Loading network framework finished..!!', flush=True)
## Start the session with logging placement.
init_op = tf.global_variables_initializer()
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=False))
sess.run(init_op)
## Restore model weights from previously saved model
saver = tf.train.import_meta_graph(os.path.join(saved_path,'activity_model.ckpt-104.meta'))
saver.restore(sess, os.path.join(saved_path,'activity_model.ckpt-104'))
print("Model restored from file: %s" % saved_path,flush=True)
## For extracting activity features
def feature_extractor(vid_np):
#print('shape of video for feature extraction:',vid_np.shape)
vid_=vid_np.reshape(-1,cluster_length,height,width,channel)
#print(tf.contrib.graph_editor.get_tensors(tf.get_default_graph()))
#print(tf.get_default_graph().as_graph_def())
f_v = sess.graph.get_tensor_by_name('flatten_1/Reshape:0')
f_v_val=np.array(sess.run([f_v], feed_dict={'conv1_input:0':vid_,x_image:vid_,K.learning_phase(): 0 }))
#print('extracted video features shape:',f_v_val.shape)
features=np.reshape(f_v_val,(-1))
#print('features_shape',features.shape)
return features
def distance(f_demo,f_robo):
#print('shape f_demo',f_demo.shape,'shape f_demo',f_robo.shape)
return np.linalg.norm(f_demo-f_robo)
def s2l():
#Randomly initialize critic,actor,target critic, target actor network and replay buffer
num_states = feature_size #num_states = env.observation_space.shape[0]
num_actions = env.action_space.shape[0]
print ("Number of States:", num_states)
print ("Number of Actions:", num_actions)
agent = DDPG(env, is_batch_norm,num_states,num_actions)
exploration_noise = OUNoise(env.action_space.shape[0])
counter=0
total_reward=0
print ("Number of Rollouts per episode:", num_rollouts)
print ("Number of Steps per roll out:", steps)
reward_st = np.array([0]) #saving reward
reward_st_all = np.array([0]) #saving reward after every step
demo_features=demo_feature_extractor(demo_folder)
for episode in range(num_episodes):
print ("==== Starting episode no:",episode,"====","\n")
env.reset() # Reset env in the begining of each episode
env.render()
obs_img=env.render(mode='rgb_array') # Get the observation
obs_img=np.array(misc.imresize(obs_img,[112,112,3]))
observation =np.array(frame_feature_extractor(obs_img))
observation=observation.reshape(-1)
reward_per_episode = 0
for t in range(num_rollouts):
reward_per_rollout=0
vid_robo_=[]
for i in range(steps):
x = observation
action = agent.evaluate_actor(np.reshape(x,[1,num_states]))
noise = exploration_noise.noise()
action = action[0] + noise #Select action according to current policy and exploration noise
print ('Action at episode-',episode,'rollout-',t, 'step-', i ," :",action)
_,_,done,info=env.step(action)
env.render()
obs_robo_=env.render(mode='rgb_array') # Get the observation
obs_robo=misc.imresize(obs_robo_,[112,112,3])
vid_robo_.append(obs_robo)
observation=np.array(frame_feature_extractor(np.array(obs_robo)))
observation=observation.reshape(-1)
                #pause()
if(i==15):
vid_robo=np.array(vid_robo_)
robo_features=feature_extractor(vid_robo)
reward=-(distance(demo_features,robo_features))
reward=np.array(reward)
print('reward: ',reward)
else:
reward=0
reward=np.array(reward)
print('reward: ',reward)
reward_st_all = np.append(reward_st_all,reward)
np.savetxt('reward_all.txt',reward_st_all, newline="\n")
#add s_t,s_t+1,action,reward to experience memory
print('x','observation',x.shape,observation.shape)
agent.add_experience(x,observation,action,reward,False)
reward_per_rollout+=reward
counter+=1
#train critic and actor network
if counter > start_training:
agent.train()
print ('\n\n')
reward_per_episode+=reward_per_rollout
#check if episode ends:
print ('EPISODE: ',episode,' Total Reward: ',reward_per_episode)
print ("Printing reward to file")
exploration_noise.reset() #reinitializing random noise for action exploration
reward_st = np.append(reward_st,reward_per_episode)
np.savetxt('episode_reward.txt',reward_st, fmt='%f', newline="\n")
print ('\n\n')
total_reward+=reward_per_episode
print ("Average reward per episode {}".format(total_reward / num_episodes))
s2l()
|
py | 1a454614c4907c9500ebd1f7d4e08903c14a154b | # -*- coding: utf-8 -*-
'''
Utils for making various web calls. Primarily designed for REST, SOAP, webhooks
and the like, but also useful for basic HTTP testing.
.. versionaddedd:: 2015.2
'''
from __future__ import absolute_import
# Import python libs
import pprint
import os.path
import json
import logging
# pylint: disable=no-name-in-module
import salt.ext.six.moves.http_cookiejar
import salt.ext.six.moves.urllib.request as urllib_request
# pylint: enable=no-name-in-module
from salt.ext.six import string_types
from salt._compat import ElementTree as ET
import ssl
try:
from ssl import CertificateError # pylint: disable=E0611
from ssl import match_hostname # pylint: disable=E0611
HAS_MATCHHOSTNAME = True
except ImportError:
try:
from backports.ssl_match_hostname import CertificateError
from backports.ssl_match_hostname import match_hostname
HAS_MATCHHOSTNAME = True
except ImportError:
try:
from salt.ext.ssl_match_hostname import CertificateError
from salt.ext.ssl_match_hostname import match_hostname
HAS_MATCHHOSTNAME = True
except ImportError:
HAS_MATCHHOSTNAME = False
import socket
# Import salt libs
import salt.utils
import salt.utils.xmlutil as xml
import salt.loader
import salt.config
import salt.version
from salt.template import compile_template
from salt import syspaths
import salt.ext.six.moves.http_client # pylint: disable=no-name-in-module
# Import 3rd party libs
try:
import requests
HAS_REQUESTS = True
except ImportError:
HAS_REQUESTS = False
try:
import msgpack
HAS_MSGPACK = True
except ImportError:
HAS_MSGPACK = False
try:
import certifi
HAS_CERTIFI = True
except ImportError:
HAS_CERTIFI = False
log = logging.getLogger(__name__)
JARFILE = os.path.join(syspaths.CACHE_DIR, 'cookies.txt')
SESSIONJARFILE = os.path.join(syspaths.CACHE_DIR, 'cookies.session.p')
USERAGENT = 'Salt/{0}'.format(salt.version.__version__)
def query(url,
method='GET',
params=None,
data=None,
data_file=None,
header_dict=None,
header_list=None,
header_file=None,
username=None,
password=None,
auth=None,
decode=False,
decode_type='auto',
status=False,
headers=False,
text=False,
cookies=None,
cookie_jar=JARFILE,
cookie_format='lwp',
persist_session=False,
session_cookie_jar=SESSIONJARFILE,
data_render=False,
data_renderer=None,
header_render=False,
header_renderer=None,
template_dict=None,
test=False,
test_url=None,
node='minion',
port=80,
opts=None,
requests_lib=None,
ca_bundle=None,
verify_ssl=None,
cert=None,
text_out=None,
headers_out=None,
decode_out=None,
stream=False,
handle=False,
agent=USERAGENT,
**kwargs):
'''
Query a resource, and decode the return data
'''
ret = {}
if opts is None:
if node == 'master':
opts = salt.config.master_config(
os.path.join(syspaths.CONFIG_DIR, 'master')
)
elif node == 'minion':
opts = salt.config.minion_config(
os.path.join(syspaths.CONFIG_DIR, 'minion')
)
else:
opts = {}
if requests_lib is None:
requests_lib = opts.get('requests_lib', False)
if requests_lib is True:
if HAS_REQUESTS is False:
ret['error'] = ('http.query has been set to use requests, but the '
'requests library does not seem to be installed')
log.error(ret['error'])
return ret
else:
requests_log = logging.getLogger('requests')
requests_log.setLevel(logging.WARNING)
if ca_bundle is None:
ca_bundle = get_ca_bundle(opts)
if verify_ssl is None:
verify_ssl = opts.get('verify_ssl', True)
if cert is None:
cert = opts.get('cert', None)
if data_file is not None:
data = _render(
data_file, data_render, data_renderer, template_dict, opts
)
log.debug('Using {0} Method'.format(method))
if method == 'POST':
log.trace('POST Data: {0}'.format(pprint.pformat(data)))
if header_file is not None:
header_tpl = _render(
header_file, header_render, header_renderer, template_dict, opts
)
if isinstance(header_tpl, dict):
header_dict = header_tpl
else:
header_list = header_tpl.splitlines()
if header_dict is None:
header_dict = {}
if header_list is None:
header_list = []
if persist_session is True and HAS_MSGPACK:
# TODO: This is hackish; it will overwrite the session cookie jar with
# all cookies from this one connection, rather than behaving like a
# proper cookie jar. Unfortunately, since session cookies do not
# contain expirations, they can't be stored in a proper cookie jar.
if os.path.isfile(session_cookie_jar):
with salt.utils.fopen(session_cookie_jar, 'r') as fh_:
session_cookies = msgpack.load(fh_)
if isinstance(session_cookies, dict):
header_dict.update(session_cookies)
else:
with salt.utils.fopen(session_cookie_jar, 'w') as fh_:
msgpack.dump('', fh_)
for header in header_list:
comps = header.split(':')
if len(comps) < 2:
continue
header_dict[comps[0].strip()] = comps[1].strip()
if username and password:
auth = (username, password)
else:
auth = None
if requests_lib is True:
sess = requests.Session()
sess.auth = auth
sess.headers.update(header_dict)
log.trace('Request Headers: {0}'.format(sess.headers))
sess_cookies = sess.cookies
sess.verify = verify_ssl
else:
sess_cookies = None
if cookies is not None:
if cookie_format == 'mozilla':
sess_cookies = salt.ext.six.moves.http_cookiejar.MozillaCookieJar(cookie_jar)
else:
sess_cookies = salt.ext.six.moves.http_cookiejar.LWPCookieJar(cookie_jar)
if not os.path.isfile(cookie_jar):
sess_cookies.save()
else:
sess_cookies.load()
if agent == USERAGENT:
agent = '{0} http.query()'.format(agent)
header_dict['User-agent'] = agent
if test is True:
if test_url is None:
return {}
else:
url = test_url
ret['test'] = True
if requests_lib is True:
req_kwargs = {}
if stream is True:
if requests.__version__[0] == '0':
# 'stream' was called 'prefetch' before 1.0, with flipped meaning
req_kwargs['prefetch'] = False
else:
req_kwargs['stream'] = True
# Client-side cert handling
if cert is not None:
if isinstance(cert, string_types):
if os.path.exists(cert):
req_kwargs['cert'] = cert
elif isinstance(cert, tuple):
if os.path.exists(cert[0]) and os.path.exists(cert[1]):
req_kwargs['cert'] = cert
else:
log.error('The client-side certificate path that was passed is '
'not valid: {0}'.format(cert))
result = sess.request(
method, url, params=params, data=data, **req_kwargs
)
result.raise_for_status()
if stream is True or handle is True:
return {'handle': result}
result_status_code = result.status_code
result_headers = result.headers
result_text = result.text
result_cookies = result.cookies
else:
request = urllib_request.Request(url, data)
handlers = [
urllib_request.HTTPHandler,
urllib_request.HTTPCookieProcessor(sess_cookies)
]
if url.startswith('https') or port == 443:
if not HAS_MATCHHOSTNAME:
log.warn(('match_hostname() not available, SSL hostname checking '
'not available. THIS CONNECTION MAY NOT BE SECURE!'))
elif verify_ssl is False:
log.warn(('SSL certificate verification has been explicitly '
'disabled. THIS CONNECTION MAY NOT BE SECURE!'))
else:
hostname = request.get_host()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((hostname, 443))
sockwrap = ssl.wrap_socket(
sock,
ca_certs=ca_bundle,
cert_reqs=ssl.CERT_REQUIRED
)
try:
match_hostname(sockwrap.getpeercert(), hostname)
except CertificateError as exc:
ret['error'] = (
'The certificate was invalid. '
'Error returned was: {0}'.format(
pprint.pformat(exc)
)
)
return ret
# Client-side cert handling
if cert is not None:
cert_chain = None
if isinstance(cert, string_types):
if os.path.exists(cert):
cert_chain = (cert)
elif isinstance(cert, tuple):
if os.path.exists(cert[0]) and os.path.exists(cert[1]):
cert_chain = cert
else:
log.error('The client-side certificate path that was '
'passed is not valid: {0}'.format(cert))
return
if hasattr(ssl, 'SSLContext'):
# Python >= 2.7.9
context = ssl.SSLContext.load_cert_chain(*cert_chain)
handlers.append(urllib_request.HTTPSHandler(context=context)) # pylint: disable=E1123
else:
# Python < 2.7.9
cert_kwargs = {
'host': request.get_host(),
'port': port,
'cert_file': cert_chain[0]
}
if len(cert_chain) > 1:
cert_kwargs['key_file'] = cert_chain[1]
handlers[0] = salt.ext.six.moves.http_client.HTTPSConnection(**cert_kwargs)
opener = urllib_request.build_opener(*handlers)
for header in header_dict:
request.add_header(header, header_dict[header])
request.get_method = lambda: method
result = opener.open(request)
if stream is True or handle is True:
return {'handle': result}
result_status_code = result.code
result_headers = result.headers.headers
result_text = result.read()
if isinstance(result_headers, list):
result_headers_dict = {}
for header in result_headers:
comps = header.split(':')
result_headers_dict[comps[0].strip()] = ':'.join(comps[1:]).strip()
result_headers = result_headers_dict
log.debug('Response Status Code: {0}'.format(result_status_code))
log.trace('Response Headers: {0}'.format(result_headers))
log.trace('Response Cookies: {0}'.format(sess_cookies))
try:
log.trace('Response Text: {0}'.format(result_text))
except UnicodeEncodeError as exc:
log.trace(('Cannot Trace Log Response Text: {0}. This may be due to '
'incompatibilities between requests and logging.').format(exc))
if text_out is not None and os.path.exists(text_out):
with salt.utils.fopen(text_out, 'w') as tof:
tof.write(result_text)
if headers_out is not None and os.path.exists(headers_out):
with salt.utils.fopen(headers_out, 'w') as hof:
hof.write(result_headers)
if cookies is not None:
sess_cookies.save()
if persist_session is True and HAS_MSGPACK:
# TODO: See persist_session above
if 'set-cookie' in result_headers:
with salt.utils.fopen(session_cookie_jar, 'w') as fh_:
session_cookies = result_headers.get('set-cookie', None)
if session_cookies is not None:
msgpack.dump({'Cookie': session_cookies}, fh_)
else:
msgpack.dump('', fh_)
if status is True:
ret['status'] = result_status_code
if headers is True:
ret['headers'] = result_headers
if decode is True:
if decode_type == 'auto':
content_type = result_headers.get(
'content-type', 'application/json'
)
if 'xml' in content_type:
decode_type = 'xml'
elif 'json' in content_type:
decode_type = 'json'
else:
decode_type = 'plain'
valid_decodes = ('json', 'xml', 'plain')
if decode_type not in valid_decodes:
ret['error'] = (
'Invalid decode_type specified. '
'Valid decode types are: {0}'.format(
pprint.pformat(valid_decodes)
)
)
log.error(ret['error'])
return ret
if decode_type == 'json':
ret['dict'] = json.loads(result_text)
elif decode_type == 'xml':
ret['dict'] = []
items = ET.fromstring(result_text)
for item in items:
ret['dict'].append(xml.to_dict(item))
else:
text = True
if decode_out and os.path.exists(decode_out):
with salt.utils.fopen(decode_out, 'w') as dof:
dof.write(result_text)
if text is True:
ret['text'] = result_text
return ret
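
# Illustrative usage sketch (the URL is hypothetical): fetch a JSON endpoint and decode
# the payload. All keyword arguments shown exist in the signature above.
#
#     result = query(
#         'https://api.example.com/v1/items',
#         method='GET',
#         decode=True,
#         decode_type='json',
#         status=True,
#     )
#     # result['dict'] holds the decoded payload, result['status'] the HTTP status code.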
def get_ca_bundle(opts=None):
'''
Return the location of the ca bundle file. See the following article:
http://tinyurl.com/k7rx42a
'''
if hasattr(get_ca_bundle, '__return_value__'):
return get_ca_bundle.__return_value__
if opts is None:
opts = {}
opts_bundle = opts.get('ca_bundle', None)
if opts_bundle is not None and os.path.exists(opts_bundle):
return opts_bundle
file_roots = opts.get('file_roots', {'base': [syspaths.SRV_ROOT_DIR]})
salt_root = file_roots['base'][0]
log.debug('file_roots is {0}'.format(salt_root))
# Please do not change the order without good reason
for path in (
# Check Salt first
os.path.join(salt_root, 'cacert.pem'),
os.path.join(salt_root, 'ca-bundle.crt'),
# Debian has paths that often exist on other distros
'/etc/ssl/certs/ca-certificates.crt',
# RedHat is also very common
'/etc/pki/tls/certs/ca-bundle.crt',
'/etc/pki/tls/certs/ca-bundle.trust.crt',
            # RedHat's link for Debian compatibility
'/etc/ssl/certs/ca-bundle.crt',
# Suse has an unusual path
'/var/lib/ca-certificates/ca-bundle.pem',
):
if os.path.exists(path):
return path
if salt.utils.is_windows() and HAS_CERTIFI:
return certifi.where()
return None
def update_ca_bundle(
target=None,
source=None,
opts=None,
merge_files=None,
):
'''
Attempt to update the CA bundle file from a URL
If not specified, the local location on disk (``target``) will be
auto-detected, if possible. If it is not found, then a new location on disk
will be created and updated.
The default ``source`` is:
http://curl.haxx.se/ca/cacert.pem
This is based on the information at:
http://curl.haxx.se/docs/caextract.html
A string or list of strings representing files to be appended to the end of
the CA bundle file may also be passed through as ``merge_files``.
'''
if opts is None:
opts = {}
if target is None:
target = get_ca_bundle(opts)
if target is None:
log.error('Unable to detect location to write CA bundle to')
return
if source is None:
source = opts.get('ca_bundle_url', 'http://curl.haxx.se/ca/cacert.pem')
log.debug('Attempting to download {0} to {1}'.format(source, target))
query(
source,
text=True,
decode=False,
headers=False,
status=False,
text_out=target
)
if merge_files is not None:
if isinstance(merge_files, string_types):
merge_files = [merge_files]
if not isinstance(merge_files, list):
log.error('A value was passed as merge_files which was not either '
'a string or a list')
return
merge_content = ''
for cert_file in merge_files:
if os.path.exists(cert_file):
log.debug(
'Queueing up {0} to be appended to {1}'.format(
cert_file, target
)
)
try:
with salt.utils.fopen(cert_file, 'r') as fcf:
merge_content = '\n'.join((merge_content, fcf.read()))
except IOError as exc:
log.error(
'Reading from {0} caused the following error: {1}'.format(
cert_file, exc
)
)
if merge_content:
log.debug('Appending merge_files to {0}'.format(target))
try:
with salt.utils.fopen(target, 'a') as tfp:
tfp.write('\n')
tfp.write(merge_content)
except IOError as exc:
log.error(
'Writing to {0} caused the following error: {1}'.format(
target, exc
)
)
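
# Illustrative usage sketch (paths are hypothetical): refresh the bundle in place and
# append a site-local CA certificate to it.
#
#     update_ca_bundle(
#         target='/etc/ssl/certs/ca-bundle.crt',
#         merge_files=['/etc/pki/custom-ca.crt'],
#     )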
def _render(template, render, renderer, template_dict, opts):
'''
Render a template
'''
if render:
if template_dict is None:
template_dict = {}
if not renderer:
renderer = opts.get('renderer', 'yaml_jinja')
rend = salt.loader.render(opts, {})
return compile_template(template, rend, renderer, **template_dict)
with salt.utils.fopen(template, 'r') as fh_:
return fh_.read()
|
py | 1a45465a8804a64c6a6d762ff4a6c24cde92b4c0 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Libbytesize(AutotoolsPackage):
"""The goal of this project is to provide a tiny library that would
facilitate the common operations with sizes in bytes."""
homepage = "https://github.com/storaged-project/libbytesize"
url = "https://github.com/storaged-project/libbytesize/releases/download/2.4/libbytesize-2.4.tar.gz"
version('2.4', sha256='25ccb5762bb8c860b63ed1d40e0c5564e3e0084693fbe6554467a8ca1c1d8c7f')
version('2.3', sha256='3c74113fc8cd1a2fbd8870fa0ed7cef2ef24d60ef91e7145fbc041f9aa144479')
version('2.2', sha256='b93c54b502880c095c9f5767a42464853e2687db2e5e3084908a615bafe73baa')
extends('python')
depends_on('pcre2')
depends_on('gmp')
depends_on('mpfr')
|
py | 1a4546d9b4cfc3c6c15c42093d5154ec1a7f976e | import logging
import mimetypes
import time
from typing import Iterator, Callable
from urllib.parse import urlparse
import pymongo
from bson import ObjectId
from requests import HTTPError
from tsing_spider.porn.caoliu import CaoliuIndexPage, CaoliuThread
from ghs.spiders.base import BaseSpiderTaskGenerator
from ghs.utils.storage import create_s3_client, url_to_s3, create_mongodb_client, put_json
log = logging.getLogger(__file__)
mongodb_client = create_mongodb_client()
s3_client = create_s3_client()
collection = mongodb_client.get_database("resman").get_collection("spider_t66y")
def initialize():
"""
Initialize mongodb and s3
:return:
"""
log.info("Initializing database")
collection.create_index([("published", pymongo.ASCENDING)])
collection.create_index([("url", pymongo.ASCENDING)])
def thread_item_processor(caoliu_thread: CaoliuThread):
def wrapper():
if collection.find_one({"url": caoliu_thread.url}) is None:
_id = ObjectId()
data = dict(
_id=_id,
published=False,
url=caoliu_thread.url,
tid=caoliu_thread.tid,
title=caoliu_thread.title,
image_list=caoliu_thread.image_list,
comments=caoliu_thread.comments,
content_text=caoliu_thread.content_text
)
image_wrote_count = 0
for i, image_url in enumerate(caoliu_thread.image_list):
log.debug(f"Downloading image {i} for page {caoliu_thread.url}")
url_path = urlparse(image_url).path
mime_type = mimetypes.guess_type(url_path)[0]
file_suffix = url_path.split(".")[-1]
s3_path = f"t66y/{str(_id)}/images/{i}.{file_suffix}"
if url_to_s3(
s3_client,
image_url,
s3_path,
headers={"Referer": caoliu_thread.url},
content_type=mime_type,
ignore_4xx=True
):
image_wrote_count += 1
data["all_images_wrote"] = image_wrote_count >= len(caoliu_thread.image_list)
put_json(s3_client, data, f"t66y/{str(_id)}/meta.json")
collection.insert_one(data)
log.info(f"{caoliu_thread.url} already processed successfully.")
return wrapper
class CaoliuSpiderTaskGenerator(BaseSpiderTaskGenerator):
def __init__(self, max_page_index: int):
self.max_page_index = max_page_index
def generate(self) -> Iterator[Callable[[None], None]]:
initialize()
submitted_tasks = set()
page_index = 0
errors_remain = 3
while errors_remain > 0:
page_index += 1
if page_index > self.max_page_index:
break
base_page = CaoliuIndexPage(page_index)
log.info(f"Reading {base_page.url}.")
for i in range(errors_remain):
time.sleep(5.0)
try:
for thread_item in base_page.threads:
if thread_item.url not in submitted_tasks:
submitted_tasks.add(thread_item.url)
yield thread_item_processor(thread_item)
break
except HTTPError as he:
if he.response.status_code == 404:
errors_remain -= 1
else:
log.error(f"HTTP Error while reading {base_page.url}.", exc_info=he)
except Exception as ex:
log.error(f"Error while reading {base_page.url}.", exc_info=ex)
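
# Minimal driver sketch (illustrative; the real project presumably schedules these
# tasks elsewhere): run every generated spider task sequentially.
#
#     if __name__ == '__main__':
#         for task in CaoliuSpiderTaskGenerator(max_page_index=3).generate():
#             task()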
|
py | 1a4547ca79e4a8781bd9d8a48734ef26e1cd3af6 | import warnings
import numpy as np
from .metrics import concordance_index_censored
from .bess_base import bess_base
def fix_docs(cls):
    # This function inherits the docstring from the base class
    # and avoids unnecessary duplication in the description.
index = cls.__doc__.find("Examples\n --------\n")
if index != -1:
cls.__doc__ = cls.__doc__[:index] + \
cls.__bases__[0].__doc__ + cls.__doc__[index:]
return cls
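
# Effect sketch (hypothetical classes, illustration only):
#
#     class Base:
#         """shared parameter docs"""
#
#     @fix_docs
#     class Child(Base):
#         """child-specific text
#         Examples
#         --------
#         >>> Child()
#         """
#
# Child.__doc__ then reads: the child-specific text up to "Examples", followed by
# Base.__doc__, followed by the examples section.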
@ fix_docs
class LogisticRegression(bess_base):
"""
Adaptive Best-Subset Selection (ABESS) algorithm for logistic regression.
Parameters
----------
splicing_type: {0, 1}, optional
The type of splicing in `fit()` (in Algorithm.h).
"0" for decreasing by half, "1" for decresing by one.
Default: splicing_type = 0.
important_search : int, optional
The size of inactive set during updating active set when splicing.
It should be a non-positive integer and if important_search=128, it would be set as
the size of whole inactive set.
Default: 0.
Examples
--------
>>> ### Sparsity known
>>>
>>> from abess.linear import LogisticRegression
>>> from abess.datasets import make_glm_data
>>> import numpy as np
>>> np.random.seed(12345)
>>> data = make_glm_data(n = 100, p = 50, k = 10, family = 'binomial')
>>> model = LogisticRegression(support_size = [10])
>>> model.fit(data.x, data.y)
>>> model.predict(data.x)
>>> ### Sparsity unknown
>>>
>>> # path_type="seq",
>>> # Default: support_size = list(range(0, max(min(p, int(n / (np.log(np.log(n)) * np.log(p)))), 1))).
>>> model = LogisticRegression(path_type = "seq")
>>> model.fit(data.x, data.y)
>>> model.predict(data.x)
>>>
>>> # path_type="gs",
>>> # Default: s_min=1, s_max=min(p, int(n / (np.log(np.log(n)) * np.log(p)))), K_max = int(math.log(p, 2/(math.sqrt(5) - 1)))
>>> model = LogisticRegression(path_type="gs")
>>> model.fit(data.x, data.y)
>>> model.predict(data.x)
"""
def __init__(self, max_iter=20, exchange_num=5, path_type="seq", is_warm_start=True, support_size=None, alpha=None, s_min=None, s_max=None,
ic_type="ebic", ic_coef=1.0, cv=1, screening_size=-1,
always_select=None,
primary_model_fit_max_iter=10, primary_model_fit_epsilon=1e-8,
approximate_Newton=False,
thread=1,
sparse_matrix=False,
splicing_type=0,
important_search=128,
):
super().__init__(
algorithm_type="abess", model_type="Logistic", normalize_type=2, path_type=path_type, max_iter=max_iter, exchange_num=exchange_num,
is_warm_start=is_warm_start, support_size=support_size, alpha=alpha, s_min=s_min, s_max=s_max,
ic_type=ic_type, ic_coef=ic_coef, cv=cv, screening_size=screening_size,
always_select=always_select,
primary_model_fit_max_iter=primary_model_fit_max_iter, primary_model_fit_epsilon=primary_model_fit_epsilon,
approximate_Newton=approximate_Newton,
thread=thread,
sparse_matrix=sparse_matrix,
splicing_type=splicing_type,
important_search=important_search
)
def predict_proba(self, X):
"""
        The predict_proba function is used to give the probabilities of new data being assigned to different classes.
Parameters
----------
X : array-like of shape (n_samples, p_features)
Test data.
"""
X = self.new_data_check(X)
intercept_ = np.ones(X.shape[0]) * self.intercept_
xbeta = X.dot(self.coef_) + intercept_
return np.exp(xbeta) / (1 + np.exp(xbeta))
def predict(self, X):
"""
        For the Logistic model,
        the predict function returns the predicted class label on the given data:
        \\code{y} is predicted to be 1 if the fitted probability \\code{pr} is greater than 0.5, and 0 otherwise.
Parameters
----------
X : array-like of shape (n_samples, p_features)
Test data.
"""
X = self.new_data_check(X)
intercept_ = np.ones(X.shape[0]) * self.intercept_
xbeta = X.dot(self.coef_) + intercept_
y = np.zeros(xbeta.size)
y[xbeta > 0] = 1
return y
def score(self, X, y):
"""
Give new data, and it returns the entropy function.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test data.
y : array-like of shape (n_samples, n_features), optional
Test response (real class).
"""
X, y = self.new_data_check(X, y)
intercept_ = np.ones(X.shape[0]) * self.intercept_
xbeta = X.dot(self.coef_) + intercept_
xbeta[xbeta > 30] = 30
xbeta[xbeta < -30] = -30
pr = np.exp(xbeta) / (1 + np.exp(xbeta))
return (y * np.log(pr) +
(np.ones(X.shape[0]) - y) * np.log(np.ones(X.shape[0]) - pr)).sum()
@ fix_docs
class LinearRegression(bess_base):
"""
Adaptive Best-Subset Selection(ABESS) algorithm for linear regression.
Parameters
----------
splicing_type: {0, 1}, optional
The type of splicing in `fit()` (in Algorithm.h).
"0" for decreasing by half, "1" for decresing by one.
Default: splicing_type = 0.
important_search : int, optional
The size of inactive set during updating active set when splicing.
It should be a non-positive integer and if important_search=128, it would be set as
the size of whole inactive set.
Default: 0.
Examples
--------
>>> ### Sparsity known
>>>
>>> from abess.linear import LinearRegression
>>> from abess.datasets import make_glm_data
>>> import numpy as np
>>> np.random.seed(12345)
>>> data = make_glm_data(n = 100, p = 50, k = 10, family = 'gaussian')
>>> model = LinearRegression(support_size = [10])
>>> model.fit(data.x, data.y)
>>> model.predict(data.x)
>>> ### Sparsity unknown
>>>
>>> # path_type="seq",
>>> # Default: support_size = list(range(0, max(min(p, int(n / (np.log(np.log(n)) * np.log(p)))), 1))).
>>> model = LinearRegression(path_type = "seq")
>>> model.fit(data.x, data.y)
>>> model.predict(data.x)
>>>
>>> # path_type="gs",
>>> # Default: s_min=1, s_max=min(p, int(n / (np.log(np.log(n)) * np.log(p)))), K_max = int(math.log(p, 2/(math.sqrt(5) - 1)))
>>> model = LinearRegression(path_type="gs")
>>> model.fit(data.x, data.y)
>>> model.predict(data.x)
"""
def __init__(self, max_iter=20, exchange_num=5, path_type="seq", is_warm_start=True, support_size=None, alpha=None, s_min=None, s_max=None,
ic_type="ebic", ic_coef=1.0, cv=1, screening_size=-1,
always_select=None,
thread=1, covariance_update=False,
sparse_matrix=False,
splicing_type=0,
important_search=128,
# primary_model_fit_max_iter=10,
# primary_model_fit_epsilon=1e-8, approximate_Newton=False
):
super().__init__(
algorithm_type="abess", model_type="Lm", normalize_type=1, path_type=path_type, max_iter=max_iter, exchange_num=exchange_num,
is_warm_start=is_warm_start, support_size=support_size, alpha=alpha, s_min=s_min, s_max=s_max,
ic_type=ic_type, ic_coef=ic_coef, cv=cv, screening_size=screening_size,
always_select=always_select,
thread=thread, covariance_update=covariance_update,
sparse_matrix=sparse_matrix,
splicing_type=splicing_type,
important_search=important_search
)
def predict(self, X):
"""
For linear regression problem,
the predict function returns a numpy array of the prediction of the mean
on given data.
Parameters
----------
X : array-like of shape (n_samples, p_features)
Test data.
"""
X = self.new_data_check(X)
intercept_ = np.ones(X.shape[0]) * self.intercept_
return X.dot(self.coef_) + intercept_
def score(self, X, y):
"""
Give new data, and it returns the prediction error.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test data.
y : array-like of shape (n_samples, n_features), optional
Test response.
"""
X, y = self.new_data_check(X, y)
y_pred = self.predict(X)
return -((y - y_pred) * (y - y_pred)).sum()
@ fix_docs
class CoxPHSurvivalAnalysis(bess_base):
"""
Adaptive Best-Subset Selection(ABESS) algorithm for COX proportional hazards model.
Parameters
----------
splicing_type: {0, 1}, optional
The type of splicing in `fit()` (in Algorithm.h).
"0" for decreasing by half, "1" for decresing by one.
Default: splicing_type = 0.
important_search : int, optional
The size of inactive set during updating active set when splicing.
It should be a non-positive integer and if important_search=128, it would be set as
the size of whole inactive set.
Default: 0.
Examples
--------
>>> ### Sparsity known
>>>
>>> from abess.linear import CoxPHSurvivalAnalysis
>>> from abess.datasets import make_glm_data
>>> import numpy as np
>>> np.random.seed(12345)
>>> data = make_glm_data(n = 100, p = 50, k = 10, family = 'cox')
>>> model = CoxPHSurvivalAnalysis(support_size = [10])
>>> model.fit(data.x, data.y)
>>> model.predict(data.x)
>>> ### Sparsity unknown
>>>
>>> # path_type="seq",
>>> # Default: support_size = list(range(0, max(min(p, int(n / (np.log(np.log(n)) * np.log(p)))), 1))).
>>> model = CoxPHSurvivalAnalysis(path_type = "seq")
>>> model.fit(data.x, data.y)
>>> model.predict(data.x)
>>>
>>> # path_type="gs",
>>> # Default: s_min=1, s_max=min(p, int(n / (np.log(np.log(n)) * np.log(p)))), K_max = int(math.log(p, 2/(math.sqrt(5) - 1)))
>>> model = CoxPHSurvivalAnalysis(path_type="gs")
>>> model.fit(data.x, data.y)
>>> model.predict(data.x)
"""
def __init__(self, max_iter=20, exchange_num=5, path_type="seq", is_warm_start=True, support_size=None, alpha=None, s_min=None, s_max=None,
ic_type="ebic", ic_coef=1.0, cv=1, screening_size=-1,
always_select=None,
primary_model_fit_max_iter=10, primary_model_fit_epsilon=1e-8,
approximate_Newton=False,
thread=1,
sparse_matrix=False,
splicing_type=0,
important_search=128
):
super().__init__(
algorithm_type="abess", model_type="Cox", normalize_type=3, path_type=path_type, max_iter=max_iter, exchange_num=exchange_num,
is_warm_start=is_warm_start, support_size=support_size, alpha=alpha, s_min=s_min, s_max=s_max,
ic_type=ic_type, ic_coef=ic_coef, cv=cv, screening_size=screening_size,
always_select=always_select,
primary_model_fit_max_iter=primary_model_fit_max_iter, primary_model_fit_epsilon=primary_model_fit_epsilon,
approximate_Newton=approximate_Newton,
thread=thread,
sparse_matrix=sparse_matrix,
splicing_type=splicing_type,
important_search=important_search
)
def predict(self, X):
"""
For Cox model,
the predict function returns the time-independent part of hazard function, i.e. :math:`\\exp(X\\beta)`,
on given data.
Parameters
----------
X : array-like of shape (n_samples, p_features)
Test data.
"""
X = self.new_data_check(X)
return np.exp(X.dot(self.coef_))
def score(self, X, y):
"""
Give new data, and it returns C-index.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test data.
y : array-like of shape (n_samples, n_features), optional
Test response.
"""
X, y = self.new_data_check(X, y)
risk_score = X.dot(self.coef_)
y = np.array(y)
result = concordance_index_censored(
np.array(y[:, 1], np.bool_), y[:, 0], risk_score)
return result[0]
@ fix_docs
class PoissonRegression(bess_base):
"""
Adaptive Best-Subset Selection(ABESS) algorithm for Poisson regression.
Parameters
----------
splicing_type: {0, 1}, optional
The type of splicing in `fit()` (in Algorithm.h).
"0" for decreasing by half, "1" for decresing by one.
Default: splicing_type = 0.
important_search : int, optional
The size of inactive set during updating active set when splicing.
It should be a non-positive integer and if important_search=128, it would be set as
the size of whole inactive set.
Default: 0.
Examples
--------
>>> ### Sparsity known
>>>
>>> from abess.linear import PoissonRegression
>>> from abess.datasets import make_glm_data
>>> import numpy as np
>>> np.random.seed(12345)
>>> data = make_glm_data(n = 100, p = 50, k = 10, family = 'poisson')
>>> model = PoissonRegression(support_size = [10])
>>> model.fit(data.x, data.y)
>>> model.predict(data.x)
>>> ### Sparsity unknown
>>>
>>> # path_type="seq",
>>> # Default: support_size = list(range(0, max(min(p, int(n / (np.log(np.log(n)) * np.log(p)))), 1))).
>>> model = PoissonRegression(path_type = "seq")
>>> model.fit(data.x, data.y)
>>> model.predict(data.x)
>>>
>>> # path_type="gs",
>>> # Default: s_min=1, s_max=min(p, int(n / (np.log(np.log(n)) * np.log(p)))), K_max = int(math.log(p, 2/(math.sqrt(5) - 1)))
>>> model = PoissonRegression(path_type="gs")
>>> model.fit(data.x, data.y)
>>> model.predict(data.x)
"""
def __init__(self, max_iter=20, exchange_num=5, path_type="seq", is_warm_start=True, support_size=None, alpha=None, s_min=None, s_max=None,
ic_type="ebic", ic_coef=1.0, cv=1, screening_size=-1,
always_select=None,
primary_model_fit_max_iter=10, primary_model_fit_epsilon=1e-8,
thread=1,
sparse_matrix=False,
splicing_type=0,
important_search=128
):
super().__init__(
algorithm_type="abess", model_type="Poisson", normalize_type=2, path_type=path_type, max_iter=max_iter, exchange_num=exchange_num,
is_warm_start=is_warm_start, support_size=support_size, alpha=alpha, s_min=s_min, s_max=s_max,
ic_type=ic_type, ic_coef=ic_coef, cv=cv, screening_size=screening_size,
always_select=always_select,
primary_model_fit_max_iter=primary_model_fit_max_iter, primary_model_fit_epsilon=primary_model_fit_epsilon,
thread=thread,
sparse_matrix=sparse_matrix,
splicing_type=splicing_type,
important_search=important_search
)
def predict(self, X):
"""
For Poisson model,
the predict function returns a numpy array of the prediction of the mean of response,
on given data.
Parameters
----------
X : array-like of shape (n_samples, p_features)
Test data.
"""
X = self.new_data_check(X)
intercept_ = np.ones(X.shape[0]) * self.intercept_
xbeta_exp = np.exp(X.dot(self.coef_) + intercept_)
return xbeta_exp
def score(self, X, y):
"""
Give new data, and it returns the prediction error.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test data.
y : array-like of shape (n_samples, n_features), optional
Test response.
"""
X, y = self.new_data_check(X, y)
intercept_ = np.ones(X.shape[0]) * self.intercept_
eta = X.dot(self.coef_) + intercept_
exp_eta = np.exp(eta)
return (y * eta - exp_eta).sum()
@ fix_docs
class MultiTaskRegression(bess_base):
"""
    Adaptive Best-Subset Selection (ABESS) algorithm for multitask learning.
Parameters
----------
splicing_type: {0, 1}, optional
The type of splicing in `fit()` (in Algorithm.h).
"0" for decreasing by half, "1" for decresing by one.
Default: splicing_type = 0.
important_search : int, optional
The size of inactive set during updating active set when splicing.
It should be a non-positive integer and if important_search=128, it would be set as
the size of whole inactive set.
Default: 0.
Examples
--------
>>> ### Sparsity known
>>>
    >>> from abess.linear import MultiTaskRegression
>>> from abess.datasets import make_multivariate_glm_data
>>> import numpy as np
>>> np.random.seed(12345)
>>> data = make_multivariate_glm_data(n = 100, p = 50, k = 10, M = 3, family = 'multigaussian')
    >>> model = MultiTaskRegression(support_size = [10])
>>> model.fit(data.x, data.y)
>>> model.predict(data.x)
>>> ### Sparsity unknown
>>>
>>> # path_type="seq",
>>> # Default: support_size = list(range(0, max(min(p, int(n / (np.log(np.log(n)) * np.log(p)))), 1))).
    >>> model = MultiTaskRegression(path_type = "seq")
>>> model.fit(data.x, data.y)
>>> model.predict(data.x)
>>>
>>> # path_type="gs",
>>> # Default: s_min=1, s_max=min(p, int(n / (np.log(np.log(n)) * np.log(p)))), K_max = int(math.log(p, 2/(math.sqrt(5) - 1)))
    >>> model = MultiTaskRegression(path_type="gs")
>>> model.fit(data.x, data.y)
>>> model.predict(data.x)
"""
def __init__(self, max_iter=20, exchange_num=5, path_type="seq", is_warm_start=True, support_size=None, alpha=None, s_min=None, s_max=None,
ic_type="ebic", ic_coef=1.0, cv=1, screening_size=-1,
always_select=None,
thread=1, covariance_update=False,
sparse_matrix=False,
splicing_type=0,
important_search=128
):
super().__init__(
algorithm_type="abess", model_type="Multigaussian", normalize_type=1, path_type=path_type, max_iter=max_iter, exchange_num=exchange_num,
is_warm_start=is_warm_start, support_size=support_size, alpha=alpha, s_min=s_min, s_max=s_max,
ic_type=ic_type, ic_coef=ic_coef, cv=cv, screening_size=screening_size,
always_select=always_select,
thread=thread, covariance_update=covariance_update,
sparse_matrix=sparse_matrix,
splicing_type=splicing_type,
important_search=important_search
)
def predict(self, X):
"""
For Multigaussian model,
the predict function returns a numpy matrix of the prediction of the mean of responses,
on given data.
Parameters
----------
X : array-like of shape (n_samples, p_features)
Test data.
"""
X = self.new_data_check(X)
intercept_ = np.repeat(
self.intercept_[np.newaxis, ...], X.shape[0], axis=0)
return X.dot(self.coef_) + intercept_
def score(self, X, y):
"""
Give new data, and it returns prediction error.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test data.
y : array-like of shape (n_samples, n_features), optional
Test response.
"""
X, y = self.new_data_check(X, y)
y_pred = self.predict(X)
return -((y - y_pred) * (y - y_pred)).sum()
@ fix_docs
class MultinomialRegression(bess_base):
"""
    Adaptive Best-Subset Selection (ABESS) algorithm for the multiclass classification problem.
Parameters
----------
splicing_type: {0, 1}, optional
The type of splicing in `fit()` (in Algorithm.h).
"0" for decreasing by half, "1" for decresing by one.
Default: splicing_type = 0.
important_search : int, optional
The size of inactive set during updating active set when splicing.
It should be a non-positive integer and if important_search=128, it would be set as
the size of whole inactive set.
Default: 0.
Examples
--------
>>> ### Sparsity known
>>>
>>> from abess.linear import MultinomialRegression
>>> from abess.datasets import make_multivariate_glm_data
>>> import numpy as np
>>> np.random.seed(12345)
>>> data = make_multivariate_glm_data(n = 100, p = 50, k = 10, M = 3, family = 'multinomial')
>>> model = MultinomialRegression(support_size = [10])
>>> model.fit(data.x, data.y)
>>> model.predict(data.x)
>>> ### Sparsity unknown
>>>
>>> # path_type="seq",
>>> # Default: support_size = list(range(0, max(min(p, int(n / (np.log(np.log(n)) * np.log(p)))), 1))).
>>> model = MultinomialRegression(path_type = "seq")
>>> model.fit(data.x, data.y)
>>> model.predict(data.x)
>>>
>>> # path_type="gs",
>>> # Default: s_min=1, s_max=min(p, int(n / (np.log(np.log(n)) * np.log(p)))), K_max = int(math.log(p, 2/(math.sqrt(5) - 1)))
>>> model = MultinomialRegression(path_type="gs")
>>> model.fit(data.x, data.y)
>>> model.predict(data.x)
"""
def __init__(self, max_iter=20, exchange_num=5, path_type="seq", is_warm_start=True, support_size=None, alpha=None, s_min=None, s_max=None,
ic_type="ebic", ic_coef=1.0, cv=1, screening_size=-1,
always_select=None,
primary_model_fit_max_iter=10, primary_model_fit_epsilon=1e-8,
approximate_Newton=False,
thread=1,
sparse_matrix=False,
splicing_type=0,
important_search=128
):
super().__init__(
algorithm_type="abess", model_type="Multinomial", normalize_type=2, path_type=path_type, max_iter=max_iter, exchange_num=exchange_num,
is_warm_start=is_warm_start, support_size=support_size, alpha=alpha, s_min=s_min, s_max=s_max,
ic_type=ic_type, ic_coef=ic_coef, cv=cv, screening_size=screening_size,
always_select=always_select,
primary_model_fit_max_iter=primary_model_fit_max_iter, primary_model_fit_epsilon=primary_model_fit_epsilon,
approximate_Newton=approximate_Newton,
thread=thread,
sparse_matrix=sparse_matrix,
splicing_type=splicing_type,
important_search=important_search
)
def predict_proba(self, X):
"""
        The predict_proba function is used to give the probabilities of new data being assigned to different classes.
Parameters
----------
X : array-like of shape (n_samples, p_features)
Test data.
"""
X = self.new_data_check(X)
intercept_ = np.repeat(
self.intercept_[np.newaxis, ...], X.shape[0], axis=0)
xbeta = X.dot(self.coef_) + intercept_
eta = np.exp(xbeta)
        # Softmax over classes: normalize each row of exp(xbeta) so that the
        # class probabilities of every sample sum to one.
        pr = eta / np.sum(eta, axis=1, keepdims=True)
        return pr
def predict(self, X):
"""
        For the Multinomial model,
        the predict function returns the most likely class for the given data,
        encoded as a one-hot (dummy-variable) row for each sample.
Parameters
----------
X : array-like of shape (n_samples, p_features)
Test data.
"""
X = self.new_data_check(X)
intercept_ = np.repeat(
self.intercept_[np.newaxis, ...], X.shape[0], axis=0)
xbeta = X.dot(self.coef_) + intercept_
max_item = np.argmax(xbeta, axis=1)
y_pred = np.zeros_like(xbeta)
for i in range(X.shape[0]):
y_pred[i, max_item[i]] = 1
return y_pred
def score(self, X, y):
"""
        Given new data, return the log-likelihood of the dummy-coded response under the predicted class probabilities.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test data.
        y : array-like of shape (n_samples, n_classes)
            Test response (dummy variables of the real class).
"""
X, y = self.new_data_check(X, y)
pr = self.predict_proba(X)
return np.sum(y * np.log(pr))
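    # A minimal usage sketch (illustrative; assumes a fitted model plus test
    # data `X_test` and one-hot labels `y_test`): `score` equals the
    # log-likelihood computed from `predict_proba`:
    #
    #     pr = model.predict_proba(X_test)      # shape (n_samples, n_classes)
    #     ll = np.sum(y_test * np.log(pr))      # == model.score(X_test, y_test)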
@fix_docs
class GammaRegression(bess_base):
    """
    Adaptive Best-Subset Selection (ABESS) algorithm for Gamma regression.
Parameters
----------
    splicing_type : {0, 1}, optional
        The type of splicing in `fit()` (in Algorithm.h).
        "0" for decreasing by half, "1" for decreasing by one.
        Default: splicing_type = 0.
    important_search : int, optional
        The size of the inactive set considered when updating the active set during splicing.
        It should be a non-negative integer; if important_search=0, the whole inactive
        set is used.
        Default: important_search = 128.
Examples
--------
>>> ### Sparsity known
>>>
    >>> from abess.linear import GammaRegression
    >>> from abess.datasets import make_glm_data
    >>> import numpy as np
    >>> np.random.seed(12345)
    >>> data = make_glm_data(n = 100, p = 50, k = 10, family = 'gamma')
    >>> model = GammaRegression(support_size = [10])
    >>> model.fit(data.x, data.y)
>>> model.predict(data.x)
>>> ### Sparsity unknown
>>>
>>> # path_type="seq",
>>> # Default: support_size = list(range(0, max(min(p, int(n / (np.log(np.log(n)) * np.log(p)))), 1))).
>>> model = GammaRegression(path_type = "seq")
>>> model.fit(data.x, data.y)
>>> model.predict(data.x)
>>>
>>> # path_type="gs",
>>> # Default: s_min=1, s_max=min(p, int(n / (np.log(np.log(n)) * np.log(p)))), K_max = int(math.log(p, 2/(math.sqrt(5) - 1)))
>>> model = GammaRegression(path_type="gs")
>>> model.fit(data.x, data.y)
>>> model.predict(data.x)
"""
def __init__(self, max_iter=20, exchange_num=5, path_type="seq", is_warm_start=True, support_size=None, alpha=None, s_min=None, s_max=None,
ic_type="ebic", ic_coef=1.0, cv=1, screening_size=-1,
always_select=None,
primary_model_fit_max_iter=10, primary_model_fit_epsilon=1e-8,
thread=1,
sparse_matrix=False,
splicing_type=0,
important_search=128
):
super().__init__(
algorithm_type="abess", model_type="Gamma", normalize_type=2, path_type=path_type, max_iter=max_iter, exchange_num=exchange_num,
is_warm_start=is_warm_start, support_size=support_size, alpha=alpha, s_min=s_min, s_max=s_max,
ic_type=ic_type, ic_coef=ic_coef, cv=cv, screening_size=screening_size,
always_select=always_select,
primary_model_fit_max_iter=primary_model_fit_max_iter, primary_model_fit_epsilon=primary_model_fit_epsilon,
thread=thread,
sparse_matrix=sparse_matrix,
splicing_type=splicing_type,
important_search=important_search
)
def predict(self, X):
"""
        For the Gamma model,
        the predict function returns a numpy array of the predicted mean of the response
        on the given data.
Parameters
----------
X : array-like of shape (n_samples, p_features)
Test data.
"""
X = self.new_data_check(X)
intercept_ = np.ones(X.shape[0]) * self.intercept_
xbeta_exp = np.exp(X.dot(self.coef_) + intercept_)
return xbeta_exp
def score(self, X, y, weights=None):
"""
        Given new data, return the D-squared score, i.e. the fraction of Gamma deviance explained.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test data.
        y : array-like of shape (n_samples,)
            Test response.
        weights : array-like of shape (n_samples,), optional
            Sample weights. Default: uniform weights (all ones).
"""
if weights is None:
X = np.array(X)
weights = np.ones(X.shape[0])
X, y, weights = self.new_data_check(X, y, weights)
def deviance(y, y_pred):
dev = 2 * (np.log(y_pred / y) + y / y_pred - 1)
return np.sum(weights * dev)
y_pred = self.predict(X)
y_mean = np.average(y, weights=weights)
dev = deviance(y, y_pred)
dev_null = deviance(y, y_mean)
return 1 - dev / dev_null
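    # Note on `score` above: it is a D-squared statistic,
    # 1 - D(y, y_pred) / D(y, y_mean), where D is the weighted Gamma deviance
    # computed by the inner `deviance` helper; a perfect fit yields 1.0.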
class abessLogistic(LogisticRegression):
warning_msg = "Class `abessLogistic` has been renamed to `LogisticRegression`. The former will be deprecated in version 0.5.0."
__doc__ = warning_msg + '\n' + LogisticRegression.__doc__
def __init__(self, max_iter=20, exchange_num=5, path_type="seq", is_warm_start=True, support_size=None, alpha=None, s_min=None, s_max=None,
ic_type="ebic", ic_coef=1.0, cv=1, screening_size=-1,
always_select=None,
primary_model_fit_max_iter=10, primary_model_fit_epsilon=1e-8,
approximate_Newton=False,
thread=1,
sparse_matrix=False,
splicing_type=0,
important_search=128,
):
warnings.warn(self.warning_msg, FutureWarning)
super().__init__(
path_type=path_type, max_iter=max_iter, exchange_num=exchange_num,
is_warm_start=is_warm_start, support_size=support_size, alpha=alpha, s_min=s_min, s_max=s_max,
ic_type=ic_type, ic_coef=ic_coef, cv=cv, screening_size=screening_size,
always_select=always_select,
primary_model_fit_max_iter=primary_model_fit_max_iter, primary_model_fit_epsilon=primary_model_fit_epsilon,
approximate_Newton=approximate_Newton,
thread=thread,
sparse_matrix=sparse_matrix,
splicing_type=splicing_type,
important_search=important_search
)
class abessLm(LinearRegression):
warning_msg = "Class `abessLm` has been renamed to `LinearRegression`. The former will be deprecated in version 0.5.0."
__doc__ = warning_msg + '\n' + LinearRegression.__doc__
def __init__(self, max_iter=20, exchange_num=5, path_type="seq", is_warm_start=True, support_size=None, alpha=None, s_min=None, s_max=None,
ic_type="ebic", ic_coef=1.0, cv=1, screening_size=-1,
always_select=None,
thread=1, covariance_update=False,
sparse_matrix=False,
splicing_type=0,
important_search=128,
# primary_model_fit_max_iter=10,
# primary_model_fit_epsilon=1e-8, approximate_Newton=False
):
warnings.warn(self.warning_msg, FutureWarning)
super().__init__(
path_type=path_type, max_iter=max_iter, exchange_num=exchange_num,
is_warm_start=is_warm_start, support_size=support_size, alpha=alpha, s_min=s_min, s_max=s_max,
ic_type=ic_type, ic_coef=ic_coef, cv=cv, screening_size=screening_size,
always_select=always_select,
thread=thread, covariance_update=covariance_update,
sparse_matrix=sparse_matrix,
splicing_type=splicing_type,
important_search=important_search
)
class abessCox(CoxPHSurvivalAnalysis):
warning_msg = "Class `abessCox` has been renamed to `CoxPHSurvivalAnalysis`. The former will be deprecated in version 0.5.0."
__doc__ = warning_msg + '\n' + CoxPHSurvivalAnalysis.__doc__
def __init__(self, max_iter=20, exchange_num=5, path_type="seq", is_warm_start=True, support_size=None, alpha=None, s_min=None, s_max=None,
ic_type="ebic", ic_coef=1.0, cv=1, screening_size=-1,
always_select=None,
primary_model_fit_max_iter=10, primary_model_fit_epsilon=1e-8,
approximate_Newton=False,
thread=1,
sparse_matrix=False,
splicing_type=0,
important_search=128
):
warnings.warn(self.warning_msg, FutureWarning)
super().__init__(
path_type=path_type, max_iter=max_iter, exchange_num=exchange_num,
is_warm_start=is_warm_start, support_size=support_size, alpha=alpha, s_min=s_min, s_max=s_max,
ic_type=ic_type, ic_coef=ic_coef, cv=cv, screening_size=screening_size,
always_select=always_select,
primary_model_fit_max_iter=primary_model_fit_max_iter, primary_model_fit_epsilon=primary_model_fit_epsilon,
approximate_Newton=approximate_Newton,
thread=thread,
sparse_matrix=sparse_matrix,
splicing_type=splicing_type,
important_search=important_search
)
class abessPoisson(PoissonRegression):
warning_msg = "Class `abessPoisson` has been renamed to `PoissonRegression`. The former will be deprecated in version 0.5.0."
__doc__ = warning_msg + '\n' + PoissonRegression.__doc__
def __init__(self, max_iter=20, exchange_num=5, path_type="seq", is_warm_start=True, support_size=None, alpha=None, s_min=None, s_max=None,
ic_type="ebic", ic_coef=1.0, cv=1, screening_size=-1,
always_select=None,
primary_model_fit_max_iter=10, primary_model_fit_epsilon=1e-8,
thread=1,
sparse_matrix=False,
splicing_type=0,
important_search=128
):
warnings.warn(self.warning_msg, FutureWarning)
super().__init__(
path_type=path_type, max_iter=max_iter, exchange_num=exchange_num,
is_warm_start=is_warm_start, support_size=support_size, alpha=alpha, s_min=s_min, s_max=s_max,
ic_type=ic_type, ic_coef=ic_coef, cv=cv, screening_size=screening_size,
always_select=always_select,
primary_model_fit_max_iter=primary_model_fit_max_iter, primary_model_fit_epsilon=primary_model_fit_epsilon,
thread=thread,
sparse_matrix=sparse_matrix,
splicing_type=splicing_type,
important_search=important_search
)
class abessMultigaussian(MultiTaskRegression):
warning_msg = "Class `abessMultigaussian` has been renamed to `MultiTaskRegression`. The former will be deprecated in version 0.5.0."
__doc__ = warning_msg + '\n' + MultiTaskRegression.__doc__
def __init__(self, max_iter=20, exchange_num=5, path_type="seq", is_warm_start=True, support_size=None, alpha=None, s_min=None, s_max=None,
ic_type="ebic", ic_coef=1.0, cv=1, screening_size=-1,
always_select=None,
thread=1, covariance_update=False,
sparse_matrix=False,
splicing_type=0,
important_search=128
):
warnings.warn(self.warning_msg, FutureWarning)
super().__init__(
path_type=path_type, max_iter=max_iter, exchange_num=exchange_num,
is_warm_start=is_warm_start, support_size=support_size, alpha=alpha, s_min=s_min, s_max=s_max,
ic_type=ic_type, ic_coef=ic_coef, cv=cv, screening_size=screening_size,
always_select=always_select,
thread=thread, covariance_update=covariance_update,
sparse_matrix=sparse_matrix,
splicing_type=splicing_type,
important_search=important_search
)
class abessMultinomial(MultinomialRegression):
warning_msg = "Class `abessMultinomial` has been renamed to `MultinomialRegression`. The former will be deprecated in version 0.5.0."
__doc__ = warning_msg + '\n' + MultinomialRegression.__doc__
def __init__(self, max_iter=20, exchange_num=5, path_type="seq", is_warm_start=True, support_size=None, alpha=None, s_min=None, s_max=None,
ic_type="ebic", ic_coef=1.0, cv=1, screening_size=-1,
always_select=None,
primary_model_fit_max_iter=10, primary_model_fit_epsilon=1e-8,
approximate_Newton=False,
thread=1,
sparse_matrix=False,
splicing_type=0,
important_search=128
):
warnings.warn(self.warning_msg, FutureWarning)
super().__init__(
path_type=path_type, max_iter=max_iter, exchange_num=exchange_num,
is_warm_start=is_warm_start, support_size=support_size, alpha=alpha, s_min=s_min, s_max=s_max,
ic_type=ic_type, ic_coef=ic_coef, cv=cv, screening_size=screening_size,
always_select=always_select,
primary_model_fit_max_iter=primary_model_fit_max_iter, primary_model_fit_epsilon=primary_model_fit_epsilon,
approximate_Newton=approximate_Newton,
thread=thread,
sparse_matrix=sparse_matrix,
splicing_type=splicing_type,
important_search=important_search
)
class abessGamma(GammaRegression):
warning_msg = "Class `abessGamma` has been renamed to `GammaRegression`. The former will be deprecated in version 0.5.0."
__doc__ = warning_msg + '\n' + GammaRegression.__doc__
def __init__(self, max_iter=20, exchange_num=5, path_type="seq", is_warm_start=True, support_size=None, alpha=None, s_min=None, s_max=None,
ic_type="ebic", ic_coef=1.0, cv=1, screening_size=-1,
always_select=None,
primary_model_fit_max_iter=10, primary_model_fit_epsilon=1e-8,
thread=1,
sparse_matrix=False,
splicing_type=0,
important_search=128
):
warnings.warn(self.warning_msg, FutureWarning)
super().__init__(
path_type=path_type, max_iter=max_iter, exchange_num=exchange_num,
is_warm_start=is_warm_start, support_size=support_size, alpha=alpha, s_min=s_min, s_max=s_max,
ic_type=ic_type, ic_coef=ic_coef, cv=cv, screening_size=screening_size,
always_select=always_select,
primary_model_fit_max_iter=primary_model_fit_max_iter, primary_model_fit_epsilon=primary_model_fit_epsilon,
thread=thread,
sparse_matrix=sparse_matrix,
splicing_type=splicing_type,
important_search=important_search
)
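# A minimal usage sketch for the deprecated aliases above (illustrative only):
# constructing any of them still works but emits a FutureWarning that points to
# the renamed class.
#
#     import warnings
#     with warnings.catch_warnings(record=True) as caught:
#         warnings.simplefilter("always")
#         model = abessLm(support_size=[5])
#     # caught[0].category is FutureWarning; prefer LinearRegression instead.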
# @fix_docs
# class PdasLm(bess_base):
# '''
# PdasLm
# The PDAS solution to the best subset selection for linear regression.
# Examples
# --------
# ### Sparsity known
# >>> from bess.linear import *
# >>> import numpy as np
# >>> np.random.seed(12345) # fix seed to get the same result
# >>> x = np.random.normal(0, 1, 100 * 150).reshape((100, 150))
# >>> beta = np.hstack((np.array([1, 1, -1, -1, -1]), np.zeros(145)))
# >>> noise = np.random.normal(0, 1, 100)
# >>> y = np.matmul(x, beta) + noise
# >>> model = PdasLm(path_type="seq", support_size=[5])
# >>> model.fit(X=x, y=y)
# >>> model.predict(x)
# ### Sparsity unknown
# >>> # path_type="seq", Default:support_size=[1,2,...,min(x.shape[0], x.shape[1])]
# >>> model = PdasLm(path_type="seq")
# >>> model.fit(X=x, y=y)
# >>> model.predict(x)
# >>> # path_type="gs", Default:s_min=1, s_max=X.shape[1], K_max = int(math.log(p, 2/(math.sqrt(5) - 1)))
# >>> model = PdasLm(path_type="gs")
# >>> model.fit(X=x, y=y)
# >>> model.predict(x)
# '''
# def __init__(self, max_iter=20, exchange_num=5, path_type="seq", is_warm_start=True, support_size=None, alpha=None, s_min=None, s_max=None,
# K_max=1, epsilon=0.0001, lambda_min=None, lambda_max=None, ic_type="ebic", cv=1, screening_size=-1, powell_path=1,
# always_select=[], tau=0.):
# super(PdasLm, self).__init__(
# algorithm_type="Pdas", model_type="Lm", path_type=path_type, max_iter=max_iter, exchange_num=exchange_num,
# is_warm_start=is_warm_start, support_size=support_size, alpha=alpha, s_min=s_min, s_max=s_max, K_max=K_max,
# epsilon=epsilon, lambda_min=lambda_min, lambda_max=lambda_max, ic_type=ic_type, cv=cv, screening_size=screening_size, powell_path=powell_path,
# always_select=always_select, tau=tau)
# self.normalize_type = 1
# @fix_docs
# class PdasLogistic(bess_base):
# """
# Examples
# --------
# ### Sparsity known
# >>> from bess.linear import *
# >>> import numpy as np
# >>> np.random.seed(12345)
# >>> x = np.random.normal(0, 1, 100 * 150).reshape((100, 150))
# >>> beta = np.hstack((np.array([1, 1, -1, -1, -1]), np.zeros(145)))
# >>> xbeta = np.matmul(x, beta)
# >>> p = np.exp(xbeta)/(1+np.exp(xbeta))
# >>> y = np.random.binomial(1, p)
# >>> model = PdasLogistic(path_type="seq", support_size=[5])
# >>> model.fit(X=x, y=y)
# >>> model.predict(x)
# ### Sparsity unknown
# >>> # path_type="seq", Default:support_size=[1,2,...,min(x.shape[0], x.shape[1])]
# >>> model = PdasLogistic(path_type="seq")
# >>> model.fit(X=x, y=y)
# >>> model.predict(x)
# >>> # path_type="gs", Default:s_min=1, s_max=X.shape[1], K_max = int(math.log(p, 2/(math.sqrt(5) - 1)))
# >>> model = PdasLogistic(path_type="gs")
# >>> model.fit(X=x, y=y)
# >>> model.predict(x)
# """
# def __init__(self, max_iter=20, exchange_num=5, path_type="seq", is_warm_start=True, support_size=None, alpha=None, s_min=None, s_max=None,
# K_max=1, epsilon=0.0001, lambda_min=None, lambda_max=None, ic_type="ebic", cv=1, screening_size=-1, powell_path=1,
# always_select=[], tau=0.
# ):
# super(PdasLogistic, self).__init__(
# algorithm_type="Pdas", model_type="Logistic", path_type=path_type, max_iter=max_iter, exchange_num=exchange_num,
# is_warm_start=is_warm_start, support_size=support_size, alpha=alpha, s_min=s_min, s_max=s_max, K_max=K_max,
# epsilon=epsilon, lambda_min=lambda_min, lambda_max=lambda_max, ic_type=ic_type, cv=cv, screening_size=screening_size, powell_path=powell_path,
# always_select=always_select, tau=tau)
# self.normalize_type = 2
# @fix_docs
# class PdasPoisson(bess_base):
# """
# Examples
# --------
# ### Sparsity known
# >>> from bess.linear import *
# >>> import numpy as np
# >>> np.random.seed(12345)
# >>> x = np.random.normal(0, 1, 100 * 150).reshape((100, 150))
# >>> beta = np.hstack((np.array([1, 1, -1, -1, -1]), np.zeros(145)))
# >>> lam = np.exp(np.matmul(x, beta))
# >>> y = np.random.poisson(lam=lam)
# >>> model = PdasPoisson(path_type="seq", support_size=[5])
# >>> model.fit(X=x, y=y)
# >>> model.predict(x)
# ### Sparsity unknown
# >>> # path_type="seq", Default:support_size=[1,2,...,min(x.shape[0], x.shape[1])]
# >>> model = PdasPoisson(path_type="seq")
# >>> model.fit(X=x, y=y)
# >>> model.predict(x)
# >>> # path_type="gs", Default:s_min=1, s_max=X.shape[1], K_max = int(math.log(p, 2/(math.sqrt(5) - 1)))
# >>> model = PdasPoisson(path_type="gs")
# >>> model.fit(X=x, y=y)
# >>> model.predict(x)
# """
# def __init__(self, max_iter=20, exchange_num=5, path_type="seq", is_warm_start=True, support_size=None, alpha=None, s_min=None, s_max=None,
# K_max=1, epsilon=0.0001, lambda_min=None, lambda_max=None, ic_type="ebic", cv=1, screening_size=-1, powell_path=1,
# always_select=[], tau=0.
# ):
# super(PdasPoisson, self).__init__(
# algorithm_type="Pdas", model_type="Poisson", path_type=path_type, max_iter=max_iter, exchange_num=exchange_num,
# is_warm_start=is_warm_start, support_size=support_size, alpha=alpha, s_min=s_min, s_max=s_max, K_max=K_max,
# epsilon=epsilon, lambda_min=lambda_min, lambda_max=lambda_max, ic_type=ic_type, cv=cv, screening_size=screening_size, powell_path=powell_path,
# always_select=always_select, tau=tau
# )
# self.normalize_type = 2
# @fix_docs
# class PdasCox(bess_base):
# """
# Examples
# --------
# ### Sparsity known
# >>> from bess.linear import *
# >>> import numpy as np
# >>> np.random.seed(12345)
# >>> data = make_glm_data(100, 200, family="cox", cv=1, rho=0, sigma=1, c=10)
# >>> model = PdasCox(path_type="seq", support_size=[5])
# >>> model.fit(data.x, data.y, is_normal=True)
# >>> model.predict(data.x)
# ### Sparsity unknown
# >>> # path_type="seq", Default:support_size=[1,2,...,min(x.shape[0], x.shape[1])]
# >>> model = PdasCox(path_type="seq")
# >>> model.fit(data.x, data.y, is_normal=True)
# >>> model.predict(data.x)
# >>> # path_type="gs", Default:s_min=1, s_max=X.shape[1], K_max = int(math.log(p, 2/(math.sqrt(5) - 1)))
# >>> model = PdasCox(path_type="gs")
# >>> model.fit(X=x, y=y)
# >>> model.predict(x)
# """
# def __init__(self, max_iter=20, exchange_num=5, path_type="seq", is_warm_start=True, support_size=None, alpha=None, s_min=None, s_max=None,
# K_max=1, epsilon=0.0001, lambda_min=None, lambda_max=None, ic_type="ebic", cv=1, screening_size=-1, powell_path=1,
# always_select=[], tau=0.
# ):
# super(PdasCox, self).__init__(
# algorithm_type="Pdas", model_type="Cox", path_type=path_type, max_iter=max_iter, exchange_num=exchange_num,
# is_warm_start=is_warm_start, support_size=support_size, alpha=alpha, s_min=s_min, s_max=s_max, K_max=K_max,
# epsilon=epsilon, lambda_min=lambda_min, lambda_max=lambda_max, ic_type=ic_type, cv=cv, screening_size=screening_size, powell_path=powell_path,
# always_select=always_select, tau=tau)
# self.normalize_type = 3
# @fix_docs
# class L0L2Lm(bess_base):
# """
# Examples
# --------
# ### Sparsity known
# >>> from bess.linear import *
# >>> import numpy as np
# >>> np.random.seed(12345) # fix seed to get the same result
# >>> x = np.random.normal(0, 1, 100 * 150).reshape((100, 150))
# >>> beta = np.hstack((np.array([1, 1, -1, -1, -1]), np.zeros(145)))
# >>> noise = np.random.normal(0, 1, 100)
# >>> y = np.matmul(x, beta) + noise
# >>> model = PdasLm(path_type="seq", support_size=[5])
# >>> model.fit(X=x, y=y)
# >>> model.predict(x)
# ### Sparsity unknown
# >>> # path_type="seq", Default:support_size=[1,2,...,min(x.shape[0], x.shape[1])]
# >>> model = PdasLm(path_type="seq")
# >>> model.fit(X=x, y=y)
# >>> model.predict(x)
# >>> # path_type="gs", Default:s_min=1, s_max=X.shape[1], K_max = int(math.log(p, 2/(math.sqrt(5) - 1)))
# >>> model = PdasLm(path_type="gs")
# >>> model.fit(X=x, y=y)
# >>> model.predict(x)
# """
# def __init__(self, max_iter=20, exchange_num=5, path_type="seq", is_warm_start=True, support_size=None, alpha=None, s_min=None, s_max=None,
# K_max=1, epsilon=0.0001, lambda_min=None, lambda_max=None, ic_type="ebic", cv=1, screening_size=-1, powell_path=1,
# always_select=[], tau=0.
# ):
# super(L0L2Lm, self).__init__(
# algorithm_type="L0L2", model_type="Lm", path_type=path_type, max_iter=max_iter, exchange_num=exchange_num,
# is_warm_start=is_warm_start, support_size=support_size, alpha=alpha, s_min=s_min, s_max=s_max, K_max=K_max,
# epsilon=epsilon, lambda_min=lambda_min, lambda_max=lambda_max, ic_type=ic_type, cv=cv, screening_size=screening_size, powell_path=powell_path,
# always_select=always_select, tau=tau
# )
# self.normalize_type = 1
# @fix_docs
# class L0L2Logistic(bess_base):
# """
# Examples
# --------
# ### Sparsity known
# >>> from bess.linear import *
# >>> import numpy as np
# >>> np.random.seed(12345) # fix seed to get the same result
# >>> x = np.random.normal(0, 1, 100 * 150).reshape((100, 150))
# >>> beta = np.hstack((np.array([1, 1, -1, -1, -1]), np.zeros(145)))
# >>> noise = np.random.normal(0, 1, 100)
# >>> y = np.matmul(x, beta) + noise
# >>> model = PdasLm(path_type="seq", support_size=[5])
# >>> model.fit(X=x, y=y)
# >>> model.predict(x)
# ### Sparsity unknown
# >>> # path_type="seq", Default:support_size=[1,2,...,min(x.shape[0], x.shape[1])]
# >>> model = PdasLm(path_type="seq")
# >>> model.fit(X=x, y=y)
# >>> model.predict(x)
# >>> # path_type="gs", Default:s_min=1, s_max=X.shape[1], K_max = int(math.log(p, 2/(math.sqrt(5) - 1)))
# >>> model = PdasLm(path_type="gs")
# >>> model.fit(X=x, y=y)
# >>> model.predict(x)
# """
# def __init__(self, max_iter=20, exchange_num=5, path_type="seq", is_warm_start=True, support_size=None, alpha=None, s_min=None, s_max=None,
# K_max=1, epsilon=0.0001, lambda_min=None, lambda_max=None, ic_type="ebic", cv=1, screening_size=-1, powell_path=1,
# always_select=[], tau=0.
# ):
# super(L0L2Logistic, self).__init__(
# algorithm_type="L0L2", model_type="Logistic", path_type=path_type, max_iter=max_iter, exchange_num=exchange_num,
# is_warm_start=is_warm_start, support_size=support_size, alpha=alpha, s_min=s_min, s_max=s_max, K_max=K_max,
# epsilon=epsilon, lambda_min=lambda_min, lambda_max=lambda_max, ic_type=ic_type, cv=cv, screening_size=screening_size, powell_path=powell_path,
# always_select=always_select, tau=tau)
# self.normalize_type = 2
# @fix_docs
# class L0L2Poisson(bess_base):
# """
# Examples
# --------
# ### Sparsity known
# >>> from bess.linear import *
# >>> import numpy as np
# >>> np.random.seed(12345)
# >>> x = np.random.normal(0, 1, 100 * 150).reshape((100, 150))
# >>> beta = np.hstack((np.array([1, 1, -1, -1, -1]), np.zeros(145)))
# >>> lam = np.exp(np.matmul(x, beta))
# >>> y = np.random.poisson(lam=lam)
# >>> model = PdasPoisson(path_type="seq", support_size=[5])
# >>> model.fit(X=x, y=y)
# >>> model.predict(x)
# ### Sparsity unknown
# >>> # path_type="seq", Default:support_size=[1,2,...,min(x.shape[0], x.shape[1])]
# >>> model = PdasPoisson(path_type="seq")
# >>> model.fit(X=x, y=y)
# >>> model.predict(x)
# >>> # path_type="gs", Default:s_min=1, s_max=X.shape[1], K_max = int(math.log(p, 2/(math.sqrt(5) - 1)))
# >>> model = PdasPoisson(path_type="gs")
# >>> model.fit(X=x, y=y)
# >>> model.predict(x)
# """
# def __init__(self, max_iter=20, exchange_num=5, path_type="seq", is_warm_start=True, support_size=None, alpha=None, s_min=None, s_max=None,
# K_max=1, epsilon=0.0001, lambda_min=None, lambda_max=None, ic_type="ebic", cv=1, screening_size=-1, powell_path=1,
# always_select=[], tau=0.
# ):
# super(L0L2Poisson, self).__init__(
# algorithm_type="L0L2", model_type="Poisson", path_type=path_type, max_iter=max_iter, exchange_num=exchange_num,
# is_warm_start=is_warm_start, support_size=support_size, alpha=alpha, s_min=s_min, s_max=s_max, K_max=K_max,
# epsilon=epsilon, lambda_min=lambda_min, lambda_max=lambda_max, ic_type=ic_type, cv=cv, screening_size=screening_size, powell_path=powell_path,
# always_select=always_select, tau=tau
# )
# self.normalize_type = 2
# @fix_docs
# class L0L2Cox(bess_base):
# """
# Examples
# --------
# ### Sparsity known
# >>> from bess.linear import *
# >>> import numpy as np
# >>> np.random.seed(12345)
# >>> data = make_glm_data(100, 200, family="cox", cv=1, rho=0, sigma=1, c=10)
# >>> model = PdasCox(path_type="seq", support_size=[5])
# >>> model.fit(data.x, data.y, is_normal=True)
# >>> model.predict(data.x)
# ### Sparsity unknown
# >>> # path_type="seq", Default:support_size=[1,2,...,min(x.shape[0], x.shape[1])]
# >>> model = PdasCox(path_type="seq")
# >>> model.fit(data.x, data.y, is_normal=True)
# >>> model.predict(data.x)
# >>> # path_type="gs", Default:s_min=1, s_max=X.shape[1], K_max = int(math.log(p, 2/(math.sqrt(5) - 1)))
# >>> model = PdasCox(path_type="gs")
# >>> model.fit(X=x, y=y)
# >>> model.predict(x)
# """
# def __init__(self, max_iter=20, exchange_num=5, path_type="seq", is_warm_start=True, support_size=None, alpha=None, s_min=None, s_max=None,
# K_max=1, epsilon=0.0001, lambda_min=None, lambda_max=None, ic_type="ebic", cv=1, screening_size=-1, powell_path=1,
# always_select=[], tau=0.
# ):
# super(L0L2Cox, self).__init__(
# algorithm_type="L0L2", model_type="Cox", path_type=path_type, max_iter=max_iter, exchange_num=exchange_num,
# is_warm_start=is_warm_start, support_size=support_size, alpha=alpha, s_min=s_min, s_max=s_max, K_max=K_max,
# epsilon=epsilon, lambda_min=lambda_min, lambda_max=lambda_max, ic_type=ic_type, cv=cv, screening_size=screening_size, powell_path=powell_path,
# always_select=always_select, tau=tau)
# self.normalize_type = 3
# @fix_docs
# class GroupPdasLm(bess_base):
# """
# Examples
# --------
# ### Sparsity known
# >>> from bess.linear import *
# >>> import numpy as np
# >>> np.random.seed(12345) # fix seed to get the same result
# >>> x = np.random.normal(0, 1, 100 * 150).reshape((100, 150))
# >>> beta = np.hstack((np.array([1, 1, -1, -1, -1]), np.zeros(145)))
# >>> noise = np.random.normal(0, 1, 100)
# >>> y = np.matmul(x, beta) + noise
# >>> model = GroupPdasLm(path_type="seq", support_size=[5])
# >>> model.fit(X=x, y=y)
# >>> model.predict(x)
# ### Sparsity unknown
# >>> # path_type="seq", Default:support_size=[1,2,...,min(x.shape[0], x.shape[1])]
# >>> model = GroupPdasLm(path_type="seq")
# >>> model.fit(X=x, y=y)
# >>> model.predict(x)
# >>> # path_type="gs", Default:s_min=1, s_max=X.shape[1], K_max = int(math.log(p, 2/(math.sqrt(5) - 1)))
# >>> model = GroupPdasLm(path_type="gs")
# >>> model.fit(X=x, y=y)
# >>> model.predict(x)
# """
# def __init__(self, max_iter=20, exchange_num=5, path_type="seq", is_warm_start=True, support_size=None, alpha=None, s_min=None, s_max=None,
# K_max=1, epsilon=0.0001, lambda_min=None, lambda_max=None, ic_type="ebic", cv=1, screening_size=-1, powell_path=1,
# always_select=[], tau=0.
# ):
# super(GroupPdasLm, self).__init__(
# algorithm_type="GroupPdas", model_type="Lm", path_type=path_type, max_iter=max_iter, exchange_num=exchange_num,
# is_warm_start=is_warm_start, support_size=support_size, alpha=alpha, s_min=s_min, s_max=s_max, K_max=K_max,
# epsilon=epsilon, lambda_min=lambda_min, lambda_max=lambda_max, ic_type=ic_type, cv=cv, screening_size=screening_size, powell_path=powell_path,
# always_select=always_select, tau=tau)
# self.normalize_type = 1
# @fix_docs
# class GroupPdasLogistic(bess_base):
# """
# Examples
# --------
# ### Sparsity known
# >>> from bess.linear import *
# >>> import numpy as np
# >>> np.random.seed(12345)
# >>> x = np.random.normal(0, 1, 100 * 150).reshape((100, 150))
# >>> beta = np.hstack((np.array([1, 1, -1, -1, -1]), np.zeros(145)))
# >>> xbeta = np.matmul(x, beta)
# >>> p = np.exp(xbeta)/(1+np.exp(xbeta))
# >>> y = np.random.binomial(1, p)
# >>> model = GroupPdasLogistic(path_type="seq", support_size=[5])
# >>> model.fit(X=x, y=y)
# >>> model.predict(x)
# ### Sparsity unknown
# >>> # path_type="seq", Default:support_size=[1,2,...,min(x.shape[0], x.shape[1])]
# >>> model = GroupPdasLogistic(path_type="seq")
# >>> model.fit(X=x, y=y)
# >>> model.predict(x)
# >>> # path_type="gs", Default:s_min=1, s_max=X.shape[1], K_max = int(math.log(p, 2/(math.sqrt(5) - 1)))
# >>> model = GroupPdasLogistic(path_type="gs")
# >>> model.fit(X=x, y=y)
# >>> model.predict(x)
# """
# def __init__(self, max_iter=20, exchange_num=5, path_type="seq", is_warm_start=True, support_size=None, alpha=None, s_min=None, s_max=None,
# K_max=1, epsilon=0.0001, lambda_min=None, lambda_max=None, ic_type="ebic", cv=1, screening_size=-1, powell_path=1,
# always_select=[], tau=0.
# ):
# super(GroupPdasLogistic, self).__init__(
# algorithm_type="GroupPdas", model_type="Logistic", path_type=path_type, max_iter=max_iter, exchange_num=exchange_num,
# is_warm_start=is_warm_start, support_size=support_size, alpha=alpha, s_min=s_min, s_max=s_max, K_max=K_max,
# epsilon=epsilon, lambda_min=lambda_min, lambda_max=lambda_max, ic_type=ic_type, cv=cv, screening_size=screening_size, powell_path=powell_path,
# always_select=always_select, tau=tau
# )
# self.normalize_type = 2
# @fix_docs
# class GroupPdasPoisson(bess_base):
# """
# Examples
# --------
# ### Sparsity known
# >>> from bess.linear import *
# >>> import numpy as np
# >>> np.random.seed(12345)
# >>> x = np.random.normal(0, 1, 100 * 150).reshape((100, 150))
# >>> beta = np.hstack((np.array([1, 1, -1, -1, -1]), np.zeros(145)))
# >>> lam = np.exp(np.matmul(x, beta))
# >>> y = np.random.poisson(lam=lam)
# >>> model = GroupPdasPoisson(path_type="seq", support_size=[5])
# >>> model.fit(X=x, y=y)
# >>> model.predict(x)
# ### Sparsity unknown
# >>> # path_type="seq", Default:support_size=[1,2,...,min(x.shape[0], x.shape[1])]
# >>> model = GroupPdasPoisson(path_type="seq")
# >>> model.fit(X=x, y=y)
# >>> model.predict(x)
# >>> # path_type="gs", Default:s_min=1, s_max=X.shape[1], K_max = int(math.log(p, 2/(math.sqrt(5) - 1)))
# >>> model = GroupPdasPoisson(path_type="gs")
# >>> model.fit(X=x, y=y)
# >>> model.predict(x)
# """
# def __init__(self, max_iter=20, exchange_num=5, path_type="seq", is_warm_start=True, support_size=None, alpha=None, s_min=None, s_max=None,
# K_max=1, epsilon=0.0001, lambda_min=None, lambda_max=None, ic_type="ebic", cv=1, screening_size=-1, powell_path=1,
# always_select=[], tau=0.
# ):
# super(GroupPdasPoisson, self).__init__(
# algorithm_type="GroupPdas", model_type="Poisson", path_type=path_type, max_iter=max_iter, exchange_num=exchange_num,
# is_warm_start=is_warm_start, support_size=support_size, alpha=alpha, s_min=s_min, s_max=s_max, K_max=K_max,
# epsilon=epsilon, lambda_min=lambda_min, lambda_max=lambda_max, ic_type=ic_type, cv=cv, screening_size=screening_size, powell_path=powell_path,
# always_select=always_select, tau=tau)
# self.normalize_type = 2
# @fix_docs
# class GroupPdasCox(bess_base):
# """
# Examples
# --------
# ### Sparsity known
# >>> from bess.linear import *
# >>> import numpy as np
# >>> np.random.seed(12345)
# >>> data = make_glm_data(100, 200, family="cox", cv=1, rho=0, sigma=1, c=10)
# >>> model = GroupPdasCox(path_type="seq", support_size=[5])
# >>> model.fit(data.x, data.y, is_normal=True)
# >>> model.predict(data.x)
# ### Sparsity unknown
# >>> # path_type="seq", Default:support_size=[1,2,...,min(x.shape[0], x.shape[1])]
# >>> model = GroupPdasCox(path_type="seq")
# >>> model.fit(data.x, data.y, is_normal=True)
# >>> model.predict(data.x)
# >>> # path_type="gs", Default:s_min=1, s_max=X.shape[1], K_max = int(math.log(p, 2/(math.sqrt(5) - 1)))
# >>> model = GroupPdasCox(path_type="gs")
# >>> model.fit(X=x, y=y)
# >>> model.predict(x)
# """
# def __init__(self, max_iter=20, exchange_num=5, path_type="seq", is_warm_start=True, support_size=None, alpha=None, s_min=None, s_max=None,
# K_max=1, epsilon=0.0001, lambda_min=None, lambda_max=None, ic_type="ebic", cv=1, screening_size=-1, powell_path=1
# ):
# super(GroupPdasCox, self).__init__(
# algorithm_type="GroupPdas", model_type="Cox", path_type=path_type, max_iter=max_iter, exchange_num=exchange_num,
# is_warm_start=is_warm_start, support_size=support_size, alpha=alpha, s_min=s_min, s_max=s_max, K_max=K_max,
# epsilon=epsilon, lambda_min=lambda_min, lambda_max=lambda_max, ic_type=ic_type, cv=cv, screening_size=screening_size, powell_path=powell_path)
# self.normalize_type = 3
|
py | 1a4548a3894e856bc238158ee72d3a1171d8fd28 | """Customized dataloader for general video classification tasks."""
import os
import warnings
import numpy as np
try:
from decord import VideoReader, cpu
except ImportError:
VideoReader = None
cpu = None
import torch
from torch.utils.data import Dataset
from ..transforms.videotransforms import video_transforms, volume_transforms
from .multigrid_helper import multiGridHelper, MultiGridBatchSampler
__all__ = ['VideoClsDataset', 'build_dataloader', 'build_dataloader_test']
class VideoClsDataset(Dataset):
"""Load your own video classification dataset."""
def __init__(self, anno_path, data_path, mode='train', clip_len=8,
frame_sample_rate=2, crop_size=224, short_side_size=256,
new_height=256, new_width=340, keep_aspect_ratio=False,
num_segment=1, num_crop=1, test_num_segment=10, test_num_crop=3,
use_multigrid=False):
self.anno_path = anno_path
self.data_path = data_path
self.mode = mode
self.clip_len = clip_len
self.frame_sample_rate = frame_sample_rate
self.crop_size = crop_size
self.short_side_size = short_side_size
self.new_height = new_height
self.new_width = new_width
self.keep_aspect_ratio = keep_aspect_ratio
self.num_segment = num_segment
self.test_num_segment = test_num_segment
self.num_crop = num_crop
self.test_num_crop = test_num_crop
self.use_multigrid = use_multigrid and (mode == 'train')
if VideoReader is None:
raise ImportError("Unable to import `decord` which is required to read videos.")
import pandas as pd
cleaned = pd.read_csv(self.anno_path, header=None, delimiter=' ')
self.dataset_samples = list(cleaned.values[:, 0])
self.label_array = list(cleaned.values[:, 2])
if (mode == 'train'):
if self.use_multigrid:
self.mg_helper = multiGridHelper()
self.data_transform = []
for alpha in range(self.mg_helper.mod_long):
tmp = []
for beta in range(self.mg_helper.mod_short):
info = self.mg_helper.get_resize(alpha, beta)
scale_s = info[1]
tmp.append(video_transforms.Compose([
video_transforms.Resize(int(self.short_side_size / scale_s),
interpolation='bilinear'),
# TODO: multiscale corner cropping
video_transforms.RandomResize(ratio=(1, 1.25),
interpolation='bilinear'),
video_transforms.RandomCrop(size=(int(self.crop_size / scale_s),
int(self.crop_size / scale_s)))]))
self.data_transform.append(tmp)
else:
self.data_transform = video_transforms.Compose([
video_transforms.Resize(int(self.short_side_size),
interpolation='bilinear'),
video_transforms.RandomResize(ratio=(1, 1.25),
interpolation='bilinear'),
video_transforms.RandomCrop(size=(int(self.crop_size),
int(self.crop_size)))])
self.data_transform_after = video_transforms.Compose([
video_transforms.RandomHorizontalFlip(),
volume_transforms.ClipToTensor(),
video_transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
elif (mode == 'validation'):
self.data_transform = video_transforms.Compose([
video_transforms.Resize(self.short_side_size, interpolation='bilinear'),
video_transforms.CenterCrop(size=(self.crop_size, self.crop_size)),
volume_transforms.ClipToTensor(),
video_transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
elif mode == 'test':
self.data_resize = video_transforms.Compose([
video_transforms.Resize(size=(short_side_size), interpolation='bilinear')
])
self.data_transform = video_transforms.Compose([
volume_transforms.ClipToTensor(),
video_transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
self.test_seg = []
self.test_dataset = []
self.test_label_array = []
for ck in range(self.test_num_segment):
for cp in range(self.test_num_crop):
for idx in range(len(self.label_array)):
sample_label = self.label_array[idx]
self.test_label_array.append(sample_label)
self.test_dataset.append(self.dataset_samples[idx])
self.test_seg.append((ck, cp))
def __getitem__(self, index):
if self.mode == 'train':
if self.use_multigrid is True:
index, alpha, beta = index
info = self.mg_helper.get_resize(alpha, beta)
scale_t = info[0]
data_transform_func = self.data_transform[alpha][beta]
else:
scale_t = 1
data_transform_func = self.data_transform
sample = self.dataset_samples[index]
buffer = self.loadvideo_decord(sample, sample_rate_scale=scale_t)
if len(buffer) == 0:
while len(buffer) == 0:
warnings.warn("video {} not correctly loaded during training".format(sample))
index = np.random.randint(self.__len__())
sample = self.dataset_samples[index]
buffer = self.loadvideo_decord(sample, sample_rate_scale=scale_t)
buffer = data_transform_func(buffer)
buffer = self.data_transform_after(buffer)
return buffer, self.label_array[index], sample.split("/")[-1].split(".")[0]
elif self.mode == 'validation':
sample = self.dataset_samples[index]
buffer = self.loadvideo_decord(sample)
if len(buffer) == 0:
while len(buffer) == 0:
warnings.warn("video {} not correctly loaded during validation".format(sample))
index = np.random.randint(self.__len__())
sample = self.dataset_samples[index]
buffer = self.loadvideo_decord(sample)
buffer = self.data_transform(buffer)
return buffer, self.label_array[index], sample.split("/")[-1].split(".")[0]
elif self.mode == 'test':
sample = self.test_dataset[index]
chunk_nb, split_nb = self.test_seg[index]
buffer = self.loadvideo_decord(sample)
while len(buffer) == 0:
warnings.warn("video {}, temporal {}, spatial {} not found during testing".format(\
str(self.test_dataset[index]), chunk_nb, split_nb))
index = np.random.randint(self.__len__())
sample = self.test_dataset[index]
chunk_nb, split_nb = self.test_seg[index]
buffer = self.loadvideo_decord(sample)
buffer = self.data_resize(buffer)
if isinstance(buffer, list):
buffer = np.stack(buffer, 0)
spatial_step = 1.0 * (max(buffer.shape[1], buffer.shape[2]) - self.short_side_size) \
/ (self.test_num_crop - 1)
temporal_step = max(1.0 * (buffer.shape[0] - self.clip_len) \
/ (self.test_num_segment - 1), 0)
temporal_start = int(chunk_nb * temporal_step)
spatial_start = int(split_nb * spatial_step)
if buffer.shape[1] >= buffer.shape[2]:
buffer = buffer[temporal_start:temporal_start + self.clip_len, \
spatial_start:spatial_start + self.short_side_size, :, :]
else:
buffer = buffer[temporal_start:temporal_start + self.clip_len, \
:, spatial_start:spatial_start + self.short_side_size, :]
buffer = self.data_transform(buffer)
return buffer, self.test_label_array[index], sample.split("/")[-1].split(".")[0], \
chunk_nb, split_nb
else:
            raise NameError('mode {} unknown'.format(self.mode))
def loadvideo_decord(self, sample, sample_rate_scale=1):
"""Load video content using Decord"""
# pylint: disable=line-too-long, bare-except, unnecessary-comprehension
fname = self.data_path + sample
if not (os.path.exists(fname)):
return []
# avoid hanging issue
if os.path.getsize(fname) < 1 * 1024:
print('SKIP: ', fname, " - ", os.path.getsize(fname))
return []
try:
if self.keep_aspect_ratio:
vr = VideoReader(fname, num_threads=1, ctx=cpu(0))
else:
vr = VideoReader(fname, width=self.new_width, height=self.new_height,
num_threads=1, ctx=cpu(0))
except:
print("video cannot be loaded by decord: ", fname)
return []
if self.mode == 'test':
all_index = [x for x in range(0, len(vr), self.frame_sample_rate)]
while len(all_index) < self.clip_len:
all_index.append(all_index[-1])
vr.seek(0)
buffer = vr.get_batch(all_index).asnumpy()
return buffer
# handle temporal segments
converted_len = int(self.clip_len * self.frame_sample_rate)
seg_len = len(vr) // self.num_segment
all_index = []
for i in range(self.num_segment):
if seg_len <= converted_len:
index = np.linspace(0, seg_len, num=seg_len // self.frame_sample_rate)
index = np.concatenate((index, np.ones(self.clip_len - seg_len // self.frame_sample_rate) * seg_len))
index = np.clip(index, 0, seg_len - 1).astype(np.int64)
else:
end_idx = np.random.randint(converted_len, seg_len)
str_idx = end_idx - converted_len
index = np.linspace(str_idx, end_idx, num=self.clip_len)
index = np.clip(index, str_idx, end_idx - 1).astype(np.int64)
index = index + i*seg_len
all_index.extend(list(index))
all_index = all_index[::int(sample_rate_scale)]
vr.seek(0)
buffer = vr.get_batch(all_index).asnumpy()
return buffer
def __len__(self):
if self.mode != 'test':
return len(self.dataset_samples)
else:
return len(self.test_dataset)
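# A minimal usage sketch (illustrative; the paths below are placeholders).
# The annotation file is space-delimited with the video path in its first
# column and the integer label in its third column, as parsed in __init__.
#
#     dataset = VideoClsDataset(anno_path='/data/train_list.txt',
#                               data_path='/data/videos/',
#                               mode='train', clip_len=8, frame_sample_rate=2)
#     clip, label, vid_name = dataset[0]   # normalized clip tensor, int label, video id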
def build_dataloader(cfg):
"""Build dataloader for training/validation"""
train_dataset = VideoClsDataset(anno_path=cfg.CONFIG.DATA.TRAIN_ANNO_PATH,
data_path=cfg.CONFIG.DATA.TRAIN_DATA_PATH,
mode='train',
use_multigrid=cfg.CONFIG.TRAIN.MULTIGRID.USE_SHORT_CYCLE \
or cfg.CONFIG.TRAIN.MULTIGRID.USE_LONG_CYCLE,
clip_len=cfg.CONFIG.DATA.CLIP_LEN,
frame_sample_rate=cfg.CONFIG.DATA.FRAME_RATE,
num_segment=cfg.CONFIG.DATA.NUM_SEGMENT,
num_crop=cfg.CONFIG.DATA.NUM_CROP,
keep_aspect_ratio=cfg.CONFIG.DATA.KEEP_ASPECT_RATIO,
crop_size=cfg.CONFIG.DATA.CROP_SIZE,
short_side_size=cfg.CONFIG.DATA.SHORT_SIDE_SIZE,
new_height=cfg.CONFIG.DATA.NEW_HEIGHT,
new_width=cfg.CONFIG.DATA.NEW_WIDTH)
val_dataset = VideoClsDataset(anno_path=cfg.CONFIG.DATA.VAL_ANNO_PATH,
data_path=cfg.CONFIG.DATA.VAL_DATA_PATH,
mode='validation',
use_multigrid=False,
clip_len=cfg.CONFIG.DATA.CLIP_LEN,
frame_sample_rate=cfg.CONFIG.DATA.FRAME_RATE,
num_segment=cfg.CONFIG.DATA.NUM_SEGMENT,
num_crop=cfg.CONFIG.DATA.NUM_CROP,
keep_aspect_ratio=cfg.CONFIG.DATA.KEEP_ASPECT_RATIO,
crop_size=cfg.CONFIG.DATA.CROP_SIZE,
short_side_size=cfg.CONFIG.DATA.SHORT_SIDE_SIZE,
new_height=cfg.CONFIG.DATA.NEW_HEIGHT,
new_width=cfg.CONFIG.DATA.NEW_WIDTH)
if cfg.DDP_CONFIG.DISTRIBUTED:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset)
else:
train_sampler = None
val_sampler = None
mg_sampler = None
if cfg.CONFIG.TRAIN.MULTIGRID.USE_LONG_CYCLE or cfg.CONFIG.TRAIN.MULTIGRID.USE_SHORT_CYCLE:
mg_sampler = MultiGridBatchSampler(train_sampler, batch_size=cfg.CONFIG.TRAIN.BATCH_SIZE,
drop_last=True,
use_long=cfg.CONFIG.TRAIN.MULTIGRID.USE_LONG_CYCLE,
use_short=cfg.CONFIG.TRAIN.MULTIGRID.USE_SHORT_CYCLE)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=1, shuffle=False,
num_workers=9, pin_memory=True,
batch_sampler=mg_sampler)
else:
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=cfg.CONFIG.TRAIN.BATCH_SIZE, shuffle=(train_sampler is None),
num_workers=9, sampler=train_sampler, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
val_dataset, batch_size=cfg.CONFIG.VAL.BATCH_SIZE, shuffle=(val_sampler is None),
num_workers=9, sampler=val_sampler, pin_memory=True)
return train_loader, val_loader, train_sampler, val_sampler, mg_sampler
def build_dataloader_test(cfg):
"""Build dataloader for testing"""
test_dataset = VideoClsDataset(anno_path=cfg.CONFIG.DATA.VAL_ANNO_PATH,
data_path=cfg.CONFIG.DATA.VAL_DATA_PATH,
mode='test',
clip_len=cfg.CONFIG.DATA.CLIP_LEN,
frame_sample_rate=cfg.CONFIG.DATA.FRAME_RATE,
test_num_segment=cfg.CONFIG.DATA.TEST_NUM_SEGMENT,
test_num_crop=cfg.CONFIG.DATA.TEST_NUM_CROP,
keep_aspect_ratio=cfg.CONFIG.DATA.KEEP_ASPECT_RATIO,
crop_size=cfg.CONFIG.DATA.CROP_SIZE,
short_side_size=cfg.CONFIG.DATA.SHORT_SIDE_SIZE,
new_height=cfg.CONFIG.DATA.NEW_HEIGHT,
new_width=cfg.CONFIG.DATA.NEW_WIDTH)
if cfg.DDP_CONFIG.DISTRIBUTED:
test_sampler = torch.utils.data.distributed.DistributedSampler(test_dataset)
else:
test_sampler = None
test_loader = torch.utils.data.DataLoader(
test_dataset, batch_size=cfg.CONFIG.VAL.BATCH_SIZE, shuffle=(test_sampler is None),
num_workers=9, sampler=test_sampler, pin_memory=True)
return test_loader
|
py | 1a45495b54e87b7c566a26391545293e1cfc0f6a | # -*- coding: utf-8 -*-
# Copyright 2020 ICON Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ("HTTPProvider", "Provider")
from .http_provider import HTTPProvider
from .provider import Provider
|
py | 1a4549c0e52f08224fbd6955932a0142903b3df4 | # Copyright 2014 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Grab bag file for transaction."""
import logging
from google.appengine.api import datastore_errors
from google.appengine.ext import ndb
from google.appengine.ext.ndb import tasklets
from google.appengine.runtime import apiproxy_errors
__all__ = [
'CommitError',
'transaction',
'transaction_async',
'transactional_async',
'transactional',
'transactional_tasklet',
]
class CommitError(Exception):
"""A transaction probably failed but it may or may not have occurred.
The caller may want to run a second transaction to verify if the previous one
succeeded.
"""
@ndb.tasklet
def transaction_async(callback, **ctx_options):
"""Converts all sorts of random exceptions into CommitError.
Arguments:
callback: function to run in the transaction. See
https://cloud.google.com/appengine/docs/python/ndb/functions for more
details.
  Sets the default value of retries to 1 instead of 3 (!)
"""
ctx_options.setdefault('retries', 1)
try:
result = yield ndb.transaction_async(callback, **ctx_options)
raise ndb.Return(result)
except (
datastore_errors.InternalError,
datastore_errors.Timeout,
datastore_errors.TransactionFailedError) as e:
# https://cloud.google.com/appengine/docs/python/datastore/transactions
# states the result is ambiguous, it could have succeeded.
logging.info('Transaction likely failed: %s', e)
raise CommitError(e)
except (
apiproxy_errors.CancelledError,
datastore_errors.BadRequestError,
RuntimeError) as e:
logging.info('Transaction failure: %s', e.__class__.__name__)
raise CommitError(e)
def transaction(callback, **ctx_options):
"""Synchronous version of transaction_async()."""
future = transaction_async(callback, **ctx_options)
return future.get_result()
@ndb.utils.decorator
def transactional_async(func, args, kwds, **ctx_options):
"""The async version of @txn.transactional."""
if args or kwds:
return transaction_async(lambda: func(*args, **kwds), **ctx_options)
return transaction_async(func, **ctx_options)
@ndb.utils.decorator
def transactional(func, args, kwds, **ctx_options):
"""Decorator that wraps a function with txn.transaction."""
return transactional_async.wrapped_decorator(
func, args, kwds, **ctx_options).get_result()
@ndb.utils.decorator
def transactional_tasklet(func, args, kwds, **options):
"""The tasklet version of @txn.transactional_async."""
func = tasklets.tasklet(func)
return transactional_async.wrapped_decorator(func, args, kwds, **options)
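# A minimal usage sketch (illustrative; the entity and `key` below are
# hypothetical, not part of this module):
#
#   @transactional
#   def increment(key):
#     ent = key.get()
#     ent.count += 1
#     ent.put()
#
#   try:
#     increment(key)
#   except CommitError:
#     # Ambiguous outcome: the transaction may or may not have committed, so
#     # verify the entity state before retrying.
#     pass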
|
py | 1a454a6d021545eddda479530d73eb2611b98185 |
import os
import sys
import argparse
import csv
HR = "################################################################################"
################################################################################
# Print Debug
################################################################################
def printWarning(message):
print "Tensile::WARNING: %s" % message
sys.stdout.flush()
def printExit(message):
print "Tensile::FATAL: %s" % message
sys.stdout.flush()
sys.exit(-1)
try:
import yaml
except ImportError:
printExit("You must install PyYAML to use Tensile (to parse config files). See http://pyyaml.org/wiki/PyYAML for installation instructions.")
def ensurePath( path ):
if not os.path.exists(path):
os.makedirs(path)
return path
################################################################################
# Library Logic Container
################################################################################
class LibraryLogic:
def __init__(self,filename=None):
if filename is not None:
print ("# Reading Library Logic: " + filename)
try:
stream = open(filename, "r")
except IOError:
printExit("Cannot open file: %s" % filename )
data = yaml.load(stream, yaml.SafeLoader)
self.__set_versionString(data[0]["MinimumRequiredVersion"])
self.__set_scheduleName(data[1])
self.__set_architectureName(data[2])
self.__set_deviceNames(data[3])
self.__set_problemType(data[4])
self.__set_solutionStates(data[5])
self.__set_indexOrder(data[6])
self.__set_exactLogic(data[7])
self.__set_rangeLogic(data[8])
stream.close()
else:
self.__set_versionString(None)
self.__set_scheduleName(None)
self.__set_architectureName(None)
self.__set_deviceNames(None)
self.__set_problemType(None)
self.__set_solutionStates(None)
self.__set_indexOrder(None)
self.__set_exactLogic(None)
self.__set_rangeLogic(None)
#versionString
def __get_versionString(self):
return self.__versionString
def __set_versionString(self,value):
self.__versionString = value
versionString = property(__get_versionString,__set_versionString)
#scheduleName
def __get_scheduleName(self):
return self.__scheduleName
def __set_scheduleName(self, value):
self.__scheduleName = value
scheduleName = property(__get_scheduleName,__set_scheduleName)
#architectureName
def __get_architectureName(self):
return self.__architectureName
def __set_architectureName(self,value):
self.__architectureName = value
architectureName = property(__get_architectureName,__set_architectureName)
#deviceNames
def __get_deviceNames(self):
return self.__deviceNames
def __set_deviceNames(self,value):
self.__deviceNames = value
deviceNames = property(__get_deviceNames,__set_deviceNames)
#problemTypeState
def __get_problemType(self):
return self.__problemType
def __set_problemType(self,value):
self.__problemType = value
problemType = property(__get_problemType,__set_problemType)
#solutionStates
def __get_solutionStates(self):
return self.__solutionStates
def __set_solutionStates(self,value):
self.__solutionStates = value
solutionStates = property(__get_solutionStates,__set_solutionStates)
#indexOrder
def __get_indexOrder(self):
return self.__indexOrder
def __set_indexOrder(self,value):
self.__indexOrder = value
indexOrder = property(__get_indexOrder,__set_indexOrder)
#exactLogic
def __get_exactLogic(self):
return self.__exactLogic
def __set_exactLogic(self,value):
self.__exactLogic = value
exactLogic = property(__get_exactLogic,__set_exactLogic)
#rangeLogic
def __get_rangeLogic(self):
return self.__rangeLogic
def __set_rangeLogic(self,value):
self.__rangeLogic = value
rangeLogic = property(__get_rangeLogic,__set_rangeLogic)
def writeLibraryLogic(self,filename):
data = []
if self.versionString is not None:
data.append({"MinimumRequiredVersion":self.versionString})
if self.scheduleName is not None:
data.append(self.scheduleName)
if self.architectureName is not None:
data.append(self.architectureName)
if self.deviceNames is not None:
data.append(self.deviceNames)
if self.problemType is not None:
data.append(self.problemType)
if self.solutionStates is not None:
data.append(self.solutionStates)
if self.indexOrder is not None:
data.append(self.indexOrder)
if self.exactLogic is not None:
data.append(self.exactLogic)
if self.rangeLogic is not None:
data.append(self.rangeLogic)
if not data:
printExit("No data to output")
else:
try:
stream = open(filename, "w")
yaml.safe_dump(data, stream)
stream.close()
except IOError:
printExit("Cannot open file: %s" % filename)
def makeCSVFileName(filePath):
_, fullFileName = os.path.split(filePath)
fileName, _ = os.path.splitext(fullFileName)
outputFileName = fileName + "-sizes.csv"
return outputFileName
def makeAugmentedFileName(filePath, tagExtension):
_, fullFileName = os.path.split(filePath)
fileName, _ = os.path.splitext(fullFileName)
outputFileName = fileName + tagExtension
return outputFileName
def ExtractSizes(inputFilePath, outputFilePath):
libraryLogic = LibraryLogic(inputFilePath)
exactLogic = libraryLogic.exactLogic
exactSizes = [esize[0] for esize in exactLogic]
#with open(outputFilePath, "wb") as f:
# writer = csv.writer(f)
# writer.writerows(exactSizes)
return exactSizes
#def sizeToBenchArgs(size):
# m = size[0]
# n = size[1]
# k = size[2]
# l = size[3]
# alpha = 1
# beta = 0
# line = "./rocblas-bench -f gemm -r h --transposeA N --transposeB N -m %u -n %u -k %u --alpha %u --lda %u --ldb %u --beta %u --ldc %u \n" \
# % (m,n,l,alpha,m,k,beta,m)
# return line
def getMapping(label, mapper):
mapped = ""
if label in mapper:
mapped = mapper[label]
return mapped
def getRunParametersFromName(logicSignature):
  fields = logicSignature.split('_')
nFields = len(fields)
matrixLabelA = fields[nFields-3]
matrixLabelB = fields[nFields-2]
typeLabel = fields[nFields-1]
transposeMapperA = {"Ailk":"N", "Alik":"T"}
transposeMapperB = {"Bjlk":"T", "Bljk":"N"}
functionMapper = {"HB":"gemm","SB":"gemm","DB":"gemm","HBH":"gemm_ex"}
typeNameMapper = {"HB":"h","SB":"s","DB":"d","HBH":"h"}
transposeA = getMapping(matrixLabelA, transposeMapperA)
transposeB = getMapping(matrixLabelB, transposeMapperB)
functionName = getMapping(typeLabel, functionMapper)
typeName = getMapping(typeLabel,typeNameMapper)
runParameters = [transposeA,transposeB,functionName,typeLabel,typeName]
return runParameters
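# Example (illustrative logic-file name, following the mappers above):
#   getRunParametersFromName("vega20_Ailk_Bljk_HBH")
#   # -> ["N", "N", "gemm_ex", "HBH", "h"]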
def makeLine(runParams, size):
m = size[0]
n = size[1]
k = size[2]
l = size[3]
alpha = 1
beta = 0
transposeA = runParams[0]
transposeB = runParams[1]
functionName = runParams[2]
label = runParams[3]
typeName = runParams[4]
line = "./rocblas-bench -f %s -r %s --transposeA %s --transposeB %s" % (functionName,typeName,transposeA,transposeB)
line += " -m %u -n %u -k %u --alpha %u --lda %u --ldb %u --beta %u --ldc %u" % (m,n,l,alpha,m,k,beta,m)
if label == "HBH":
line += " --a_type h --b_type h --c_type h --d_type h --compute_type s"
line += " \n"
return line
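# Example (illustrative numbers): with runParams ["N", "N", "gemm", "SB", "s"]
# and size [1024, 512, 1, 256] (read positionally as m, n, k, l above),
# makeLine returns:
#   ./rocblas-bench -f gemm -r s --transposeA N --transposeB N -m 1024 -n 512 -k 256 --alpha 1 --lda 1024 --ldb 1 --beta 0 --ldc 1024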
def writeBenchmarkScript(scriptFilePath, exactSizes, runParams):
f = open(scriptFilePath, "wb")
f.writelines(["#!/bin/sh\n","\n","\n"])
lines = []
for size in exactSizes:
line = makeLine(runParams, size)
lines.append(line)
f.writelines(lines)
f.close()
def RunMergeTensileLogicFiles():
print ""
print HR
print "# Extract sizes"
print HR
print ""
##############################################################################
# Parse Command Line Arguments
##############################################################################
argParser = argparse.ArgumentParser()
argParser.add_argument("ExactLogicPath", help="Path to the exact LibraryLogic.yaml input files.")
argParser.add_argument("OutputPath", help="Where to write library files?")
#argParser.add_argument("-b", dest="BenchmarkScript", help="write benchmark test script")
argParser.add_argument("-b", dest="doBenchmarkScript", action="store_true", help="write benchmark test script")
args = argParser.parse_args()
exactLogicPath = args.ExactLogicPath
outputPath = args.OutputPath
#doBenchmarkScript = args.doBenchmarkScript
ensurePath(outputPath)
if not os.path.exists(exactLogicPath):
printExit("LogicPath %s doesn't exist" % exactLogicPath)
exactLogicFiles = [os.path.join(exactLogicPath, f) for f in os.listdir(exactLogicPath) \
if (os.path.isfile(os.path.join(exactLogicPath, f)) \
and os.path.splitext(f)[1]==".yaml")]
#print exactLogicFiles
for f in exactLogicFiles:
print "processing " + f
fullFilePath = os.path.join(exactLogicPath, f)
name = makeAugmentedFileName(fullFilePath,"")
runParameters = getRunParametersFromName(name)
#print runParameters
outputFileName = makeCSVFileName(fullFilePath)
outputFile = os.path.join(outputPath, outputFileName)
sizes = ExtractSizes(fullFilePath, outputFile)
#print sizes
benchmarkFileName = makeAugmentedFileName(fullFilePath, "-benchmark.sh")
benchmarkScriptName = os.path.join(outputPath, benchmarkFileName)
writeBenchmarkScript(benchmarkScriptName, sizes, runParameters)
################################################################################
# Main
################################################################################
if __name__ == "__main__":
RunMergeTensileLogicFiles()
|
py | 1a454b1987031e5ea2e44d9beb576b91b6d8cd44 | # -*- coding: utf-8 -*-
# vim: ft=python
"""
tests.unit.test_lfulib
"""
from __future__ import absolute_import
# Import 3rd party libs.
import pytest
# Import from local project.
from lfucache.exceptions import InvalidItemException
from lfucache.lfulib import LFUCache
# Import test scaffolding.
from tests.unit.fixtures.all import (
FREQUENCY,
NOT_FOUND,
)
# Mark everything here.
pytestmark = pytest.mark.unit
def test_get():
"""
Test - Use get to return the expected values.
"""
cache = LFUCache(2)
assert cache.get(1) == NOT_FOUND
cache.put(1, 1)
cache.put(2, 2)
# Increment the count for 1, moving 2 to least frequently used.
assert cache.get(1) == 1
cache.put(3, 3)
assert cache.get(2) == NOT_FOUND
assert cache.get(3) == 3
cache.put(4, 4)
assert cache.get(1) == NOT_FOUND
assert cache.get(3) == 3
assert cache.get(4) == 4
def test_put_failed():
"""
Test - Use put to check the expected functionality.
"""
cache = LFUCache(1)
with pytest.raises(InvalidItemException):
cache.put('invalid key', 1)
with pytest.raises(InvalidItemException):
cache.put(1, 'invalid value')
with pytest.raises(InvalidItemException):
cache.put(-1, -1)
def test_peek():
"""
Test - Check the content without incrementing the counter.
"""
cache = LFUCache(2)
cache.put(1, 1)
cache.put(2, 2)
_ = cache.get(1)
_ = cache.peek(1)
assert cache.peek(1) == (1, 2)
def test_get_frequency():
"""
Test - Check the frequency of .
"""
cache = LFUCache(3)
cache.put(1, 1)
cache.put(2, 2)
cache.put(3, 3)
_ = cache.get(1)
assert cache.get_frequency() == FREQUENCY
|
py | 1a454c0db3f73b26b057fc7f19b588770839dc55 | import tensorflow as tf
import numpy as np
import maddpg.common.tf_util as U
from tensorflow.python.ops import math_ops
from multiagent.multi_discrete import MultiDiscrete
from tensorflow.python.ops import nn
class Pd(object):
"""
A particular probability distribution
"""
def flatparam(self):
raise NotImplementedError
def mode(self):
raise NotImplementedError
def logp(self, x):
raise NotImplementedError
def kl(self, other):
raise NotImplementedError
def entropy(self):
raise NotImplementedError
def sample(self):
raise NotImplementedError
class PdType(object):
"""
Parametrized family of probability distributions
"""
def pdclass(self):
raise NotImplementedError
def pdfromflat(self, flat):
return self.pdclass()(flat)
def param_shape(self):
raise NotImplementedError
def sample_shape(self):
raise NotImplementedError
def sample_dtype(self):
raise NotImplementedError
def param_placeholder(self, prepend_shape, name=None):
return tf.placeholder(dtype=tf.float32, shape=prepend_shape+self.param_shape(), name=name)
def sample_placeholder(self, prepend_shape, name=None):
return tf.placeholder(dtype=self.sample_dtype(), shape=prepend_shape+self.sample_shape(), name=name)
class CategoricalPdType(PdType):
def __init__(self, ncat):
self.ncat = ncat
def pdclass(self):
return CategoricalPd
def param_shape(self):
return [self.ncat]
def sample_shape(self):
return []
def sample_dtype(self):
return tf.int32
class SoftCategoricalPdType(PdType):
def __init__(self, ncat):
self.ncat = ncat
def pdclass(self):
return SoftCategoricalPd
def param_shape(self):
return [self.ncat]
def sample_shape(self):
return [self.ncat]
def sample_dtype(self):
return tf.float32
class MultiCategoricalPdType(PdType):
def __init__(self, low, high):
self.low = low
self.high = high
self.ncats = high - low + 1
def pdclass(self):
return MultiCategoricalPd
def pdfromflat(self, flat):
return MultiCategoricalPd(self.low, self.high, flat)
def param_shape(self):
return [sum(self.ncats)]
def sample_shape(self):
return [len(self.ncats)]
def sample_dtype(self):
return tf.int32
class SoftMultiCategoricalPdType(PdType):
def __init__(self, low, high):
self.low = low
self.high = high
self.ncats = high - low + 1
def pdclass(self):
return SoftMultiCategoricalPd
def pdfromflat(self, flat):
return SoftMultiCategoricalPd(self.low, self.high, flat)
def param_shape(self):
return [sum(self.ncats)]
def sample_shape(self):
return [sum(self.ncats)]
def sample_dtype(self):
return tf.float32
class DiagGaussianPdType(PdType):
def __init__(self, size):
self.size = size
def pdclass(self):
return DiagGaussianPd
def param_shape(self):
return [2*self.size]
def sample_shape(self):
return [self.size]
def sample_dtype(self):
return tf.float32
class BernoulliPdType(PdType):
def __init__(self, size):
self.size = size
def pdclass(self):
return BernoulliPd
def param_shape(self):
return [self.size]
def sample_shape(self):
return [self.size]
def sample_dtype(self):
return tf.int32
# WRONG SECOND DERIVATIVES
# class CategoricalPd(Pd):
# def __init__(self, logits):
# self.logits = logits
# self.ps = tf.nn.softmax(logits)
# @classmethod
# def fromflat(cls, flat):
# return cls(flat)
# def flatparam(self):
# return self.logits
# def mode(self):
# return U.argmax(self.logits, axis=1)
# def logp(self, x):
# return -tf.nn.sparse_softmax_cross_entropy_with_logits(self.logits, x)
# def kl(self, other):
# return tf.nn.softmax_cross_entropy_with_logits(other.logits, self.ps) \
# - tf.nn.softmax_cross_entropy_with_logits(self.logits, self.ps)
# def entropy(self):
# return tf.nn.softmax_cross_entropy_with_logits(self.logits, self.ps)
# def sample(self):
# u = tf.random_uniform(tf.shape(self.logits))
# return U.argmax(self.logits - tf.log(-tf.log(u)), axis=1)
class CategoricalPd(Pd):
def __init__(self, logits):
self.logits = logits
def flatparam(self):
return self.logits
def mode(self):
return U.argmax(self.logits, axis=1)
def logp(self, x):
return -tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits, labels=x)
def kl(self, other):
a0 = self.logits - U.max(self.logits, axis=1, keepdims=True)
a1 = other.logits - U.max(other.logits, axis=1, keepdims=True)
ea0 = tf.exp(a0)
ea1 = tf.exp(a1)
z0 = U.sum(ea0, axis=1, keepdims=True)
z1 = U.sum(ea1, axis=1, keepdims=True)
p0 = ea0 / z0
return U.sum(p0 * (a0 - tf.log(z0) - a1 + tf.log(z1)), axis=1)
def entropy(self):
a0 = self.logits - U.max(self.logits, axis=1, keepdims=True)
ea0 = tf.exp(a0)
z0 = U.sum(ea0, axis=1, keepdims=True)
p0 = ea0 / z0
return U.sum(p0 * (tf.log(z0) - a0), axis=1)
def sample(self):
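        # Gumbel-max trick: perturbing the logits with -log(-log(U)) noise, U ~ Uniform(0,1),
        # and taking the argmax draws an exact sample from the categorical distribution.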
u = tf.random_uniform(tf.shape(self.logits))
return U.argmax(self.logits - tf.log(-tf.log(u)), axis=1)
def random_sample(self):
rand_logits = self.logits * (1+tf.random_normal(tf.shape(self.logits)))
u = tf.random_uniform(tf.shape(self.logits))
return U.argmax(rand_logits - tf.log(-tf.log(u)), axis=1)
@classmethod
def fromflat(cls, flat):
return cls(flat)
class SoftCategoricalPd(Pd):
def __init__(self, logits):
self.logits = logits
def flatparam(self):
return self.logits
def mode(self):
return U.softmax(self.logits, axis=-1)
def logp(self, x):
return -tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=x)
def kl(self, other):
a0 = self.logits - U.max(self.logits, axis=1, keepdims=True)
a1 = other.logits - U.max(other.logits, axis=1, keepdims=True)
ea0 = tf.exp(a0)
ea1 = tf.exp(a1)
z0 = U.sum(ea0, axis=1, keepdims=True)
z1 = U.sum(ea1, axis=1, keepdims=True)
p0 = ea0 / z0
return U.sum(p0 * (a0 - tf.log(z0) - a1 + tf.log(z1)), axis=1)
def entropy(self):
a0 = self.logits - U.max(self.logits, axis=1, keepdims=True)
ea0 = tf.exp(a0)
z0 = U.sum(ea0, axis=1, keepdims=True)
p0 = ea0 / z0
return U.sum(p0 * (tf.log(z0) - a0), axis=1)
def sample(self):
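        # Gumbel-softmax style sample: same Gumbel perturbation as in CategoricalPd, but
        # returning a softmax over the perturbed logits yields a differentiable, soft
        # one-hot sample instead of a hard argmax.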
u = tf.random_uniform(tf.shape(self.logits))
rand_logits = self.logits - tf.log(-tf.log(u))
# return rand_logits
return U.softmax(rand_logits, axis=-1)
def random_sample(self):
rand_logits = self.logits * (1+tf.random_normal(tf.shape(self.logits)))
u = tf.random_uniform(tf.shape(self.logits))
rand_logits = rand_logits - tf.log(-tf.log(u))
# return rand_logits
return U.softmax(rand_logits, axis=-1)
@classmethod
def fromflat(cls, flat):
return cls(flat)
class MultiCategoricalPd(Pd):
def __init__(self, low, high, flat):
self.flat = flat
self.low = tf.constant(low, dtype=tf.int32)
self.categoricals = list(map(CategoricalPd, tf.split(flat, high - low + 1, axis=len(flat.get_shape()) - 1)))
def flatparam(self):
return self.flat
def mode(self):
return self.low + tf.cast(tf.stack([p.mode() for p in self.categoricals], axis=-1), tf.int32)
def logp(self, x):
return tf.add_n([p.logp(px) for p, px in zip(self.categoricals, tf.unstack(x - self.low, axis=len(x.get_shape()) - 1))])
def kl(self, other):
return tf.add_n([
p.kl(q) for p, q in zip(self.categoricals, other.categoricals)
])
def entropy(self):
return tf.add_n([p.entropy() for p in self.categoricals])
def sample(self):
return self.low + tf.cast(tf.stack([p.sample() for p in self.categoricals], axis=-1), tf.int32)
def random_sample(self):
return self.low + tf.cast(tf.stack([p.random_sample() for p in self.categoricals], axis=-1), tf.int32)
@classmethod
def fromflat(cls, flat):
return cls(flat)
class SoftMultiCategoricalPd(Pd): # doesn't work yet
def __init__(self, low, high, flat):
self.flat = flat
self.low = tf.constant(low, dtype=tf.float32)
self.categoricals = list(map(SoftCategoricalPd, tf.split(flat, high - low + 1, axis=len(flat.get_shape()) - 1)))
def flatparam(self):
return self.flat
def mode(self):
x = []
for i in range(len(self.categoricals)):
x.append(self.low[i] + self.categoricals[i].mode())
return tf.concat(x, axis=-1)
def logp(self, x):
return tf.add_n([p.logp(px) for p, px in zip(self.categoricals, tf.unstack(x - self.low, axis=len(x.get_shape()) - 1))])
def kl(self, other):
return tf.add_n([
p.kl(q) for p, q in zip(self.categoricals, other.categoricals)
])
def entropy(self):
return tf.add_n([p.entropy() for p in self.categoricals])
def sample(self):
x = []
for i in range(len(self.categoricals)):
x.append(self.low[i] + self.categoricals[i].sample())
return tf.concat(x, axis=-1)
def random_sample(self):
x = []
for i in range(len(self.categoricals)):
x.append(self.low[i] + self.categoricals[i].random_sample())
return tf.concat(x, axis=-1)
@classmethod
def fromflat(cls, flat):
return cls(flat)
class DiagGaussianPd(Pd):
def __init__(self, flat):
self.flat = flat
mean, logstd = tf.split(axis=1, num_or_size_splits=2, value=flat)
self.mean = mean
self.logstd = logstd
self.std = tf.exp(logstd)
def flatparam(self):
return self.flat
def mode(self):
return self.mean
def logp(self, x):
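        # log-density of a diagonal Gaussian N(mean, std), summed over the action dimensions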
return - 0.5 * U.sum(tf.square((x - self.mean) / self.std), axis=1) \
- 0.5 * np.log(2.0 * np.pi) * tf.to_float(tf.shape(x)[1]) \
- U.sum(self.logstd, axis=1)
def kl(self, other):
assert isinstance(other, DiagGaussianPd)
return U.sum(other.logstd - self.logstd + (tf.square(self.std) + tf.square(self.mean - other.mean)) / (2.0 * tf.square(other.std)) - 0.5, axis=1)
def entropy(self):
return U.sum(self.logstd + .5 * np.log(2.0 * np.pi * np.e), 1)
def sample(self):
return self.mean + self.std * tf.random_normal(tf.shape(self.mean))
def random_sample(self):
rand_mean = self.mean * (1+tf.random_normal(tf.shape(self.mean)))
rand_std = self.std * (1+tf.random_normal(tf.shape(self.std)))
return rand_mean + rand_std * tf.random_normal(tf.shape(rand_mean))
@classmethod
def fromflat(cls, flat):
return cls(flat)
class BernoulliPd(Pd):
def __init__(self, logits):
self.logits = logits
self.ps = tf.sigmoid(logits)
def flatparam(self):
return self.logits
def mode(self):
return tf.round(self.ps)
def logp(self, x):
return - U.sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.logits, labels=tf.to_float(x)), axis=1)
def kl(self, other):
return U.sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=other.logits, labels=self.ps), axis=1) - U.sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.logits, labels=self.ps), axis=1)
def entropy(self):
return U.sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.logits, labels=self.ps), axis=1)
def sample(self):
p = tf.sigmoid(self.logits)
u = tf.random_uniform(tf.shape(p))
return tf.to_float(math_ops.less(u, p))
def random_sample(self):
rand_logits = self.logits * (1+tf.random_normal(tf.shape(self.logits)))
p = tf.sigmoid(rand_logits)
u = tf.random_uniform(tf.shape(p))
return tf.to_float(math_ops.less(u, p))
@classmethod
def fromflat(cls, flat):
return cls(flat)
def make_pdtype(ac_space):
from gym import spaces
if isinstance(ac_space, spaces.Box):
assert len(ac_space.shape) == 1
return DiagGaussianPdType(ac_space.shape[0])
elif isinstance(ac_space, spaces.Discrete):
# return CategoricalPdType(ac_space.n)
return SoftCategoricalPdType(ac_space.n)
elif isinstance(ac_space, MultiDiscrete):
#return MultiCategoricalPdType(ac_space.low, ac_space.high)
return SoftMultiCategoricalPdType(ac_space.low, ac_space.high)
elif isinstance(ac_space, spaces.MultiBinary):
return BernoulliPdType(ac_space.n)
else:
raise NotImplementedError
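# Illustrative usage sketch (not part of the original module): for a Discrete action
# space, make_pdtype returns a SoftCategoricalPdType whose flat parameters are logits:
#   pdtype = make_pdtype(gym.spaces.Discrete(5))
#   flat = pdtype.param_placeholder([None])   # [batch, 5] logits placeholder
#   act_pd = pdtype.pdfromflat(flat)
#   sample = act_pd.sample()                  # soft (differentiable) one-hot sample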
def shape_el(v, i):
maybe = v.get_shape()[i]
if maybe is not None:
return maybe
else:
return tf.shape(v)[i]
|
py | 1a454ce2dc4b699abd56b4a27df93d6a6b729c4c | """
Django settings for chatdj project.
Generated by 'django-admin startproject' using Django 3.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'xq_0==fmnb6wj50ik#3skk7i-#!#ma8w311okks!a0dy9yun_t'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'channels',
'chat',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'chatdj.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR / 'templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'chatdj.wsgi.application'
ASGI_APPLICATION = 'chatdj.routing.application'
CHANNEL_LAYERS = {
'default': {
'BACKEND': 'channels_redis.core.RedisChannelLayer',
'CONFIG': {
"hosts": [('127.0.0.1', 6379)],
},
},
}
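# Note: the channel layer above expects a Redis server to be reachable at
# 127.0.0.1:6379 whenever the ASGI application is run.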
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
|
py | 1a454d136a7e688a537c2a59f29495a148d57c88 | import os
def readtxt():
with open(os.path.join(os.path.dirname(__file__),'log.txt'),'r') as f:
        lines = f.readlines()
        print lines
        print type(lines)
readtxt()
|
py | 1a454d29abcb558e53479b01a7c5d9ada9e2242a | # 单行注释
"""
多行注释
"""
if True:
print("True")
elif False:
print("elif")
else:
print("False")
total = ['item_one', 'item_two', 'item_three',
'item_four', 'item_five']
# String slicing syntax: variable[start:end:step]
string = 'Runoob'
print(string)            # print the whole string
print(string[0:-1])      # print from the first character to the second-to-last
print(string[0])         # print the first character
print(string[2:5])       # print the third through fifth characters
print(string[2:])        # print everything from the third character onward
print(string * 2)        # print the string twice
print(string + 'hello')  # concatenate strings
# A backslash escapes the following character; prefixing the literal with r disables
# escaping, so r"this is a line with \n" shows the \n instead of a line break.
print(r"this is a line with \n")
# The built-in type() function reports the type of the object a variable refers to.
a, b, c, d = 20, 5.5, True, 4 + 3j
print(type(a), type(b), type(c), type(d))
a1 = set('abracadabra')
print(a1)
dict1 = dict([('Runoob', 1), ('Google', 2), ('Taobao', 3)])
dict2 = {x: x ** 2 for x in (2, 4, 6)}
dict3 = dict(Runoob=1, Google=2, Taobao=3)
print(dict1)
print(dict2)
print(dict3)
a_string = 'Hello'
print('sliced string:', a_string[1:4])
print("My name is %s and I am %d years old!" % ('小明', 10))
for i in range(5, 9):
print(i)
# When iterating over a dictionary, the keys and corresponding values can be retrieved together with the items() method
knights = {'gallahad': 'the pure', 'robin': 'the brave'}
for key, value in knights.items():
print(key, value)
# When iterating over a sequence, the index and corresponding value can be obtained together with enumerate():
for i, v in enumerate(['tic', 'tac', 'toe']):
print(i, v)
# To iterate over two or more sequences at the same time, combine them with zip():
questions = ['name', 'quest', 'favorite color']
answers = ['lancelot', 'the holy grail', 'blue']
for q, a in zip(questions, answers):
print('What is your {0}? It is {1}.'.format(q, a))
# Tuples can also be written without parentheses
tup3 = "a", "b", "c", "d"
tup4 = tup3, (1, 2, 3, 4, 5)
print(tup3)
print('{0}'.format(tup4))
# Iterators
list_iter = [1, 2, 3, 4]
it = iter(list_iter)  # create an iterator object
print(next(it))  # print the iterator's next element
print(next(it))
for x in it:
print(x, end=" ")
print('\nseparator')
# The __iter__() method returns a special iterator object
# The __next__() method returns the next item from the iterator
class MyNumbers:
def __iter__(self):
self.a = 6
return self
def __next__(self):
if self.a <= 10:
x = self.a
self.a += 1
return x
else:
raise StopIteration
instanceClass = MyNumbers()
instanceIter = iter(instanceClass)
# print(next(instanceIter))
for x in instanceIter:
print(x)
if __name__ == '__main__':
input("\n\n按下 enter 键后退出。")
|
py | 1a454d8f59ba8daf07322fbfb2553e0d44ead243 | # Copyright (c) 2014 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
FC Drivers for EMC VNX and VMAX arrays based on SMI-S.
"""
from cinder import context
from cinder.openstack.common import log as logging
from cinder.volume import driver
from cinder.volume.drivers.emc import emc_smis_common
LOG = logging.getLogger(__name__)
class EMCSMISFCDriver(driver.FibreChannelDriver):
"""EMC FC Drivers for VMAX and VNX using SMI-S.
Version history:
1.0.0 - Initial driver
1.1.0 - Multiple pools and thick/thin provisioning,
performance enhancement.
"""
VERSION = "1.1.0"
def __init__(self, *args, **kwargs):
super(EMCSMISFCDriver, self).__init__(*args, **kwargs)
self.common = emc_smis_common.EMCSMISCommon(
'FC',
configuration=self.configuration)
def check_for_setup_error(self):
pass
def create_volume(self, volume):
"""Creates a EMC(VMAX/VNX) volume."""
volpath = self.common.create_volume(volume)
model_update = {}
volume['provider_location'] = str(volpath)
model_update['provider_location'] = volume['provider_location']
return model_update
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
volpath = self.common.create_volume_from_snapshot(volume, snapshot)
model_update = {}
volume['provider_location'] = str(volpath)
model_update['provider_location'] = volume['provider_location']
return model_update
def create_cloned_volume(self, volume, src_vref):
"""Creates a cloned volume."""
volpath = self.common.create_cloned_volume(volume, src_vref)
model_update = {}
volume['provider_location'] = str(volpath)
model_update['provider_location'] = volume['provider_location']
return model_update
def delete_volume(self, volume):
"""Deletes an EMC volume."""
self.common.delete_volume(volume)
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
ctxt = context.get_admin_context()
volumename = snapshot['volume_name']
index = volumename.index('-')
volumeid = volumename[index + 1:]
volume = self.db.volume_get(ctxt, volumeid)
volpath = self.common.create_snapshot(snapshot, volume)
model_update = {}
snapshot['provider_location'] = str(volpath)
model_update['provider_location'] = snapshot['provider_location']
return model_update
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
ctxt = context.get_admin_context()
volumename = snapshot['volume_name']
index = volumename.index('-')
volumeid = volumename[index + 1:]
volume = self.db.volume_get(ctxt, volumeid)
self.common.delete_snapshot(snapshot, volume)
def ensure_export(self, context, volume):
"""Driver entry point to get the export info for an existing volume."""
pass
def create_export(self, context, volume):
"""Driver entry point to get the export info for a new volume."""
pass
def remove_export(self, context, volume):
"""Driver entry point to remove an export for a volume."""
pass
def check_for_export(self, context, volume_id):
"""Make sure volume is exported."""
pass
def initialize_connection(self, volume, connector):
"""Initializes the connection and returns connection info.
Assign any created volume to a compute node/host so that it can be
used from that host.
The driver returns a driver_volume_type of 'fibre_channel'.
The target_wwn can be a single entry or a list of wwns that
correspond to the list of remote wwn(s) that will export the volume.
Example return values:
{
'driver_volume_type': 'fibre_channel'
'data': {
'target_discovered': True,
'target_lun': 1,
'target_wwn': '1234567890123',
}
}
or
{
'driver_volume_type': 'fibre_channel'
'data': {
'target_discovered': True,
'target_lun': 1,
'target_wwn': ['1234567890123', '0987654321321'],
}
}
"""
device_info = self.common.initialize_connection(volume,
connector)
device_number = device_info['hostlunid']
storage_system = device_info['storagesystem']
target_wwns, init_targ_map = self._build_initiator_target_map(
storage_system, connector)
data = {'driver_volume_type': 'fibre_channel',
'data': {'target_lun': device_number,
'target_discovered': True,
'target_wwn': target_wwns,
'initiator_target_map': init_targ_map}}
LOG.debug(_('Return FC data: %(data)s.')
% {'data': data})
return data
def terminate_connection(self, volume, connector, **kwargs):
"""Disallow connection from connector."""
self.common.terminate_connection(volume, connector)
loc = volume['provider_location']
name = eval(loc)
storage_system = name['keybindings']['SystemName']
target_wwns, init_targ_map = self._build_initiator_target_map(
storage_system, connector)
data = {'driver_volume_type': 'fibre_channel',
'data': {'target_wwn': target_wwns,
'initiator_target_map': init_targ_map}}
LOG.debug(_('Return FC data: %(data)s.')
% {'data': data})
return data
def _build_initiator_target_map(self, storage_system, connector):
"""Build the target_wwns and the initiator target map."""
target_wwns = self.common.get_target_wwns(storage_system, connector)
initiator_wwns = connector['wwpns']
init_targ_map = {}
for initiator in initiator_wwns:
init_targ_map[initiator] = target_wwns
return target_wwns, init_targ_map
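    # Illustrative example (WWNs are made up): for a connector with
    # wwpns ['10000090fa0d6754', '10000090fa0d6755'] and array target wwns
    # ['5006016308602773'], the returned map would be
    # {'10000090fa0d6754': ['5006016308602773'],
    #  '10000090fa0d6755': ['5006016308602773']}.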
def extend_volume(self, volume, new_size):
"""Extend an existing volume."""
self.common.extend_volume(volume, new_size)
def get_volume_stats(self, refresh=False):
"""Get volume stats.
If 'refresh' is True, run update the stats first.
"""
if refresh:
self.update_volume_stats()
return self._stats
def update_volume_stats(self):
"""Retrieve stats info from volume group."""
LOG.debug(_("Updating volume stats"))
data = self.common.update_volume_stats()
backend_name = self.configuration.safe_get('volume_backend_name')
data['volume_backend_name'] = backend_name or 'EMCSMISFCDriver'
data['storage_protocol'] = 'FC'
data['driver_version'] = self.VERSION
self._stats = data
|
py | 1a454e27f273ee5f58db7412e9d7d22b099cffa0 | #!/bin/env python
from setuptools import setup
setup(
name="interpol",
version="0.1",
description="A way to interpolate data yielded from iterators",
url="https://github.com/radium226/interpol",
license="GPL",
packages=["interpol"],
zip_safe=True,
install_requires=[
"scipy"
]
)
|
py | 1a45501a770cf7010fffda88435c6d7bf2cc79ba |
from i106 import C10
import pytest
class TestVideoF0:
@pytest.fixture
def p(self):
for packet in C10('tests/video.c10'):
if packet.data_type == 0x40:
return packet
raise Exception('No video packets found!')
@pytest.fixture
def msg(self, p):
for msg in p:
return msg
# Packet level tests
def test_iteration(self, p):
assert len(list(p)) == len(p)
def test_len(self, p):
assert len(p) == 83
@pytest.mark.parametrize('attr,expected', [
('type', 0),
('klv', 0),
('srs', 0),
('iph', 0),
('et', 0),
])
def test_packet_attrs(self, p, attr, expected):
assert getattr(p, attr) == expected
# setattr(p, attr, 1)
# assert getattr(msg, attr) == 1
# Message level tests
def test_bytes(self, msg):
assert bytes(msg)
|
py | 1a4550b576eecc60d1bc693d04815df9f0731a4d | from database.models import *
from database.queries import hasKVPair
from IPython import embed
import numpy as np
from database.imports import printD,ploc,datetime,timedelta,_tdb,FlushError,IntegrityError
#_tdb.tdbOff()
#FIXME TODO, make all these things use queries instead of generating you nub
#and failover to create if absent
#creation order
#order=(Person,Project,Experiment,SlicePrep,Repository,RepoPath,DOB,Mouse,Sire,Dam,MatingRecord,Litter) #TODO
#order=(t_people,t_dob)# you get the idea... maybe make a tree in a dict or something?
class TEST:
def __init__(self,session,num=None,autocommit=True,Thing=None):
self.Thing=Thing
self.num=num
self.session=session
self.records=[] #this is the output
self.setup()
self.make_all()
if autocommit:
self.commit()
self.test_delete()
if autocommit:
self.commit()
self.tests()
def make_date(self):
from datetime import date,timedelta
num=self.num
seed=date.today()
days=np.random.randint(365*15,365*100,num) #historical dates not supported in the test
        deltas=[timedelta(days=int(d)) for d in days] #fortunately timedelta defaults to days so I don't have to read the documentation for map
return [seed - delta for delta in deltas]
def make_datetime(self,num=None,years=5):
from datetime import datetime,timedelta
if not num:
num=self.num
seed=datetime.now()
days=np.random.randint(0,365*years,num) #historical dates not supported in the test
hours=np.random.randint(0,12,num) #historical dates not supported in the test
        deltas=[timedelta(days=int(d),hours=int(h)) for d,h in zip(days,hours)] #fortunately timedelta defaults to days so I don't have to read the documentation for map
return [seed - delta for delta in deltas]
def make_sex(self):
num=self.num
#sex_seed=np.random.choice(2,num,.52)
#sex_seed=np.ones(num)
sex_arr=np.array(list('m'*num))
sex_arr[:int(num/2)]='f'
return sex_arr
def make_NONE(self,*arrays): #FIXME very broken for strings
noneArr=[]
num_nones=int(self.num/5)+1
[noneArr.append(None) for i in range(num_nones)]
noneArr=np.array(noneArr)
for array in arrays:
#array=np.array(array)
array[:num_nones]=noneArr
printD([n for n in array])
np.random.shuffle(array)
#methods every class should have
def setup(self):
#self.Thing
query=self.session.query(Mouse)
if not query.count():
pass
def make_all(self):
pass
def tests(self):
"things to do once the things are made"
pass
def test_delete(self):
pass
def commit(self): #XXX now flush, but hey...
assert self.records, 'self.records is empty!!!!'
self.session.add_all(self.records)
self.session.flush()
###--------
### people
###--------
class t_people(TEST):
def make_name(self,names_per=1):
num=self.num
names=open('names.txt')
nlist=[]
while 1:
name=names.readline()
if name:
nlist.append(name[:-1])
else:
break
num=len(nlist)
#FIXME lol broekn!!!! though if I test w/ more than 5000 names..
all_names=[np.random.permutation(nlist)[:num] for i in range(names_per)]
return all_names
def make_role(self):
num=self.num
roles=['wanker','narf','pi','turd','subject','gradstudent','postdoc']
        #for the record, putting subjects in with everyone else is TOTALLY a HIPAA violation
rollseed=np.random.choice(len(roles),num)
out=[roles[i] for i in rollseed]
return out
def make_all(self):
num=self.num
pfns, fns, mns, lns = self.make_name(4)
#print(pfns[num],fns[num],mns[num],lns[num])
genders=self.make_sex()
#print(genders)
birthdates=self.make_date()
roles=self.make_role()
ntids=np.unique(np.int32(np.random.sample(num*2)*50000))[:num] #still broken
#ntids=np.random.randint(0,99999,num) #test for non unique
#ntids=list(ntids)
ntids=[int(n) for n in ntids]
#self.make_NONE(pfns,fns,mns,lns,genders,birthdates,roles,ntids) #BROKEN
self.records=[]
self.records+=[Person(PrefixName=pfns[i],
FirstName=fns[i],
MiddleName=mns[i],
LastName=lns[i],
neurotree_id=ntids[i],
Birthdate=birthdates[i]) for i in range(8,num)]
self.records+=[Person(FirstName=fns[i],
MiddleName=mns[i],
LastName=lns[i],
neurotree_id=ntids[i],
Birthdate=birthdates[i]) for i in range(1)]
self.records+=[Person(PrefixName=pfns[i],
MiddleName=mns[i],
LastName=lns[i],
neurotree_id=ntids[i],
Birthdate=birthdates[i]) for i in range(1,2)]
self.records+=[Person(PrefixName=pfns[i],
FirstName=fns[i],
LastName=lns[i],
neurotree_id=ntids[i],
Birthdate=birthdates[i]) for i in range(2,3)]
self.records+=[Person(PrefixName=pfns[i],
FirstName=fns[i],
MiddleName=mns[i],
neurotree_id=ntids[i],
Birthdate=birthdates[i]) for i in range(3,4)]
self.records+=[Person(PrefixName=pfns[i],
FirstName=fns[i],
MiddleName=mns[i],
LastName=lns[i],
neurotree_id=ntids[i],
Birthdate=birthdates[i]) for i in range(4,5)]
self.records+=[Person(PrefixName=pfns[i],
FirstName=fns[i],
MiddleName=mns[i],
LastName=lns[i],
neurotree_id=ntids[i],
Birthdate=birthdates[i]) for i in range(5,6)]
self.records+=[Person(PrefixName=pfns[i],
FirstName=fns[i],
MiddleName=mns[i],
LastName=lns[i],
Birthdate=birthdates[i]) for i in range(6,7)]
self.records+=[Person(PrefixName=pfns[i],
FirstName=fns[i],
MiddleName=mns[i],
LastName=lns[i],
neurotree_id=ntids[i]) for i in range(7,8)]
def query(self):
printD([p for p in self.session.query(Person)])
###------
### Mice
###------
'''
class t_dob(TEST):
def __init__(self,session,num=None,datetimes=None):
self.datetimes=datetimes
super().__init__(session,num)
def make_all(self):
if not self.datetimes:
dts=self.make_datetime(years=2)
self.records=[DOB(d) for d in dts]
else:
self.records=[DOB(d) for d in self.datetimes]
class t_breeders(TEST):
"""makes n pairs of breeders"""
def make_all(self):
mice=t_mice(self.session,4*self.num)
sires=self.session.query(Mouse).filter(Mouse.sex_id=='m')
dams=self.session.query(Mouse).filter(Mouse.sex_id=='f')
self.records=[Sire(sire) for sire in sires[:self.num]]+[Dam(dam) for dam in dams[:self.num]]
'''
class t_mating_record(TEST):
def setup(self):
#self.sires=[s for s in hasKVPair(self.session,Mouse,'sex','m')]
#self.dams=[d for d in hasKVPair(self.session,Mouse,'sex','f')]
self.sires=self.session.query(Mouse).filter_by(sex_id='m').all()
self.dams=self.session.query(Mouse).filter_by(sex_id='f').all()
strain=self.session.query(Strain)[0]
s=[Mouse(sex_id='m',strain_id=strain) for i in range(self.num-len(self.sires))]
d=[Mouse(sex_id='f',strain_id=strain) for i in range(self.num-len(self.dams))]
self.session.add_all(s+d)
self.session.flush()
self.sires.extend(s)
self.dams.extend(d)
def make_all(self):
from datetime import datetime,timedelta
sire_arr=np.random.choice(len(self.sires),self.num)
dam_arr=np.random.choice(len(self.dams),self.num)
mins=np.random.randint(-60,60,self.num)
now=datetime.now()
type_=self.session.query(ExperimentType).filter_by(name='Mating Record')[0]
self.records=[Experiment(project_id=1,person_id=1,type_id=type_,Subjects=[self.sires[sire_arr[i]],self.dams[dam_arr[i]]],startDateTime=now+timedelta(hours=i),endDateTime=now+timedelta(hours=int(i)+12,minutes=int(mins[i]))) for i in range(self.num)]
class t_litters(TEST):
def make_all(self): #FIXME also need to test making without a MR
from datetime import timedelta
mrs=t_mating_record(self.session,self.num)
def getBD(exp,days=19):
durd2=(exp.endDateTime-exp.startDateTime)/2
conception=exp.startDateTime+durd2
return conception+timedelta(days)
self.records=[Litter(repro_experiment_id=mr,startDateTime=getBD(mr)) for mr in mrs.records]
def add_members(self):
mice=[] #FIXME there has to be a better way
#litter_sizes=np.random.randint(6,20,self.num) #randomize litter size
litter_sizes=np.int32(np.ones(self.num)*20)
#compare the two following methods: Second one seems faster, need to verify
#ms=[self.records[i].make_members(litter_sizes[i]) for i in range(self.num)]
#[mice.extend(m) for m in ms]
#self.session.add_all(mice)
strain=self.session.query(Strain)[0] #FIXME
for lit,i in zip(self.records,range(self.num)):
lit.children.extend([Mouse(repro_experiment_id=lit.repro_experiment_id,sex_id='u',strain_id=strain,startDateTime=lit.startDateTime) for i in range(litter_sizes[i])])
#VS
#[self.session.add_all(self.records[i].make_members(litter_sizes[i])) for i in range(self.num)]
self.session.commit()
class t_strain(TEST):
def make_all(self):
self.records=[Strain() for i in range(self.num)] #TODO itertools starmap
printD(self.records)
class t_mice(TEST):
def make_all(self):
#dobs=t_dob(self.session,self.num)
tags=np.random.randint(0,1000,self.num)
sexes=self.make_sex()
strain=self.session.query(Strain)[0]
dts=self.make_datetime(years=2)
self.records=[Mouse(Properties={'eartag':int(tags[i])},sex_id=sexes[i],strain_id=strain,startDateTime=dts[i]) for i in range(self.num)]
###--------------------
### subjects continued
###--------------------
class t_slice(TEST):
def make_all(self):
#preps=self.session.query(Experiment).filter(Experiment.type==self.session.query(ExperimentType).filter_by(name='acute slice prep'))[0]
preps=self.session.query(Experiment).join((ExperimentType,Experiment.type)).filter_by(name='acute slice prep').all()
self.records=[]
[[self.records.append(Slice(parent_id=prep.subjects[0],generating_experiment_id=prep,startDateTime=datetime.now()+timedelta(hours=i))) for i in range(self.num)] for prep in preps] #FIXME amplification of numbers
printD(self.records)
class t_cell(TEST):
def make_all(self):
slices=[s for s in self.session.query(Slice) if s.parent_id is not None]
assert slices, 'slices should not be empty here'
#printD([s.parent_id for s in slices])
#patches=[p for p in self.session.query(Experiment).filter_by(type='acute slice prep')]
#patches=[p for p in self.session.query(Experiment).filter(Experiment.type==self.session.query(ExperimentType).filter_by(name='acute slice prep')[0])] #FIXME clearly this expeirment type is wrong and I havent been catching it FIXME FIXME
patches=self.session.query(Experiment).join((ExperimentType,Experiment.type)).filter_by(name='in vitro patch').all()
assert patches, 'patches should not be empty here'
headstages=[h for h in self.session.query(Hardware).filter_by(type_id='headstage')][:2]
self.records=[]
z=0
for p in patches:
for i in range(z,len(slices)): #120 #FIXME pretty sure RI is broken here
s=slices[i]
for j in range(self.num):
self.records.extend([Cell(Hardware=[h],parent_id=s,Experiments=[p],generating_experiment_id=p) for h in headstages])
try:
if slices[i+1].parent_id != s.parent_id: #FIXME this should catch automatically when using session.add
z=i+1 #FIXME constraint!!!!
break
except IndexError: pass
#printD([c.experiments for c in self.records])
class t_c2c(TEST):
def make_all(self):
cells=self.session.query(Cell)
#circular link
self.records=[]
        self.records.extend([CellPairs(cells[i-2],cells[i]) for i in range(cells.count())]) #this adds triplets since a single row here is treated as simultaneous, INCIDENTALLY FIXME this is a problem because it means that a=b=c IS NOT TRUE on this set a=b b=c a!=c fuck
        #HOWEVER: this is less of an integrity concern than having to make two entries for each pair, for higher numbers of recordings I should probably do this as cell1 cell2 cell3 cell4 to prevent stupid combinatorial nightmares
#pairs
self.records.extend([CellPairs(cells[i],cells[i+1]) for i in range(0,cells.count()-1,2)])
#self.records.extend([CellPairs(cells[i+1],cells[i]) for i in range(0,cells.count()-1,2)])
###-------------
### experiments
###-------------
class t_project(TEST):
def make_all(self):
iacuc_protocol_id=None
blurb=None
self.records=[Project(lab='Scanziani',iacuc_protocol_id=iacuc_protocol_id,blurb=blurb) for n in range(self.num)]
count=0
def add_people(self): #has to be called after commit :/
people=t_people(self.session,100)
#HRM only queries can leverage the power of .filter
pis=[pi for pi in self.session.query(Person)]
pi_n=np.random.choice(len(pis),self.num)
#people=[p for p in self.session.query(Person)]
people_n=[np.random.permutation(people.records)[:np.random.randint(1,20)] for i in range(self.num)] #+pis[pi_n[i]]
assocs=[]
count=0
for rec,people in zip(self.records,people_n):
#assocs.append(person_to_project(rec,pis[pi_n[count]]))
assocs+=[person_to_project(rec,person) for person in people]
#[rec.people.append(person) for person in people] #FIXME somehow this no workey
count+=1
self.session.add_all(assocs)
self.session.commit()
class t_exptype(TEST):
def make_all(self):
self.records=[
ExperimentType(name='Mating Record',base_step_id=1),
ExperimentType(name='acute slice prep',abbrev='prep',base_step_id=2),
ExperimentType(name='in vitro patch',abbrev='patch',base_step_id=3),
]
class t_experiment(TEST):
def __init__(self,session,num=None,num_projects=None):
self.num_projects=num_projects
super().__init__(session,num)
def make_all(self):
#from time import sleep
projects=t_project(self.session,self.num_projects)
projects.add_people()
#projects.commit() #FIXME do I need to readd? or can I just commit directly?
lits=t_litters(self.session,1)
lits.add_members()
#lits.commit()
mice=[m for m in self.session.query(Mouse).filter(Mouse.breedingRec==None,Mouse.dod==None)] #FIXME
#mice=[m for m in self.session.query(Mouse).filter(Mouse.dod==None)]
self.records=[]
for p in projects.records:
#printD(p) #FIXME apparently p.__dict__ is not populated until AFTER you call the object...
#printD([t for t in p.__dict__.items()]) #FIXME what the fuck, sometimes this catches nothing!?
ms=[mice[i] for i in np.random.choice(len(mice),self.num)] #FIXME missing mouse
#TODO need to test with bad inputs
exps=[p.people[i] for i in np.random.choice(len(p.people),self.num)]
datetimes=self.make_datetime()
            exptype=self.session.query(ExperimentType).filter_by(name='in vitro patch')[0]
self.records+=[Experiment(project_id=p,Person=exps[i],startDateTime=datetimes[i],type_id=exptype) for i in range(self.num)] #FIXME lol this is going to reaveal experiments on mice that aren't even born yet hehe
class t_patch(TEST):
def make_all(self):
#mice=[m for m in self.session.query(Mouse).filter(Mouse.dod==None)]
preps=[p for p in self.session.query(Experiment).filter(Experiment.type==self.session.query(ExperimentType).filter_by(name='acute slice prep')[0])]
project=self.session.query(Project)[0]
person=self.session.query(Person)[0]
#acsf=self.session.query(Reagent).filter_by(type_id=2)[0] #FIXME these are terrible useage patterns
#internal=self.session.query(Reagent).filter_by(type_id=3)[0] #FIXME these are terrible useage patterns
#acsf=None
#internal=None
#self.session.add_all([acsf,internal])
#self.session.flush() #shit not working FIXME
self.session.commit()
exptype=self.session.query(ExperimentType).filter_by(abbrev='patch')[0]
self.records=[]
datetimes=self.make_datetime()
[self.records.extend([Experiment(type_id=exptype,project_id=project,person_id=person,Reagents=[],startDateTime=datetimes[i]) for i in range(self.num)]) for p in preps] #FIXME classic mouse not born yet problem
class t_sliceprep(TEST):
def make_all(self):
project=self.session.query(Project)[0]
person=self.session.query(Person)[0]
#sucrose=self.session.query(Reagent).filter_by(type_id=1)[0]
exptype=self.session.query(ExperimentType).filter_by(abbrev='prep')[0]
self.records=[Experiment(type_id=exptype,project_id=project,person_id=person,Reagents=[],startDateTime=datetime.now()-timedelta(int(np.random.randint(1)))) for i in range(self.num)] #FIXME need to find a way to propagate mouse w/ RI
def add_mice(self):
mice=self.session.query(Mouse).filter_by(sex_id='u')[:self.num]
#mice=[s for s in hasKVPair(self.session,Mouse,'sex','u')]
printD(len(mice))
printD(len(self.records))
np.random.shuffle(mice)
for i in range(self.num):
#mice[i].experiment_id=self.records[i].id
self.records[i].subjects.append(mice[i])
self.session.commit()
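# NOTE: this second t_patch definition shadows the t_patch class defined earlier in
# this module; only the later definition is visible to run_tests() below.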
class t_patch(TEST):
def make_all(self):
project=self.session.query(Project)[0]
person=self.session.query(Person)[0]
#sucrose=self.session.query(Reagent).filter_by(type_id=1)[0]
exptype=self.session.query(ExperimentType).filter_by(abbrev='patch')[0]
self.records=[Experiment(type_id=exptype,project_id=project,person_id=person,Reagents=[],startDateTime=datetime.now()-timedelta(int(np.random.randint(1)))) for i in range(self.num)] #FIXME need to find a way to propagate mouse w/ RI
#def add_mice(self): #add slices?
#mice=self.session.query(Mouse).filter_by(sex_id='u')[:self.num]
##mice=[s for s in hasKVPair(self.session,Mouse,'sex','u')]
#printD(len(mice))
#printD(len(self.records))
#np.random.shuffle(mice)
#for i in range(self.num):
#mice[i].experiment_id=self.records[i].id
#self.records[i].subjects.append(mice[i])
#self.session.commit()
###------
### data
###------
class t_repo(TEST):
def make_all(self):
self.records=[]
repos=(
'file:///C:/asdf/test1',
'file:///C:/asdf/test2//',
'file:///T:/db/Dropbox//',
'http://www.google.com/', #FIXME broken as expected?
'https://www.google.com/' #FIXME broken as expected?
)
for r in repos:
try:
self.records.append(Repository(url=r))
except:
#raise Warning('Local path \'%s\' does not exist!'%r)
print('Path \'%s\' does not exist!'%r)
#FIXME for some reason adding the fully inited Repository(url='asdf') inside the list didn't work...
#figure out why please?!
def tests(self):
self.commit()
repos=self.session.query(Repository).all()
for repo in repos:
[repo.mirrors_from_here.append(r) for r in repos if r!=repo]
print(repo.mirrors)
self.commit()
[print(r.mirrors) for r in repos]
class t_datafilesource(TEST):
def make_all(self):
self.records=[
DataFileSource(name='test',extension='data',docstring='wooo!'),
]
class t_metadatasource(TEST):
def make_all(self):
hw=self.session.query(Hardware).filter_by(name='the void')[0]
self.records=[
MetaDataSource(name='the void',prefix='T',unit='Pa',hardware_id=hw,docstring='yes I am nothing'),
]
class t_datafile(TEST):
#def __init__(self,session,num=None,num_experiments=None,num_projects=None):
#self.num_projects=num_projects
#self.num_experiments=num_experiments
#super().__init__(session,num)
def make_all(self):
repo=t_repo(self.session)
dfs=self.session.query(DataFileSource).filter_by(name='test')[0]
data=[]
#cells=self.session.query(Cell)
#for c1,c2 in zip(cells[:-1],cells[1:]):
subjects=self.session.query(Cell).filter(Cell.experiments.any()).all()
cells=self.session.query(Cell).all()
#printD(cells)
#printD([(subject,subject.experiments) for subject in subjects])
for subject in subjects:
#printD(subject.experiments)
for url in repo.records:
bn='exp%s_subs_%s_'%(subject.experiments[0].id,subject.id)
name=bn+'%s.data'
try:
data+=[DataFile(name%df,url,dfs,subject.experiments[0],
Subjects=[subject]) for df in range(self.num)] #FIXME this use pattern is clearly broken
except FileNotFoundError:
printD('some file was not found')
pass
#data+=[DataFile(Repo=rp,filename='exp%s_cells_%s_%s_%s.data'%(c1.experiments[0].id,c1.id,c2.id,df),Experiment=c1.experiments[0],DataSource=ds,Subjects=[c1,c2]) for df in range(self.num)]
self.records=data
class t_dfmetadata(TEST):
def make_all(self):
ds=self.session.query(MetaDataSource)[0]
self.records=[]
[self.records.extend([d.MetaData(i,parent_id=d,metadatasource_id=ds) for i in range(self.num)]) for d in self.session.query(DataFile)]
###-----------
### inventory
###-----------
class t_hardware(TEST):
def setup(self):
self.amps=[Hardware(type_id='amplifier',name='lolwut',Properties={'unique_id':'0012312'}),Hardware(type_id='amplifier',name='rudubme',Properties={'unique_id':'bob'})]
self.session.add_all(self.amps)
self.session.flush()
def make_all(self):
self.records=[]
[[self.records.append(Hardware(type_id='headstage',name='wut%s%s'%(amp.id,i),Properties={'unique_id':'%s%s'%(amp.id,i)}, parent_id=amp)) for i in range(2)] for amp in self.amps]
self.records.append(Hardware(type_id='digitizer',name='the void'))
#printD(self.records) #FIXME this whole make all is broken
class t_hwmetadata(TEST):
def make_all(self):
ds=self.session.query(MetaDataSource)[0] #TODO make sure this breaks, FIXME it breaks but not where expected...
self.records=[]
[self.records.extend([h.MetaData(i,h,ds) for i in range(self.num)]) for h in self.session.query(Hardware)]
class t_reagenttype(TEST):
def make_all(self):
self.records=[
ReagentType(name='poop'),
ReagentType(name='poop1'),
ReagentType(name='poop2')
]
class t_reagent(TEST):
def make_all(self):
rts=self.session.query(ReagentType)
self.records=[Reagent(Type=r) for r in rts]
###-------
### steps
###-------
class t_steps(TEST):
def make_all(self):
self.records.extend([Step(name='a%s'%i,dataio_id=1,docstring='') for i in range(self.num)])
class t_edges(TEST):
def make_all(self):
steps=self.session.query(Step).order_by(Step.id).all()
a=steps[0].id
b=a+1
c=b+1
def basic_tests():
failed=False
#cycle 1->1
try:
a1=StepEdge(a,a)
self.session.add(a1)
self.session.flush()
failed=True
except:
pass
self.session.query(StepEdge).all()
assert not failed, 'a==a check FAILED'
#basic add
a2=StepEdge(a,b) #OK
self.session.add(a2)
self.session.flush()
assert a2, 'basic test FAILED'
#cycle 1->2->1
try:
a3=StepEdge(b,a)
self.session.add(a3) #FIXME a3 still in records after delete!
self.session.flush()
failed=True
except:
pass
#printD(a3.__repr__())
assert not failed, 'circular 1-2-1 check FAILED'
#basic add 2 to add one more node to the cycle
a4=StepEdge(b,c) #OK
self.session.add(a4)
self.session.flush()
assert a4, 'basic test #2 FAILED'
#cycle from 1->2->3->1
try:
a5=StepEdge(c,a)
self.session.add(a5)
self.session.flush()
failed=True
except:
pass
assert not failed, 'circular 1-2-3-1 check FAILED'
def adv_tests():
se1=set(self.session.query(StepEdge).all())
assert se1 == se1 , 'A SET IS NOT EQUAL TO ITSELF RUNNNNNN!!!'
try:
[step.dependencies.update([steps[int(i)] for i in np.random.randint(0,100,20)]) for step in steps]
printD(self.session.new)
#[step.dependencies.update((steps[int(i)] for i in np.random.randint(0,100,20))) for step in steps]
except (ValueError, FlushError) as e:
if type(e) is FlushError:
printD('Rolling back!')
self.session.rollback()
printD(e)
printD(self.session.new)
self.session.flush()
self.session.expire_all()
se2=set(self.session.query(StepEdge).all())
assert se2 != se1, 'set used for update probably contained a duplicate'
try:
for i in range(20):
for step in steps:
step.dependencies.add(steps[int(np.random.randint(100))])
printD(self.session.new)
except (ValueError, FlushError) as e:
if type(e) is FlushError:
printD('Rolling back!')
self.session.rollback()
printD(e)
printD(self.session.new)
self.session.flush()
se3=set(self.session.query(StepEdge).all())
assert se3 != se2
for i in range(100): #FIXME somehow all this stuff really does not work well with the versioning
a,b=(steps[int(i)] for i in np.random.randint(0,len(steps),2))
try:
self.session.add(StepEdge(a,b))
self.session.flush()
except (ValueError, FlushError) as e:
printD(e)
#self.session.rollback() #FIXME <<< this is what causes all the good edges to get zapped
se4=set(self.session.query(StepEdge).all())
assert se4 != se3
printD('Num StepEdges',len(se4)) #FIXME this is patently wrong
def custom():
start=a
for i in range(0,10):
self.session.add(StepEdge(start+i,start+i+1))
self.session.flush()
for i in range(2,10):
self.session.add(StepEdge(start+i,start+i+12))
self.session.flush()
for i in range(4,10):
self.session.add(StepEdge(start+i,start+i+25))
self.session.flush()
self.session.add(StepEdge(start,14))
self.session.flush()
self.session.add(StepEdge(start+7,start+9))
self.session.flush()
#basic_tests()
custom()
#adv_tests()
def commit(self):
self.session.commit()
#todo test a double cycle and a split tree
def test_delete(self):
printD('running delete tests!')
#edges=self.session.query(StepEdge).all()
#[self.session.delete(edge) for edge in edges]
pass
def run_tests(session):
#FIXME for some reason running these sequentially causes all sorts of problems...
#RESPONSE turns out it is because I'm trying to make EXACTLY the same tables again and an identical mapped instance already exists
#so it doesnt happen with people, but a collision *could* happen
#FIXME the real test will be to vary the number of projects, experiments and datafiles
#compare these two cases with profiler
#d=t_datafile(session,5000,2,4) #add 1000 datafiles to 3 projects each with 10 experiments takes about 16 seconds, I'd say we're ok here
#d=t_datafile(session,20,500,4) #add 1000 datafiles to 3 projects each with 10 experiments takes about 16 seconds, I'd say we're ok here
#d=t_datafile(session,10,50,4)
#[print(df.creation_DateTime) for df in session.query(DataFile)]
if session.connection().engine.url.database != 'db_test':
return None
t_strain(session,2)
t_steps(session,3)
#t_edges(session)
expt=t_exptype(session)
hw=t_hardware(session)
ds=t_datafilesource(session)
mds=t_metadatasource(session)
#h=t_hardware(session)
hwmd=t_hwmetadata(session,5)
#t_experiment(session,1,4) #FIXME argh, so many things can become inconsistent...
t_people(session,20)
t_project(session,1)
t_mice(session,20)
l=t_litters(session,20)
l.add_members()
rt=t_reagenttype(session)
#i=t_reagent(session)
sp=t_sliceprep(session,5)
sp.add_mice()
p=t_patch(session,1) #FIXME you know it might be good to force a new exp rec when any of the variables changes... like the internal...? think think
s=t_slice(session,4)
pa=t_patch(session,2)
c=t_cell(session,5)
#c2c=t_c2c(session) #no longer used
d=t_datafile(session,1)#,2,1) #FIXME eating memory
dfmd=t_dfmetadata(session,1) #as in 8 gigs of memory...
session.commit()
#l=t_litters(session,20) #FIXME another wierd error here... saying that I tried to add a mouse as a breeder twice... hrm...
#l.add_members()
#printD([m for m in session.query(Mouse)]) #FIXME mice arent getting made?
def main():
pass
if __name__=='__main__':
main()
|
py | 1a4550d2ac9ef51dac6aa96bde0f85485d69d968 | #
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import base64
from zoo.serving.client import InputQueue, OutputQueue, http_json_to_ndarray
import os
resource_path = os.path.join(os.path.split(__file__)[0], "../resources")
class TestSerialization:
def test_encode(self):
input_api = InputQueue()
b64 = input_api.data_to_b64(t1=np.array([1, 2]), t2=np.array([3, 4]))
byte = base64.b64decode(b64)
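        # sanity check (added): round-tripping through base64 should give a non-empty payload
        assert byte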
def test_http_response_to_ndarray(self):
with open(os.path.join(resource_path, "serving/http_response")) as f:
data = f.read()
arr = http_json_to_ndarray(data)
assert isinstance(arr, np.ndarray)
assert len(arr.shape) == 1
assert arr.shape[0] == 128
|
py | 1a45516b774562110f07e43f6b3df3eba1c0902b | #!/usr/bin/env python
# encoding: utf-8
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from django_extensions.db.fields import AutoSlugField
from v1.recipe.models import Recipe
class GroceryList(models.Model):
"""
The GroceryList is the core of list app.
It offers a home to many GroceryItems.
title = The name of the GroceryList.
slug = The HTML safe name of the GroceryList.
author = The User who created the GroceryList.
pub_date = The date that the GroceryList was created on.
"""
title = models.CharField(_("grocery list title"), max_length=250)
slug = AutoSlugField(_('slug'), populate_from='title')
author = models.ForeignKey(User, verbose_name=_('user'))
pub_date = models.DateTimeField(auto_now_add=True)
class Meta:
ordering = ['pub_date']
def __unicode__(self):
return '%s' % self.title
def item_count(self):
"""get the number of items in the list"""
return GroceryItem.objects.filter(list=self).count()
class GroceryItem(models.Model):
"""
The GroceryItem is an item on a GroceryList.
list = The GroceryList that owns the GroceryItem.
title = The name of the GroceryItem.
completed = Whether or not the GroceryItem has been purchased or
added to the users shopping cart in the supermarket.
"""
list = models.ForeignKey(GroceryList, verbose_name=_('grocery_list'), related_name='items')
title = models.CharField(_("title"), max_length=550)
completed = models.BooleanField(_("completed"), default=False)
class Meta:
ordering = ['pk']
def __unicode__(self):
return '%s' % self.title
class GroceryShared(models.Model):
"""
Determines whether or not a GroceryList is shared to another user.
Shared lists allow other uses to add/delete/edit the GroceryList.
list = The GroceryList to be shared.
shared_by = The User that shared the List.
shared_to = The User that is given access to a GroceryList.
"""
list = models.ForeignKey(GroceryList, verbose_name=_('grocery list'))
shared_by = models.ForeignKey(User, verbose_name=_('shared by'), related_name="shared_by")
shared_to = models.ForeignKey(User, verbose_name=_('shared to'), related_name="shared_to")
def __unicode__(self):
return '%s' % self.list.title
class GroceryRecipe(models.Model):
"""
This model links a GroceryList to a Recipe.
list = The GroceryList has holds the Recipe.
recipe = The Recipe that is on a GroceryList.
"""
list = models.ForeignKey(GroceryList, verbose_name=_('grocery list'))
recipe = models.ForeignKey(Recipe, verbose_name=_('recipe'))
def __unicode__(self):
return '%s' % self.recipe.title
|
py | 1a4551a515a05cc080b67f3ade7322e85ee2d23b | """
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Callable, Coroutine, Dict, Generic, Optional, Tuple, Type, TypeVar
from ..interactions import Interaction
__all__ = ("Item",)
if TYPE_CHECKING:
from ..components import Component
from ..enums import ComponentType
from .view import View
ItemT = TypeVar("ItemT", bound="Item")
ViewT = TypeVar("ViewT", bound="View", covariant=True)
ItemCallbackType = Callable[[Any, ItemT, Interaction], Coroutine[Any, Any, Any]]
class Item(Generic[ViewT]):
"""Represents the base UI item that all UI components inherit from.
The current UI items supported are:
- :class:`discord.ui.Button`
- :class:`discord.ui.Select`
.. versionadded:: 2.0
"""
__item_repr_attributes__: Tuple[str, ...] = ("row",)
def __init__(self):
self._view: Optional[ViewT] = None
self._row: Optional[int] = None
self._rendered_row: Optional[int] = None
# This works mostly well but there is a gotcha with
# the interaction with from_component, since that technically provides
# a custom_id most dispatchable items would get this set to True even though
# it might not be provided by the library user. However, this edge case doesn't
# actually affect the intended purpose of this check because from_component is
# only called upon edit and we're mainly interested during initial creation time.
self._provided_custom_id: bool = False
def to_component_dict(self) -> Dict[str, Any]:
raise NotImplementedError
def refresh_component(self, component: Component) -> None:
return None
def refresh_state(self, interaction: Interaction) -> None:
return None
@classmethod
def from_component(cls: Type[ItemT], component: Component) -> ItemT:
return cls()
@property
def type(self) -> ComponentType:
raise NotImplementedError
def is_dispatchable(self) -> bool:
return False
def is_persistent(self) -> bool:
return self._provided_custom_id
def __repr__(self) -> str:
attrs = " ".join(f"{key}={getattr(self, key)!r}" for key in self.__item_repr_attributes__)
return f"<{self.__class__.__name__} {attrs}>"
@property
def row(self) -> Optional[int]:
return self._row
@row.setter
def row(self, value: Optional[int]):
if value is None:
self._row = None
elif 5 > value >= 0:
self._row = value
else:
raise ValueError("row cannot be negative or greater than or equal to 5")
@property
def width(self) -> int:
return 1
@property
def view(self) -> Optional[ViewT]:
"""Optional[:class:`View`]: The underlying view for this item."""
return self._view
async def callback(self, interaction: Interaction):
"""|coro|
The callback associated with this UI item.
        This can be overridden by subclasses.
Parameters
-----------
interaction: :class:`.Interaction`
The interaction that triggered this UI item.
"""
pass
|
py | 1a4551b6cacb7a52cddd7598268ba58bd93459cb | # Copyright (c) 2014-2016, ConfigSpace developers
# Matthias Feurer
# Katharina Eggensperger
# and others (see commit history).
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from io import StringIO
import os
import tempfile
import unittest
from ConfigSpace.configuration_space import ConfigurationSpace
import ConfigSpace.read_and_write.pcs as pcs
import ConfigSpace.read_and_write.pcs_new as pcs_new
from ConfigSpace.hyperparameters import CategoricalHyperparameter, \
UniformIntegerHyperparameter, UniformFloatHyperparameter, OrdinalHyperparameter
from ConfigSpace.conditions import EqualsCondition, InCondition, \
AndConjunction, OrConjunction, NotEqualsCondition, \
GreaterThanCondition
from ConfigSpace.forbidden import ForbiddenInClause, ForbiddenAndConjunction
# More complex search space
classifier = CategoricalHyperparameter("classifier", ["svm", "nn"])
kernel = CategoricalHyperparameter("kernel", ["rbf", "poly", "sigmoid"])
kernel_condition = EqualsCondition(kernel, classifier, "svm")
C = UniformFloatHyperparameter("C", 0.03125, 32768, log=True)
C_condition = EqualsCondition(C, classifier, "svm")
gamma = UniformFloatHyperparameter("gamma", 0.000030518, 8, log=True)
gamma_condition = EqualsCondition(gamma, kernel, "rbf")
degree = UniformIntegerHyperparameter("degree", 1, 5)
degree_condition = InCondition(degree, kernel, ["poly", "sigmoid"])
neurons = UniformIntegerHyperparameter("neurons", 16, 1024)
neurons_condition = EqualsCondition(neurons, classifier, "nn")
lr = UniformFloatHyperparameter("lr", 0.0001, 1.0)
lr_condition = EqualsCondition(lr, classifier, "nn")
preprocessing = CategoricalHyperparameter("preprocessing", ["None", "pca"])
conditional_space = ConfigurationSpace()
conditional_space.add_hyperparameter(preprocessing)
conditional_space.add_hyperparameter(classifier)
conditional_space.add_hyperparameter(kernel)
conditional_space.add_hyperparameter(C)
conditional_space.add_hyperparameter(neurons)
conditional_space.add_hyperparameter(lr)
conditional_space.add_hyperparameter(degree)
conditional_space.add_hyperparameter(gamma)
conditional_space.add_condition(C_condition)
conditional_space.add_condition(kernel_condition)
conditional_space.add_condition(lr_condition)
conditional_space.add_condition(neurons_condition)
conditional_space.add_condition(degree_condition)
conditional_space.add_condition(gamma_condition)
float_a = UniformFloatHyperparameter("float_a", -1.23, 6.45)
e_float_a = UniformFloatHyperparameter("e_float_a", .5E-2, 4.5e+06)
int_a = UniformIntegerHyperparameter("int_a", -1, 6)
log_a = UniformFloatHyperparameter("log_a", 4e-1, 6.45, log=True)
int_log_a = UniformIntegerHyperparameter("int_log_a", 1, 6, log=True)
cat_a = CategoricalHyperparameter("cat_a", ["a", "b", "c", "d"])
crazy = CategoricalHyperparameter(r"@.:;/\?!$%&_-<>*+1234567890", ["const"])
easy_space = ConfigurationSpace()
easy_space.add_hyperparameter(float_a)
easy_space.add_hyperparameter(e_float_a)
easy_space.add_hyperparameter(int_a)
easy_space.add_hyperparameter(log_a)
easy_space.add_hyperparameter(int_log_a)
easy_space.add_hyperparameter(cat_a)
easy_space.add_hyperparameter(crazy)
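# Note: the tests below exercise two PCS dialects. The "old" syntax encodes a
# hyperparameter as `name [lower, upper] [default]` with optional i/l flags
# (e.g. `int_log_a [1, 6] [2]il`), while the "new" syntax spells the type out
# (e.g. `int_log_a integer [1, 6] [2]log`).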
class TestPCSConverter(unittest.TestCase):
def setUp(self):
self.maxDiff = None
def test_read_configuration_space_basic(self):
# TODO: what does this test has to do with the PCS converter?
float_a_copy = UniformFloatHyperparameter("float_a", -1.23, 6.45)
a_copy = {"a": float_a_copy, "b": int_a}
a_real = {"b": int_a, "a": float_a}
self.assertDictEqual(a_real, a_copy)
'''
Tests for the "older pcs" version
'''
def test_read_configuration_space_easy(self):
expected = StringIO()
expected.write('# This is a \n')
expected.write(' # This is a comment with a leading whitespace ### ffds \n')
expected.write('\n')
expected.write('float_a [-1.23, 6.45] [2.61] # bla\n')
expected.write('e_float_a [.5E-2, 4.5e+06] [2250000.0025]\n')
expected.write('int_a [-1, 6] [2]i\n')
expected.write('log_a [4e-1, 6.45] [1.6062378404]l\n')
expected.write('int_log_a [1, 6] [2]il\n')
expected.write('cat_a {a,"b",c,d} [a]\n')
expected.write(r'@.:;/\?!$%&_-<>*+1234567890 {"const"} ["const"]\n')
expected.seek(0)
cs = pcs.read(expected)
self.assertEqual(cs, easy_space)
def test_read_configuration_space_conditional(self):
# More complex search space as string array
complex_cs = list()
complex_cs.append("preprocessing {None, pca} [None]")
complex_cs.append("classifier {svm, nn} [svm]")
complex_cs.append("kernel {rbf, poly, sigmoid} [rbf]")
complex_cs.append("C [0.03125, 32768] [32]l")
complex_cs.append("neurons [16, 1024] [520]i # Should be Q16")
complex_cs.append("lr [0.0001, 1.0] [0.50005]")
complex_cs.append("degree [1, 5] [3]i")
complex_cs.append("gamma [0.000030518, 8] [0.0156251079996]l")
complex_cs.append("C | classifier in {svm}")
complex_cs.append("kernel | classifier in {svm}")
complex_cs.append("lr | classifier in {nn}")
complex_cs.append("neurons | classifier in {nn}")
complex_cs.append("degree | kernel in {poly, sigmoid}")
complex_cs.append("gamma | kernel in {rbf}")
cs = pcs.read(complex_cs)
self.assertEqual(cs, conditional_space)
def test_read_configuration_space_conditional_with_two_parents(self):
config_space = list()
config_space.append("@1:0:restarts {F,L,D,x,+,no}[x]")
config_space.append("@1:S:Luby:aryrestarts {1,2}[1]")
config_space.append("@1:2:Luby:restarts [1,65535][1000]il")
config_space.append("@1:2:Luby:restarts | @1:0:restarts in {L}")
config_space.append("@1:2:Luby:restarts | @1:S:Luby:aryrestarts in {2}")
cs = pcs.read(config_space)
self.assertEqual(len(cs.get_conditions()), 1)
self.assertIsInstance(cs.get_conditions()[0], AndConjunction)
def test_write_illegal_argument(self):
sp = {"a": int_a}
self.assertRaisesRegex(TypeError, r"pcs_parser.write expects an "
r"instance of "
r"<class "
r"'ConfigSpace.configuration_"
r"space.ConfigurationSpace'>, you provided "
r"'<(type|class) 'dict'>'", pcs.write, sp)
def test_write_int(self):
expected = "int_a [-1, 6] [2]i"
cs = ConfigurationSpace()
cs.add_hyperparameter(int_a)
value = pcs.write(cs)
self.assertEqual(expected, value)
def test_write_log_int(self):
expected = "int_log_a [1, 6] [2]il"
cs = ConfigurationSpace()
cs.add_hyperparameter(int_log_a)
value = pcs.write(cs)
self.assertEqual(expected, value)
def test_write_q_int(self):
expected = "Q16_int_a [16, 1024] [520]i"
cs = ConfigurationSpace()
cs.add_hyperparameter(
UniformIntegerHyperparameter("int_a", 16, 1024, q=16))
value = pcs.write(cs)
self.assertEqual(expected, value)
def test_write_q_float(self):
expected = "Q16_float_a [16.0, 1024.0] [520.0]"
cs = ConfigurationSpace()
cs.add_hyperparameter(
UniformFloatHyperparameter("float_a", 16, 1024, q=16))
value = pcs.write(cs)
self.assertEqual(expected, value)
def test_write_log10(self):
expected = "a [10.0, 1000.0] [100.0]l"
cs = ConfigurationSpace()
cs.add_hyperparameter(
UniformFloatHyperparameter("a", 10, 1000, log=True))
value = pcs.write(cs)
self.assertEqual(expected, value)
def test_build_forbidden(self):
expected = "a {a, b, c} [a]\nb {a, b, c} [c]\n\n" \
"{a=a, b=a}\n{a=a, b=b}\n{a=b, b=a}\n{a=b, b=b}"
cs = ConfigurationSpace()
a = CategoricalHyperparameter("a", ["a", "b", "c"], "a")
b = CategoricalHyperparameter("b", ["a", "b", "c"], "c")
cs.add_hyperparameter(a)
cs.add_hyperparameter(b)
fb = ForbiddenAndConjunction(ForbiddenInClause(a, ["a", "b"]),
ForbiddenInClause(b, ["a", "b"]))
cs.add_forbidden_clause(fb)
value = pcs.write(cs)
self.assertIn(expected, value)
"""
Tests for the "newer pcs" version in order to check
if both deliver the same results
"""
def test_read_new_configuration_space_easy(self):
expected = StringIO()
expected.write('# This is a \n')
expected.write(' # This is a comment with a leading whitespace ### ffds \n')
expected.write('\n')
expected.write('float_a real [-1.23, 6.45] [2.61] # bla\n')
expected.write('e_float_a real [.5E-2, 4.5e+06] [2250000.0025]\n')
expected.write('int_a integer [-1, 6] [2]\n')
expected.write('log_a real [4e-1, 6.45] [1.6062378404]log\n')
expected.write('int_log_a integer [1, 6] [2]log\n')
expected.write('cat_a categorical {a,"b",c,d} [a]\n')
expected.write(r'@.:;/\?!$%&_-<>*+1234567890 categorical {"const"} ["const"]\n')
expected.seek(0)
cs = pcs_new.read(expected)
self.assertEqual(cs, easy_space)
def test_read_new_configuration_space_conditional(self):
# More complex search space as string array
complex_cs = list()
complex_cs.append("preprocessing categorical {None, pca} [None]")
complex_cs.append("classifier categorical {svm, nn} [svm]")
complex_cs.append("kernel categorical {rbf, poly, sigmoid} [rbf]")
complex_cs.append("C real [0.03125, 32768] [32]log")
complex_cs.append("neurons integer [16, 1024] [520] # Should be Q16")
complex_cs.append("lr real [0.0001, 1.0] [0.50005]")
complex_cs.append("degree integer [1, 5] [3]")
complex_cs.append("gamma real [0.000030518, 8] [0.0156251079996]log")
complex_cs.append("C | classifier in {svm}")
complex_cs.append("kernel | classifier in {svm}")
complex_cs.append("lr | classifier in {nn}")
complex_cs.append("neurons | classifier in {nn}")
complex_cs.append("degree | kernel in {poly, sigmoid}")
complex_cs.append("gamma | kernel in {rbf}")
cs_new = pcs_new.read(complex_cs)
self.assertEqual(cs_new, conditional_space)
# same in older version
complex_cs_old = list()
complex_cs_old.append("preprocessing {None, pca} [None]")
complex_cs_old.append("classifier {svm, nn} [svm]")
complex_cs_old.append("kernel {rbf, poly, sigmoid} [rbf]")
complex_cs_old.append("C [0.03125, 32768] [32]l")
complex_cs_old.append("neurons [16, 1024] [520]i # Should be Q16")
complex_cs_old.append("lr [0.0001, 1.0] [0.50005]")
complex_cs_old.append("degree [1, 5] [3]i")
complex_cs_old.append("gamma [0.000030518, 8] [0.0156251079996]l")
complex_cs_old.append("C | classifier in {svm}")
complex_cs_old.append("kernel | classifier in {svm}")
complex_cs_old.append("lr | classifier in {nn}")
complex_cs_old.append("neurons | classifier in {nn}")
complex_cs_old.append("degree | kernel in {poly, sigmoid}")
complex_cs_old.append("gamma | kernel in {rbf}")
cs_old = pcs.read(complex_cs_old)
self.assertEqual(cs_old, cs_new)
def test_write_new_illegal_argument(self):
sp = {"a": int_a}
self.assertRaisesRegex(TypeError,
r"pcs_parser.write expects an "
r"instance of "
r"<class "
r"'ConfigSpace.configuration_"
r"space.ConfigurationSpace'>, you provided "
r"'<(type|class) 'dict'>'", pcs_new.write, sp)
def test_write_new_int(self):
expected = "int_a integer [-1, 6] [2]"
cs = ConfigurationSpace()
cs.add_hyperparameter(int_a)
value = pcs_new.write(cs)
self.assertEqual(expected, value)
def test_write_new_log_int(self):
expected = "int_log_a integer [1, 6] [2]log"
cs = ConfigurationSpace()
cs.add_hyperparameter(int_log_a)
value = pcs_new.write(cs)
self.assertEqual(expected, value)
def test_write_new_q_int(self):
expected = "Q16_int_a integer [16, 1024] [520]"
cs = ConfigurationSpace()
cs.add_hyperparameter(
UniformIntegerHyperparameter("int_a", 16, 1024, q=16))
value = pcs_new.write(cs)
self.assertEqual(expected, value)
def test_write_new_q_float(self):
expected = "Q16_float_a real [16.0, 1024.0] [520.0]"
cs = ConfigurationSpace()
cs.add_hyperparameter(
UniformFloatHyperparameter("float_a", 16, 1024, q=16))
value = pcs_new.write(cs)
self.assertEqual(expected, value)
def test_write_new_log10(self):
expected = "a real [10.0, 1000.0] [100.0]log"
cs = ConfigurationSpace()
cs.add_hyperparameter(
UniformFloatHyperparameter("a", 10, 1000, log=True))
value = pcs_new.write(cs)
self.assertEqual(expected, value)
def test_build_new_forbidden(self):
expected = "a categorical {a, b, c} [a]\nb categorical {a, b, c} [c]\n\n" \
"{a=a, b=a}\n{a=a, b=b}\n{a=b, b=a}\n{a=b, b=b}\n"
cs = ConfigurationSpace()
a = CategoricalHyperparameter("a", ["a", "b", "c"], "a")
b = CategoricalHyperparameter("b", ["a", "b", "c"], "c")
cs.add_hyperparameter(a)
cs.add_hyperparameter(b)
fb = ForbiddenAndConjunction(ForbiddenInClause(a, ["a", "b"]),
ForbiddenInClause(b, ["a", "b"]))
cs.add_forbidden_clause(fb)
value = pcs_new.write(cs)
self.assertEqual(expected, value)
def test_build_new_GreaterThanFloatCondition(self):
expected = "b integer [0, 10] [5]\n" \
"a real [0.0, 1.0] [0.5]\n\n" \
"a | b > 5"
cs = ConfigurationSpace()
a = UniformFloatHyperparameter("a", 0, 1, 0.5)
b = UniformIntegerHyperparameter("b", 0, 10, 5)
cs.add_hyperparameter(a)
cs.add_hyperparameter(b)
cond = GreaterThanCondition(a, b, 5)
cs.add_condition(cond)
value = pcs_new.write(cs)
self.assertEqual(expected, value)
expected = "b real [0.0, 10.0] [5.0]\n" \
"a real [0.0, 1.0] [0.5]\n\n" \
"a | b > 5"
cs = ConfigurationSpace()
a = UniformFloatHyperparameter("a", 0, 1, 0.5)
b = UniformFloatHyperparameter("b", 0, 10, 5)
cs.add_hyperparameter(a)
cs.add_hyperparameter(b)
cond = GreaterThanCondition(a, b, 5)
cs.add_condition(cond)
value = pcs_new.write(cs)
self.assertEqual(expected, value)
def test_build_new_GreaterThanIntCondition(self):
expected = "a real [0.0, 1.0] [0.5]\n" \
"b integer [0, 10] [5]\n\n" \
"b | a > 0.5"
cs = ConfigurationSpace()
a = UniformFloatHyperparameter("a", 0, 1, 0.5)
b = UniformIntegerHyperparameter("b", 0, 10, 5)
cs.add_hyperparameter(a)
cs.add_hyperparameter(b)
cond = GreaterThanCondition(b, a, 0.5)
cs.add_condition(cond)
value = pcs_new.write(cs)
self.assertEqual(expected, value)
expected = "a integer [0, 10] [5]\n" \
"b integer [0, 10] [5]\n\n" \
"b | a > 5"
cs = ConfigurationSpace()
a = UniformIntegerHyperparameter("a", 0, 10, 5)
b = UniformIntegerHyperparameter("b", 0, 10, 5)
cs.add_hyperparameter(a)
cs.add_hyperparameter(b)
cond = GreaterThanCondition(b, a, 5)
cs.add_condition(cond)
value = pcs_new.write(cs)
self.assertEqual(expected, value)
def test_read_new_configuration_space_complex_conditionals(self):
classi = OrdinalHyperparameter(
"classi",
["random_forest", "extra_trees", "k_nearest_neighbors", "something"],
)
knn_weights = CategoricalHyperparameter("knn_weights", ["uniform", "distance"])
weather = OrdinalHyperparameter("weather", ["sunny", "rainy", "cloudy", "snowing"])
temperature = CategoricalHyperparameter("temperature", ["high", "low"])
rain = CategoricalHyperparameter("rain", ["yes", "no"])
gloves = OrdinalHyperparameter("gloves", ["none", "yarn", "leather", "gortex"])
heur1 = CategoricalHyperparameter("heur1", ["off", "on"])
heur2 = CategoricalHyperparameter("heur2", ["off", "on"])
heur_order = CategoricalHyperparameter("heur_order", ["heur1then2", "heur2then1"])
gloves_condition = OrConjunction(EqualsCondition(gloves, rain, "yes"),
EqualsCondition(gloves, temperature, "low"))
heur_condition = AndConjunction(EqualsCondition(heur_order, heur1, "on"),
EqualsCondition(heur_order, heur2, "on"))
and_conjunction = AndConjunction(NotEqualsCondition(knn_weights, classi, "extra_trees"),
EqualsCondition(knn_weights, classi, "random_forest"))
Cl_condition = OrConjunction(EqualsCondition(knn_weights, classi, "k_nearest_neighbors"),
and_conjunction,
EqualsCondition(knn_weights, classi, "something"))
and1 = AndConjunction(EqualsCondition(temperature, weather, "rainy"),
EqualsCondition(temperature, weather, "cloudy"))
and2 = AndConjunction(EqualsCondition(temperature, weather, "sunny"),
NotEqualsCondition(temperature, weather, "snowing"))
another_condition = OrConjunction(and1, and2)
complex_conditional_space = ConfigurationSpace()
complex_conditional_space.add_hyperparameter(classi)
complex_conditional_space.add_hyperparameter(knn_weights)
complex_conditional_space.add_hyperparameter(weather)
complex_conditional_space.add_hyperparameter(temperature)
complex_conditional_space.add_hyperparameter(rain)
complex_conditional_space.add_hyperparameter(gloves)
complex_conditional_space.add_hyperparameter(heur1)
complex_conditional_space.add_hyperparameter(heur2)
complex_conditional_space.add_hyperparameter(heur_order)
complex_conditional_space.add_condition(gloves_condition)
complex_conditional_space.add_condition(heur_condition)
complex_conditional_space.add_condition(Cl_condition)
complex_conditional_space.add_condition(another_condition)
complex_cs = list()
complex_cs.append(
"classi ordinal {random_forest,extra_trees,k_nearest_neighbors, something} "
"[random_forest]"
)
complex_cs.append("knn_weights categorical {uniform, distance} [uniform]")
complex_cs.append("weather ordinal {sunny, rainy, cloudy, snowing} [sunny]")
complex_cs.append("temperature categorical {high, low} [high]")
complex_cs.append("rain categorical { yes, no } [yes]")
complex_cs.append("gloves ordinal { none, yarn, leather, gortex } [none]")
complex_cs.append("heur1 categorical { off, on } [off]")
complex_cs.append("heur2 categorical { off, on } [off]")
complex_cs.append("heur_order categorical { heur1then2, heur2then1 } [heur1then2]")
complex_cs.append("gloves | rain == yes || temperature == low")
complex_cs.append("heur_order | heur1 == on && heur2 == on")
complex_cs.append("knn_weights | classi == k_nearest_neighbors || "
"classi != extra_trees && classi == random_forest || classi == something")
complex_cs.append("temperature | weather == rainy && weather == cloudy || "
"weather == sunny && weather != snowing")
cs_new = pcs_new.read(complex_cs)
self.assertEqual(cs_new, complex_conditional_space)
def test_convert_restrictions(self):
# This is a smoke test to make sure that the int/float values in the
# greater or smaller statements are converted to the right type when
# reading them
s = """x1 real [0,1] [0]
x2 real [0,1] [0]
x3 real [0,1] [0]
x4 integer [0,2] [0]
x5 real [0,1] [0]
x6 ordinal {cold, luke-warm, hot} [cold]
x1 | x2 > 0.5
x3 | x4 > 1 && x4 == 2 && x4 in {2}
x5 | x6 > luke-warm"""
pcs_new.read(s.split('\n'))
def test_write_restrictions(self):
s = "c integer [0, 2] [0]\n" + \
"d ordinal {cold, luke-warm, hot} [cold]\n" + \
"e real [0.0, 1.0] [0.0]\n" + \
"b real [0.0, 1.0] [0.0]\n" + \
"a real [0.0, 1.0] [0.0]\n" + \
"\n" + \
"b | d in {luke-warm, hot} || c > 1\n" + \
"a | b == 0.5 && e > 0.5"
a = pcs_new.read(s.split('\n'))
out = pcs_new.write(a)
self.assertEqual(out, s)
def test_read_write(self):
        # Smoke test: reading, writing, and re-reading should not alter the
        # configuration space or make it incomparable
this_file = os.path.abspath(__file__)
this_directory = os.path.dirname(this_file)
configuration_space_path = os.path.join(this_directory,
"..", "test_searchspaces")
configuration_space_path = os.path.abspath(configuration_space_path)
configuration_space_path = os.path.join(configuration_space_path,
"spear-params-mixed.pcs")
with open(configuration_space_path) as fh:
cs = pcs.read(fh)
tf = tempfile.NamedTemporaryFile()
name = tf.name
tf.close()
with open(name, 'w') as fh:
pcs_string = pcs.write(cs)
fh.write(pcs_string)
with open(name, 'r') as fh:
pcs_new = pcs.read(fh)
self.assertEqual(pcs_new, cs, msg=(pcs_new, cs))
|
py | 1a4553340dac0a0e9e5213da33da482aaadc51c5 | import sys
from PyQt5.QtGui import QStandardItemModel, QStandardItem
from PyQt5.QtCore import Qt, pyqtSignal, QRect
from PyQt5.QtWidgets import QHeaderView, QStyleOptionButton, QStyle, QApplication, QTableView, QWidget, QVBoxLayout
class CheckBoxHeader(QHeaderView):
clicked = pyqtSignal(int, bool) |
py | 1a4553a1fc67b6c4f9410d9ad256d5d2b58200df | import os
import sys
import uuid
import fileinput
import boto3
region = os.getenv('AWS_REGION')
tg = os.getenv('AWS_IOT_THING_NAME')
try:
mac_address = hex(uuid.getnode())
mac_address = mac_address[2:8] + 'fffe' + mac_address[8:]
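    # Note (illustrative values only): this builds an EUI-64-style GatewayEui
    # from the 48-bit MAC by inserting 'fffe' in the middle, e.g.
    # hex(uuid.getnode()) == '0xa01a2b3c4d5e' -> 'a01a2b' + 'fffe' + '3c4d5e'
    # == 'a01a2bfffe3c4d5e'.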
print('the GatewayEui for the gateway will be', mac_address)
input_file = "./station.conf"
file_object = open( input_file, 'r+' )
for line in fileinput.input(input_file):
file_object.write(line.replace('"routerid": ""', f'"routerid": "{mac_address}"'))
file_object.close()
print('routerid configured in station.conf file')
if os.path.isfile('cups.crt'):
print('Found credentials in the folder, gateway provisioning step skipped')
exit(0)
lora_region = sys.argv[1]
print('Lora region for the gateway:', lora_region)
print('AWS region:', region)
print('ThingName used for the name of the gateway:', tg)
iot = boto3.client('iot', region_name= region)
iotw = boto3.client('iotwireless', region_name= region)
gateway = None
try:
gateway = iotw.get_wireless_gateway(
Identifier= mac_address,
IdentifierType= 'GatewayEui'
)
except iotw.exceptions.from_code('ResourceNotFoundException'):
gateway = iotw.create_wireless_gateway(
Name= tg,
Description=f'The LoRaWAN Gateway {tg} has been registered using an AWS IoT Greengrass component',
LoRaWAN={
'GatewayEui': mac_address,
'RfRegion': lora_region
}
)
except Exception as e:
print(e)
exit(1)
# if the gateway is not created, raise an error
if gateway.get('Id') == None:
raise ValueError('Error when provisioning the gateway')
certs = iot.create_keys_and_certificate(
setAsActive=True
)
cups= iotw.get_service_endpoint(ServiceType = 'CUPS')
with open('cups.uri', 'w') as f:
f.write(cups['ServiceEndpoint'])
with open('cups.trust', 'w') as f:
f.write(cups['ServerTrust'])
cert_id= certs['certificateId']
with open('cups.crt', 'w') as f:
f.write(certs['certificatePem'])
with open('cups.key', 'w') as f:
f.write(certs['keyPair']['PrivateKey'])
associate_gateway = iotw.associate_wireless_gateway_with_certificate(
Id= gateway['Id'],
IotCertificateId= certs['certificateId']
)
print(f"The certificate {certs.get('certificateId')} has been associated with the gateway {tg}")
except Exception as e:
print(e)
exit(1) |
py | 1a4553dca88a15f47a452651a7a6870fe2df5c78 | import csv
import math
import re
import argparse
import random
def calculate_quota(num_winners, num_votes):
return math.floor(num_votes * (1.0 / (1 + num_winners)))
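# Quick sanity check of the Droop-style quota above (illustrative numbers, not
# part of the election data): with 2 winners and 100 votes the quota is
# floor(100 * 1/3) = 33.
assert calculate_quota(2, 100) == 33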
class Candidate:
""" A model for representing vote counts.
Args:
name: String key for the candidate.
ballots: A 2D array where each element is a string list of candidate
names sorted in preferences order.
"""
def __init__(self, name, ballots):
self.__name = name
self.__votes = 10_000 * len(ballots)
self.__ballots = ballots
@property
def votes(self):
return self.__votes
@property
def name(self):
return self.__name
@property
def ballots(self):
return self.__ballots
def surplus_for_candidate(self, starting_votes, surplus, candidate_name):
surplus_per_vote = surplus / len(self.ballots)
votes_for_candidate = sum(
[1 for b in self.__ballots if len(b) > 0 and b[0] == candidate_name])
result = math.ceil(votes_for_candidate * surplus_per_vote)
self.__votes -= result
return result
def exhausted_ballots(self, surplus):
surplus_per_vote = surplus / (self.votes) * 10_000
exhausted_votes = sum([1 for b in self.__ballots if len(b) == 0])
result = math.ceil(exhausted_votes * surplus_per_vote)
self.__votes -= result
return result
def add_surplus(self, surplus):
self.__votes += math.floor(surplus)
def surplus(self, quota):
return max(self.__votes - quota, 0.0)
def add_ballot(self, new_ballot, votes_per_ballot):
self.__ballots.append(new_ballot)
self.__votes += math.floor(votes_per_ballot)
def drop_candidate(self, candidate):
for b in self.__ballots:
if(candidate.name in b):
b.remove(candidate.name)
def __repr__(self):
return str(self)
def __str__(self):
return f"{{name: {self.name}, votes: {self.__votes}}}"
def __lt__(self, other):
return self.votes < other.votes
def __eq__(self, other):
        return isinstance(other, Candidate) and self.name == other.name
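# Illustrative example (made-up ballots): each ballot lists the voter's
# remaining preferences in ranked order, and every ballot is worth 10_000
# synthetic vote units in this implementation.
_demo_candidate = Candidate("Ada", [["Bob", "Cam"], ["Cam"], []])
assert _demo_candidate.votes == 30_000
del _demo_candidate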
def award_first_pref(candidate_names, ballot_data):
""" Generates a list of candidates with their approriate 1st round votes
and ballot data.
Returns:
list of Candidates.
"""
num_choices = len(ballot_data)
FIRST = "1"
choices = [str(n) for n in range(2, num_choices)]
ballots = {c: [] for c in candidate_names}
for row in ballot_data:
key = candidate_names[row.index(FIRST)]
value = ballots[key]
ballots[key] = [
*value, [candidate_names[row.index(choice)] for choice in choices if choice in row]]
candidates = [Candidate(name=k, ballots=v) for (k, v) in ballots.items()]
random.shuffle(candidates)
return candidates
def find_winners(candidates, quota):
"""Returns candidates that have met the quota."""
return [c for c in candidates if c.votes >= quota]
def distribute_surplus(candidates, exhausted, quota):
"""
Returns the given list of Candidates with the surplus votes from the
    Candidate with the most surplus votes transferred to their next
preference.
"""
max_surplus = max([c.surplus(quota) for c in candidates])
biggest_winners = [c for c in candidates if c.surplus(quota) == max_surplus]
winner = biggest_winners[0]
candidates.remove(winner)
surplus = winner.surplus(quota)
starting_votes = winner.votes
for c in candidates:
c.drop_candidate(winner)
c.add_surplus(winner.surplus_for_candidate(starting_votes, surplus, c.name))
exhausted.add_surplus(winner.exhausted_ballots(surplus))
return candidates
def redistribute_loser(candidates, exhausted):
"""
    Returns: A list of Candidates with the lowest vote-getting
    Candidate removed
"""
fewest_votes = min(candidates).votes
biggest_losers = [c for c in candidates if c.votes == fewest_votes]
eliminated = biggest_losers[-1]
candidates.remove(eliminated)
starting_votes = eliminated.votes
for b in eliminated.ballots:
next_choice = b[0] if len(b) > 0 else None
if(next_choice == None):
exhausted.add_ballot(b, eliminated.votes)
for c in candidates:
c.drop_candidate(eliminated)
if c.name == next_choice:
c.add_ballot(b[1:], starting_votes / len(eliminated.ballots))
return candidates
def parse_vote_data(csv_data):
""" Retrieves candidate names from the input file, cleans ballots rows
such that contain only the numeric ranked preference number. This
function also discards invalid ballots.
Returns: A String[] of names, and a String[][] where each row is a String[]
    representing a ballot. Each row item is a stringified integer
representing the index of the corresponding candidate in the candidates
array, or an empty string if the ballot does not rank all candidates.
"""
def valid(ballot, num_candidates):
prefs = [int(p) for p in ballot if p]
if(len(prefs) == 0 or len(prefs) > num_candidates):
return False
return sorted(prefs) == list(range(1, len(prefs) + 1))
candidate_names = [n.strip() for n in next(csv_data)]
parsed_vote_data = []
for row in csv_data:
ballot = [re.sub(r"\D", "", c) for c in row]
if(valid(ballot, len(candidate_names))):
parsed_vote_data.append(ballot)
else:
print(f"❌ {ballot}")
return candidate_names, parsed_vote_data
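# Illustrative ballot rows (made up): ["1", "3", "2"] is a complete contiguous
# ranking and is kept; ["1", "", "2"] is a valid partial ranking (preferences
# 1..2 with one candidate unranked); ["1", "3", ""] has a gap (no "2") and is
# discarded by valid() above.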
def count_ballots(file_name, num_winners):
with open(file_name) as csv_file:
csv_data = csv.reader(csv_file, delimiter=',')
candidate_names, vote_data = parse_vote_data(csv_data)
candidates_to_votes = award_first_pref(candidate_names, vote_data)
round_num = 1
exhausted = Candidate("Exhausted", [])
winners = []
while True:
num_votes = sum([c.votes for c in candidates_to_votes])
quota = calculate_quota(num_winners, num_votes)
candidates_to_votes.sort(reverse=True)
# Print stats
print(f"Round {round_num}:")
for w in winners:
print(f"🏆: {w}")
print(f"Votes: {num_votes + exhausted.votes + sum([w.votes for w in winners])}")
print(f"Quota to win: {quota}")
for c in candidates_to_votes:
print(f" {c.name}: {c.votes}")
print(f" Exhausted: {exhausted.votes}")
# Update winners
round_winners = find_winners(candidates_to_votes, quota)
for w in round_winners:
winners.append(w)
if(len(winners) >= num_winners):
return winners[:num_winners], exhausted
elif(sum([w.surplus(quota) for w in round_winners]) > 0.0):
candidates_to_votes = distribute_surplus(
candidates_to_votes, exhausted, quota)
else:
                candidates_to_votes = redistribute_loser(
candidates_to_votes, exhausted)
round_num += 1
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Administer election.')
parser.add_argument('-f', '--file', action='store', type=str,
help='Name of the election results file')
parser.add_argument('-n', '--num_winners', action='store',
type=int, default=1, help='Number of winners to pick')
args = parser.parse_args()
winners, exhausted = count_ballots(args.file, num_winners=args.num_winners)
print("")
for w in winners:
print(f"🏆: {w}")
print(f"exhausted votes {exhausted.votes}")
|
py | 1a4554961ecf4a39e27a0f8b41849dda867258b1 | class Student(object):
"""This is a Superclass
    It holds the total number of students
"""
counter = 0
def __init__(self):
type(self).counter += 1
def __del__(self):
type(self).counter -= 1
def student_stream(self):
"""data encapsulation: color can not be accessed by another class
Abstraction: checks the color if it valid
"""
if self._color == "yelow":
stream = "North"
elif self._color == "pink":
strem = "South"
elif self._color == "red":
stream = "East"
else:
stream = "Invalid stream"
return stream
class Students(Student):
"""Subclass of Student_counter: it inherits methods from Student_counter
It holds student details
"""
def __init__(self, name, school_id, fees_paid, student_class, color):
self.name = name
self.school_id = school_id
self.fees_paid = fees_paid
self.student_class = student_class
self._color = color
Student.__init__(self)
if self.student_class == 4:
self.__fees = 5000
elif student_class == 3:
self.__fees = 4000
elif student_class == 2:
self.__fees = 37122
else:
self.__fees = 56000
def fees_arrears(self):
balance = self.__fees - self.fees_paid
return balance
class Studentsubjects(Student):
"""Subclass of Student_counter: it inherits methods from Student_counter
It holds student details
"""
def __init__(self, name, color):
self.name = name
self._color = color
Student.__init__(self)
def student_stream(self):
"""data encapsulation: color can not be accessed by another class
Abstraction: checks the color if it valid
Polymophism: using the same method to return different outputs
"""
if self._color == "yelow":
stream = "Geography"
elif self._color == "pink":
stream = "Music"
elif self._color == "red":
stream = "Agriculture"
else:
stream = "Invalid stream"
return stream
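# Usage sketch (made-up values) showing inheritance and the overridden
# student_stream(); note that `type(self).counter += 1` in Student.__init__
# records the count on each concrete subclass rather than on Student itself.
if __name__ == "__main__":
    alice = Students("Alice", 101, 3000, 4, "pink")
    bob = Studentsubjects("Bob", "red")
    print(alice.student_stream())   # "South": mapping inherited from Student
    print(alice.fees_arrears())     # 2000: class 4 fees (5000) minus 3000 paid
    print(bob.student_stream())     # "Agriculture": overridden mapping
    print(Students.counter, Studentsubjects.counter)  # 1 1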
|
py | 1a45552864820416faaae8139b1c04674d6622b9 |
from unittest import TestCase
from unittest import mock
from multiphase.multiphase import Multiphase
class MultiphaseTests(TestCase):
"""
Test Multiphase.
"""
def setUp(self):
self.app = Multiphase()
def test_run(self):
"""
Test the run code.
"""
args = []
if self.app.TYPE == 'ds':
args.append('inputdir') # you may want to change this inputdir mock
args.append('outputdir') # you may want to change this outputdir mock
# you may want to add more of your custom defined optional arguments to test
# your app with
# eg.
# args.append('--custom-int')
# args.append(10)
options = self.app.parse_args(args)
self.app.run(options)
# write your own assertions
self.assertEqual(options.outputdir, 'outputdir')
|
py | 1a4556b021690a85b10612a1343879342bb3de5b | """
Data structure for 1-dimensional cross-sectional and time series data
"""
from io import StringIO
from shutil import get_terminal_size
from textwrap import dedent
from typing import (
IO,
TYPE_CHECKING,
Any,
Callable,
Iterable,
List,
Optional,
Tuple,
Type,
Union,
)
import warnings
import numpy as np
from pandas._config import get_option
from pandas._libs import lib, properties, reshape, tslibs
from pandas._libs.lib import no_default
from pandas._typing import (
ArrayLike,
Axis,
DtypeObj,
FrameOrSeriesUnion,
IndexKeyFunc,
Label,
ValueKeyFunc,
)
from pandas.compat.numpy import function as nv
from pandas.errors import InvalidIndexError
from pandas.util._decorators import Appender, Substitution, doc
from pandas.util._validators import validate_bool_kwarg, validate_percentile
from pandas.core.dtypes.cast import (
convert_dtypes,
maybe_cast_to_extension_array,
validate_numeric_casting,
)
from pandas.core.dtypes.common import (
ensure_platform_int,
is_bool,
is_categorical_dtype,
is_dict_like,
is_extension_array_dtype,
is_integer,
is_iterator,
is_list_like,
is_object_dtype,
is_scalar,
)
from pandas.core.dtypes.generic import ABCDataFrame
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.missing import (
isna,
na_value_for_dtype,
notna,
remove_na_arraylike,
)
import pandas as pd
from pandas.core import algorithms, base, generic, nanops, ops
from pandas.core.accessor import CachedAccessor
from pandas.core.arrays import ExtensionArray
from pandas.core.arrays.categorical import CategoricalAccessor
from pandas.core.arrays.sparse import SparseAccessor
import pandas.core.common as com
from pandas.core.construction import (
create_series_with_explicit_dtype,
extract_array,
is_empty_data,
sanitize_array,
)
from pandas.core.generic import NDFrame
from pandas.core.indexers import deprecate_ndim_indexing, unpack_1tuple
from pandas.core.indexes.accessors import CombinedDatetimelikeProperties
from pandas.core.indexes.api import Float64Index, Index, MultiIndex, ensure_index
import pandas.core.indexes.base as ibase
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.period import PeriodIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex
from pandas.core.indexing import check_bool_indexer
from pandas.core.internals import SingleBlockManager
from pandas.core.sorting import ensure_key_mapped
from pandas.core.strings import StringMethods
from pandas.core.tools.datetimes import to_datetime
import pandas.io.formats.format as fmt
import pandas.plotting
if TYPE_CHECKING:
from pandas.core.frame import DataFrame
from pandas.core.groupby.generic import SeriesGroupBy
__all__ = ["Series"]
_shared_doc_kwargs = dict(
axes="index",
klass="Series",
axes_single_arg="{0 or 'index'}",
axis="""axis : {0 or 'index'}
Parameter needed for compatibility with DataFrame.""",
inplace="""inplace : boolean, default False
If True, performs operation inplace and returns None.""",
unique="np.ndarray",
duplicated="Series",
optional_by="",
optional_mapper="",
optional_labels="",
optional_axis="",
versionadded_to_excel="\n .. versionadded:: 0.20.0\n",
)
def _coerce_method(converter):
"""
Install the scalar coercion methods.
"""
def wrapper(self):
if len(self) == 1:
return converter(self.iloc[0])
raise TypeError(f"cannot convert the series to {converter}")
wrapper.__name__ = f"__{converter.__name__}__"
return wrapper
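# Note: the coercion wrappers produced by _coerce_method (installed further
# below as __float__/__int__/__long__) make float(pd.Series([1.5])) return 1.5,
# while float(pd.Series([1, 2])) raises TypeError("cannot convert the series
# to <class 'float'>").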
# ----------------------------------------------------------------------
# Series class
class Series(base.IndexOpsMixin, generic.NDFrame):
"""
One-dimensional ndarray with axis labels (including time series).
Labels need not be unique but must be a hashable type. The object
supports both integer- and label-based indexing and provides a host of
methods for performing operations involving the index. Statistical
methods from ndarray have been overridden to automatically exclude
missing data (currently represented as NaN).
Operations between Series (+, -, /, *, **) align values based on their
associated index values-- they need not be the same length. The result
index will be the sorted union of the two indexes.
Parameters
----------
data : array-like, Iterable, dict, or scalar value
Contains data stored in Series.
.. versionchanged:: 0.23.0
If data is a dict, argument order is maintained for Python 3.6
and later.
index : array-like or Index (1d)
Values must be hashable and have the same length as `data`.
Non-unique index values are allowed. Will default to
RangeIndex (0, 1, 2, ..., n) if not provided. If both a dict and index
sequence are used, the index will override the keys found in the
dict.
dtype : str, numpy.dtype, or ExtensionDtype, optional
Data type for the output Series. If not specified, this will be
inferred from `data`.
See the :ref:`user guide <basics.dtypes>` for more usages.
name : str, optional
The name to give to the Series.
copy : bool, default False
Copy input data.
"""
_typ = "series"
_name: Label
_metadata: List[str] = ["name"]
_internal_names_set = {"index"} | generic.NDFrame._internal_names_set
_accessors = {"dt", "cat", "str", "sparse"}
_deprecations = (
base.IndexOpsMixin._deprecations
| generic.NDFrame._deprecations
| frozenset(["compress", "ptp"])
)
# Override cache_readonly bc Series is mutable
hasnans = property(
base.IndexOpsMixin.hasnans.func, doc=base.IndexOpsMixin.hasnans.__doc__
)
_mgr: SingleBlockManager
div: Callable[["Series", Any], "Series"]
rdiv: Callable[["Series", Any], "Series"]
# ----------------------------------------------------------------------
# Constructors
def __init__(
self, data=None, index=None, dtype=None, name=None, copy=False, fastpath=False
):
if (
isinstance(data, SingleBlockManager)
and index is None
and dtype is None
and copy is False
):
# GH#33357 called with just the SingleBlockManager
NDFrame.__init__(self, data)
self.name = name
return
# we are called internally, so short-circuit
if fastpath:
# data is an ndarray, index is defined
if not isinstance(data, SingleBlockManager):
data = SingleBlockManager.from_array(data, index)
if copy:
data = data.copy()
if index is None:
index = data.index
else:
name = ibase.maybe_extract_name(name, data, type(self))
if is_empty_data(data) and dtype is None:
# gh-17261
warnings.warn(
"The default dtype for empty Series will be 'object' instead "
"of 'float64' in a future version. Specify a dtype explicitly "
"to silence this warning.",
DeprecationWarning,
stacklevel=2,
)
# uncomment the line below when removing the DeprecationWarning
# dtype = np.dtype(object)
if index is not None:
index = ensure_index(index)
if data is None:
data = {}
if dtype is not None:
dtype = self._validate_dtype(dtype)
if isinstance(data, MultiIndex):
raise NotImplementedError(
"initializing a Series from a MultiIndex is not supported"
)
elif isinstance(data, Index):
if dtype is not None:
# astype copies
data = data.astype(dtype)
else:
# GH#24096 we need to ensure the index remains immutable
data = data._values.copy()
copy = False
elif isinstance(data, np.ndarray):
if len(data.dtype):
# GH#13296 we are dealing with a compound dtype, which
# should be treated as 2D
raise ValueError(
"Cannot construct a Series from an ndarray with "
"compound dtype. Use DataFrame instead."
)
elif isinstance(data, Series):
if index is None:
index = data.index
else:
data = data.reindex(index, copy=copy)
copy = False
data = data._mgr
elif is_dict_like(data):
data, index = self._init_dict(data, index, dtype)
dtype = None
copy = False
elif isinstance(data, SingleBlockManager):
if index is None:
index = data.index
elif not data.index.equals(index) or copy:
# GH#19275 SingleBlockManager input should only be called
# internally
raise AssertionError(
"Cannot pass both SingleBlockManager "
"`data` argument and a different "
"`index` argument. `copy` must be False."
)
elif is_extension_array_dtype(data):
pass
elif isinstance(data, (set, frozenset)):
raise TypeError(f"'{type(data).__name__}' type is unordered")
else:
data = com.maybe_iterable_to_list(data)
if index is None:
if not is_list_like(data):
data = [data]
index = ibase.default_index(len(data))
elif is_list_like(data):
# a scalar numpy array is list-like but doesn't
# have a proper length
try:
if len(index) != len(data):
raise ValueError(
f"Length of passed values is {len(data)}, "
f"index implies {len(index)}."
)
except TypeError:
pass
# create/copy the manager
if isinstance(data, SingleBlockManager):
if dtype is not None:
data = data.astype(dtype=dtype, errors="ignore", copy=copy)
elif copy:
data = data.copy()
else:
data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
data = SingleBlockManager.from_array(data, index)
generic.NDFrame.__init__(self, data)
self.name = name
self._set_axis(0, index, fastpath=True)
def _init_dict(self, data, index=None, dtype=None):
"""
Derive the "_mgr" and "index" attributes of a new Series from a
dictionary input.
Parameters
----------
data : dict or dict-like
Data used to populate the new Series.
index : Index or index-like, default None
Index for the new Series: if None, use dict keys.
dtype : dtype, default None
The dtype for the new Series: if None, infer from data.
Returns
-------
_data : BlockManager for the new Series
index : index for the new Series
"""
# Looking for NaN in dict doesn't work ({np.nan : 1}[float('nan')]
# raises KeyError), so we iterate the entire dict, and align
if data:
keys, values = zip(*data.items())
values = list(values)
elif index is not None:
# fastpath for Series(data=None). Just use broadcasting a scalar
# instead of reindexing.
values = na_value_for_dtype(dtype)
keys = index
else:
keys, values = [], []
# Input is now list-like, so rely on "standard" construction:
# TODO: passing np.float64 to not break anything yet. See GH-17261
s = create_series_with_explicit_dtype(
values, index=keys, dtype=dtype, dtype_if_empty=np.float64
)
# Now we just make sure the order is respected, if any
if data and index is not None:
s = s.reindex(index, copy=False)
return s._mgr, s.index
# ----------------------------------------------------------------------
@property
def _constructor(self) -> Type["Series"]:
return Series
@property
def _constructor_expanddim(self) -> Type["DataFrame"]:
from pandas.core.frame import DataFrame
return DataFrame
# types
@property
def _can_hold_na(self):
return self._mgr._can_hold_na
_index = None
def _set_axis(self, axis: int, labels, fastpath: bool = False) -> None:
"""
Override generic, we want to set the _typ here.
This is called from the cython code when we set the `index` attribute
directly, e.g. `series.index = [1, 2, 3]`.
"""
if not fastpath:
labels = ensure_index(labels)
is_all_dates = labels.is_all_dates
if is_all_dates:
if not isinstance(labels, (DatetimeIndex, PeriodIndex, TimedeltaIndex)):
try:
labels = DatetimeIndex(labels)
# need to set here because we changed the index
if fastpath:
self._mgr.set_axis(axis, labels)
except (tslibs.OutOfBoundsDatetime, ValueError):
# labels may exceeds datetime bounds,
# or not be a DatetimeIndex
pass
object.__setattr__(self, "_index", labels)
if not fastpath:
# The ensure_index call above ensures we have an Index object
self._mgr.set_axis(axis, labels)
# ndarray compatibility
@property
def dtype(self) -> DtypeObj:
"""
Return the dtype object of the underlying data.
"""
return self._mgr.dtype
@property
def dtypes(self) -> DtypeObj:
"""
Return the dtype object of the underlying data.
"""
# DataFrame compatibility
return self.dtype
@property
def name(self) -> Label:
"""
Return the name of the Series.
The name of a Series becomes its index or column name if it is used
to form a DataFrame. It is also used whenever displaying the Series
using the interpreter.
Returns
-------
label (hashable object)
The name of the Series, also the column name if part of a DataFrame.
See Also
--------
Series.rename : Sets the Series name when given a scalar input.
Index.name : Corresponding Index property.
Examples
--------
The Series name can be set initially when calling the constructor.
>>> s = pd.Series([1, 2, 3], dtype=np.int64, name='Numbers')
>>> s
0 1
1 2
2 3
Name: Numbers, dtype: int64
>>> s.name = "Integers"
>>> s
0 1
1 2
2 3
Name: Integers, dtype: int64
The name of a Series within a DataFrame is its column name.
>>> df = pd.DataFrame([[1, 2], [3, 4], [5, 6]],
... columns=["Odd Numbers", "Even Numbers"])
>>> df
Odd Numbers Even Numbers
0 1 2
1 3 4
2 5 6
>>> df["Even Numbers"].name
'Even Numbers'
"""
return self._name
@name.setter
def name(self, value: Label) -> None:
if not is_hashable(value):
raise TypeError("Series.name must be a hashable type")
object.__setattr__(self, "_name", value)
@property
def values(self):
"""
Return Series as ndarray or ndarray-like depending on the dtype.
.. warning::
We recommend using :attr:`Series.array` or
:meth:`Series.to_numpy`, depending on whether you need
a reference to the underlying data or a NumPy array.
Returns
-------
numpy.ndarray or ndarray-like
See Also
--------
Series.array : Reference to the underlying data.
Series.to_numpy : A NumPy array representing the underlying data.
Examples
--------
>>> pd.Series([1, 2, 3]).values
array([1, 2, 3])
>>> pd.Series(list('aabc')).values
array(['a', 'a', 'b', 'c'], dtype=object)
>>> pd.Series(list('aabc')).astype('category').values
['a', 'a', 'b', 'c']
Categories (3, object): ['a', 'b', 'c']
Timezone aware datetime data is converted to UTC:
>>> pd.Series(pd.date_range('20130101', periods=3,
... tz='US/Eastern')).values
array(['2013-01-01T05:00:00.000000000',
'2013-01-02T05:00:00.000000000',
'2013-01-03T05:00:00.000000000'], dtype='datetime64[ns]')
"""
return self._mgr.external_values()
@property
def _values(self):
"""
Return the internal repr of this data (defined by Block.interval_values).
This are the values as stored in the Block (ndarray or ExtensionArray
depending on the Block class), with datetime64[ns] and timedelta64[ns]
wrapped in ExtensionArrays to match Index._values behavior.
Differs from the public ``.values`` for certain data types, because of
historical backwards compatibility of the public attribute (e.g. period
returns object ndarray and datetimetz a datetime64[ns] ndarray for
``.values`` while it returns an ExtensionArray for ``._values`` in those
cases).
Differs from ``.array`` in that this still returns the numpy array if
the Block is backed by a numpy array (except for datetime64 and
timedelta64 dtypes), while ``.array`` ensures to always return an
ExtensionArray.
Overview:
dtype | values | _values | array |
----------- | ------------- | ------------- | ------------- |
Numeric | ndarray | ndarray | PandasArray |
Category | Categorical | Categorical | Categorical |
dt64[ns] | ndarray[M8ns] | DatetimeArray | DatetimeArray |
dt64[ns tz] | ndarray[M8ns] | DatetimeArray | DatetimeArray |
td64[ns] | ndarray[m8ns] | TimedeltaArray| ndarray[m8ns] |
Period | ndarray[obj] | PeriodArray | PeriodArray |
Nullable | EA | EA | EA |
"""
return self._mgr.internal_values()
@Appender(base.IndexOpsMixin.array.__doc__) # type: ignore
@property
def array(self) -> ExtensionArray:
return self._mgr._block.array_values()
# ops
def ravel(self, order="C"):
"""
Return the flattened underlying data as an ndarray.
Returns
-------
numpy.ndarray or ndarray-like
Flattened data of the Series.
See Also
--------
numpy.ndarray.ravel : Return a flattened array.
"""
return self._values.ravel(order=order)
def __len__(self) -> int:
"""
Return the length of the Series.
"""
return len(self._mgr)
def view(self, dtype=None) -> "Series":
"""
Create a new view of the Series.
This function will return a new Series with a view of the same
underlying values in memory, optionally reinterpreted with a new data
type. The new data type must preserve the same size in bytes as to not
cause index misalignment.
Parameters
----------
dtype : data type
Data type object or one of their string representations.
Returns
-------
Series
A new Series object as a view of the same data in memory.
See Also
--------
numpy.ndarray.view : Equivalent numpy function to create a new view of
the same data in memory.
Notes
-----
Series are instantiated with ``dtype=float64`` by default. While
``numpy.ndarray.view()`` will return a view with the same data type as
the original array, ``Series.view()`` (without specified dtype)
will try using ``float64`` and may fail if the original data type size
in bytes is not the same.
Examples
--------
>>> s = pd.Series([-2, -1, 0, 1, 2], dtype='int8')
>>> s
0 -2
1 -1
2 0
3 1
4 2
dtype: int8
The 8 bit signed integer representation of `-1` is `0b11111111`, but
the same bytes represent 255 if read as an 8 bit unsigned integer:
>>> us = s.view('uint8')
>>> us
0 254
1 255
2 0
3 1
4 2
dtype: uint8
The views share the same underlying values:
>>> us[0] = 128
>>> s
0 -128
1 -1
2 0
3 1
4 2
dtype: int8
"""
return self._constructor(
self._values.view(dtype), index=self.index
).__finalize__(self, method="view")
# ----------------------------------------------------------------------
# NDArray Compat
_HANDLED_TYPES = (Index, ExtensionArray, np.ndarray)
def __array_ufunc__(
self, ufunc: Callable, method: str, *inputs: Any, **kwargs: Any
):
# TODO: handle DataFrame
cls = type(self)
# for binary ops, use our custom dunder methods
result = ops.maybe_dispatch_ufunc_to_dunder_op(
self, ufunc, method, *inputs, **kwargs
)
if result is not NotImplemented:
return result
# Determine if we should defer.
no_defer = (np.ndarray.__array_ufunc__, cls.__array_ufunc__)
for item in inputs:
higher_priority = (
hasattr(item, "__array_priority__")
and item.__array_priority__ > self.__array_priority__
)
has_array_ufunc = (
hasattr(item, "__array_ufunc__")
and type(item).__array_ufunc__ not in no_defer
and not isinstance(item, self._HANDLED_TYPES)
)
if higher_priority or has_array_ufunc:
return NotImplemented
# align all the inputs.
names = [getattr(x, "name") for x in inputs if hasattr(x, "name")]
types = tuple(type(x) for x in inputs)
# TODO: dataframe
alignable = [x for x, t in zip(inputs, types) if issubclass(t, Series)]
if len(alignable) > 1:
# This triggers alignment.
# At the moment, there aren't any ufuncs with more than two inputs
# so this ends up just being x1.index | x2.index, but we write
# it to handle *args.
index = alignable[0].index
for s in alignable[1:]:
index |= s.index
inputs = tuple(
x.reindex(index) if issubclass(t, Series) else x
for x, t in zip(inputs, types)
)
else:
index = self.index
inputs = tuple(extract_array(x, extract_numpy=True) for x in inputs)
result = getattr(ufunc, method)(*inputs, **kwargs)
name = names[0] if len(set(names)) == 1 else None
def construct_return(result):
if lib.is_scalar(result):
return result
elif result.ndim > 1:
# e.g. np.subtract.outer
if method == "outer":
# GH#27198
raise NotImplementedError
return result
return self._constructor(result, index=index, name=name, copy=False)
if type(result) is tuple:
# multiple return values
return tuple(construct_return(x) for x in result)
elif method == "at":
# no return value
return None
else:
return construct_return(result)
def __array__(self, dtype=None) -> np.ndarray:
"""
Return the values as a NumPy array.
Users should not call this directly. Rather, it is invoked by
:func:`numpy.array` and :func:`numpy.asarray`.
Parameters
----------
dtype : str or numpy.dtype, optional
The dtype to use for the resulting NumPy array. By default,
the dtype is inferred from the data.
Returns
-------
numpy.ndarray
The values in the series converted to a :class:`numpy.ndarray`
with the specified `dtype`.
See Also
--------
array : Create a new array from data.
Series.array : Zero-copy view to the array backing the Series.
Series.to_numpy : Series method for similar behavior.
Examples
--------
>>> ser = pd.Series([1, 2, 3])
>>> np.asarray(ser)
array([1, 2, 3])
For timezone-aware data, the timezones may be retained with
``dtype='object'``
>>> tzser = pd.Series(pd.date_range('2000', periods=2, tz="CET"))
>>> np.asarray(tzser, dtype="object")
array([Timestamp('2000-01-01 00:00:00+0100', tz='CET', freq='D'),
Timestamp('2000-01-02 00:00:00+0100', tz='CET', freq='D')],
dtype=object)
Or the values may be localized to UTC and the tzinfo discarded with
``dtype='datetime64[ns]'``
>>> np.asarray(tzser, dtype="datetime64[ns]") # doctest: +ELLIPSIS
array(['1999-12-31T23:00:00.000000000', ...],
dtype='datetime64[ns]')
"""
return np.asarray(self.array, dtype)
# ----------------------------------------------------------------------
# Unary Methods
# coercion
__float__ = _coerce_method(float)
__long__ = _coerce_method(int)
__int__ = _coerce_method(int)
# ----------------------------------------------------------------------
# indexers
@property
def axes(self) -> List[Index]:
"""
Return a list of the row axis labels.
"""
return [self.index]
# ----------------------------------------------------------------------
# Indexing Methods
@Appender(generic.NDFrame.take.__doc__)
def take(self, indices, axis=0, is_copy=None, **kwargs) -> "Series":
if is_copy is not None:
warnings.warn(
"is_copy is deprecated and will be removed in a future version. "
"'take' always returns a copy, so there is no need to specify this.",
FutureWarning,
stacklevel=2,
)
nv.validate_take(tuple(), kwargs)
indices = ensure_platform_int(indices)
new_index = self.index.take(indices)
new_values = self._values.take(indices)
result = self._constructor(new_values, index=new_index, fastpath=True)
return result.__finalize__(self, method="take")
def _take_with_is_copy(self, indices, axis=0):
"""
Internal version of the `take` method that sets the `_is_copy`
attribute to keep track of the parent dataframe (using in indexing
for the SettingWithCopyWarning). For Series this does the same
as the public take (it never sets `_is_copy`).
See the docstring of `take` for full explanation of the parameters.
"""
return self.take(indices=indices, axis=axis)
def _ixs(self, i: int, axis: int = 0):
"""
Return the i-th value or values in the Series by location.
Parameters
----------
i : int
Returns
-------
scalar (int) or Series (slice, sequence)
"""
return self._values[i]
def _slice(self, slobj: slice, axis: int = 0) -> "Series":
# axis kwarg is retained for compat with NDFrame method
# _slice is *always* positional
return self._get_values(slobj)
def __getitem__(self, key):
key = com.apply_if_callable(key, self)
if key is Ellipsis:
return self
key_is_scalar = is_scalar(key)
if isinstance(key, (list, tuple)):
key = unpack_1tuple(key)
if is_integer(key) and self.index._should_fallback_to_positional():
return self._values[key]
elif key_is_scalar:
return self._get_value(key)
if is_hashable(key):
# Otherwise index.get_value will raise InvalidIndexError
try:
# For labels that don't resolve as scalars like tuples and frozensets
result = self._get_value(key)
return result
except (KeyError, TypeError):
if isinstance(key, tuple) and isinstance(self.index, MultiIndex):
# We still have the corner case where a tuple is a key
# in the first level of our MultiIndex
return self._get_values_tuple(key)
if is_iterator(key):
key = list(key)
if com.is_bool_indexer(key):
key = check_bool_indexer(self.index, key)
key = np.asarray(key, dtype=bool)
return self._get_values(key)
return self._get_with(key)
def _get_with(self, key):
# other: fancy integer or otherwise
if isinstance(key, slice):
# _convert_slice_indexer to determine if this slice is positional
# or label based, and if the latter, convert to positional
slobj = self.index._convert_slice_indexer(key, kind="getitem")
return self._slice(slobj)
elif isinstance(key, ABCDataFrame):
raise TypeError(
"Indexing a Series with DataFrame is not "
"supported, use the appropriate DataFrame column"
)
elif isinstance(key, tuple):
return self._get_values_tuple(key)
elif not is_list_like(key):
# e.g. scalars that aren't recognized by lib.is_scalar, GH#32684
return self.loc[key]
if not isinstance(key, (list, np.ndarray, ExtensionArray, Series, Index)):
key = list(key)
if isinstance(key, Index):
key_type = key.inferred_type
else:
key_type = lib.infer_dtype(key, skipna=False)
# Note: The key_type == "boolean" case should be caught by the
# com.is_bool_indexer check in __getitem__
if key_type == "integer":
# We need to decide whether to treat this as a positional indexer
# (i.e. self.iloc) or label-based (i.e. self.loc)
if not self.index._should_fallback_to_positional():
return self.loc[key]
else:
return self.iloc[key]
# handle the dup indexing case GH#4246
return self.loc[key]
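    # Illustrative example of the positional/label decision above:
    #   >>> s = pd.Series([10, 20, 30], index=["a", "b", "c"])
    #   >>> s[[0, 1]]   # non-integer index -> positional, same as s.iloc[[0, 1]]
    #   >>> s2 = pd.Series([10, 20, 30], index=[5, 6, 7])
    #   >>> s2[[5, 6]]  # integer index -> label-based, same as s2.loc[[5, 6]]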
def _get_values_tuple(self, key):
# mpl hackaround
if com.any_none(*key):
result = self._get_values(key)
deprecate_ndim_indexing(result, stacklevel=5)
return result
if not isinstance(self.index, MultiIndex):
raise KeyError("key of type tuple not found and not a MultiIndex")
# If key is contained, would have returned by now
indexer, new_index = self.index.get_loc_level(key)
return self._constructor(self._values[indexer], index=new_index).__finalize__(
self,
)
def _get_values(self, indexer):
try:
return self._constructor(self._mgr.get_slice(indexer)).__finalize__(self,)
except ValueError:
# mpl compat if we look up e.g. ser[:, np.newaxis];
# see tests.series.timeseries.test_mpl_compat_hack
# the asarray is needed to avoid returning a 2D DatetimeArray
return np.asarray(self._values)[indexer]
def _get_value(self, label, takeable: bool = False):
"""
Quickly retrieve single value at passed index label.
Parameters
----------
label : object
takeable : interpret the index as indexers, default False
Returns
-------
scalar value
"""
if takeable:
return self._values[label]
# Similar to Index.get_value, but we do not fall back to positional
loc = self.index.get_loc(label)
return self.index._get_values_for_loc(self, loc, label)
def __setitem__(self, key, value):
key = com.apply_if_callable(key, self)
cacher_needs_updating = self._check_is_chained_assignment_possible()
if key is Ellipsis:
key = slice(None)
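        # Assignment cascade, roughly: try the fast index-engine path for a
        # scalar label; on KeyError/ValueError fall back to positional or
        # .loc assignment; on TypeError reject tuple keys on a flat index,
        # route boolean masks through _where (or .iloc), and send the rest
        # to _set_with.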
try:
self._set_with_engine(key, value)
except (KeyError, ValueError):
values = self._values
if is_integer(key) and not self.index.inferred_type == "integer":
# positional setter
values[key] = value
else:
                # GH#12862 adding a new key to the Series
self.loc[key] = value
except TypeError as err:
if isinstance(key, tuple) and not isinstance(self.index, MultiIndex):
raise KeyError(
"key of type tuple not found and not a MultiIndex"
) from err
if com.is_bool_indexer(key):
key = check_bool_indexer(self.index, key)
key = np.asarray(key, dtype=bool)
try:
self._where(~key, value, inplace=True)
except InvalidIndexError:
self.iloc[key] = value
return
else:
self._set_with(key, value)
if cacher_needs_updating:
self._maybe_update_cacher()
def _set_with_engine(self, key, value):
# fails with AttributeError for IntervalIndex
loc = self.index._engine.get_loc(key)
validate_numeric_casting(self.dtype, value)
self._values[loc] = value
def _set_with(self, key, value):
# other: fancy integer or otherwise
if isinstance(key, slice):
indexer = self.index._convert_slice_indexer(key, kind="getitem")
return self._set_values(indexer, value)
else:
assert not isinstance(key, tuple)
if is_scalar(key):
key = [key]
if isinstance(key, Index):
key_type = key.inferred_type
key = key._values
else:
key_type = lib.infer_dtype(key, skipna=False)
# Note: key_type == "boolean" should not occur because that
# should be caught by the is_bool_indexer check in __setitem__
if key_type == "integer":
if not self.index._should_fallback_to_positional():
self._set_labels(key, value)
else:
self._set_values(key, value)
else:
self.loc[key] = value
def _set_labels(self, key, value):
key = com.asarray_tuplesafe(key)
indexer: np.ndarray = self.index.get_indexer(key)
mask = indexer == -1
if mask.any():
raise KeyError(f"{key[mask]} not in index")
self._set_values(indexer, value)
def _set_values(self, key, value):
if isinstance(key, Series):
key = key._values
self._mgr = self._mgr.setitem(indexer=key, value=value)
self._maybe_update_cacher()
def _set_value(self, label, value, takeable: bool = False):
"""
Quickly set single value at passed label.
If label is not contained, a new object is created with the label
placed at the end of the result index.
Parameters
----------
label : object
Partial indexing with MultiIndex not allowed.
value : object
Scalar value.
takeable : interpret the index as indexers, default False
"""
try:
if takeable:
self._values[label] = value
else:
loc = self.index.get_loc(label)
validate_numeric_casting(self.dtype, value)
self._values[loc] = value
except KeyError:
# set using a non-recursive method
self.loc[label] = value
# ----------------------------------------------------------------------
# Unsorted
@property
def _is_mixed_type(self):
return False
def repeat(self, repeats, axis=None) -> "Series":
"""
Repeat elements of a Series.
Returns a new Series where each element of the current Series
is repeated consecutively a given number of times.
Parameters
----------
repeats : int or array of ints
The number of repetitions for each element. This should be a
non-negative integer. Repeating 0 times will return an empty
Series.
axis : None
Must be ``None``. Has no effect but is accepted for compatibility
with numpy.
Returns
-------
Series
Newly created Series with repeated elements.
See Also
--------
Index.repeat : Equivalent function for Index.
numpy.repeat : Similar method for :class:`numpy.ndarray`.
Examples
--------
>>> s = pd.Series(['a', 'b', 'c'])
>>> s
0 a
1 b
2 c
dtype: object
>>> s.repeat(2)
0 a
0 a
1 b
1 b
2 c
2 c
dtype: object
>>> s.repeat([1, 2, 3])
0 a
1 b
1 b
2 c
2 c
2 c
dtype: object
"""
nv.validate_repeat(tuple(), dict(axis=axis))
new_index = self.index.repeat(repeats)
new_values = self._values.repeat(repeats)
return self._constructor(new_values, index=new_index).__finalize__(
self, method="repeat"
)
def reset_index(self, level=None, drop=False, name=None, inplace=False):
"""
Generate a new DataFrame or Series with the index reset.
This is useful when the index needs to be treated as a column, or
when the index is meaningless and needs to be reset to the default
before another operation.
Parameters
----------
level : int, str, tuple, or list, default optional
For a Series with a MultiIndex, only remove the specified levels
from the index. Removes all levels by default.
drop : bool, default False
Just reset the index, without inserting it as a column in
the new DataFrame.
name : object, optional
The name to use for the column containing the original Series
values. Uses ``self.name`` by default. This argument is ignored
when `drop` is True.
inplace : bool, default False
Modify the Series in place (do not create a new object).
Returns
-------
Series or DataFrame
When `drop` is False (the default), a DataFrame is returned.
The newly created columns will come first in the DataFrame,
followed by the original Series values.
When `drop` is True, a `Series` is returned.
In either case, if ``inplace=True``, no value is returned.
See Also
--------
DataFrame.reset_index: Analogous function for DataFrame.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4], name='foo',
... index=pd.Index(['a', 'b', 'c', 'd'], name='idx'))
Generate a DataFrame with default index.
>>> s.reset_index()
idx foo
0 a 1
1 b 2
2 c 3
3 d 4
To specify the name of the new column use `name`.
>>> s.reset_index(name='values')
idx values
0 a 1
1 b 2
2 c 3
3 d 4
To generate a new Series with the default set `drop` to True.
>>> s.reset_index(drop=True)
0 1
1 2
2 3
3 4
Name: foo, dtype: int64
To update the Series in place, without generating a new one
set `inplace` to True. Note that it also requires ``drop=True``.
>>> s.reset_index(inplace=True, drop=True)
>>> s
0 1
1 2
2 3
3 4
Name: foo, dtype: int64
The `level` parameter is interesting for Series with a multi-level
index.
>>> arrays = [np.array(['bar', 'bar', 'baz', 'baz']),
... np.array(['one', 'two', 'one', 'two'])]
>>> s2 = pd.Series(
... range(4), name='foo',
... index=pd.MultiIndex.from_arrays(arrays,
... names=['a', 'b']))
To remove a specific level from the Index, use `level`.
>>> s2.reset_index(level='a')
a foo
b
one bar 0
two bar 1
one baz 2
two baz 3
If `level` is not set, all levels are removed from the Index.
>>> s2.reset_index()
a b foo
0 bar one 0
1 bar two 1
2 baz one 2
3 baz two 3
"""
inplace = validate_bool_kwarg(inplace, "inplace")
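        # Three outcomes: drop=True keeps a Series (optionally modified in
        # place); drop=False with inplace=True is rejected because it would
        # have to become a DataFrame; otherwise delegate to
        # DataFrame.reset_index via to_frame().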
if drop:
new_index = ibase.default_index(len(self))
if level is not None:
if not isinstance(level, (tuple, list)):
level = [level]
level = [self.index._get_level_number(lev) for lev in level]
if len(level) < self.index.nlevels:
new_index = self.index.droplevel(level)
if inplace:
self.index = new_index
# set name if it was passed, otherwise, keep the previous name
self.name = name or self.name
else:
return self._constructor(
self._values.copy(), index=new_index
).__finalize__(self, method="reset_index")
elif inplace:
raise TypeError(
"Cannot reset_index inplace on a Series to create a DataFrame"
)
else:
df = self.to_frame(name)
return df.reset_index(level=level, drop=drop)
# ----------------------------------------------------------------------
# Rendering Methods
def __repr__(self) -> str:
"""
Return a string representation for a particular Series.
"""
buf = StringIO("")
width, height = get_terminal_size()
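        # A configured "display.max_rows" of 0 means "fit to the terminal",
        # so both max_rows and min_rows fall back to the detected terminal
        # height in that case.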
max_rows = (
height
if get_option("display.max_rows") == 0
else get_option("display.max_rows")
)
min_rows = (
height
if get_option("display.max_rows") == 0
else get_option("display.min_rows")
)
show_dimensions = get_option("display.show_dimensions")
self.to_string(
buf=buf,
name=self.name,
dtype=self.dtype,
min_rows=min_rows,
max_rows=max_rows,
length=show_dimensions,
)
result = buf.getvalue()
return result
def to_string(
self,
buf=None,
na_rep="NaN",
float_format=None,
header=True,
index=True,
length=False,
dtype=False,
name=False,
max_rows=None,
min_rows=None,
):
"""
Render a string representation of the Series.
Parameters
----------
buf : StringIO-like, optional
Buffer to write to.
na_rep : str, optional
String representation of NaN to use, default 'NaN'.
float_format : one-parameter function, optional
Formatter function to apply to columns' elements if they are
floats, default None.
header : bool, default True
Add the Series header (index name).
index : bool, optional
Add index (row) labels, default True.
length : bool, default False
Add the Series length.
dtype : bool, default False
Add the Series dtype.
name : bool, default False
Add the Series name if not None.
max_rows : int, optional
Maximum number of rows to show before truncating. If None, show
all.
min_rows : int, optional
The number of rows to display in a truncated repr (when number
of rows is above `max_rows`).
Returns
-------
str or None
String representation of Series if ``buf=None``, otherwise None.
"""
formatter = fmt.SeriesFormatter(
self,
name=name,
length=length,
header=header,
index=index,
dtype=dtype,
na_rep=na_rep,
float_format=float_format,
min_rows=min_rows,
max_rows=max_rows,
)
result = formatter.to_string()
# catch contract violations
if not isinstance(result, str):
raise AssertionError(
"result must be of type str, type "
f"of result is {repr(type(result).__name__)}"
)
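        # buf may be a writable object or a path-like; fall back to opening
        # it as a file when it has no .write method.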
if buf is None:
return result
else:
try:
buf.write(result)
except AttributeError:
with open(buf, "w") as f:
f.write(result)
@doc(
klass=_shared_doc_kwargs["klass"],
examples=dedent(
"""
Examples
--------
>>> s = pd.Series(["elk", "pig", "dog", "quetzal"], name="animal")
>>> print(s.to_markdown())
| | animal |
|---:|:---------|
| 0 | elk |
| 1 | pig |
| 2 | dog |
| 3 | quetzal |
"""
),
)
def to_markdown(
self,
buf: Optional[IO[str]] = None,
mode: Optional[str] = None,
index: bool = True,
**kwargs,
) -> Optional[str]:
"""
Print {klass} in Markdown-friendly format.
.. versionadded:: 1.0.0
Parameters
----------
buf : str, Path or StringIO-like, optional, default None
Buffer to write to. If None, the output is returned as a string.
mode : str, optional
Mode in which file is opened.
index : bool, optional, default True
Add index (row) labels.
.. versionadded:: 1.1.0
**kwargs
These parameters will be passed to `tabulate \
<https://pypi.org/project/tabulate>`_.
Returns
-------
str
{klass} in Markdown-friendly format.
Examples
--------
>>> s = pd.Series(["elk", "pig", "dog", "quetzal"], name="animal")
>>> print(s.to_markdown())
| | animal |
|---:|:---------|
| 0 | elk |
| 1 | pig |
| 2 | dog |
| 3 | quetzal |
Output markdown with a tabulate option.
>>> print(s.to_markdown(tablefmt="grid"))
+----+----------+
| | animal |
+====+==========+
| 0 | elk |
+----+----------+
| 1 | pig |
+----+----------+
| 2 | dog |
+----+----------+
| 3 | quetzal |
+----+----------+
"""
return self.to_frame().to_markdown(buf, mode, index, **kwargs)
# ----------------------------------------------------------------------
def items(self) -> Iterable[Tuple[Label, Any]]:
"""
Lazily iterate over (index, value) tuples.
This method returns an iterable tuple (index, value). This is
convenient if you want to create a lazy iterator.
Returns
-------
iterable
Iterable of tuples containing the (index, value) pairs from a
Series.
See Also
--------
DataFrame.items : Iterate over (column name, Series) pairs.
DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs.
Examples
--------
>>> s = pd.Series(['A', 'B', 'C'])
>>> for index, value in s.items():
... print(f"Index : {index}, Value : {value}")
Index : 0, Value : A
Index : 1, Value : B
Index : 2, Value : C
"""
return zip(iter(self.index), iter(self))
@Appender(items.__doc__)
def iteritems(self) -> Iterable[Tuple[Label, Any]]:
return self.items()
# ----------------------------------------------------------------------
# Misc public methods
def keys(self) -> Index:
"""
Return alias for index.
Returns
-------
Index
Index of the Series.
"""
return self.index
def to_dict(self, into=dict):
"""
Convert Series to {label -> value} dict or dict-like object.
Parameters
----------
into : class, default dict
The collections.abc.Mapping subclass to use as the return
object. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
Returns
-------
collections.abc.Mapping
Key-value representation of Series.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s.to_dict()
{0: 1, 1: 2, 2: 3, 3: 4}
>>> from collections import OrderedDict, defaultdict
>>> s.to_dict(OrderedDict)
OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> dd = defaultdict(list)
>>> s.to_dict(dd)
defaultdict(<class 'list'>, {0: 1, 1: 2, 2: 3, 3: 4})
"""
# GH16122
into_c = com.standardize_mapping(into)
return into_c(self.items())
def to_frame(self, name=None) -> "DataFrame":
"""
Convert Series to DataFrame.
Parameters
----------
name : object, default None
The passed name should substitute for the series name (if it has
one).
Returns
-------
DataFrame
DataFrame representation of Series.
Examples
--------
>>> s = pd.Series(["a", "b", "c"],
... name="vals")
>>> s.to_frame()
vals
0 a
1 b
2 c
"""
if name is None:
df = self._constructor_expanddim(self)
else:
df = self._constructor_expanddim({name: self})
return df
def _set_name(self, name, inplace=False) -> "Series":
"""
Set the Series name.
Parameters
----------
name : str
inplace : bool
Whether to modify `self` directly or return a copy.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
ser = self if inplace else self.copy()
ser.name = name
return ser
@Appender(
"""
Examples
--------
>>> ser = pd.Series([390., 350., 30., 20.],
... index=['Falcon', 'Falcon', 'Parrot', 'Parrot'], name="Max Speed")
>>> ser
Falcon 390.0
Falcon 350.0
Parrot 30.0
Parrot 20.0
Name: Max Speed, dtype: float64
>>> ser.groupby(["a", "b", "a", "b"]).mean()
a 210.0
b 185.0
Name: Max Speed, dtype: float64
>>> ser.groupby(level=0).mean()
Falcon 370.0
Parrot 25.0
Name: Max Speed, dtype: float64
>>> ser.groupby(ser > 100).mean()
Max Speed
False 25.0
True 370.0
Name: Max Speed, dtype: float64
**Grouping by Indexes**
We can groupby different levels of a hierarchical index
using the `level` parameter:
>>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'],
... ['Captive', 'Wild', 'Captive', 'Wild']]
>>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type'))
>>> ser = pd.Series([390., 350., 30., 20.], index=index, name="Max Speed")
>>> ser
Animal Type
Falcon Captive 390.0
Wild 350.0
Parrot Captive 30.0
Wild 20.0
Name: Max Speed, dtype: float64
>>> ser.groupby(level=0).mean()
Animal
Falcon 370.0
Parrot 25.0
Name: Max Speed, dtype: float64
>>> ser.groupby(level="Type").mean()
Type
Captive 210.0
Wild 185.0
Name: Max Speed, dtype: float64
We can also choose to include `NA` in group keys or not by defining
`dropna` parameter, the default setting is `True`:
>>> ser = pd.Series([1, 2, 3, 3], index=["a", 'a', 'b', np.nan])
>>> ser.groupby(level=0).sum()
a 3
b 3
dtype: int64
>>> ser.groupby(level=0, dropna=False).sum()
a 3
b 3
NaN 3
dtype: int64
>>> arrays = ['Falcon', 'Falcon', 'Parrot', 'Parrot']
>>> ser = pd.Series([390., 350., 30., 20.], index=arrays, name="Max Speed")
>>> ser.groupby(["a", "b", "a", np.nan]).mean()
a 210.0
b 350.0
Name: Max Speed, dtype: float64
>>> ser.groupby(["a", "b", "a", np.nan], dropna=False).mean()
a 210.0
b 350.0
NaN 20.0
Name: Max Speed, dtype: float64
"""
)
@Appender(generic._shared_docs["groupby"] % _shared_doc_kwargs)
def groupby(
self,
by=None,
axis=0,
level=None,
as_index: bool = True,
sort: bool = True,
group_keys: bool = True,
squeeze: bool = no_default,
observed: bool = False,
dropna: bool = True,
) -> "SeriesGroupBy":
from pandas.core.groupby.generic import SeriesGroupBy
if squeeze is not no_default:
warnings.warn(
(
"The `squeeze` parameter is deprecated and "
"will be removed in a future version."
),
FutureWarning,
stacklevel=2,
)
else:
squeeze = False
if level is None and by is None:
raise TypeError("You have to supply one of 'by' and 'level'")
axis = self._get_axis_number(axis)
return SeriesGroupBy(
obj=self,
keys=by,
axis=axis,
level=level,
as_index=as_index,
sort=sort,
group_keys=group_keys,
squeeze=squeeze,
observed=observed,
dropna=dropna,
)
# ----------------------------------------------------------------------
# Statistics, overridden ndarray methods
# TODO: integrate bottleneck
def count(self, level=None):
"""
Return number of non-NA/null observations in the Series.
Parameters
----------
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a smaller Series.
Returns
-------
int or Series (if level specified)
Number of non-null values in the Series.
See Also
--------
DataFrame.count : Count non-NA cells for each column or row.
Examples
--------
>>> s = pd.Series([0.0, 1.0, np.nan])
>>> s.count()
2
"""
if level is None:
return notna(self.array).sum()
if isinstance(level, str):
level = self.index._get_level_number(level)
lev = self.index.levels[level]
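        # Per-level count, roughly: remap missing level codes (-1) to an
        # extra NA bucket, drop positions where the values themselves are
        # NA, then bincount the remaining codes against the level.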
level_codes = np.array(self.index.codes[level], subok=False, copy=True)
mask = level_codes == -1
if mask.any():
level_codes[mask] = cnt = len(lev)
lev = lev.insert(cnt, lev._na_value)
obs = level_codes[notna(self._values)]
out = np.bincount(obs, minlength=len(lev) or None)
return self._constructor(out, index=lev, dtype="int64").__finalize__(
self, method="count"
)
def mode(self, dropna=True) -> "Series":
"""
Return the mode(s) of the dataset.
Always returns Series even if only one value is returned.
Parameters
----------
dropna : bool, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
Series
Modes of the Series in sorted order.
"""
# TODO: Add option for bins like value_counts()
return algorithms.mode(self, dropna=dropna)
def unique(self):
"""
Return unique values of Series object.
Uniques are returned in order of appearance. Hash table-based unique,
therefore does NOT sort.
Returns
-------
ndarray or ExtensionArray
The unique values returned as a NumPy array. See Notes.
See Also
--------
unique : Top-level unique method for any 1-d array-like object.
Index.unique : Return Index with unique values from an Index object.
Notes
-----
Returns the unique values as a NumPy array. In case of an
extension-array backed Series, a new
:class:`~api.extensions.ExtensionArray` of that type with just
the unique values is returned. This includes
* Categorical
* Period
* Datetime with Timezone
* Interval
* Sparse
* IntegerNA
See Examples section.
Examples
--------
>>> pd.Series([2, 1, 3, 3], name='A').unique()
array([2, 1, 3])
>>> pd.Series([pd.Timestamp('2016-01-01') for _ in range(3)]).unique()
array(['2016-01-01T00:00:00.000000000'], dtype='datetime64[ns]')
>>> pd.Series([pd.Timestamp('2016-01-01', tz='US/Eastern')
... for _ in range(3)]).unique()
<DatetimeArray>
['2016-01-01 00:00:00-05:00']
Length: 1, dtype: datetime64[ns, US/Eastern]
An unordered Categorical will return categories in the order of
appearance.
>>> pd.Series(pd.Categorical(list('baabc'))).unique()
['b', 'a', 'c']
Categories (3, object): ['b', 'a', 'c']
An ordered Categorical preserves the category ordering.
>>> pd.Series(pd.Categorical(list('baabc'), categories=list('abc'),
... ordered=True)).unique()
['b', 'a', 'c']
Categories (3, object): ['a' < 'b' < 'c']
"""
result = super().unique()
return result
def drop_duplicates(self, keep="first", inplace=False) -> Optional["Series"]:
"""
Return Series with duplicate values removed.
Parameters
----------
keep : {'first', 'last', ``False``}, default 'first'
Method to handle dropping duplicates:
- 'first' : Drop duplicates except for the first occurrence.
- 'last' : Drop duplicates except for the last occurrence.
- ``False`` : Drop all duplicates.
inplace : bool, default ``False``
If ``True``, performs operation inplace and returns None.
Returns
-------
Series
Series with duplicates dropped.
See Also
--------
Index.drop_duplicates : Equivalent method on Index.
DataFrame.drop_duplicates : Equivalent method on DataFrame.
Series.duplicated : Related method on Series, indicating duplicate
Series values.
Examples
--------
Generate a Series with duplicated entries.
>>> s = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'],
... name='animal')
>>> s
0 lama
1 cow
2 lama
3 beetle
4 lama
5 hippo
Name: animal, dtype: object
With the 'keep' parameter, the selection behaviour of duplicated values
can be changed. The value 'first' keeps the first occurrence for each
set of duplicated entries. The default value of keep is 'first'.
>>> s.drop_duplicates()
0 lama
1 cow
3 beetle
5 hippo
Name: animal, dtype: object
The value 'last' for parameter 'keep' keeps the last occurrence for
each set of duplicated entries.
>>> s.drop_duplicates(keep='last')
1 cow
3 beetle
4 lama
5 hippo
Name: animal, dtype: object
The value ``False`` for parameter 'keep' discards all sets of
duplicated entries. Setting the value of 'inplace' to ``True`` performs
the operation inplace and returns ``None``.
>>> s.drop_duplicates(keep=False, inplace=True)
>>> s
1 cow
3 beetle
5 hippo
Name: animal, dtype: object
"""
inplace = validate_bool_kwarg(inplace, "inplace")
result = super().drop_duplicates(keep=keep)
if inplace:
self._update_inplace(result)
return None
else:
return result
def duplicated(self, keep="first") -> "Series":
"""
Indicate duplicate Series values.
Duplicated values are indicated as ``True`` values in the resulting
Series. Either all duplicates, all except the first or all except the
last occurrence of duplicates can be indicated.
Parameters
----------
keep : {'first', 'last', False}, default 'first'
Method to handle dropping duplicates:
- 'first' : Mark duplicates as ``True`` except for the first
occurrence.
- 'last' : Mark duplicates as ``True`` except for the last
occurrence.
- ``False`` : Mark all duplicates as ``True``.
Returns
-------
Series
Series indicating whether each value has occurred in the
preceding values.
See Also
--------
Index.duplicated : Equivalent method on pandas.Index.
DataFrame.duplicated : Equivalent method on pandas.DataFrame.
Series.drop_duplicates : Remove duplicate values from Series.
Examples
--------
By default, for each set of duplicated values, the first occurrence is
set on False and all others on True:
>>> animals = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama'])
>>> animals.duplicated()
0 False
1 False
2 True
3 False
4 True
dtype: bool
which is equivalent to
>>> animals.duplicated(keep='first')
0 False
1 False
2 True
3 False
4 True
dtype: bool
By using 'last', the last occurrence of each set of duplicated values
is set on False and all others on True:
>>> animals.duplicated(keep='last')
0 True
1 False
2 True
3 False
4 False
dtype: bool
By setting keep on ``False``, all duplicates are True:
>>> animals.duplicated(keep=False)
0 True
1 False
2 True
3 False
4 True
dtype: bool
"""
return super().duplicated(keep=keep)
def idxmin(self, axis=0, skipna=True, *args, **kwargs):
"""
Return the row label of the minimum value.
If multiple values equal the minimum, the first row label with that
value is returned.
Parameters
----------
axis : int, default 0
For compatibility with DataFrame.idxmin. Redundant for application
on Series.
skipna : bool, default True
Exclude NA/null values. If the entire Series is NA, the result
will be NA.
*args, **kwargs
Additional arguments and keywords have no effect but might be
accepted for compatibility with NumPy.
Returns
-------
Index
Label of the minimum value.
Raises
------
ValueError
If the Series is empty.
See Also
--------
numpy.argmin : Return indices of the minimum values
along the given axis.
DataFrame.idxmin : Return index of first occurrence of minimum
over requested axis.
Series.idxmax : Return index *label* of the first occurrence
of maximum of values.
Notes
-----
This method is the Series version of ``ndarray.argmin``. This method
returns the label of the minimum, while ``ndarray.argmin`` returns
the position. To get the position, use ``series.values.argmin()``.
Examples
--------
>>> s = pd.Series(data=[1, None, 4, 1],
... index=['A', 'B', 'C', 'D'])
>>> s
A 1.0
B NaN
C 4.0
D 1.0
dtype: float64
>>> s.idxmin()
'A'
If `skipna` is False and there is an NA value in the data,
the function returns ``nan``.
>>> s.idxmin(skipna=False)
nan
"""
skipna = nv.validate_argmin_with_skipna(skipna, args, kwargs)
i = nanops.nanargmin(self._values, skipna=skipna)
if i == -1:
return np.nan
return self.index[i]
def idxmax(self, axis=0, skipna=True, *args, **kwargs):
"""
Return the row label of the maximum value.
If multiple values equal the maximum, the first row label with that
value is returned.
Parameters
----------
axis : int, default 0
For compatibility with DataFrame.idxmax. Redundant for application
on Series.
skipna : bool, default True
Exclude NA/null values. If the entire Series is NA, the result
will be NA.
*args, **kwargs
Additional arguments and keywords have no effect but might be
accepted for compatibility with NumPy.
Returns
-------
Index
Label of the maximum value.
Raises
------
ValueError
If the Series is empty.
See Also
--------
numpy.argmax : Return indices of the maximum values
along the given axis.
DataFrame.idxmax : Return index of first occurrence of maximum
over requested axis.
Series.idxmin : Return index *label* of the first occurrence
of minimum of values.
Notes
-----
This method is the Series version of ``ndarray.argmax``. This method
returns the label of the maximum, while ``ndarray.argmax`` returns
the position. To get the position, use ``series.values.argmax()``.
Examples
--------
>>> s = pd.Series(data=[1, None, 4, 3, 4],
... index=['A', 'B', 'C', 'D', 'E'])
>>> s
A 1.0
B NaN
C 4.0
D 3.0
E 4.0
dtype: float64
>>> s.idxmax()
'C'
If `skipna` is False and there is an NA value in the data,
the function returns ``nan``.
>>> s.idxmax(skipna=False)
nan
"""
skipna = nv.validate_argmax_with_skipna(skipna, args, kwargs)
i = nanops.nanargmax(self._values, skipna=skipna)
if i == -1:
return np.nan
return self.index[i]
def round(self, decimals=0, *args, **kwargs) -> "Series":
"""
Round each value in a Series to the given number of decimals.
Parameters
----------
decimals : int, default 0
Number of decimal places to round to. If decimals is negative,
it specifies the number of positions to the left of the decimal point.
*args, **kwargs
Additional arguments and keywords have no effect but might be
accepted for compatibility with NumPy.
Returns
-------
Series
Rounded values of the Series.
See Also
--------
numpy.around : Round values of an np.array.
DataFrame.round : Round values of a DataFrame.
Examples
--------
>>> s = pd.Series([0.1, 1.3, 2.7])
>>> s.round()
0 0.0
1 1.0
2 3.0
dtype: float64
"""
nv.validate_round(args, kwargs)
result = self._values.round(decimals)
result = self._constructor(result, index=self.index).__finalize__(
self, method="round"
)
return result
def quantile(self, q=0.5, interpolation="linear"):
"""
Return value at the given quantile.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
The quantile(s) to compute, which can lie in range: 0 <= q <= 1.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
Returns
-------
float or Series
If ``q`` is an array, a Series will be returned where the
index is ``q`` and the values are the quantiles, otherwise
a float will be returned.
See Also
--------
core.window.Rolling.quantile : Calculate the rolling quantile.
numpy.percentile : Returns the q-th percentile(s) of the array elements.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s.quantile(.5)
2.5
>>> s.quantile([.25, .5, .75])
0.25 1.75
0.50 2.50
0.75 3.25
dtype: float64
"""
validate_percentile(q)
# We dispatch to DataFrame so that core.internals only has to worry
# about 2D cases.
df = self.to_frame()
result = df.quantile(q=q, interpolation=interpolation, numeric_only=False)
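        # DataFrame.quantile returns a DataFrame for array-like q and a
        # Series for scalar q; reduce back to a q-indexed Series or a scalar
        # accordingly.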
if result.ndim == 2:
result = result.iloc[:, 0]
if is_list_like(q):
result.name = self.name
return self._constructor(result, index=Float64Index(q), name=self.name)
else:
# scalar
return result.iloc[0]
def corr(self, other, method="pearson", min_periods=None) -> float:
"""
Compute correlation with `other` Series, excluding missing values.
Parameters
----------
other : Series
Series with which to compute the correlation.
method : {'pearson', 'kendall', 'spearman'} or callable
Method used to compute correlation:
- pearson : Standard correlation coefficient
- kendall : Kendall Tau correlation coefficient
- spearman : Spearman rank correlation
- callable: Callable with input two 1d ndarrays and returning a float.
.. versionadded:: 0.24.0
Note that the returned matrix from corr will have 1 along the
diagonals and will be symmetric regardless of the callable's
behavior.
min_periods : int, optional
Minimum number of observations needed to have a valid result.
Returns
-------
float
Correlation with other.
See Also
--------
DataFrame.corr : Compute pairwise correlation between columns.
DataFrame.corrwith : Compute pairwise correlation with another
DataFrame or Series.
Examples
--------
>>> def histogram_intersection(a, b):
... v = np.minimum(a, b).sum().round(decimals=1)
... return v
>>> s1 = pd.Series([.2, .0, .6, .2])
>>> s2 = pd.Series([.3, .6, .0, .1])
>>> s1.corr(s2, method=histogram_intersection)
0.3
"""
this, other = self.align(other, join="inner", copy=False)
if len(this) == 0:
return np.nan
if method in ["pearson", "spearman", "kendall"] or callable(method):
return nanops.nancorr(
this.values, other.values, method=method, min_periods=min_periods
)
raise ValueError(
"method must be either 'pearson', "
"'spearman', 'kendall', or a callable, "
f"'{method}' was supplied"
)
def cov(
self,
other: "Series",
min_periods: Optional[int] = None,
ddof: Optional[int] = 1,
) -> float:
"""
Compute covariance with Series, excluding missing values.
Parameters
----------
other : Series
Series with which to compute the covariance.
min_periods : int, optional
Minimum number of observations needed to have a valid result.
ddof : int, default 1
Delta degrees of freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
.. versionadded:: 1.1.0
Returns
-------
float
Covariance between Series and other normalized by N-1
(unbiased estimator).
See Also
--------
DataFrame.cov : Compute pairwise covariance of columns.
Examples
--------
>>> s1 = pd.Series([0.90010907, 0.13484424, 0.62036035])
>>> s2 = pd.Series([0.12528585, 0.26962463, 0.51111198])
>>> s1.cov(s2)
-0.01685762652715874
"""
this, other = self.align(other, join="inner", copy=False)
if len(this) == 0:
return np.nan
return nanops.nancov(
this.values, other.values, min_periods=min_periods, ddof=ddof
)
@doc(
klass="Series",
extra_params="",
other_klass="DataFrame",
examples=dedent(
"""
Difference with previous row
>>> s = pd.Series([1, 1, 2, 3, 5, 8])
>>> s.diff()
0 NaN
1 0.0
2 1.0
3 1.0
4 2.0
5 3.0
dtype: float64
Difference with 3rd previous row
>>> s.diff(periods=3)
0 NaN
1 NaN
2 NaN
3 2.0
4 4.0
5 6.0
dtype: float64
Difference with following row
>>> s.diff(periods=-1)
0 0.0
1 -1.0
2 -1.0
3 -2.0
4 -3.0
5 NaN
dtype: float64
Overflow in input dtype
>>> s = pd.Series([1, 0], dtype=np.uint8)
>>> s.diff()
0 NaN
1 255.0
dtype: float64"""
),
)
def diff(self, periods: int = 1) -> "Series":
"""
First discrete difference of element.
Calculates the difference of a {klass} element compared with another
element in the {klass} (default is element in previous row).
Parameters
----------
periods : int, default 1
Periods to shift for calculating difference, accepts negative
values.
{extra_params}
Returns
-------
{klass}
First differences of the Series.
See Also
--------
{klass}.pct_change: Percent change over given number of periods.
{klass}.shift: Shift index by desired number of periods with an
optional time freq.
{other_klass}.diff: First discrete difference of object.
Notes
-----
For boolean dtypes, this uses :meth:`operator.xor` rather than
:meth:`operator.sub`.
The result is calculated according to current dtype in {klass},
however dtype of the result is always float64.
Examples
--------
{examples}
"""
result = algorithms.diff(self.array, periods)
return self._constructor(result, index=self.index).__finalize__(
self, method="diff"
)
def autocorr(self, lag=1) -> float:
"""
Compute the lag-N autocorrelation.
This method computes the Pearson correlation between
the Series and its shifted self.
Parameters
----------
lag : int, default 1
Number of lags to apply before performing autocorrelation.
Returns
-------
float
The Pearson correlation between self and self.shift(lag).
See Also
--------
Series.corr : Compute the correlation between two Series.
Series.shift : Shift index by desired number of periods.
DataFrame.corr : Compute pairwise correlation of columns.
DataFrame.corrwith : Compute pairwise correlation between rows or
columns of two DataFrame objects.
Notes
-----
If the Pearson correlation is not well defined return 'NaN'.
Examples
--------
>>> s = pd.Series([0.25, 0.5, 0.2, -0.05])
>>> s.autocorr() # doctest: +ELLIPSIS
0.10355...
>>> s.autocorr(lag=2) # doctest: +ELLIPSIS
-0.99999...
If the Pearson correlation is not well defined, then 'NaN' is returned.
>>> s = pd.Series([1, 0, 0, 0])
>>> s.autocorr()
nan
"""
return self.corr(self.shift(lag))
def dot(self, other):
"""
Compute the dot product between the Series and the columns of other.
This method computes the dot product between the Series and another
        one, or the Series and each column of a DataFrame, or the Series and
        each column of an array.
It can also be called using `self @ other` in Python >= 3.5.
Parameters
----------
other : Series, DataFrame or array-like
The other object to compute the dot product with its columns.
Returns
-------
scalar, Series or numpy.ndarray
            Return the dot product of the Series and other if other is a
            Series, the Series of the dot products of the Series and each
            column of other if other is a DataFrame, or a numpy.ndarray of
            the dot products between the Series and each column of the array.
See Also
--------
DataFrame.dot: Compute the matrix product with the DataFrame.
Series.mul: Multiplication of series and other, element-wise.
Notes
-----
        The Series and other have to share the same index if other is a Series
or a DataFrame.
Examples
--------
>>> s = pd.Series([0, 1, 2, 3])
>>> other = pd.Series([-1, 2, -3, 4])
>>> s.dot(other)
8
>>> s @ other
8
>>> df = pd.DataFrame([[0, 1], [-2, 3], [4, -5], [6, 7]])
>>> s.dot(df)
0 24
1 14
dtype: int64
>>> arr = np.array([[0, 1], [-2, 3], [4, -5], [6, 7]])
>>> s.dot(arr)
array([24, 14])
"""
if isinstance(other, (Series, ABCDataFrame)):
common = self.index.union(other.index)
if len(common) > len(self.index) or len(common) > len(other.index):
raise ValueError("matrices are not aligned")
left = self.reindex(index=common, copy=False)
right = other.reindex(index=common, copy=False)
lvals = left.values
rvals = right.values
else:
lvals = self.values
rvals = np.asarray(other)
if lvals.shape[0] != rvals.shape[0]:
raise Exception(
f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}"
)
if isinstance(other, ABCDataFrame):
return self._constructor(
np.dot(lvals, rvals), index=other.columns
).__finalize__(self, method="dot")
elif isinstance(other, Series):
return np.dot(lvals, rvals)
elif isinstance(rvals, np.ndarray):
return np.dot(lvals, rvals)
else: # pragma: no cover
raise TypeError(f"unsupported type: {type(other)}")
def __matmul__(self, other):
"""
Matrix multiplication using binary `@` operator in Python>=3.5.
"""
return self.dot(other)
def __rmatmul__(self, other):
"""
Matrix multiplication using binary `@` operator in Python>=3.5.
"""
return self.dot(np.transpose(other))
@doc(base.IndexOpsMixin.searchsorted, klass="Series")
def searchsorted(self, value, side="left", sorter=None):
return algorithms.searchsorted(self._values, value, side=side, sorter=sorter)
# -------------------------------------------------------------------
# Combination
def append(self, to_append, ignore_index=False, verify_integrity=False):
"""
Concatenate two or more Series.
Parameters
----------
to_append : Series or list/tuple of Series
Series to append with self.
ignore_index : bool, default False
If True, the resulting axis will be labeled 0, 1, …, n - 1.
verify_integrity : bool, default False
If True, raise Exception on creating index with duplicates.
Returns
-------
Series
Concatenated Series.
See Also
--------
concat : General function to concatenate DataFrame or Series objects.
Notes
-----
Iteratively appending to a Series can be more computationally intensive
than a single concatenate. A better solution is to append values to a
list and then concatenate the list with the original Series all at
once.
Examples
--------
>>> s1 = pd.Series([1, 2, 3])
>>> s2 = pd.Series([4, 5, 6])
>>> s3 = pd.Series([4, 5, 6], index=[3, 4, 5])
>>> s1.append(s2)
0 1
1 2
2 3
0 4
1 5
2 6
dtype: int64
>>> s1.append(s3)
0 1
1 2
2 3
3 4
4 5
5 6
dtype: int64
With `ignore_index` set to True:
>>> s1.append(s2, ignore_index=True)
0 1
1 2
2 3
3 4
4 5
5 6
dtype: int64
With `verify_integrity` set to True:
>>> s1.append(s2, verify_integrity=True)
Traceback (most recent call last):
...
ValueError: Indexes have overlapping values: [0, 1, 2]
"""
from pandas.core.reshape.concat import concat
if isinstance(to_append, (list, tuple)):
to_concat = [self]
to_concat.extend(to_append)
else:
to_concat = [self, to_append]
if any(isinstance(x, (ABCDataFrame,)) for x in to_concat[1:]):
msg = "to_append should be a Series or list/tuple of Series, got DataFrame"
raise TypeError(msg)
return concat(
to_concat, ignore_index=ignore_index, verify_integrity=verify_integrity
)
def _binop(self, other, func, level=None, fill_value=None):
"""
Perform generic binary operation with optional fill value.
Parameters
----------
other : Series
func : binary operator
fill_value : float or object
Value to substitute for NA/null values. If both Series are NA in a
location, the result will be NA regardless of the passed fill value.
level : int or level name, default None
Broadcast across a level, matching Index values on the
passed MultiIndex level.
Returns
-------
Series
"""
if not isinstance(other, Series):
raise AssertionError("Other operand must be Series")
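        # Outer-align the operands if their indexes differ, fill one-sided
        # NAs with fill_value (positions that are NA on both sides stay NA),
        # then apply func with numpy floating-point warnings suppressed.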
this = self
if not self.index.equals(other.index):
this, other = self.align(other, level=level, join="outer", copy=False)
this_vals, other_vals = ops.fill_binop(this.values, other.values, fill_value)
with np.errstate(all="ignore"):
result = func(this_vals, other_vals)
name = ops.get_op_result_name(self, other)
ret = this._construct_result(result, name)
return ret
def _construct_result(
self, result: Union[ArrayLike, Tuple[ArrayLike, ArrayLike]], name: Label
) -> Union["Series", Tuple["Series", "Series"]]:
"""
Construct an appropriately-labelled Series from the result of an op.
Parameters
----------
result : ndarray or ExtensionArray
name : Label
Returns
-------
Series
In the case of __divmod__ or __rdivmod__, a 2-tuple of Series.
"""
if isinstance(result, tuple):
# produced by divmod or rdivmod
res1 = self._construct_result(result[0], name=name)
res2 = self._construct_result(result[1], name=name)
# GH#33427 assertions to keep mypy happy
assert isinstance(res1, Series)
assert isinstance(res2, Series)
return (res1, res2)
# We do not pass dtype to ensure that the Series constructor
# does inference in the case where `result` has object-dtype.
out = self._constructor(result, index=self.index)
out = out.__finalize__(self)
# Set the result's name after __finalize__ is called because __finalize__
# would set it back to self.name
out.name = name
return out
@Appender(
"""
Returns
-------
Series or DataFrame
If axis is 0 or 'index' the result will be a Series.
The resulting index will be a MultiIndex with 'self' and 'other'
stacked alternately at the inner level.
If axis is 1 or 'columns' the result will be a DataFrame.
It will have two columns namely 'self' and 'other'.
See Also
--------
DataFrame.compare : Compare with another DataFrame and show differences.
Notes
-----
Matching NaNs will not appear as a difference.
Examples
--------
>>> s1 = pd.Series(["a", "b", "c", "d", "e"])
>>> s2 = pd.Series(["a", "a", "c", "b", "e"])
Align the differences on columns
>>> s1.compare(s2)
self other
1 b a
3 d b
Stack the differences on indices
>>> s1.compare(s2, align_axis=0)
1 self b
other a
3 self d
other b
dtype: object
Keep all original rows
>>> s1.compare(s2, keep_shape=True)
self other
0 NaN NaN
1 b a
2 NaN NaN
3 d b
4 NaN NaN
Keep all original rows and also all original values
>>> s1.compare(s2, keep_shape=True, keep_equal=True)
self other
0 a a
1 b a
2 c c
3 d b
4 e e
"""
)
@Appender(generic._shared_docs["compare"] % _shared_doc_kwargs)
def compare(
self,
other: "Series",
align_axis: Axis = 1,
keep_shape: bool = False,
keep_equal: bool = False,
) -> FrameOrSeriesUnion:
return super().compare(
other=other,
align_axis=align_axis,
keep_shape=keep_shape,
keep_equal=keep_equal,
)
def combine(self, other, func, fill_value=None) -> "Series":
"""
Combine the Series with a Series or scalar according to `func`.
Combine the Series and `other` using `func` to perform elementwise
selection for combined Series.
`fill_value` is assumed when value is missing at some index
from one of the two objects being combined.
Parameters
----------
other : Series or scalar
The value(s) to be combined with the `Series`.
func : function
Function that takes two scalars as inputs and returns an element.
fill_value : scalar, optional
The value to assume when an index is missing from
one Series or the other. The default specifies to use the
appropriate NaN value for the underlying dtype of the Series.
Returns
-------
Series
The result of combining the Series with the other object.
See Also
--------
Series.combine_first : Combine Series values, choosing the calling
Series' values first.
Examples
--------
Consider 2 Datasets ``s1`` and ``s2`` containing
highest clocked speeds of different birds.
>>> s1 = pd.Series({'falcon': 330.0, 'eagle': 160.0})
>>> s1
falcon 330.0
eagle 160.0
dtype: float64
>>> s2 = pd.Series({'falcon': 345.0, 'eagle': 200.0, 'duck': 30.0})
>>> s2
falcon 345.0
eagle 200.0
duck 30.0
dtype: float64
Now, to combine the two datasets and view the highest speeds
of the birds across the two datasets
>>> s1.combine(s2, max)
duck NaN
eagle 200.0
falcon 345.0
dtype: float64
In the previous example, the resulting value for duck is missing,
because the maximum of a NaN and a float is a NaN.
        So, in the next example, we set ``fill_value=0``,
        so the maximum value returned will be the value from one of the two datasets.
>>> s1.combine(s2, max, fill_value=0)
duck 30.0
eagle 200.0
falcon 345.0
dtype: float64
"""
if fill_value is None:
fill_value = na_value_for_dtype(self.dtype, compat=False)
if isinstance(other, Series):
# If other is a Series, result is based on union of Series,
# so do this element by element
new_index = self.index.union(other.index)
new_name = ops.get_op_result_name(self, other)
new_values = []
for idx in new_index:
lv = self.get(idx, fill_value)
rv = other.get(idx, fill_value)
with np.errstate(all="ignore"):
new_values.append(func(lv, rv))
else:
# Assume that other is a scalar, so apply the function for
# each element in the Series
new_index = self.index
with np.errstate(all="ignore"):
new_values = [func(lv, other) for lv in self._values]
new_name = self.name
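        # Dtype handling: Categorical results are left to the constructor;
        # for other extension dtypes, try to cast the element-wise results
        # back to the calling extension array type, passing the plain list
        # through unchanged if that cast fails.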
if is_categorical_dtype(self.dtype):
pass
elif is_extension_array_dtype(self.dtype):
# TODO: can we do this for only SparseDtype?
# The function can return something of any type, so check
# if the type is compatible with the calling EA.
new_values = maybe_cast_to_extension_array(type(self._values), new_values)
return self._constructor(new_values, index=new_index, name=new_name)
def combine_first(self, other) -> "Series":
"""
Combine Series values, choosing the calling Series's values first.
Parameters
----------
other : Series
The value(s) to be combined with the `Series`.
Returns
-------
Series
The result of combining the Series with the other object.
See Also
--------
Series.combine : Perform elementwise operation on two Series
using a given function.
Notes
-----
Result index will be the union of the two indexes.
Examples
--------
>>> s1 = pd.Series([1, np.nan])
>>> s2 = pd.Series([3, 4])
>>> s1.combine_first(s2)
0 1.0
1 4.0
dtype: float64
"""
new_index = self.index.union(other.index)
this = self.reindex(new_index, copy=False)
other = other.reindex(new_index, copy=False)
if this.dtype.kind == "M" and other.dtype.kind != "M":
other = to_datetime(other)
return this.where(notna(this), other)
def update(self, other) -> None:
"""
Modify Series in place using values from passed Series.
Uses non-NA values from passed Series to make updates. Aligns
on index.
Parameters
----------
other : Series, or object coercible into Series
Examples
--------
>>> s = pd.Series([1, 2, 3])
>>> s.update(pd.Series([4, 5, 6]))
>>> s
0 4
1 5
2 6
dtype: int64
>>> s = pd.Series(['a', 'b', 'c'])
>>> s.update(pd.Series(['d', 'e'], index=[0, 2]))
>>> s
0 d
1 b
2 e
dtype: object
>>> s = pd.Series([1, 2, 3])
>>> s.update(pd.Series([4, 5, 6, 7, 8]))
>>> s
0 4
1 5
2 6
dtype: int64
If ``other`` contains NaNs the corresponding values are not updated
in the original Series.
>>> s = pd.Series([1, 2, 3])
>>> s.update(pd.Series([4, np.nan, 6]))
>>> s
0 4
1 2
2 6
dtype: int64
``other`` can also be a non-Series object type
that is coercible into a Series
>>> s = pd.Series([1, 2, 3])
>>> s.update([4, np.nan, 6])
>>> s
0 4
1 2
2 6
dtype: int64
>>> s = pd.Series([1, 2, 3])
>>> s.update({1: 9})
>>> s
0 1
1 9
2 3
dtype: int64
"""
if not isinstance(other, Series):
other = Series(other)
other = other.reindex_like(self)
mask = notna(other)
self._mgr = self._mgr.putmask(mask=mask, new=other)
self._maybe_update_cacher()
# ----------------------------------------------------------------------
# Reindexing, sorting
def sort_values(
self,
axis=0,
ascending=True,
inplace: bool = False,
kind: str = "quicksort",
na_position: str = "last",
ignore_index: bool = False,
key: ValueKeyFunc = None,
):
"""
Sort by the values.
Sort a Series in ascending or descending order by some
criterion.
Parameters
----------
axis : {0 or 'index'}, default 0
Axis to direct sorting. The value 'index' is accepted for
compatibility with DataFrame.sort_values.
ascending : bool, default True
If True, sort values in ascending order, otherwise descending.
inplace : bool, default False
If True, perform operation in-place.
kind : {'quicksort', 'mergesort' or 'heapsort'}, default 'quicksort'
Choice of sorting algorithm. See also :func:`numpy.sort` for more
information. 'mergesort' is the only stable algorithm.
na_position : {'first' or 'last'}, default 'last'
Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at
the end.
ignore_index : bool, default False
If True, the resulting axis will be labeled 0, 1, …, n - 1.
.. versionadded:: 1.0.0
key : callable, optional
If not None, apply the key function to the series values
before sorting. This is similar to the `key` argument in the
builtin :meth:`sorted` function, with the notable difference that
this `key` function should be *vectorized*. It should expect a
``Series`` and return an array-like.
.. versionadded:: 1.1.0
Returns
-------
Series
Series ordered by values.
See Also
--------
Series.sort_index : Sort by the Series indices.
DataFrame.sort_values : Sort DataFrame by the values along either axis.
DataFrame.sort_index : Sort DataFrame by indices.
Examples
--------
>>> s = pd.Series([np.nan, 1, 3, 10, 5])
>>> s
0 NaN
1 1.0
2 3.0
3 10.0
4 5.0
dtype: float64
Sort values ascending order (default behaviour)
>>> s.sort_values(ascending=True)
1 1.0
2 3.0
4 5.0
3 10.0
0 NaN
dtype: float64
Sort values descending order
>>> s.sort_values(ascending=False)
3 10.0
4 5.0
2 3.0
1 1.0
0 NaN
dtype: float64
Sort values inplace
>>> s.sort_values(ascending=False, inplace=True)
>>> s
3 10.0
4 5.0
2 3.0
1 1.0
0 NaN
dtype: float64
Sort values putting NAs first
>>> s.sort_values(na_position='first')
0 NaN
1 1.0
2 3.0
4 5.0
3 10.0
dtype: float64
Sort a series of strings
>>> s = pd.Series(['z', 'b', 'd', 'a', 'c'])
>>> s
0 z
1 b
2 d
3 a
4 c
dtype: object
>>> s.sort_values()
3 a
1 b
4 c
2 d
0 z
dtype: object
Sort using a key function. Your `key` function will be
given the ``Series`` of values and should return an array-like.
>>> s = pd.Series(['a', 'B', 'c', 'D', 'e'])
>>> s.sort_values()
1 B
3 D
0 a
2 c
4 e
dtype: object
>>> s.sort_values(key=lambda x: x.str.lower())
0 a
1 B
2 c
3 D
4 e
dtype: object
NumPy ufuncs work well here. For example, we can
sort by the ``sin`` of the value
>>> s = pd.Series([-4, -2, 0, 2, 4])
>>> s.sort_values(key=np.sin)
1 -2
4 4
2 0
0 -4
3 2
dtype: int64
More complicated user-defined functions can be used,
as long as they expect a Series and return an array-like
>>> s.sort_values(key=lambda x: (np.tan(x.cumsum())))
0 -4
3 2
4 4
1 -2
2 0
dtype: int64
"""
inplace = validate_bool_kwarg(inplace, "inplace")
# Validate the axis parameter
self._get_axis_number(axis)
# GH 5856/5853
if inplace and self._is_cached:
raise ValueError(
"This Series is a view of some other array, to "
"sort in-place you must create a copy"
)
def _try_kind_sort(arr):
arr = ensure_key_mapped(arr, key)
arr = getattr(arr, "_values", arr)
# easier to ask forgiveness than permission
try:
# if kind==mergesort, it can fail for object dtype
return arr.argsort(kind=kind)
except TypeError:
# stable sort not available for object dtype
# uses the argsort default quicksort
return arr.argsort(kind="quicksort")
arr = self._values
sorted_index = np.empty(len(self), dtype=np.int32)
bad = isna(arr)
good = ~bad
idx = ibase.default_index(len(self))
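        # Assemble the final positional order: argsort only the non-NA
        # values, reverse for descending order, then splice the NA positions
        # in at the front or back according to na_position.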
argsorted = _try_kind_sort(self[good])
if is_list_like(ascending):
if len(ascending) != 1:
raise ValueError(
f"Length of ascending ({len(ascending)}) must be 1 for Series"
)
ascending = ascending[0]
if not is_bool(ascending):
raise ValueError("ascending must be boolean")
if not ascending:
argsorted = argsorted[::-1]
if na_position == "last":
n = good.sum()
sorted_index[:n] = idx[good][argsorted]
sorted_index[n:] = idx[bad]
elif na_position == "first":
n = bad.sum()
sorted_index[n:] = idx[good][argsorted]
sorted_index[:n] = idx[bad]
else:
raise ValueError(f"invalid na_position: {na_position}")
result = self._constructor(arr[sorted_index], index=self.index[sorted_index])
if ignore_index:
result.index = ibase.default_index(len(sorted_index))
if inplace:
self._update_inplace(result)
else:
return result.__finalize__(self, method="sort_values")
def sort_index(
self,
axis=0,
level=None,
ascending: bool = True,
inplace: bool = False,
kind: str = "quicksort",
na_position: str = "last",
sort_remaining: bool = True,
ignore_index: bool = False,
key: IndexKeyFunc = None,
):
"""
Sort Series by index labels.
Returns a new Series sorted by label if `inplace` argument is
``False``, otherwise updates the original series and returns None.
Parameters
----------
axis : int, default 0
Axis to direct sorting. This can only be 0 for Series.
level : int, optional
If not None, sort on values in specified index level(s).
ascending : bool or list of bools, default True
Sort ascending vs. descending. When the index is a MultiIndex the
sort direction can be controlled for each level individually.
inplace : bool, default False
If True, perform operation in-place.
kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'
Choice of sorting algorithm. See also :func:`numpy.sort` for more
information. 'mergesort' is the only stable algorithm. For
DataFrames, this option is only applied when sorting on a single
column or label.
na_position : {'first', 'last'}, default 'last'
If 'first' puts NaNs at the beginning, 'last' puts NaNs at the end.
Not implemented for MultiIndex.
sort_remaining : bool, default True
If True and sorting by level and index is multilevel, sort by other
levels too (in order) after sorting by specified level.
ignore_index : bool, default False
If True, the resulting axis will be labeled 0, 1, …, n - 1.
.. versionadded:: 1.0.0
key : callable, optional
If not None, apply the key function to the index values
before sorting. This is similar to the `key` argument in the
builtin :meth:`sorted` function, with the notable difference that
this `key` function should be *vectorized*. It should expect an
``Index`` and return an ``Index`` of the same shape.
.. versionadded:: 1.1.0
Returns
-------
Series
The original Series sorted by the labels.
See Also
--------
DataFrame.sort_index: Sort DataFrame by the index.
DataFrame.sort_values: Sort DataFrame by the value.
Series.sort_values : Sort Series by the value.
Examples
--------
>>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, 4])
>>> s.sort_index()
1 c
2 b
3 a
4 d
dtype: object
Sort Descending
>>> s.sort_index(ascending=False)
4 d
3 a
2 b
1 c
dtype: object
Sort Inplace
>>> s.sort_index(inplace=True)
>>> s
1 c
2 b
3 a
4 d
dtype: object
By default NaNs are put at the end, but use `na_position` to place
them at the beginning
>>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, np.nan])
>>> s.sort_index(na_position='first')
NaN d
1.0 c
2.0 b
3.0 a
dtype: object
Specify index level to sort
>>> arrays = [np.array(['qux', 'qux', 'foo', 'foo',
... 'baz', 'baz', 'bar', 'bar']),
... np.array(['two', 'one', 'two', 'one',
... 'two', 'one', 'two', 'one'])]
>>> s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8], index=arrays)
>>> s.sort_index(level=1)
bar one 8
baz one 6
foo one 4
qux one 2
bar two 7
baz two 5
foo two 3
qux two 1
dtype: int64
Does not sort by remaining levels when sorting by levels
>>> s.sort_index(level=1, sort_remaining=False)
qux one 2
foo one 4
baz one 6
bar one 8
qux two 1
foo two 3
baz two 5
bar two 7
dtype: int64
Apply a key function before sorting
>>> s = pd.Series([1, 2, 3, 4], index=['A', 'b', 'C', 'd'])
>>> s.sort_index(key=lambda x : x.str.lower())
A 1
b 2
C 3
d 4
dtype: int64
"""
# TODO: this can be combined with DataFrame.sort_index impl as
# almost identical
inplace = validate_bool_kwarg(inplace, "inplace")
# Validate the axis parameter
self._get_axis_number(axis)
index = ensure_key_mapped(self.index, key, levels=level)
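        # Three ways to obtain the indexer: sortlevel for an explicit level,
        # lexsort_indexer for a MultiIndex, and nargsort otherwise (with an
        # early return when the index is already monotonic in the requested
        # direction).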
if level is not None:
new_index, indexer = index.sortlevel(
level, ascending=ascending, sort_remaining=sort_remaining
)
elif isinstance(index, MultiIndex):
from pandas.core.sorting import lexsort_indexer
labels = index._sort_levels_monotonic()
indexer = lexsort_indexer(
labels._get_codes_for_sorting(),
orders=ascending,
na_position=na_position,
)
else:
from pandas.core.sorting import nargsort
            # Check for monotonicity before sorting the index
# GH11080
if (ascending and index.is_monotonic_increasing) or (
not ascending and index.is_monotonic_decreasing
):
if inplace:
return
else:
return self.copy()
indexer = nargsort(
index, kind=kind, ascending=ascending, na_position=na_position
)
indexer = ensure_platform_int(indexer)
new_index = self.index.take(indexer)
new_index = new_index._sort_levels_monotonic()
new_values = self._values.take(indexer)
result = self._constructor(new_values, index=new_index)
if ignore_index:
result.index = ibase.default_index(len(result))
if inplace:
self._update_inplace(result)
else:
return result.__finalize__(self, method="sort_index")
def argsort(self, axis=0, kind="quicksort", order=None) -> "Series":
"""
Return the integer indices that would sort the Series values.
Override ndarray.argsort. Argsorts the value, omitting NA/null values,
and places the result in the same locations as the non-NA values.
Parameters
----------
axis : {0 or "index"}
Has no effect but is accepted for compatibility with numpy.
kind : {'mergesort', 'quicksort', 'heapsort'}, default 'quicksort'
Choice of sorting algorithm. See np.sort for more
information. 'mergesort' is the only stable algorithm.
order : None
Has no effect but is accepted for compatibility with numpy.
Returns
-------
Series
Positions of values within the sort order with -1 indicating
nan values.
See Also
--------
numpy.ndarray.argsort : Returns the indices that would sort this array.
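        Examples
        --------
        A short illustration; as noted above, missing values would instead be
        assigned position ``-1``.
        >>> s = pd.Series([3, 1, 2])
        >>> s.argsort()
        0    1
        1    2
        2    0
        dtype: int64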
"""
values = self._values
mask = isna(values)
if mask.any():
result = Series(-1, index=self.index, name=self.name, dtype="int64")
notmask = ~mask
result[notmask] = np.argsort(values[notmask], kind=kind)
return self._constructor(result, index=self.index).__finalize__(
self, method="argsort"
)
else:
return self._constructor(
np.argsort(values, kind=kind), index=self.index, dtype="int64"
).__finalize__(self, method="argsort")
def nlargest(self, n=5, keep="first") -> "Series":
"""
Return the largest `n` elements.
Parameters
----------
n : int, default 5
Return this many descending sorted values.
keep : {'first', 'last', 'all'}, default 'first'
When there are duplicate values that cannot all fit in a
Series of `n` elements:
- ``first`` : return the first `n` occurrences in order
of appearance.
- ``last`` : return the last `n` occurrences in reverse
order of appearance.
- ``all`` : keep all occurrences. This can result in a Series of
size larger than `n`.
Returns
-------
Series
The `n` largest values in the Series, sorted in decreasing order.
See Also
--------
Series.nsmallest: Get the `n` smallest elements.
Series.sort_values: Sort Series by values.
Series.head: Return the first `n` rows.
Notes
-----
Faster than ``.sort_values(ascending=False).head(n)`` for small `n`
relative to the size of the ``Series`` object.
Examples
--------
>>> countries_population = {"Italy": 59000000, "France": 65000000,
... "Malta": 434000, "Maldives": 434000,
... "Brunei": 434000, "Iceland": 337000,
... "Nauru": 11300, "Tuvalu": 11300,
... "Anguilla": 11300, "Montserrat": 5200}
>>> s = pd.Series(countries_population)
>>> s
Italy 59000000
France 65000000
Malta 434000
Maldives 434000
Brunei 434000
Iceland 337000
Nauru 11300
Tuvalu 11300
Anguilla 11300
Montserrat 5200
dtype: int64
The `n` largest elements where ``n=5`` by default.
>>> s.nlargest()
France 65000000
Italy 59000000
Malta 434000
Maldives 434000
Brunei 434000
dtype: int64
The `n` largest elements where ``n=3``. Default `keep` value is 'first'
so Malta will be kept.
>>> s.nlargest(3)
France 65000000
Italy 59000000
Malta 434000
dtype: int64
The `n` largest elements where ``n=3`` and keeping the last duplicates.
Brunei will be kept since it is the last with value 434000 based on
the index order.
>>> s.nlargest(3, keep='last')
France 65000000
Italy 59000000
Brunei 434000
dtype: int64
The `n` largest elements where ``n=3`` with all duplicates kept. Note
that the returned Series has five elements due to the three duplicates.
>>> s.nlargest(3, keep='all')
France 65000000
Italy 59000000
Malta 434000
Maldives 434000
Brunei 434000
dtype: int64
"""
return algorithms.SelectNSeries(self, n=n, keep=keep).nlargest()
def nsmallest(self, n=5, keep="first") -> "Series":
"""
Return the smallest `n` elements.
Parameters
----------
n : int, default 5
Return this many ascending sorted values.
keep : {'first', 'last', 'all'}, default 'first'
When there are duplicate values that cannot all fit in a
Series of `n` elements:
- ``first`` : return the first `n` occurrences in order
of appearance.
- ``last`` : return the last `n` occurrences in reverse
order of appearance.
- ``all`` : keep all occurrences. This can result in a Series of
size larger than `n`.
Returns
-------
Series
The `n` smallest values in the Series, sorted in increasing order.
See Also
--------
Series.nlargest: Get the `n` largest elements.
Series.sort_values: Sort Series by values.
Series.head: Return the first `n` rows.
Notes
-----
Faster than ``.sort_values().head(n)`` for small `n` relative to
the size of the ``Series`` object.
Examples
--------
>>> countries_population = {"Italy": 59000000, "France": 65000000,
... "Brunei": 434000, "Malta": 434000,
... "Maldives": 434000, "Iceland": 337000,
... "Nauru": 11300, "Tuvalu": 11300,
... "Anguilla": 11300, "Montserrat": 5200}
>>> s = pd.Series(countries_population)
>>> s
Italy 59000000
France 65000000
Brunei 434000
Malta 434000
Maldives 434000
Iceland 337000
Nauru 11300
Tuvalu 11300
Anguilla 11300
Montserrat 5200
dtype: int64
The `n` smallest elements where ``n=5`` by default.
>>> s.nsmallest()
Montserrat 5200
Nauru 11300
Tuvalu 11300
Anguilla 11300
Iceland 337000
dtype: int64
The `n` smallest elements where ``n=3``. Default `keep` value is
'first' so Nauru and Tuvalu will be kept.
>>> s.nsmallest(3)
Montserrat 5200
Nauru 11300
Tuvalu 11300
dtype: int64
The `n` smallest elements where ``n=3`` and keeping the last
duplicates. Anguilla and Tuvalu will be kept since they are the last
with value 11300 based on the index order.
>>> s.nsmallest(3, keep='last')
Montserrat 5200
Anguilla 11300
Tuvalu 11300
dtype: int64
The `n` smallest elements where ``n=3`` with all duplicates kept. Note
that the returned Series has four elements due to the three duplicates.
>>> s.nsmallest(3, keep='all')
Montserrat 5200
Nauru 11300
Tuvalu 11300
Anguilla 11300
dtype: int64
"""
return algorithms.SelectNSeries(self, n=n, keep=keep).nsmallest()
def swaplevel(self, i=-2, j=-1, copy=True) -> "Series":
"""
Swap levels i and j in a :class:`MultiIndex`.
Default is to swap the two innermost levels of the index.
Parameters
----------
i, j : int, str
Level of the indices to be swapped. Can pass level name as string.
copy : bool, default True
Whether to copy underlying data.
Returns
-------
Series
Series with levels swapped in MultiIndex.
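        Examples
        --------
        A small illustrative case with a two-level index; by default the two
        innermost (here, the only two) levels are exchanged.
        >>> s = pd.Series([1, 2],
        ...               index=pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y')]))
        >>> s.swaplevel()
        x  a    1
        y  b    2
        dtype: int64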
"""
assert isinstance(self.index, MultiIndex)
new_index = self.index.swaplevel(i, j)
return self._constructor(self._values, index=new_index, copy=copy).__finalize__(
self, method="swaplevel"
)
def reorder_levels(self, order) -> "Series":
"""
Rearrange index levels using input order.
May not drop or duplicate levels.
Parameters
----------
order : list of int representing new level order
Reference level by number or key.
Returns
-------
type of caller (new object)
"""
if not isinstance(self.index, MultiIndex): # pragma: no cover
raise Exception("Can only reorder levels on a hierarchical axis.")
result = self.copy()
assert isinstance(result.index, MultiIndex)
result.index = result.index.reorder_levels(order)
return result
def explode(self, ignore_index: bool = False) -> "Series":
"""
Transform each element of a list-like to a row.
.. versionadded:: 0.25.0
Parameters
----------
ignore_index : bool, default False
If True, the resulting index will be labeled 0, 1, …, n - 1.
.. versionadded:: 1.1.0
Returns
-------
Series
Exploded lists to rows; index will be duplicated for these rows.
See Also
--------
Series.str.split : Split string values on specified separator.
Series.unstack : Unstack, a.k.a. pivot, Series with MultiIndex
to produce DataFrame.
DataFrame.melt : Unpivot a DataFrame from wide format to long format.
DataFrame.explode : Explode a DataFrame from list-like
columns to long format.
Notes
-----
This routine will explode list-likes including lists, tuples,
Series, and np.ndarray. The result dtype of the subset rows will
be object. Scalars will be returned unchanged. Empty list-likes will
result in a np.nan for that row.
Examples
--------
>>> s = pd.Series([[1, 2, 3], 'foo', [], [3, 4]])
>>> s
0 [1, 2, 3]
1 foo
2 []
3 [3, 4]
dtype: object
>>> s.explode()
0 1
0 2
0 3
1 foo
2 NaN
3 3
3 4
dtype: object
"""
if not len(self) or not is_object_dtype(self):
return self.copy()
values, counts = reshape.explode(np.asarray(self.array))
if ignore_index:
index = ibase.default_index(len(values))
else:
index = self.index.repeat(counts)
result = self._constructor(values, index=index, name=self.name)
return result
def unstack(self, level=-1, fill_value=None):
"""
Unstack, also known as pivot, Series with MultiIndex to produce DataFrame.
Parameters
----------
level : int, str, or list of these, default last level
Level(s) to unstack, can pass level name.
fill_value : scalar value, default None
Value to use when replacing NaN values.
Returns
-------
DataFrame
Unstacked Series.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4],
... index=pd.MultiIndex.from_product([['one', 'two'],
... ['a', 'b']]))
>>> s
one a 1
b 2
two a 3
b 4
dtype: int64
>>> s.unstack(level=-1)
a b
one 1 2
two 3 4
>>> s.unstack(level=0)
one two
a 1 3
b 2 4
"""
from pandas.core.reshape.reshape import unstack
return unstack(self, level, fill_value)
# ----------------------------------------------------------------------
# function application
def map(self, arg, na_action=None) -> "Series":
"""
Map values of Series according to input correspondence.
Used for substituting each value in a Series with another value,
that may be derived from a function, a ``dict`` or
a :class:`Series`.
Parameters
----------
arg : function, collections.abc.Mapping subclass or Series
Mapping correspondence.
na_action : {None, 'ignore'}, default None
If 'ignore', propagate NaN values, without passing them to the
mapping correspondence.
Returns
-------
Series
Same index as caller.
See Also
--------
Series.apply : For applying more complex functions on a Series.
DataFrame.apply : Apply a function row-/column-wise.
DataFrame.applymap : Apply a function elementwise on a whole DataFrame.
Notes
-----
When ``arg`` is a dictionary, values in Series that are not in the
dictionary (as keys) are converted to ``NaN``. However, if the
dictionary is a ``dict`` subclass that defines ``__missing__`` (i.e.
provides a method for default values), then this default is used
rather than ``NaN``.
Examples
--------
>>> s = pd.Series(['cat', 'dog', np.nan, 'rabbit'])
>>> s
0 cat
1 dog
2 NaN
3 rabbit
dtype: object
``map`` accepts a ``dict`` or a ``Series``. Values that are not found
in the ``dict`` are converted to ``NaN``, unless the dict has a default
value (e.g. ``defaultdict``):
>>> s.map({'cat': 'kitten', 'dog': 'puppy'})
0 kitten
1 puppy
2 NaN
3 NaN
dtype: object
It also accepts a function:
>>> s.map('I am a {}'.format)
0 I am a cat
1 I am a dog
2 I am a nan
3 I am a rabbit
dtype: object
To avoid applying the function to missing values (and keep them as
``NaN``) ``na_action='ignore'`` can be used:
>>> s.map('I am a {}'.format, na_action='ignore')
0 I am a cat
1 I am a dog
2 NaN
3 I am a rabbit
dtype: object
"""
new_values = super()._map_values(arg, na_action=na_action)
return self._constructor(new_values, index=self.index).__finalize__(
self, method="map"
)
def _gotitem(self, key, ndim, subset=None) -> "Series":
"""
Sub-classes to define. Return a sliced object.
Parameters
----------
key : string / list of selections
ndim : 1,2
Requested ndim of result.
subset : object, default None
Subset to act on.
"""
return self
_agg_see_also_doc = dedent(
"""
See Also
--------
Series.apply : Invoke function on a Series.
Series.transform : Transform function producing a Series with like indexes.
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.agg('min')
1
>>> s.agg(['min', 'max'])
min 1
max 4
dtype: int64
"""
)
@doc(
generic._shared_docs["aggregate"],
klass=_shared_doc_kwargs["klass"],
axis=_shared_doc_kwargs["axis"],
see_also=_agg_see_also_doc,
examples=_agg_examples_doc,
versionadded="\n.. versionadded:: 0.20.0\n",
)
def aggregate(self, func=None, axis=0, *args, **kwargs):
# Validate the axis parameter
self._get_axis_number(axis)
# if func is None, will switch to user-provided "named aggregation" kwargs
if func is None:
func = dict(kwargs.items())
result, how = self._aggregate(func, *args, **kwargs)
if result is None:
# we can be called from an inner function which
# passes this meta-data
kwargs.pop("_axis", None)
kwargs.pop("_level", None)
# try a regular apply, this evaluates lambdas
            # row-by-row; however if the lambda is expected to be a Series
# expression, e.g.: lambda x: x-x.quantile(0.25)
# this will fail, so we can try a vectorized evaluation
# we cannot FIRST try the vectorized evaluation, because
# then .agg and .apply would have different semantics if the
# operation is actually defined on the Series, e.g. str
try:
result = self.apply(func, *args, **kwargs)
except (ValueError, AttributeError, TypeError):
result = func(self, *args, **kwargs)
return result
agg = aggregate
@doc(
NDFrame.transform,
klass=_shared_doc_kwargs["klass"],
axis=_shared_doc_kwargs["axis"],
)
def transform(self, func, axis=0, *args, **kwargs):
# Validate the axis parameter
self._get_axis_number(axis)
return super().transform(func, *args, **kwargs)
def apply(self, func, convert_dtype=True, args=(), **kwds):
"""
Invoke function on values of Series.
Can be ufunc (a NumPy function that applies to the entire Series)
or a Python function that only works on single values.
Parameters
----------
func : function
Python function or NumPy ufunc to apply.
convert_dtype : bool, default True
Try to find better dtype for elementwise function results. If
False, leave as dtype=object.
args : tuple
Positional arguments passed to func after the series value.
**kwds
Additional keyword arguments passed to func.
Returns
-------
Series or DataFrame
If func returns a Series object the result will be a DataFrame.
See Also
--------
Series.map: For element-wise operations.
Series.agg: Only perform aggregating type operations.
Series.transform: Only perform transforming type operations.
Examples
--------
Create a series with typical summer temperatures for each city.
>>> s = pd.Series([20, 21, 12],
... index=['London', 'New York', 'Helsinki'])
>>> s
London 20
New York 21
Helsinki 12
dtype: int64
Square the values by defining a function and passing it as an
argument to ``apply()``.
>>> def square(x):
... return x ** 2
>>> s.apply(square)
London 400
New York 441
Helsinki 144
dtype: int64
Square the values by passing an anonymous function as an
argument to ``apply()``.
>>> s.apply(lambda x: x ** 2)
London 400
New York 441
Helsinki 144
dtype: int64
Define a custom function that needs additional positional
arguments and pass these additional arguments using the
``args`` keyword.
>>> def subtract_custom_value(x, custom_value):
... return x - custom_value
>>> s.apply(subtract_custom_value, args=(5,))
London 15
New York 16
Helsinki 7
dtype: int64
Define a custom function that takes keyword arguments
and pass these arguments to ``apply``.
>>> def add_custom_values(x, **kwargs):
... for month in kwargs:
... x += kwargs[month]
... return x
>>> s.apply(add_custom_values, june=30, july=20, august=25)
London 95
New York 96
Helsinki 87
dtype: int64
Use a function from the Numpy library.
>>> s.apply(np.log)
London 2.995732
New York 3.044522
Helsinki 2.484907
dtype: float64
"""
if len(self) == 0:
return self._constructor(dtype=self.dtype, index=self.index).__finalize__(
self, method="apply"
)
# dispatch to agg
if isinstance(func, (list, dict)):
return self.aggregate(func, *args, **kwds)
# if we are a string, try to dispatch
if isinstance(func, str):
return self._try_aggregate_string_function(func, *args, **kwds)
# handle ufuncs and lambdas
if kwds or args and not isinstance(func, np.ufunc):
def f(x):
return func(x, *args, **kwds)
else:
f = func
with np.errstate(all="ignore"):
if isinstance(f, np.ufunc):
return f(self)
# row-wise access
if is_extension_array_dtype(self.dtype) and hasattr(self._values, "map"):
# GH#23179 some EAs do not have `map`
mapped = self._values.map(f)
else:
values = self.astype(object)._values
mapped = lib.map_infer(values, f, convert=convert_dtype)
if len(mapped) and isinstance(mapped[0], Series):
# GH 25959 use pd.array instead of tolist
# so extension arrays can be used
return self._constructor_expanddim(pd.array(mapped), index=self.index)
else:
return self._constructor(mapped, index=self.index).__finalize__(
self, method="apply"
)
def _reduce(
self, op, name, axis=0, skipna=True, numeric_only=None, filter_type=None, **kwds
):
"""
Perform a reduction operation.
If we have an ndarray as a value, then simply perform the operation,
otherwise delegate to the object.
"""
delegate = self._values
if axis is not None:
self._get_axis_number(axis)
if isinstance(delegate, ExtensionArray):
# dispatch to ExtensionArray interface
return delegate._reduce(name, skipna=skipna, **kwds)
else:
# dispatch to numpy arrays
if numeric_only:
raise NotImplementedError(
f"Series.{name} does not implement numeric_only."
)
with np.errstate(all="ignore"):
return op(delegate, skipna=skipna, **kwds)
def _reindex_indexer(self, new_index, indexer, copy):
if indexer is None:
if copy:
return self.copy()
return self
new_values = algorithms.take_1d(
self._values, indexer, allow_fill=True, fill_value=None
)
return self._constructor(new_values, index=new_index)
def _needs_reindex_multi(self, axes, method, level):
"""
Check if we do need a multi reindex; this is for compat with
higher dims.
"""
return False
@doc(
NDFrame.align,
klass=_shared_doc_kwargs["klass"],
axes_single_arg=_shared_doc_kwargs["axes_single_arg"],
)
def align(
self,
other,
join="outer",
axis=None,
level=None,
copy=True,
fill_value=None,
method=None,
limit=None,
fill_axis=0,
broadcast_axis=None,
):
return super().align(
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
broadcast_axis=broadcast_axis,
)
def rename(
self,
index=None,
*,
axis=None,
copy=True,
inplace=False,
level=None,
errors="ignore",
):
"""
Alter Series index labels or name.
Function / dict values must be unique (1-to-1). Labels not contained in
a dict / Series will be left as-is. Extra labels listed don't throw an
error.
Alternatively, change ``Series.name`` with a scalar value.
See the :ref:`user guide <basics.rename>` for more.
Parameters
----------
axis : {0 or "index"}
Unused. Accepted for compatibility with DataFrame method only.
index : scalar, hashable sequence, dict-like or function, optional
Functions or dict-like are transformations to apply to
the index.
Scalar or hashable sequence-like will alter the ``Series.name``
attribute.
**kwargs
Additional keyword arguments passed to the function. Only the
"inplace" keyword is used.
Returns
-------
Series
Series with index labels or name altered.
See Also
--------
DataFrame.rename : Corresponding DataFrame method.
Series.rename_axis : Set the name of the axis.
Examples
--------
>>> s = pd.Series([1, 2, 3])
>>> s
0 1
1 2
2 3
dtype: int64
>>> s.rename("my_name") # scalar, changes Series.name
0 1
1 2
2 3
Name: my_name, dtype: int64
>>> s.rename(lambda x: x ** 2) # function, changes labels
0 1
1 2
4 3
dtype: int64
>>> s.rename({1: 3, 2: 5}) # mapping, changes labels
0 1
3 2
5 3
dtype: int64
"""
if callable(index) or is_dict_like(index):
return super().rename(
index, copy=copy, inplace=inplace, level=level, errors=errors
)
else:
return self._set_name(index, inplace=inplace)
@Appender(
"""
Examples
--------
>>> s = pd.Series([1, 2, 3])
>>> s
0 1
1 2
2 3
dtype: int64
>>> s.set_axis(['a', 'b', 'c'], axis=0)
a 1
b 2
c 3
dtype: int64
"""
)
@Substitution(
**_shared_doc_kwargs,
extended_summary_sub="",
axis_description_sub="",
see_also_sub="",
)
@Appender(generic.NDFrame.set_axis.__doc__)
def set_axis(self, labels, axis: Axis = 0, inplace: bool = False):
return super().set_axis(labels, axis=axis, inplace=inplace)
@doc(
NDFrame.reindex,
klass=_shared_doc_kwargs["klass"],
axes=_shared_doc_kwargs["axes"],
optional_labels=_shared_doc_kwargs["optional_labels"],
optional_axis=_shared_doc_kwargs["optional_axis"],
)
def reindex(self, index=None, **kwargs):
return super().reindex(index=index, **kwargs)
def drop(
self,
labels=None,
axis=0,
index=None,
columns=None,
level=None,
inplace=False,
errors="raise",
) -> "Series":
"""
Return Series with specified index labels removed.
Remove elements of a Series based on specifying the index labels.
When using a multi-index, labels on different levels can be removed
by specifying the level.
Parameters
----------
labels : single label or list-like
Index labels to drop.
axis : 0, default 0
Redundant for application on Series.
index : single label or list-like
Redundant for application on Series, but 'index' can be used instead
of 'labels'.
columns : single label or list-like
No change is made to the Series; use 'index' or 'labels' instead.
level : int or level name, optional
For MultiIndex, level for which the labels will be removed.
inplace : bool, default False
If True, do operation inplace and return None.
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and only existing labels are dropped.
Returns
-------
Series
Series with specified index labels removed.
Raises
------
KeyError
If none of the labels are found in the index.
See Also
--------
Series.reindex : Return only specified index labels of Series.
Series.dropna : Return series without null values.
Series.drop_duplicates : Return Series with duplicate values removed.
DataFrame.drop : Drop specified labels from rows or columns.
Examples
--------
>>> s = pd.Series(data=np.arange(3), index=['A', 'B', 'C'])
>>> s
A 0
B 1
C 2
dtype: int64
        Drop labels B and C
>>> s.drop(labels=['B', 'C'])
A 0
dtype: int64
Drop 2nd level label in MultiIndex Series
>>> midx = pd.MultiIndex(levels=[['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3],
... index=midx)
>>> s
lama speed 45.0
weight 200.0
length 1.2
cow speed 30.0
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
>>> s.drop(labels='weight', level=1)
lama speed 45.0
length 1.2
cow speed 30.0
length 1.5
falcon speed 320.0
length 0.3
dtype: float64
"""
return super().drop(
labels=labels,
axis=axis,
index=index,
columns=columns,
level=level,
inplace=inplace,
errors=errors,
)
@doc(NDFrame.fillna, **_shared_doc_kwargs)
def fillna(
self,
value=None,
method=None,
axis=None,
inplace=False,
limit=None,
downcast=None,
) -> Optional["Series"]:
return super().fillna(
value=value,
method=method,
axis=axis,
inplace=inplace,
limit=limit,
downcast=downcast,
)
def pop(self, item: Label) -> Any:
"""
        Return item and drop it from the series. Raise KeyError if not found.
Parameters
----------
item : label
Index of the element that needs to be removed.
Returns
-------
Value that is popped from series.
Examples
--------
>>> ser = pd.Series([1,2,3])
>>> ser.pop(0)
1
>>> ser
1 2
2 3
dtype: int64
"""
return super().pop(item=item)
@doc(NDFrame.replace, klass=_shared_doc_kwargs["klass"])
def replace(
self,
to_replace=None,
value=None,
inplace=False,
limit=None,
regex=False,
method="pad",
):
return super().replace(
to_replace=to_replace,
value=value,
inplace=inplace,
limit=limit,
regex=regex,
method=method,
)
@doc(NDFrame.shift, klass=_shared_doc_kwargs["klass"])
def shift(self, periods=1, freq=None, axis=0, fill_value=None) -> "Series":
return super().shift(
periods=periods, freq=freq, axis=axis, fill_value=fill_value
)
def memory_usage(self, index=True, deep=False):
"""
Return the memory usage of the Series.
The memory usage can optionally include the contribution of
the index and of elements of `object` dtype.
Parameters
----------
index : bool, default True
Specifies whether to include the memory usage of the Series index.
deep : bool, default False
If True, introspect the data deeply by interrogating
`object` dtypes for system-level memory consumption, and include
it in the returned value.
Returns
-------
int
Bytes of memory consumed.
See Also
--------
numpy.ndarray.nbytes : Total bytes consumed by the elements of the
array.
DataFrame.memory_usage : Bytes consumed by a DataFrame.
Examples
--------
>>> s = pd.Series(range(3))
>>> s.memory_usage()
152
Not including the index gives the size of the rest of the data, which
is necessarily smaller:
>>> s.memory_usage(index=False)
24
The memory footprint of `object` values is ignored by default:
>>> s = pd.Series(["a", "b"])
>>> s.values
array(['a', 'b'], dtype=object)
>>> s.memory_usage()
144
>>> s.memory_usage(deep=True)
260
"""
v = super().memory_usage(deep=deep)
if index:
v += self.index.memory_usage(deep=deep)
return v
def isin(self, values) -> "Series":
"""
Whether elements in Series are contained in `values`.
Return a boolean Series showing whether each element in the Series
matches an element in the passed sequence of `values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
Returns
-------
Series
Series of booleans indicating if each element is in values.
Raises
------
TypeError
* If `values` is a string
See Also
--------
DataFrame.isin : Equivalent method on DataFrame.
Examples
--------
>>> s = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'], name='animal')
>>> s.isin(['cow', 'lama'])
0 True
1 True
2 True
3 False
4 True
5 False
Name: animal, dtype: bool
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
0 True
1 False
2 True
3 False
4 True
5 False
Name: animal, dtype: bool
"""
result = algorithms.isin(self, values)
return self._constructor(result, index=self.index).__finalize__(
self, method="isin"
)
def between(self, left, right, inclusive=True) -> "Series":
"""
Return boolean Series equivalent to left <= series <= right.
This function returns a boolean vector containing `True` wherever the
corresponding Series element is between the boundary values `left` and
`right`. NA values are treated as `False`.
Parameters
----------
left : scalar or list-like
Left boundary.
right : scalar or list-like
Right boundary.
inclusive : bool, default True
Include boundaries.
Returns
-------
Series
Series representing whether each element is between left and
right (inclusive).
See Also
--------
Series.gt : Greater than of series and other.
Series.lt : Less than of series and other.
Notes
-----
This function is equivalent to ``(left <= ser) & (ser <= right)``
Examples
--------
>>> s = pd.Series([2, 0, 4, 8, np.nan])
Boundary values are included by default:
>>> s.between(1, 4)
0 True
1 False
2 True
3 False
4 False
dtype: bool
With `inclusive` set to ``False`` boundary values are excluded:
>>> s.between(1, 4, inclusive=False)
0 True
1 False
2 False
3 False
4 False
dtype: bool
`left` and `right` can be any scalar value:
>>> s = pd.Series(['Alice', 'Bob', 'Carol', 'Eve'])
>>> s.between('Anna', 'Daniel')
0 False
1 True
2 True
3 False
dtype: bool
"""
if inclusive:
lmask = self >= left
rmask = self <= right
else:
lmask = self > left
rmask = self < right
return lmask & rmask
# ----------------------------------------------------------------------
# Convert to types that support pd.NA
def _convert_dtypes(
self,
infer_objects: bool = True,
convert_string: bool = True,
convert_integer: bool = True,
convert_boolean: bool = True,
) -> "Series":
input_series = self
if infer_objects:
input_series = input_series.infer_objects()
if is_object_dtype(input_series):
input_series = input_series.copy()
if convert_string or convert_integer or convert_boolean:
inferred_dtype = convert_dtypes(
input_series._values, convert_string, convert_integer, convert_boolean
)
try:
result = input_series.astype(inferred_dtype)
except TypeError:
result = input_series.copy()
else:
result = input_series.copy()
return result
@doc(NDFrame.isna, klass=_shared_doc_kwargs["klass"])
def isna(self) -> "Series":
return super().isna()
@doc(NDFrame.isna, klass=_shared_doc_kwargs["klass"])
def isnull(self) -> "Series":
return super().isnull()
@doc(NDFrame.notna, klass=_shared_doc_kwargs["klass"])
def notna(self) -> "Series":
return super().notna()
@doc(NDFrame.notna, klass=_shared_doc_kwargs["klass"])
def notnull(self) -> "Series":
return super().notnull()
def dropna(self, axis=0, inplace=False, how=None):
"""
Return a new Series with missing values removed.
See the :ref:`User Guide <missing_data>` for more on which values are
considered missing, and how to work with missing data.
Parameters
----------
axis : {0 or 'index'}, default 0
There is only one axis to drop values from.
inplace : bool, default False
If True, do operation inplace and return None.
how : str, optional
Not in use. Kept for compatibility.
Returns
-------
Series
Series with NA entries dropped from it.
See Also
--------
Series.isna: Indicate missing values.
Series.notna : Indicate existing (non-missing) values.
Series.fillna : Replace missing values.
DataFrame.dropna : Drop rows or columns which contain NA values.
Index.dropna : Drop missing indices.
Examples
--------
>>> ser = pd.Series([1., 2., np.nan])
>>> ser
0 1.0
1 2.0
2 NaN
dtype: float64
Drop NA values from a Series.
>>> ser.dropna()
0 1.0
1 2.0
dtype: float64
Keep the Series with valid entries in the same variable.
>>> ser.dropna(inplace=True)
>>> ser
0 1.0
1 2.0
dtype: float64
Empty strings are not considered NA values. ``None`` is considered an
NA value.
>>> ser = pd.Series([np.NaN, 2, pd.NaT, '', None, 'I stay'])
>>> ser
0 NaN
1 2
2 NaT
3
4 None
5 I stay
dtype: object
>>> ser.dropna()
1 2
3
5 I stay
dtype: object
"""
inplace = validate_bool_kwarg(inplace, "inplace")
# Validate the axis parameter
self._get_axis_number(axis or 0)
if self._can_hold_na:
result = remove_na_arraylike(self)
if inplace:
self._update_inplace(result)
else:
return result
else:
if inplace:
# do nothing
pass
else:
return self.copy()
# ----------------------------------------------------------------------
# Time series-oriented methods
def to_timestamp(self, freq=None, how="start", copy=True) -> "Series":
"""
Cast to DatetimeIndex of Timestamps, at *beginning* of period.
Parameters
----------
freq : str, default frequency of PeriodIndex
Desired frequency.
how : {'s', 'e', 'start', 'end'}
Convention for converting period to timestamp; start of period
vs. end.
copy : bool, default True
Whether or not to return a copy.
Returns
-------
Series with DatetimeIndex
"""
new_values = self._values
if copy:
new_values = new_values.copy()
if not isinstance(self.index, PeriodIndex):
raise TypeError(f"unsupported Type {type(self.index).__name__}")
new_index = self.index.to_timestamp(freq=freq, how=how) # type: ignore
return self._constructor(new_values, index=new_index).__finalize__(
self, method="to_timestamp"
)
def to_period(self, freq=None, copy=True) -> "Series":
"""
Convert Series from DatetimeIndex to PeriodIndex.
Parameters
----------
freq : str, default None
Frequency associated with the PeriodIndex.
copy : bool, default True
Whether or not to return a copy.
Returns
-------
Series
Series with index converted to PeriodIndex.
"""
new_values = self._values
if copy:
new_values = new_values.copy()
if not isinstance(self.index, DatetimeIndex):
raise TypeError(f"unsupported Type {type(self.index).__name__}")
new_index = self.index.to_period(freq=freq)
return self._constructor(new_values, index=new_index).__finalize__(
self, method="to_period"
)
# ----------------------------------------------------------------------
# Add index
_AXIS_ORDERS = ["index"]
_AXIS_REVERSED = False
_AXIS_LEN = len(_AXIS_ORDERS)
_info_axis_number = 0
_info_axis_name = "index"
index: "Index" = properties.AxisProperty(
axis=0, doc="The index (axis labels) of the Series."
)
# ----------------------------------------------------------------------
# Accessor Methods
# ----------------------------------------------------------------------
str = CachedAccessor("str", StringMethods)
dt = CachedAccessor("dt", CombinedDatetimelikeProperties)
cat = CachedAccessor("cat", CategoricalAccessor)
plot = CachedAccessor("plot", pandas.plotting.PlotAccessor)
sparse = CachedAccessor("sparse", SparseAccessor)
# ----------------------------------------------------------------------
# Add plotting methods to Series
hist = pandas.plotting.hist_series
Series._add_numeric_operations()
Series._add_series_or_dataframe_operations()
# Add arithmetic!
ops.add_flex_arithmetic_methods(Series)
ops.add_special_arithmetic_methods(Series)
|
py | 1a4556ee46b97d7337e9044e6f5ce59fc5e99f79 | """
Support for Wink lights.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/light.wink/
"""
import colorsys
from homeassistant.components.light import (
ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, ATTR_RGB_COLOR, SUPPORT_BRIGHTNESS,
SUPPORT_COLOR_TEMP, SUPPORT_RGB_COLOR, Light)
from homeassistant.components.wink import WinkDevice
from homeassistant.util import color as color_util
from homeassistant.util.color import \
color_temperature_mired_to_kelvin as mired_to_kelvin
DEPENDENCIES = ['wink']
SUPPORT_WINK = SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP | SUPPORT_RGB_COLOR
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the Wink lights."""
import pywink
add_devices(WinkLight(light, hass) for light in pywink.get_bulbs())
class WinkLight(WinkDevice, Light):
"""Representation of a Wink light."""
def __init__(self, wink, hass):
"""Initialize the Wink device."""
WinkDevice.__init__(self, wink, hass)
@property
def is_on(self):
"""Return true if light is on."""
return self.wink.state()
@property
def brightness(self):
"""Return the brightness of the light."""
if self.wink.brightness() is not None:
return int(self.wink.brightness() * 255)
else:
return None
@property
def rgb_color(self):
"""Current bulb color in RGB."""
if not self.wink.supports_hue_saturation():
return None
else:
hue = self.wink.color_hue()
saturation = self.wink.color_saturation()
value = int(self.wink.brightness() * 255)
if hue is None or saturation is None or value is None:
return None
rgb = colorsys.hsv_to_rgb(hue, saturation, value)
r_value = int(round(rgb[0]))
g_value = int(round(rgb[1]))
b_value = int(round(rgb[2]))
return r_value, g_value, b_value
@property
def xy_color(self):
"""Current bulb color in CIE 1931 (XY) color space."""
if not self.wink.supports_xy_color():
return None
return self.wink.color_xy()
@property
def color_temp(self):
"""Current bulb color in degrees Kelvin."""
if not self.wink.supports_temperature():
return None
return color_util.color_temperature_kelvin_to_mired(
self.wink.color_temperature_kelvin())
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_WINK
def turn_on(self, **kwargs):
"""Turn the switch on."""
brightness = kwargs.get(ATTR_BRIGHTNESS)
rgb_color = kwargs.get(ATTR_RGB_COLOR)
color_temp_mired = kwargs.get(ATTR_COLOR_TEMP)
state_kwargs = {
}
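        # Prefer the bulb's XY color interface when it is supported and fall
        # back to hue/saturation; brightness arrives as 0-255 and is rescaled
        # to the 0-1 range Wink expects.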
if rgb_color:
if self.wink.supports_xy_color():
xyb = color_util.color_RGB_to_xy(*rgb_color)
state_kwargs['color_xy'] = xyb[0], xyb[1]
state_kwargs['brightness'] = xyb[2]
elif self.wink.supports_hue_saturation():
hsv = colorsys.rgb_to_hsv(rgb_color[0],
rgb_color[1], rgb_color[2])
state_kwargs['color_hue_saturation'] = hsv[0], hsv[1]
if color_temp_mired:
state_kwargs['color_kelvin'] = mired_to_kelvin(color_temp_mired)
if brightness:
state_kwargs['brightness'] = brightness / 255.0
self.wink.set_state(True, **state_kwargs)
def turn_off(self):
"""Turn the switch off."""
self.wink.set_state(False)
|
py | 1a4557d192998edb9b224df9ff0d4c3a4352d99f | """
Django settings for healthchecks project.
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings
"""
import os
import warnings
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def envbool(s, default):
v = os.getenv(s, default=default)
if v not in ("", "True", "False"):
msg = "Unexpected value %s=%s, use 'True' or 'False'" % (s, v)
raise Exception(msg)
return v == "True"
def envint(s, default):
v = os.getenv(s, default)
if v == "None":
return None
return int(v)
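# For example, with DEBUG="False" in the environment, envbool("DEBUG", "True")
# returns False; envint("PING_BODY_LIMIT", "10000") returns 10000 when the
# variable is unset, and None when it is set to the literal string "None".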
SECRET_KEY = os.getenv("SECRET_KEY", "---")
METRICS_KEY = os.getenv("METRICS_KEY")
DEBUG = envbool("DEBUG", "True")
ALLOWED_HOSTS = os.getenv("ALLOWED_HOSTS", "*").split(",")
DEFAULT_FROM_EMAIL = os.getenv("DEFAULT_FROM_EMAIL", "[email protected]")
SUPPORT_EMAIL = os.getenv("SUPPORT_EMAIL")
USE_PAYMENTS = envbool("USE_PAYMENTS", "False")
REGISTRATION_OPEN = envbool("REGISTRATION_OPEN", "True")
VERSION = ""
with open(os.path.join(BASE_DIR, "CHANGELOG.md"), encoding="utf-8") as f:
for line in f.readlines():
if line.startswith("## v"):
VERSION = line.split()[1]
break
INSTALLED_APPS = (
"hc.accounts",
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.humanize",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"compressor",
"hc.api",
"hc.front",
"hc.payments",
)
MIDDLEWARE = (
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"django.middleware.locale.LocaleMiddleware",
"hc.accounts.middleware.TeamAccessMiddleware",
)
AUTHENTICATION_BACKENDS = (
"hc.accounts.backends.EmailBackend",
"hc.accounts.backends.ProfileBackend",
)
ROOT_URLCONF = "hc.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [os.path.join(BASE_DIR, "templates")],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"hc.front.context_processors.branding",
"hc.payments.context_processors.payments",
]
},
}
]
WSGI_APPLICATION = "hc.wsgi.application"
TEST_RUNNER = "hc.api.tests.CustomRunner"
# Default database engine is SQLite. So one can just check out code,
# install requirements.txt and do manage.py runserver and it works
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.getenv("DB_NAME", BASE_DIR + "/hc.sqlite"),
}
}
# You can switch database engine to postgres or mysql using environment
# variable 'DB'. Travis CI does this.
if os.getenv("DB") == "postgres":
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql",
"HOST": os.getenv("DB_HOST", ""),
"PORT": os.getenv("DB_PORT", ""),
"NAME": os.getenv("DB_NAME", "hc"),
"USER": os.getenv("DB_USER", "postgres"),
"PASSWORD": os.getenv("DB_PASSWORD", ""),
"CONN_MAX_AGE": envint("DB_CONN_MAX_AGE", "0"),
"TEST": {"CHARSET": "UTF8"},
"OPTIONS": {
"sslmode": os.getenv("DB_SSLMODE", "prefer"),
"target_session_attrs": os.getenv(
"DB_TARGET_SESSION_ATTRS", "read-write"
),
},
}
}
if os.getenv("DB") == "mysql":
DATABASES = {
"default": {
"ENGINE": "django.db.backends.mysql",
"HOST": os.getenv("DB_HOST", ""),
"PORT": os.getenv("DB_PORT", ""),
"NAME": os.getenv("DB_NAME", "hc"),
"USER": os.getenv("DB_USER", "root"),
"PASSWORD": os.getenv("DB_PASSWORD", ""),
"TEST": {"CHARSET": "UTF8"},
}
}
USE_TZ = True
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = False
LOCALE_PATHS = (os.path.join(BASE_DIR, "locale"),)
SITE_ROOT = os.getenv("SITE_ROOT", "http://localhost:8000")
SITE_NAME = os.getenv("SITE_NAME", "Mychecks")
MASTER_BADGE_LABEL = os.getenv("MASTER_BADGE_LABEL", SITE_NAME)
PING_ENDPOINT = os.getenv("PING_ENDPOINT", SITE_ROOT + "/ping/")
PING_EMAIL_DOMAIN = os.getenv("PING_EMAIL_DOMAIN", "localhost")
PING_BODY_LIMIT = envint("PING_BODY_LIMIT", "10000")
STATIC_URL = "/static/"
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")]
STATIC_ROOT = os.path.join(BASE_DIR, "static-collected")
STATICFILES_FINDERS = (
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
"compressor.finders.CompressorFinder",
)
COMPRESS_OFFLINE = True
COMPRESS_CSS_HASHING_METHOD = "content"
# Discord integration
DISCORD_CLIENT_ID = os.getenv("DISCORD_CLIENT_ID")
DISCORD_CLIENT_SECRET = os.getenv("DISCORD_CLIENT_SECRET")
# Email integration
EMAIL_HOST = os.getenv("EMAIL_HOST", "")
EMAIL_PORT = envint("EMAIL_PORT", "587")
EMAIL_HOST_USER = os.getenv("EMAIL_HOST_USER", "")
EMAIL_HOST_PASSWORD = os.getenv("EMAIL_HOST_PASSWORD", "")
EMAIL_USE_TLS = envbool("EMAIL_USE_TLS", "True")
EMAIL_USE_VERIFICATION = envbool("EMAIL_USE_VERIFICATION", "True")
# Slack integration
SLACK_CLIENT_ID = os.getenv("SLACK_CLIENT_ID")
SLACK_CLIENT_SECRET = os.getenv("SLACK_CLIENT_SECRET")
# Pushover integration
PUSHOVER_API_TOKEN = os.getenv("PUSHOVER_API_TOKEN")
PUSHOVER_SUBSCRIPTION_URL = os.getenv("PUSHOVER_SUBSCRIPTION_URL")
PUSHOVER_EMERGENCY_RETRY_DELAY = int(os.getenv("PUSHOVER_EMERGENCY_RETRY_DELAY", "300"))
PUSHOVER_EMERGENCY_EXPIRATION = int(os.getenv("PUSHOVER_EMERGENCY_EXPIRATION", "86400"))
# Pushbullet integration
PUSHBULLET_CLIENT_ID = os.getenv("PUSHBULLET_CLIENT_ID")
PUSHBULLET_CLIENT_SECRET = os.getenv("PUSHBULLET_CLIENT_SECRET")
# Telegram integration -- override in local_settings.py
TELEGRAM_BOT_NAME = os.getenv("TELEGRAM_BOT_NAME", "ExampleBot")
TELEGRAM_TOKEN = os.getenv("TELEGRAM_TOKEN")
# SMS and WhatsApp (Twilio) integration
TWILIO_ACCOUNT = os.getenv("TWILIO_ACCOUNT")
TWILIO_AUTH = os.getenv("TWILIO_AUTH")
TWILIO_FROM = os.getenv("TWILIO_FROM")
TWILIO_USE_WHATSAPP = envbool("TWILIO_USE_WHATSAPP", "False")
# PagerDuty
PD_VENDOR_KEY = os.getenv("PD_VENDOR_KEY")
# Trello
TRELLO_APP_KEY = os.getenv("TRELLO_APP_KEY")
# Matrix
MATRIX_HOMESERVER = os.getenv("MATRIX_HOMESERVER")
MATRIX_USER_ID = os.getenv("MATRIX_USER_ID")
MATRIX_ACCESS_TOKEN = os.getenv("MATRIX_ACCESS_TOKEN")
# Apprise
APPRISE_ENABLED = envbool("APPRISE_ENABLED", "False")
# Local shell commands
SHELL_ENABLED = envbool("SHELL_ENABLED", "False")
# LINE Notify
LINENOTIFY_CLIENT_ID = os.getenv("LINENOTIFY_CLIENT_ID")
LINENOTIFY_CLIENT_SECRET = os.getenv("LINENOTIFY_CLIENT_SECRET")
if os.path.exists(os.path.join(BASE_DIR, "hc/local_settings.py")):
from .local_settings import *
else:
warnings.warn("local_settings.py not found, using defaults")
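# A local_settings.py override might contain, for example:
#   DEBUG = False
#   ALLOWED_HOSTS = ["hc.example.org"]
#   SECRET_KEY = "<a long random string>"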
|
py | 1a455807b88c9a0274aed878a147fa538bc31bd2 | # Copyright (c) Microsoft Corporation
# Licensed under the MIT License.
"""Defines the dashboard class."""
import json
import os
import uuid
from html.parser import HTMLParser
from rai_core_flask import FlaskHelper # , environment_detector
from responsibleai.serialization_utilities import serialize_json_safe
class InLineScript(HTMLParser):
def __init__(self, load_widget_file):
HTMLParser.__init__(self)
self.content = ""
self.load_widget_file = load_widget_file
def handle_starttag(self, tag, attrs):
if tag == "script":
src = None
scriptTag = "<script "
for att in attrs:
if att[0] == "src":
src = att[1]
continue
# skip module type as it causes ipython to render widget
# with 8px height
if att[0] == "type":
continue
scriptTag += f' {att[0]}={att[1]}'
if src is not None:
content = self.load_widget_file(src)
self.content += f'{scriptTag}>\r\n{content}\r\n'
return
self.content += self.get_starttag_text()
def handle_endtag(self, tag):
self.content += f'</{tag}>'
pass
def handle_data(self, data):
self.content += data
pass
class Dashboard(object):
"""The dashboard class, wraps the dashboard component."""
def __init__(self, *,
dashboard_type,
model_data,
public_ip,
port,
locale,
no_inline_dashboard=False):
"""Initialize the dashboard."""
        if model_data is None or dashboard_type is None:
raise ValueError("Required parameters not provided")
try:
self._service = FlaskHelper(ip=public_ip, port=port)
except Exception as e:
self._service = None
raise e
self.id = uuid.uuid4().hex
self.config = {
'dashboardType': dashboard_type,
'id': self.id,
'baseUrl': self._service.env.base_url,
'withCredentials': self._service.with_credentials,
'locale': locale
}
self.model_data = model_data
self.add_route()
html = self.load_index()
print(f'{dashboard_type} started at {self._service.env.base_url}')
if no_inline_dashboard:
return
self._service.env.display(html)
def add_route(self):
# To enable multiple dashboards to run in the same notebook we need to
# prevent them from using the same method names (in addition to using
# dedicated ports). Below we rename the function for that purpose and
# manually add the URL rule instead of using the route decorator.
def visual():
return self.load_index()
self.add_url_rule(visual, '/', methods=["GET"])
return
@staticmethod
def get_widget_path(path):
script_path = os.path.dirname(os.path.abspath(__file__))
return os.path.join(script_path, "widget", path)
def load_index(self):
index = self.load_widget_file("index.html")
parser = InLineScript(self.load_widget_file)
parser.feed(index)
return parser.content
def load_widget_file(self, path):
js_path = Dashboard.get_widget_path(path)
with open(js_path, "r", encoding="utf-8") as f:
content = f.read()
content = content.replace(
"__rai_app_id__", f'rai_widget_{self.id}')
content = content.replace(
'"__rai_config__"', f'`{json.dumps(self.config)}`')
model_data = json.dumps(self.model_data,
default=serialize_json_safe)
content = content.replace(
'"__rai_model_data__"',
f'`{model_data}`')
return content
def add_url_rule(self, func, route, methods):
"""To enable multiple dashboards to run in the same notebook we need to
prevent them from using the same method names (in addition to using
dedicated ports). We rename the function for that purpose and
manually add the URL rule instead of using the route decorator.
"""
func.__name__ = func.__name__ + str(id(self))
self._service.app.add_url_rule(
route,
endpoint=func.__name__,
view_func=func,
methods=methods)
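

if __name__ == "__main__":
    # Minimal illustration of the InLineScript parser (not part of the widget
    # service itself). The lambda stands in for Dashboard.load_widget_file and
    # "index.js" is a placeholder file name, not a shipped asset.
    demo = InLineScript(lambda src: f"/* inlined contents of {src} */")
    demo.feed('<html><script src="index.js"></script><body>hi</body></html>')
    print(demo.content)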
|
py | 1a4559ae4c91d8c3e75ffe05ca5c6a2586f4863c | import toppra
import numpy as np
from _mplib import *
import toppra as ta
import toppra.constraint as constraint
import toppra.algorithm as algo
from transforms3d.quaternions import quat2mat
from typing import Tuple
class Planner(object):
def __init__(
self,
urdf="./panda/panda.urdf",
srdf="./panda/panda.srdf",
# align the order of user links
user_link_names=['panda_link0', 'panda_link1', 'panda_link2', 'panda_link3',
'panda_link4', 'panda_link5', 'panda_link6', 'panda_link7',
'panda_link8', 'panda_hand', 'panda_leftfinger', 'panda_rightfinger'],
# align the order of user joints
user_joint_names=['panda_joint1', 'panda_joint2', 'panda_joint3', 'panda_joint4',
'panda_joint5', 'panda_joint6', 'panda_joint7',
'panda_finger_joint1', 'panda_finger_joint2'],
move_group="ee_link",
joint_vel_limits=np.ones(7),
joint_acc_limits=np.ones(7)
):
self.urdf = urdf
self.srdf = srdf
self.user_link_names = user_link_names
self.user_joint_names = user_joint_names
self.joint_name_2_idx = {}
for i, joint in enumerate(self.user_joint_names):
self.joint_name_2_idx[joint] = i
self.link_name_2_idx = {}
for i, link in enumerate(self.user_link_names):
self.link_name_2_idx[link] = i
self.robot = articulation.ArticulatedModel(urdf, srdf, [0, 0, -9.81], self.user_joint_names,
self.user_link_names, verbose=False, convex=True)
self.planning_world = planning_world.PlanningWorld([self.robot], ["robot"], [], [])
self.move_group = move_group
self.robot.set_move_group(self.move_group)
self.move_group_joint_indices = self.robot.get_move_group_joint_indices()
self.pinocchio_model = self.robot.get_pinocchio_model()
self.joint_types = self.pinocchio_model.get_joint_types()
self.joint_limits = np.concatenate(self.pinocchio_model.get_joint_limits())
self.planner = ompl.OMPLPlanner(world=self.planning_world)
self.joint_vel_limits = joint_vel_limits
self.joint_acc_limits = joint_acc_limits
self.move_group_link_id = self.link_name_2_idx[self.move_group]
assert(len(self.joint_vel_limits) == len(self.move_group_joint_indices))
assert(len(self.joint_acc_limits) == len(self.move_group_joint_indices))
def distance_6D(self, p1, q1, p2, q2):
return np.linalg.norm(p1 - p2) + min(np.linalg.norm(q1 - q2), np.linalg.norm(q1 + q2))
def check_joint_limit(self, q):
n = len(q)
flag = True
for i in range(n):
if self.joint_types[i].startswith("JointModelR"):
if (np.abs(q[i] - self.joint_limits[i][0]) < 1e-3):
continue
q[i] -= 2 * np.pi * np.floor((q[i] - self.joint_limits[i][0]) / (2 * np.pi))
if q[i] > self.joint_limits[i][1] + 1e-3:
flag = False
else:
if q[i] < self.joint_limits[i][0] - 1e-3 or q[i] > self.joint_limits[i][1] + 1e-3:
flag = False
return flag
def IK(self, goal_pose, start_qpos, n_init_qpos = 20, threshold = 1e-3):
index = self.link_name_2_idx[self.move_group]
min_dis = 1e9
result = np.zeros(len(self.user_joint_names))
for i in range(n_init_qpos):
ik_results = self.pinocchio_model.compute_IK_CLIK(index, goal_pose, start_qpos)
flag = self.check_joint_limit(np.copy(ik_results[0]))
if flag:
self.pinocchio_model.compute_forward_kinematics(ik_results[0])
new_pose = self.pinocchio_model.get_link_pose(index)
tmp_dis = self.distance_6D(goal_pose[:3], goal_pose[3:], new_pose[:3], new_pose[3:])
if tmp_dis < min_dis:
min_dis = tmp_dis
result = ik_results[0]
if min_dis < threshold:
return "Success", result
start_qpos = self.pinocchio_model.get_random_configuration()
if min_dis != 1e9:
status = "IK Failed! Distance %lf is greater than threshold %lf." % (min_dis, threshold)
else:
status = "IK Failed! Cannot find valid solution."
return status, result
def TOPP(self, path, step = 0.1, verbose = False):
N_samples = path.shape[0]
dof = path.shape[1]
assert(dof == len(self.joint_vel_limits))
assert(dof == len(self.joint_acc_limits))
ss = np.linspace(0, 1, N_samples)
path = ta.SplineInterpolator(ss, path)
pc_vel = constraint.JointVelocityConstraint(self.joint_vel_limits)
pc_acc = constraint.JointAccelerationConstraint(self.joint_acc_limits)
instance = algo.TOPPRA([pc_vel, pc_acc], path, parametrizer="ParametrizeConstAccel")
jnt_traj = instance.compute_trajectory()
ts_sample = np.linspace(0, jnt_traj.duration, int(jnt_traj.duration / step))
qs_sample = jnt_traj(ts_sample)
qds_sample = jnt_traj(ts_sample, 1)
qdds_sample = jnt_traj(ts_sample, 2)
return ts_sample, qs_sample, qds_sample, qdds_sample, jnt_traj.duration
def update_point_cloud(self, pc, resolution = 1e-3):
self.planning_world.update_point_cloud(pc, resolution)
def update_attached_box(self, size, pose, link_id = -1):
if link_id == -1:
link_id = self.move_group_link_id
self.planning_world.update_attached_box(size, link_id, pose)
def plan(self, goal_pose, current_qpos, time_step = 0.1, rrt_range = 0.1, planning_time = 1, fix_joint_limits = True, use_point_cloud = False, use_attach = False, verbose = False):
self.planning_world.set_use_point_cloud(use_point_cloud)
self.planning_world.set_use_attach(use_attach)
n = current_qpos.shape[0]
if fix_joint_limits:
for i in range(n):
if current_qpos[i] < self.joint_limits[i][0]:
current_qpos[i] = self.joint_limits[i][0] + 1e-3
if current_qpos[i] > self.joint_limits[i][1]:
current_qpos[i] = self.joint_limits[i][1] - 1e-3
idx = self.move_group_joint_indices
ik_status, goal_qpos = self.IK(goal_pose, current_qpos)
if ik_status != "Success":
return {"status": ik_status}
self.robot.set_qpos(current_qpos, True)
status, path = self.planner.plan(current_qpos[idx], goal_qpos[idx], range = rrt_range, verbose = verbose, time = planning_time)
if status == "Exact solution":
if verbose:
ta.setup_logging("INFO")
else:
ta.setup_logging("WARNING")
times, pos, vel, acc, duration = self.TOPP(path, time_step)
return {"status": "Success",
"time": times,
"position": pos,
"velocity": vel,
"acceleration": acc,
"duration": duration}
else:
return {"status": "RRT Failed. %s" % status}
def plan_screw(self, target_pose, qpos, qpos_step = 0.1, time_step = 0.1, use_point_cloud = False, use_attach = False, verbose = False):
self.planning_world.set_use_point_cloud(use_point_cloud)
self.planning_world.set_use_attach(use_attach)
qpos = np.copy(qpos)
self.robot.set_qpos(qpos, True)
def pose7D2mat(pose):
mat = np.eye(4)
mat[0:3, 3] = pose[:3]
mat[0:3, 0:3] = quat2mat(pose[3:])
return mat
def skew(vec):
return np.array([[0, -vec[2], vec[1]],
[vec[2], 0, -vec[0]],
[-vec[1], vec[0], 0]])
def pose2exp_coordinate(pose: np.ndarray) -> Tuple[np.ndarray, float]:
def rot2so3(rotation: np.ndarray):
assert rotation.shape == (3, 3)
if np.isclose(rotation.trace(), 3):
return np.zeros(3), 1
if np.isclose(rotation.trace(), -1):
return np.zeros(3), -1e6
theta = np.arccos((rotation.trace() - 1) / 2)
omega = 1 / 2 / np.sin(theta) * np.array(
[rotation[2, 1] - rotation[1, 2], rotation[0, 2] - rotation[2, 0], rotation[1, 0] - rotation[0, 1]]).T
return omega, theta
omega, theta = rot2so3(pose[:3, :3])
if theta < -1e5:
return omega, theta
ss = skew(omega)
inv_left_jacobian = np.eye(3) / theta - 0.5 * ss + (
1.0 / theta - 0.5 / np.tan(theta / 2)) * ss @ ss
v = inv_left_jacobian @ pose[:3, 3]
return np.concatenate([v, omega]), theta
self.pinocchio_model.compute_forward_kinematics(qpos)
ee_index = self.link_name_2_idx[self.move_group]
current_p = pose7D2mat(self.pinocchio_model.get_link_pose(ee_index))
target_p = pose7D2mat(target_pose)
relative_transform = target_p @ np.linalg.inv(current_p)
omega, theta = pose2exp_coordinate(relative_transform)
if theta < -1e4:
return {"status": "screw plan failed."}
omega = omega.reshape((-1, 1)) * theta
index = self.move_group_joint_indices
path = [np.copy(qpos[index])]
while True:
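            # Each iteration maps the remaining twist to a joint-space step via
            # the Jacobian pseudo-inverse, caps the step size, and terminates on
            # a vanishing twist, a collision, or a joint-limit violation.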
self.pinocchio_model.compute_full_jacobian(qpos)
J = self.pinocchio_model.get_link_jacobian(ee_index, local = False)
delta_q = np.linalg.pinv(J) @ omega
delta_q *= qpos_step / (np.linalg.norm(delta_q))
delta_twist = J @ delta_q
flag = False
if np.linalg.norm(delta_twist) > np.linalg.norm(omega):
ratio = np.linalg.norm(omega) / np.linalg.norm(delta_twist)
delta_q = delta_q * ratio
delta_twist = delta_twist * ratio
flag = True
qpos += delta_q.reshape(-1)
omega -= delta_twist
def check_joint_limit(q):
n = len(q)
for i in range(n):
if q[i] < self.joint_limits[i][0] - 1e-3 or q[i] > self.joint_limits[i][1] + 1e-3:
return False
return True
within_joint_limit = check_joint_limit(qpos)
self.planning_world.set_qpos_all(qpos[index])
collide = self.planning_world.collide()
if np.linalg.norm(delta_twist) < 1e-4 or collide or within_joint_limit == False:
return {"status": "screw plan failed"}
path.append(np.copy(qpos[index]))
if flag:
if verbose:
ta.setup_logging("INFO")
else:
ta.setup_logging("WARNING")
times, pos, vel, acc, duration = self.TOPP(np.vstack(path), time_step)
return {"status": "Success",
"time": times,
"position": pos,
"velocity": vel,
"acceleration": acc,
"duration": duration}
|
py | 1a455b74c3e3366903b9adf0c8155a17d5346de0 | from __future__ import division
import argparse
import copy
import os
import os.path as osp
import time
import mmcv
import torch
from mmcv import Config
from mmcv.runner import init_dist
from mmdet import __version__
from mmdet.apis import set_random_seed, train_detector
from mmdet.datasets import build_dataset
from mmdet.models import build_detector
from mmdet.utils import collect_env, get_root_logger
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work_dir', help='the dir to save logs and models')
parser.add_argument(
'--resume_from', help='the checkpoint file to resume from')
parser.add_argument(
'--validate',
action='store_true',
help='whether to evaluate the checkpoint during training')
parser.add_argument(
'--gpus',
type=int,
default=1,
help='number of gpus to use '
'(only applicable to non-distributed training)')
parser.add_argument('--seed', type=int, default=None, help='random seed')
parser.add_argument(
'--deterministic',
action='store_true',
help='whether to set deterministic options for CUDNN backend.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument(
'--autoscale-lr',
action='store_true',
help='automatically scale lr with the number of gpus')
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
# update configs according to CLI args
if args.work_dir is not None:
cfg.work_dir = args.work_dir
if args.resume_from is not None:
cfg.resume_from = args.resume_from
cfg.gpus = args.gpus
if args.autoscale_lr:
# apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
cfg.optimizer['lr'] = cfg.optimizer['lr'] * cfg.gpus / 8
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# init the logger before other steps
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, '{}.log'.format(timestamp))
logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
# init the meta dict to record some important information such as
# environment info and seed, which will be logged
meta = dict()
# log env info
env_info_dict = collect_env()
env_info = '\n'.join([('{}: {}'.format(k, v))
for k, v in env_info_dict.items()])
dash_line = '-' * 60 + '\n'
logger.info('Environment info:\n' + dash_line + env_info + '\n' +
dash_line)
meta['env_info'] = env_info
# log some basic info
logger.info('Distributed training: {}'.format(distributed))
logger.info('Config:\n{}'.format(cfg.text))
# set random seeds
if args.seed is not None:
logger.info('Set random seed to {}, deterministic: {}'.format(
args.seed, args.deterministic))
set_random_seed(args.seed, deterministic=args.deterministic)
cfg.seed = args.seed
meta['seed'] = args.seed
model = build_detector(
cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
datasets = [build_dataset(cfg.data.train)]
if len(cfg.workflow) == 2:
val_dataset = copy.deepcopy(cfg.data.val)
val_dataset.pipeline = cfg.data.train.pipeline
datasets.append(build_dataset(val_dataset))
if cfg.checkpoint_config is not None:
# save mmdet version, config file content and class names in
# checkpoints as meta data
cfg.checkpoint_config.meta = dict(
mmdet_version=__version__,
config=cfg.text,
CLASSES=datasets[0].CLASSES)
# add an attribute for visualization convenience
model.CLASSES = datasets[0].CLASSES
train_detector(
model,
datasets,
cfg,
distributed=distributed,
validate=args.validate,
timestamp=timestamp,
meta=meta)
if __name__ == '__main__':
main()
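# Example invocations (config and work_dir paths are placeholders):
#   python train.py configs/faster_rcnn_r50_fpn_1x.py --work_dir work_dirs/frcnn
#   python -m torch.distributed.launch --nproc_per_node=4 train.py \
#       configs/faster_rcnn_r50_fpn_1x.py --launcher pytorch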
|