the-stack_0_7137
# -*- coding: utf-8 -*-
import os
import sys
import argparse
from evaluate import evaluate_beam_search
import logging
import numpy as np
import config
import utils
import torch
import torch.nn as nn
from torch import cuda
from beam_search import SequenceGenerator
from train import load_data_vocab, init_model, init_optimizer_criterion
from utils import Progbar, plot_learning_curve
import pykp
from pykp.io import KeyphraseDatasetTorchText
__author__ = "Rui Meng"
__email__ = "[email protected]"
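# Prediction entry point: load the vocabulary and test data, restore the trained
# model, decode with beam search, and write the results to a CSV under opt.pred_path.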
def main():
# load settings for training
parser = argparse.ArgumentParser(
description='predict.py',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
config.preprocess_opts(parser)
config.model_opts(parser)
config.train_opts(parser)
config.predict_opts(parser)
opt = parser.parse_args()
if opt.seed > 0:
torch.manual_seed(opt.seed)
print(opt.gpuid)
if torch.cuda.is_available() and not opt.gpuid:
opt.gpuid = 0
opt.exp = 'predict.' + opt.exp
if hasattr(opt, 'copy_model') and opt.copy_model:
opt.exp += '.copy'
if hasattr(opt, 'bidirectional'):
if opt.bidirectional:
opt.exp += '.bi-directional'
else:
opt.exp += '.uni-directional'
# fill time into the name
if opt.exp_path.find('%s') > 0:
opt.exp_path = opt.exp_path % (opt.exp, opt.timemark)
opt.pred_path = opt.pred_path % (opt.exp, opt.timemark)
if not os.path.exists(opt.exp_path):
os.makedirs(opt.exp_path)
if not os.path.exists(opt.pred_path):
os.makedirs(opt.pred_path)
logging = config.init_logging('train', opt.exp_path + '/output.log')
logging.info('Parameters:')
[logging.info('%s : %s' % (k, str(v))) for k, v in opt.__dict__.items()]
try:
train_data_loader, valid_data_loader, test_data_loader, word2id, id2word, vocab = load_data_vocab(opt, load_train=False)
model = init_model(opt)
# optimizer, criterion = init_optimizer_criterion(model, opt)
generator = SequenceGenerator(model,
eos_id=opt.word2id[pykp.io.EOS_WORD],
beam_size=opt.beam_size,
max_sequence_length=opt.max_sent_length
)
# import time
# start_time = time.time()
evaluate_beam_search(generator, test_data_loader, opt, title='predict', save_path=opt.pred_path + '/[epoch=%d,batch=%d,total_batch=%d]test_result.csv' % (0, 0, 0))
# print("--- %s seconds --- Complete Beam Search" % (time.time() - start_time))
# predict_greedy(model, test_data_loader, test_examples, opt)
except Exception as e:
logging.exception("message")
if __name__ == '__main__':
main()
the-stack_0_7140
import logging
import re
import feedparser
from requests.auth import AuthBase
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.utils.cached_input import cached
from flexget.utils.requests import RequestException
log = logging.getLogger('apple_trailers')
class AppleTrailers:
"""
Adds support for Apple.com movie trailers.
Configuration:
quality: Set the desired resolution - 480p, 720p or 1080p. default '720p'
genres: List of genres used to filter the entries. If set, the
trailer must match at least one listed genre to be accepted. Genres
that can be used: Action and Adventure, Comedy, Documentary, Drama,
Family, Fantasy, Foreign, Horror, Musical, Romance, Science Fiction,
Thriller. default '' (all)
apple_trailers:
quality: 720p
genres: ['Action and Adventure']
Alternatively, a simpler configuration format can be used. This uses
the default genre filter, all:
apple_trailers: 720p
This plugin adds the following fields to the entry:
movie_name, movie_year, genres, apple_trailers_name, movie_studio
movie_name: Name of the movie
movie_year: Year the movie was/will be released
genres: Comma-separated list of genres that apply to the movie
apple_trailers_name: Contains the Apple-supplied name of the clip,
such as 'Clip 2', 'Trailer', 'Winter Olympic Preview'
movie_studio: Name of the studio that makes the movie
"""
movie_data_url = 'http://trailers.apple.com/trailers/feeds/data/'
rss_url = 'http://trailers.apple.com/trailers/home/rss/newtrailers.rss'
qualities = {'480p': 'sd', '720p': 'hd720', '1080p': 'hd1080'}
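# The values above are the size keys used when indexing the per-clip JSON
# ('versions' -> 'enus' -> 'sizes') in on_task_input below.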
schema = {
'oneOf': [
{
'type': 'object',
'properties': {
'quality': {
'type': 'string',
'enum': list(qualities.keys()),
'default': '720p',
},
'genres': {'type': 'array', 'items': {'type': 'string'}},
},
'additionalProperties': False,
},
{'title': 'justquality', 'type': 'string', 'enum': list(qualities.keys())},
]
}
def broken(self, error_message):
raise plugin.PluginError('Plugin is most likely broken. Got: %s' % error_message)
@plugin.priority(127)
@cached('apple_trailers')
def on_task_input(self, task, config):
# Turn simple config into full config
if isinstance(config, str):
config = {'quality': config}
try:
r = task.requests.get(self.rss_url)
except RequestException as e:
raise plugin.PluginError('Retrieving Apple Trailers RSS feed failed: %s' % e)
rss = feedparser.parse(r.content)
if rss.get('bozo_exception', False):
raise plugin.PluginError('Got bozo_exception (bad feed)')
filmid_regex = re.compile(r'(FilmId\s*\=\s*\')(\d+)(?=\')')
studio_regex = re.compile(r'(?:[0-9]*\s*)(.+)')
# use the following dict to save json object in case multiple trailers have been released for the same movie
# no need to do multiple requests for the same thing!
trailers = {}
entries = []
for item in rss.entries:
entry = Entry()
movie_url = item['link']
entry['title'] = item['title']
entry['movie_name'], entry['apple_trailers_name'] = entry['title'].split(' - ', 1)
if not trailers.get(movie_url):
try:
movie_page = task.requests.get(movie_url).text
match = filmid_regex.search(movie_page)
if match:
json_url = self.movie_data_url + match.group(2) + '.json'
movie_data = task.requests.get(json_url).json()
trailers[movie_url] = {'json_url': json_url, 'json': movie_data}
else:
self.broken('FilmId not found for {0}'.format(entry['movie_name']))
except RequestException as e:
log.error('Failed to get trailer %s: %s', entry['title'], e.args[0])
continue
else:
movie_data = trailers[movie_url]['json']
genres = {genre.get('name') for genre in movie_data.get('details').get('genres')}
config_genres = set(config.get('genres', []))
if genres and config_genres and not set.intersection(config_genres, genres):
log.debug('Config genre(s) do not match movie genre(s)')
continue
desired_quality = config['quality']
# find the trailer url
for clip in movie_data.get('clips'):
if clip.get('title') == entry['apple_trailers_name']:
try:
trailer_url = clip['versions']['enus']['sizes'][
self.qualities[desired_quality]
]
src = trailer_url.get('src')
src_alt = trailer_url.get('srcAlt')
# .mov tends to be a streaming video file, but the real video file is the same url, but
# they prepend 'h' to the quality
if src.split('.')[-1] == 'mov':
entry['url'] = src.replace(desired_quality, 'h' + desired_quality)
elif src_alt.split('.')[-1] == 'mov':
entry['url'] = src_alt.replace(desired_quality, 'h' + desired_quality)
else:
continue # just continue until we reach the else part of the for-else
break
except KeyError as e:
self.broken(e.args[0])
else:
log.error('Trailer "%s" not found', entry['apple_trailers_name'])
continue
# set some entry fields if present
# studio is usually also the copyright holder
studio = studio_regex.match(movie_data.get('page').get('copyright'))
if studio:
entry['movie_studio'] = studio.group(1)
release_date = movie_data.get('page').get('release_date')
if release_date:
entry['release_date'] = release_date
if genres:
entry['genres'] = ', '.join(list(genres))
# set the correct header without modifying the task.requests obj
entry['download_auth'] = AppleTrailersHeader()
entries.append(entry)
return entries
class AppleTrailersHeader(AuthBase):
def __call__(self, request):
request.headers['User-Agent'] = 'QuickTime/7.7'
return request
@event('plugin.register')
def register_plugin():
plugin.register(AppleTrailers, 'apple_trailers', api_ver=2)
the-stack_0_7142
import os
import torch
import faiss
from argparse import ArgumentParser
from tqdm import tqdm
from typing import List
from collections import defaultdict
def load_rerank_f(fname):
if not fname:
return None
f = open(fname)
ret = defaultdict(set)
for line in f:
line = line.strip().split()
ret[int(line[0])].add(int(line[1]))
return ret
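# Note: judging from the parsing above, the optional rerank-pairs file is assumed to
# hold one whitespace-separated "<query_id> <doc_id>" pair per line, e.g.
#   1001 7345
#   1001 901
# (illustrative ids). Documents listed for a query are boosted to the top of that
# query's ranking in main().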
def main():
parser = ArgumentParser()
parser.add_argument('--score_dir', required=True)
parser.add_argument('--query_lookup', required=True)
parser.add_argument('--depth', type=int, required=True)
parser.add_argument('--num_query', type=int)
parser.add_argument('--save_ranking_to', required=True)
parser.add_argument('--marco_document', action='store_true')
parser.add_argument("--rerank_pairs", default=None)
args = parser.parse_args()
rerank_dic = load_rerank_f(args.rerank_pairs)
if args.num_query:
rh = faiss.ResultHeap(args.num_query, args.depth)
else:
print("Inferring number of query from first input")
rh = None
partitions = os.listdir(args.score_dir)
pbar = tqdm(partitions)
for part_name in pbar:
pbar.set_description_str(f'Processing {part_name}')
scores, indices = torch.load(
os.path.join(args.score_dir, part_name)
)
if rh is None:
print(f'Initializing Heap. Assuming {scores.shape[0]} queries.')
rh = faiss.ResultHeap(scores.shape[0], args.depth)
rh.add_result(-scores.numpy(), indices.numpy())
rh.finalize()
corpus_scores, corpus_indices = (-rh.D).tolist(), rh.I.tolist()
q_lookup: List[str] = torch.load(args.query_lookup).tolist()
os.makedirs(os.path.split(args.save_ranking_to)[0], exist_ok=True)
with open(args.save_ranking_to, 'w') as f:
for qid, q_doc_scores, q_doc_indices in zip(q_lookup, corpus_scores, corpus_indices):
_last = None
score_list = [(s, idx) for s, idx in zip(q_doc_scores, q_doc_indices)]
if rerank_dic:
new_l = []
for tp in score_list:
if tp[1] in rerank_dic[qid]:
new_l.append((tp[0]+100000.0, tp[1]))
else:
new_l.append((tp[0], tp[1]))
score_list = new_l
score_list = sorted(score_list, key=lambda x: x[0], reverse=True)
for s, idx in score_list:
if args.marco_document:
_idx = f'D{idx}'
else:
_idx = idx
f.write(f'{qid}\t{_idx}\t{s}\n')
if __name__ == '__main__':
main()
the-stack_0_7144
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import weight_reduce_loss
def cross_entropy(pred,
label,
weight=None,
reduction='mean',
avg_factor=None,
class_weight=None):
"""Calculate the CrossEntropy loss.
Args:
pred (torch.Tensor): The prediction with shape (N, C), C is the number
of classes.
label (torch.Tensor): The learning label of the prediction.
weight (torch.Tensor, optional): Sample-wise loss weight.
reduction (str, optional): The method used to reduce the loss.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
class_weight (list[float], optional): The weight for each class.
Returns:
torch.Tensor: The calculated loss
"""
# element-wise losses
loss = F.cross_entropy(pred, label, weight=class_weight, reduction='none')
# apply weights and do the reduction
if weight is not None:
weight = weight.float()
loss = weight_reduce_loss(
loss, weight=weight, reduction=reduction, avg_factor=avg_factor)
return loss
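# Illustrative usage (not part of the original module; shapes and values are assumptions):
#   pred = torch.randn(8, 4)             # (N, C) raw logits
#   label = torch.randint(0, 4, (8,))    # (N,) class indices
#   sample_w = torch.ones(8)             # per-sample weights
#   loss = cross_entropy(pred, label, weight=sample_w, reduction='mean')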
def _expand_binary_labels(labels, label_weights, label_channels):
# Caution: this function should only be used in RPN
# in other files such as in ghm_loss, the _expand_binary_labels
# is used for multi-class classification.
bin_labels = labels.new_full((labels.size(0), label_channels), 0)
inds = torch.nonzero(labels >= 1, as_tuple=False).squeeze()
if inds.numel() > 0:
bin_labels[inds, labels[inds] - 1] = 1
if label_weights is None:
bin_label_weights = None
else:
bin_label_weights = label_weights.view(-1, 1).expand(
label_weights.size(0), label_channels)
return bin_labels, bin_label_weights
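# Worked example (illustrative values, not from the original code): with
# labels = tensor([2, 0, 1]) and label_channels = 3, label 0 is treated as
# background, so the expanded binary labels are
#   [[0, 1, 0],
#    [0, 0, 0],
#    [1, 0, 0]]
# because bin_labels[i, labels[i] - 1] = 1 only where labels[i] >= 1.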
def binary_cross_entropy(pred,
label,
weight=None,
reduction='mean',
avg_factor=None,
class_weight=None):
"""Calculate the binary CrossEntropy loss.
Args:
pred (torch.Tensor): The prediction with shape (N, 1).
label (torch.Tensor): The learning label of the prediction.
weight (torch.Tensor, optional): Sample-wise loss weight.
reduction (str, optional): The method used to reduce the loss.
Options are "none", "mean" and "sum".
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
class_weight (list[float], optional): The weight for each class.
Returns:
torch.Tensor: The calculated loss
"""
if pred.dim() != label.dim():
label, weight = _expand_binary_labels(label, weight, pred.size(-1))
if pred.dim() == label.dim() and pred.shape[-1] != label.shape[-1] and label.dtype == torch.long:
num_class = pred.shape[-1]
onehot = torch.nn.functional.one_hot(label, num_classes=num_class + 1)
# import pdb; pdb.set_trace()
onehot = onehot.sum(dim=1)[..., :-1] # remove background/no-attr class
label = onehot
# weighted element-wise losses
if weight is not None:
weight = weight.float()
loss = F.binary_cross_entropy_with_logits(
pred, label.float(), weight=class_weight, reduction='none')
# do the reduction for the weighted loss
# if label.shape[-1] > 10:
# import pdb; pdb.set_trace()
if loss.dim() == 2 and loss.shape[-1] > 1:
loss = loss.mean(dim=-1)
loss = weight_reduce_loss(
loss, weight, reduction=reduction, avg_factor=avg_factor)
return loss
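# Illustrative usage (assumed shapes, not part of the original module): multi-label
# targets given as long indices trigger the one-hot conversion branch above.
#   pred = torch.randn(4, 5)                                 # (N, C) logits
#   label = torch.tensor([[0, 2], [1, 5], [3, 4], [5, 5]])   # index 5 = "no attribute", dropped
#   loss = binary_cross_entropy(pred, label, reduction='mean')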
def mask_cross_entropy(pred,
target,
label,
reduction='mean',
avg_factor=None,
class_weight=None):
"""Calculate the CrossEntropy loss for masks.
Args:
pred (torch.Tensor): The prediction with shape (N, C), C is the number
of classes.
target (torch.Tensor): The learning label of the prediction.
label (torch.Tensor): ``label`` indicates the class label of the mask's
corresponding object. This will be used to select the mask of the
class which the object belongs to when the mask prediction is not
class-agnostic.
reduction (str, optional): The method used to reduce the loss.
Options are "none", "mean" and "sum".
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
class_weight (list[float], optional): The weight for each class.
Returns:
torch.Tensor: The calculated loss
"""
# TODO: handle these two reserved arguments
assert reduction == 'mean' and avg_factor is None
num_rois = pred.size()[0]
inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
pred_slice = pred[inds, label].squeeze(1)
return F.binary_cross_entropy_with_logits(
pred_slice, target, weight=class_weight, reduction='mean')[None]
@LOSSES.register_module()
class CrossEntropyLoss(nn.Module):
def __init__(self,
use_sigmoid=False,
use_mask=False,
reduction='mean',
class_weight=None,
loss_weight=1.0):
"""CrossEntropyLoss.
Args:
use_sigmoid (bool, optional): Whether the prediction uses sigmoid
or softmax. Defaults to False.
use_mask (bool, optional): Whether to use mask cross entropy loss.
Defaults to False.
reduction (str, optional): . Defaults to 'mean'.
Options are "none", "mean" and "sum".
class_weight (list[float], optional): Weight of each class.
Defaults to None.
loss_weight (float, optional): Weight of the loss. Defaults to 1.0.
"""
super(CrossEntropyLoss, self).__init__()
assert (use_sigmoid is False) or (use_mask is False)
self.use_sigmoid = use_sigmoid
self.use_mask = use_mask
self.reduction = reduction
self.loss_weight = loss_weight
self.class_weight = class_weight
if self.use_sigmoid:
self.cls_criterion = binary_cross_entropy
elif self.use_mask:
self.cls_criterion = mask_cross_entropy
else:
self.cls_criterion = cross_entropy
def forward(self,
cls_score,
label,
weight=None,
avg_factor=None,
reduction_override=None,
**kwargs):
"""Forward function.
Args:
cls_score (torch.Tensor): The prediction.
label (torch.Tensor): The learning label of the prediction.
weight (torch.Tensor, optional): Sample-wise loss weight.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction (str, optional): The method used to reduce the loss.
Options are "none", "mean" and "sum".
Returns:
torch.Tensor: The calculated loss
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
if self.class_weight is not None:
class_weight = cls_score.new_tensor(self.class_weight)
else:
class_weight = None
loss_cls = self.loss_weight * self.cls_criterion(
cls_score,
label,
weight,
class_weight=class_weight,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss_cls
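# Illustrative usage (an assumption about the surrounding framework, not from this file):
# the registered loss is typically referenced from a config, e.g.
#   loss_cls = dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)
# or instantiated directly:
#   criterion = CrossEntropyLoss(use_sigmoid=False)
#   loss = criterion(cls_score, label)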
the-stack_0_7145
import shapely.geometry
import numpy as np
import fiona.crs
import pyproj
from shapely.geometry.point import Point
UTM_ZONE30 = pyproj.Proj(
proj='utm',
zone=30,
datum='WGS84',
units='m',
errcheck=True)
schema = {'geometry': 'LineString', 'properties': {'PhysID': 'int'}}
crs = fiona.crs.from_string(UTM_ZONE30.srs)
x0, y0, x1, y1 = 0, 0, 640, 320
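# Domain outline: four LineStrings tracing the 640 x 320 rectangle edge by edge
# (units are metres per the UTM projection declared above).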
features = \
[shapely.geometry.LineString([(x0, y0), (x1, y0)]),
shapely.geometry.LineString([(x1, y0), (x1, y1)]),
shapely.geometry.LineString([(x1, y1), (x0, y1)]),
shapely.geometry.LineString([(x0, y1), (x0, y0)])]
with fiona.collection("outline_2.shp", "w", "ESRI Shapefile", schema, crs=crs) as output:
for i in range(len(features)):
output.write({'geometry': shapely.geometry.mapping(features[i]), 'properties': {'PhysID': i}})
# Array coordinates
array_list = np.zeros((7, 2))
array_1 = np.arange(64, 320, 64)
array_2 = np.arange(64 + 32, 320-64, 64)
array_list[0:4, 0] = 640 / 3
array_list[4:, 0] = 640 / 3 + 64
array_list[0:4, 1] = array_1
array_list[4:, 1] = array_2
np.save("Turbine_coords.npy", array_list)
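# Represent each turbine position as a closed circle of radius 10 (same units as
# above) by taking the exterior ring of a buffered point.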
features2 = []
for x, y in array_list:
p = Point(x, y)
circle = shapely.geometry.LineString(list(p.buffer(10).exterior.coords))
features2.append(circle)
with fiona.collection("turbine_circles.shp", "w", "ESRI Shapefile", schema, crs=crs) as output:
for i in range(len(features2)):
output.write({'geometry': shapely.geometry.mapping(features2[i]), 'properties': {'PhysID': 100}})
the-stack_0_7146
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from collections import OrderedDict
# External imports
import bs4
from jinja2 import Template
from mock import patch
# Bokeh imports
import bokeh.resources as resources
import bokeh.util.version as buv
from bokeh.document import Document
from bokeh.embed.util import RenderRoot, standalone_docs_json
from bokeh.io import curdoc
from bokeh.plotting import figure
from bokeh.resources import CDN, CSSResources, JSResources
# Module under test
import bokeh.embed.standalone as bes # isort:skip
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
pytest_plugins = (
"bokeh._testing.plugins.project",
"bokeh._testing.plugins.selenium",
)
def stable_id():
return 'ID'
@pytest.fixture
def test_plot() -> None:
from bokeh.plotting import figure
test_plot = figure(title="'foo'")
test_plot.circle([1, 2], [2, 3])
return test_plot
@pytest.fixture
def test_plot_and_widget() -> None:
from bokeh.plotting import figure
from bokeh.layouts import column
from bokeh.models import Div
test_plot = figure(title="'foo'")
test_plot.circle([1, 2], [2, 3])
return column(Div(text="foo"), test_plot)
PAGE = Template("""
<!DOCTYPE html>
<html lang="en">
<head>
</head>
<body>
<script>
{{js}}
</script>
{{tag}}
</body>
""")
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class Test_autoload_static(object):
def test_return_type(self, test_plot) -> None:
r = bes.autoload_static(test_plot, CDN, "some/path")
assert len(r) == 2
def test_script_attrs(self, test_plot) -> None:
js, tag = bes.autoload_static(test_plot, CDN, "some/path")
html = bs4.BeautifulSoup(tag, "html.parser")
scripts = html.findAll(name='script')
assert "bokeh-widgets" not in js
assert len(scripts) == 1
attrs = scripts[0].attrs
assert set(attrs) == set(['src', 'id'])
assert attrs['src'] == 'some/path'
def test_script_attrs_with_widgets(self, test_plot_and_widget) -> None:
js, tag = bes.autoload_static(test_plot_and_widget, CDN, "some/path")
html = bs4.BeautifulSoup(tag, "html.parser")
scripts = html.findAll(name='script')
assert "bokeh-widgets" in js
assert len(scripts) == 1
attrs = scripts[0].attrs
assert set(attrs) == set(['src', 'id'])
assert attrs['src'] == 'some/path'
@pytest.mark.parametrize("version", ["1.4.0rc1", "2.0.0dev3"])
@pytest.mark.selenium
def test_js_dev_cdn(self, version, monkeypatch, driver, test_file_path_and_url, test_plot) -> None:
monkeypatch.setattr(buv, "__version__", version)
monkeypatch.setattr(resources, "__version__", version)
js, tag = bes.autoload_static(test_plot, CDN, "some/path")
page = PAGE.render(js=js, tag=tag)
path, url = test_file_path_and_url
with open(path, "w") as f:
f.write(page)
driver.get(url)
scripts = driver.find_elements_by_css_selector('head script')
assert len(scripts) == 1
for script in scripts:
assert script.get_attribute("crossorigin") == None
assert script.get_attribute("integrity") == ""
@pytest.mark.selenium
def test_js_release_cdn(self, monkeypatch, driver, test_file_path_and_url, test_plot) -> None:
monkeypatch.setattr(buv, "__version__", "2.0.0")
monkeypatch.setattr(resources, "__version__", "2.0.0")
js, tag = bes.autoload_static(test_plot, CDN, "some/path")
page = PAGE.render(js=js, tag=tag)
path, url = test_file_path_and_url
with open(path, "w") as f:
f.write(page)
driver.get(url)
scripts = driver.find_elements_by_css_selector('head script')
for x in scripts:
print(x.get_attribute("src"))
assert len(scripts) == 1
for script in scripts:
assert script.get_attribute("crossorigin") == "anonymous"
assert script.get_attribute("integrity").startswith("sha384-")
@pytest.mark.selenium
def test_js_release_cdn_with_widgets(self, monkeypatch, driver, test_file_path_and_url, test_plot_and_widget) -> None:
monkeypatch.setattr(buv, "__version__", "2.0.0")
monkeypatch.setattr(resources, "__version__", "2.0.0")
js, tag = bes.autoload_static(test_plot_and_widget, CDN, "some/path")
page = PAGE.render(js=js, tag=tag)
path, url = test_file_path_and_url
with open(path, "w") as f:
f.write(page)
driver.get(url)
scripts = driver.find_elements_by_css_selector('head script')
for x in scripts:
print(x.get_attribute("src"))
assert len(scripts) == 2 # 2 to include widgets bundle
for script in scripts:
assert script.get_attribute("crossorigin") == "anonymous"
assert script.get_attribute("integrity").startswith("sha384-")
@pytest.mark.selenium
def test_js_release_dev_cdn(self, monkeypatch, driver, test_file_path_and_url, test_plot) -> None:
monkeypatch.setattr(buv, "__version__", "2.0.0-foo")
monkeypatch.setattr(resources, "__version__", "2.0.0-foo")
js, tag = bes.autoload_static(test_plot, CDN, "some/path")
page = PAGE.render(js=js, tag=tag)
path, url = test_file_path_and_url
with open(path, "w") as f:
f.write(page)
driver.get(url)
scripts = driver.find_elements_by_css_selector('head script')
for x in scripts:
print(x.get_attribute("src"))
assert len(scripts) == 1
for script in scripts:
assert script.get_attribute("crossorigin") == "anonymous"
assert script.get_attribute("integrity").startswith("sha384-")
@pytest.mark.selenium
def test_js_release_server(self, monkeypatch, driver, test_file_path_and_url, test_plot) -> None:
monkeypatch.setattr(buv, "__version__", "2.0.0")
monkeypatch.setattr(resources, "__version__", "2.0.0")
js, tag = bes.autoload_static(test_plot, resources.Resources(mode="server"), "some/path")
page = PAGE.render(js=js, tag=tag)
path, url = test_file_path_and_url
with open(path, "w") as f:
f.write(page)
driver.get(url)
scripts = driver.find_elements_by_css_selector('head script')
assert len(scripts) == 1
for script in scripts:
assert script.get_attribute("crossorigin") == None
assert script.get_attribute("integrity") == ""
class Test_components(object):
def test_return_type(self) -> None:
plot1 = figure()
plot1.circle([], [])
plot2 = figure()
plot2.circle([], [])
# This is a testing artefact, users don't have to do this in practice
curdoc().add_root(plot1)
curdoc().add_root(plot2)
r = bes.components(plot1)
assert len(r) == 2
_, divs = bes.components((plot1, plot2))
assert isinstance(divs, tuple)
_, divs = bes.components([plot1, plot2])
assert isinstance(divs, tuple)
_, divs = bes.components({"Plot 1": plot1, "Plot 2": plot2})
assert isinstance(divs, dict)
assert all(isinstance(x, str) for x in divs.keys())
_, divs = bes.components(OrderedDict([("Plot 1", plot1), ("Plot 2", plot2)]))
assert isinstance(divs, OrderedDict)
assert all(isinstance(x, str) for x in divs.keys())
@patch('bokeh.embed.util.make_globally_unique_id', new_callable=lambda: stable_id)
def test_plot_dict_returned_when_wrap_plot_info_is_false(self, mock_make_id) -> None:
doc = Document()
plot1 = figure()
plot1.circle([], [])
doc.add_root(plot1)
plot2 = figure()
plot2.circle([], [])
doc.add_root(plot2)
expected_plotdict_1 = RenderRoot(elementid="ID", id="ID")
expected_plotdict_2 = RenderRoot(elementid="ID", id="ID")
_, plotdict = bes.components(plot1, wrap_plot_info=False)
assert plotdict == expected_plotdict_1
_, plotids = bes.components((plot1, plot2), wrap_plot_info=False)
assert plotids == (expected_plotdict_1, expected_plotdict_2)
_, plotiddict = bes.components({'p1': plot1, 'p2': plot2}, wrap_plot_info=False)
assert plotiddict == {'p1': expected_plotdict_1, 'p2': expected_plotdict_2}
def test_result_attrs(self, test_plot) -> None:
script, div = bes.components(test_plot)
html = bs4.BeautifulSoup(script, "html.parser")
scripts = html.findAll(name='script')
assert len(scripts) == 1
assert scripts[0].attrs == {'type': 'text/javascript'}
@patch('bokeh.embed.util.make_globally_unique_id', new=stable_id)
def test_div_attrs(self, test_plot) -> None:
script, div = bes.components(test_plot)
html = bs4.BeautifulSoup(div, "html.parser")
divs = html.findAll(name='div')
assert len(divs) == 1
div = divs[0]
assert set(div.attrs) == set(['class', 'id', 'data-root-id'])
assert div.attrs['class'] == ['bk-root']
assert div.attrs['id'] == 'ID'
assert div.attrs['data-root-id'] == test_plot.id
assert div.string is None
def test_script_is_utf8_encoded(self, test_plot) -> None:
script, div = bes.components(test_plot)
assert isinstance(script, str)
def test_quoting(self, test_plot) -> None:
script, div = bes.components(test_plot)
assert "&quot;" not in script
assert "'foo'" not in script
assert "&#x27;foo&#x27;" in script
def test_output_is_without_script_tag_when_wrap_script_is_false(self, test_plot) -> None:
script, div = bes.components(test_plot)
html = bs4.BeautifulSoup(script, "html.parser")
scripts = html.findAll(name='script')
assert len(scripts) == 1
# XXX: this needs to account for indentation
#script_content = scripts[0].getText()
#rawscript, div = bes.components(test_plot, wrap_script=False)
#self.maxDiff = None
#assert rawscript.strip() == script_content.strip()
class Test_file_html(object):
def test_return_type(self, test_plot) -> None:
class fake_template:
def __init__(self, tester, user_template_variables=None):
self.tester = tester
self.template_variables = {
"title",
"bokeh_js",
"bokeh_css",
"plot_script",
"doc",
"docs",
"base",
}
if user_template_variables is not None:
self.template_variables.update(user_template_variables)
def render(self, template_variables):
assert self.template_variables.issubset(set(template_variables.keys()))
return "template result"
r = bes.file_html(test_plot, CDN, "title")
assert isinstance(r, str)
r = bes.file_html(test_plot, CDN, "title", fake_template(self))
assert isinstance(r, str)
r = bes.file_html(test_plot, CDN, "title",
fake_template(self, {"test_var"}),
{"test_var": "test"})
assert isinstance(r, str)
@patch('bokeh.embed.bundle.warn')
def test_file_html_handles_js_only_resources(self, mock_warn, test_plot) -> None:
js_resources = JSResources(mode="relative", components=["bokeh"])
template = Template("<head>{{ bokeh_js }}</head><body></body>")
output = bes.file_html(test_plot, (js_resources, None), "title", template=template)
html = "<head>%s</head><body></body>" % js_resources.render_js()
assert output == html
@patch('bokeh.embed.bundle.warn')
def test_file_html_provides_warning_if_no_css(self, mock_warn, test_plot) -> None:
js_resources = JSResources()
bes.file_html(test_plot, (js_resources, None), "title")
mock_warn.assert_called_once_with(
'No Bokeh CSS Resources provided to template. If required you will need to provide them manually.'
)
@patch('bokeh.embed.bundle.warn')
def test_file_html_handles_css_only_resources(self, mock_warn, test_plot) -> None:
css_resources = CSSResources(mode="relative", components=["bokeh"])
template = Template("<head>{{ bokeh_css }}</head><body></body>")
output = bes.file_html(test_plot, (None, css_resources), "title", template=template)
html = "<head>%s</head><body></body>" % css_resources.render_css()
assert output == html
@patch('bokeh.embed.bundle.warn')
def test_file_html_provides_warning_if_no_js(self, mock_warn, test_plot) -> None:
css_resources = CSSResources()
bes.file_html(test_plot, (None, css_resources), "title")
mock_warn.assert_called_once_with(
'No Bokeh JS Resources provided to template. If required you will need to provide them manually.'
)
def test_file_html_title_is_escaped(self, test_plot) -> None:
r = bes.file_html(test_plot, CDN, "&<")
assert "<title>&amp;&lt;</title>" in r
def test_entire_doc_is_not_used(self) -> None:
from bokeh.document import Document
from bokeh.models import Button
fig = figure()
fig.x([0], [0])
button = Button(label="Button")
d = Document()
d.add_root(fig)
d.add_root(button)
out = bes.file_html([fig], CDN)
# this is a very coarse test but it will do
assert "bokeh-widgets" not in out
class Test_json_item(object):
def test_with_target_id(self, test_plot) -> None:
out = bes.json_item(test_plot, target="foo")
assert out['target_id'] == "foo"
def test_without_target_id(self, test_plot) -> None:
out = bes.json_item(test_plot)
assert out['target_id'] == None
def test_doc_json(self, test_plot) -> None:
out = bes.json_item(test_plot, target="foo")
expected = list(standalone_docs_json([test_plot]).values())[0]
assert out['doc'] == expected
def test_doc_title(self, test_plot) -> None:
out = bes.json_item(test_plot, target="foo")
assert out['doc']['title'] == ""
def test_root_id(self, test_plot) -> None:
out = bes.json_item(test_plot, target="foo")
assert out['doc']['roots']['root_ids'][0] == out['root_id']
@patch('bokeh.embed.standalone.OutputDocumentFor')
def test_apply_theme(self, mock_OFD, test_plot) -> None:
# the subsequent call inside ODF will fail since the model was never
# added to a document. Ignoring that since we just want to make sure
# ODF is called with the expected theme arg.
try:
bes.json_item(test_plot, theme="foo")
except ValueError:
pass
mock_OFD.assert_called_once_with([test_plot], apply_theme="foo")
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
class Test__title_from_models(object):
pass
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
the-stack_0_7147
from voximplant.apiclient import VoximplantAPI, VoximplantException
if __name__ == "__main__":
voxapi = VoximplantAPI("credentials.json")
# Delete the application 1 and 3.
APPLICATION_ID = [1, 3]
try:
res = voxapi.del_application(application_id=APPLICATION_ID)
print(res)
except VoximplantException as e:
print("Error: {}".format(e.message))
the-stack_0_7149
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Name: possibility.py
# Purpose: music21 class to define rule checking methods for a possibility
# represented as a tuple.
# Authors: Jose Cabal-Ugaz
#
# Copyright: Copyright © 2011 Michael Scott Cuthbert and the music21 Project
# License: LGPL or BSD, see license.txt
#-------------------------------------------------------------------------------
'''
A possibility is a tuple with pitches, and is intended to encapsulate a possible
solution to a :class:`~music21.figuredBass.segment.Segment`. Unlike a :class:`~music21.chord.Chord`,
the ordering of a possibility does matter. The assumption throughout fbRealizer
is that a possibility is always in order from highest part to lowest part, and
the last element of each possibility is the bass.
.. note:: fbRealizer supports voice crossing, so the order of pitches from lowest
to highest may not correspond to the ordering of parts.
Here, a possibility is created. G5 is in the highest part, and C4 is the bass. The highest
part contains the highest Pitch, and the lowest part contains the lowest Pitch. No voice
crossing is present.
>>> from music21 import pitch
>>> G5 = pitch.Pitch('G5')
>>> C5 = pitch.Pitch('C5')
>>> E4 = pitch.Pitch('E4')
>>> C4 = pitch.Pitch('C4')
>>> p1 = (G5, C5, E4, C4)
Here, another possibility is created with the same pitches, but this time, with voice crossing present.
C5 is in the highest part, but the highest Pitch G5 is in the second highest part.
>>> p2 = (C5, G5, E4, C4)
The methods in this module are applied to possibilities, and fall into three main categories:
1) Single Possibility Methods. These methods are applied in finding correct possibilities in
:meth:`~music21.figuredBass.segment.Segment.allCorrectSinglePossibilities`.
2) Consecutive Possibility Methods. These methods are applied to (possibA, possibB) pairs
in :meth:`~music21.figuredBass.segment.Segment.allCorrectConsecutivePossibilities`,
possibA being any correct possibility in segmentA and possibB being any correct possibility
in segmentB.
3) Special Resolution Methods. These methods are applied in :meth:`~music21.figuredBass.segment.Segment.allCorrectConsecutivePossibilities`
as applicable if the pitch names of a Segment correctly spell out an augmented sixth, dominant
seventh, or diminished seventh chord. They are located in :mod:`~music21.figuredBass.resolution`.
The application of these methods is controlled by corresponding instance variables in a
:class:`~music21.figuredBass.rules.Rules` object provided to a Segment.
.. note:: The number of parts and maxPitch are universal for a :class:`~music21.figuredBass.realizer.FiguredBassLine`.
'''
import unittest
from music21 import chord
from music21 import exceptions21
from music21 import interval
from music21 import pitch
from music21 import voiceLeading
from music21.ext import six
izip = six.moves.zip # @UndefinedVariable
# SINGLE POSSIBILITY RULE-CHECKING METHODS
# ----------------------------------------
def voiceCrossing(possibA):
'''
Returns True if there is voice crossing present between any two parts
in possibA. The parts from lowest part to highest part (right to left)
must correspond to increasingly higher pitches in order for there to
be no voice crossing. Comparisons between pitches are done using pitch
comparison methods, which are based on pitch space values
(see :class:`~music21.pitch.Pitch`).
>>> from music21 import pitch
>>> from music21.figuredBass import possibility
>>> C4 = pitch.Pitch('C4')
>>> E4 = pitch.Pitch('E4')
>>> C5 = pitch.Pitch('C5')
>>> G5 = pitch.Pitch('G5')
>>> possibA1 = (C5, G5, E4)
>>> possibility.voiceCrossing(possibA1) # G5 > C5
True
>>> possibA2 = (C5, E4, C4)
>>> possibility.voiceCrossing(possibA2)
False
'''
hasVoiceCrossing = False
for part1Index in range(len(possibA)):
higherPitch = possibA[part1Index]
for part2Index in range(part1Index + 1, len(possibA)):
lowerPitch = possibA[part2Index]
if higherPitch < lowerPitch:
hasVoiceCrossing = True
return hasVoiceCrossing
return hasVoiceCrossing
def isIncomplete(possibA, pitchNamesToContain):
'''
Returns True if possibA is incomplete, if it doesn't contain at least
one of every pitch name in pitchNamesToContain.
For a Segment, pitchNamesToContain is :attr:`~music21.figuredBass.segment.Segment.pitchNamesInChord`.
If possibA contains excessive pitch names, a PossibilityException is
raised, although this is not a concern with the current implementation
of fbRealizer.
>>> from music21 import pitch
>>> from music21.figuredBass import possibility
>>> C3 = pitch.Pitch('C3')
>>> E4 = pitch.Pitch('E4')
>>> G4 = pitch.Pitch('G4')
>>> C5 = pitch.Pitch('C5')
>>> Bb5 = pitch.Pitch('B-5')
>>> possibA1 = (C5, G4, E4, C3)
>>> pitchNamesA1 = ['C', 'E', 'G', 'B-']
>>> possibility.isIncomplete(possibA1, pitchNamesA1) # Missing B-
True
>>> pitchNamesA2 = ['C', 'E', 'G']
>>> possibility.isIncomplete(possibA1, pitchNamesA2)
False
'''
isIncomplete = False
pitchNamesContained = []
for givenPitch in possibA:
if givenPitch.name not in pitchNamesContained:
pitchNamesContained.append(givenPitch.name)
for pitchName in pitchNamesToContain:
if pitchName not in pitchNamesContained:
isIncomplete = True
if not isIncomplete and (len(pitchNamesContained) > len(pitchNamesToContain)):
isIncomplete = False
#raise PossibilityException(str(possibA) + " contains pitch names not found in pitchNamesToContain.")
return isIncomplete
def upperPartsWithinLimit(possibA, maxSemitoneSeparation = 12):
'''
Returns True if the pitches in the upper parts of possibA
are found within maxSemitoneSeparation of each other. The
upper parts of possibA are all the pitches except the last.
The default value of maxSemitoneSeparation is 12 semitones,
enharmonically equivalent to a perfect octave. If this method
returns True for this default value, then all the notes in
the upper parts can be played by most adult pianists using
just the right hand.
>>> from music21 import pitch
>>> from music21.figuredBass import possibility
>>> C3 = pitch.Pitch('C3')
>>> E3 = pitch.Pitch('E3')
>>> E4 = pitch.Pitch('E4')
>>> G4 = pitch.Pitch('G4')
>>> C5 = pitch.Pitch('C5')
>>> possibA1 = (C5, G4, E4, C3)
>>> possibility.upperPartsWithinLimit(possibA1)
True
Here, C5 and E3 are separated by almost two octaves.
>>> possibA2 = (C5, G4, E3, C3)
>>> possibility.upperPartsWithinLimit(possibA2)
False
'''
upperPartsWithinLimit = True
if maxSemitoneSeparation == None:
return upperPartsWithinLimit
upperParts = possibA[0:len(possibA)-1]
for part1Index in range(len(upperParts)):
higherPitch = upperParts[part1Index]
for part2Index in range(part1Index + 1, len(upperParts)):
lowerPitch = upperParts[part2Index]
if abs(higherPitch.ps - lowerPitch.ps) > maxSemitoneSeparation:
upperPartsWithinLimit = False
return upperPartsWithinLimit
return upperPartsWithinLimit
def pitchesWithinLimit(possibA, maxPitch = pitch.Pitch('B5')):
'''
Returns True if all pitches in possibA are less than or equal to
the maxPitch provided. Comparisons between pitches are done using pitch
comparison methods, which are based on pitch space values
(see :class:`~music21.pitch.Pitch`).
Used in :class:`~music21.figuredBass.segment.Segment` to filter
resolutions of special Segments which can have pitches exceeding
the universal maxPitch of a :class:`~music21.figuredBass.realizer.FiguredBassLine`.
>>> from music21.figuredBass import possibility
>>> from music21.figuredBass import resolution
>>> from music21 import pitch
>>> G2 = pitch.Pitch('G2')
>>> D4 = pitch.Pitch('D4')
>>> F5 = pitch.Pitch('F5')
>>> B5 = pitch.Pitch('B5')
>>> domPossib = (B5, F5, D4, G2)
>>> possibility.pitchesWithinLimit(domPossib)
True
>>> resPossib = resolution.dominantSeventhToMajorTonic(domPossib)
>>> resPossib # Contains C6 > B5
(<music21.pitch.Pitch C6>, <music21.pitch.Pitch E5>, <music21.pitch.Pitch C4>, <music21.pitch.Pitch C3>)
>>> possibility.pitchesWithinLimit(resPossib)
False
'''
for givenPitch in possibA:
if givenPitch > maxPitch:
return False
return True
def limitPartToPitch(possibA, partPitchLimits = {}):
'''
Takes in partPitchLimits containing (partNumber, partPitch) pairs, each
of which limits a part in possibA to a certain :class:`~music21.pitch.Pitch`.
Returns True if all limits are followed in possibA, False otherwise.
>>> from music21.figuredBass import possibility
>>> from music21 import pitch
>>> C4 = pitch.Pitch('C4')
>>> E4 = pitch.Pitch('E4')
>>> G4 = pitch.Pitch('G4')
>>> C5 = pitch.Pitch('C5')
>>> G5 = pitch.Pitch('G5')
>>> sopranoPitch = pitch.Pitch('G5')
>>> possibA1 = (C5, G4, E4, C4)
>>> possibility.limitPartToPitch(possibA1, {1: sopranoPitch})
False
>>> possibA2 = (G5, G4, E4, C4)
>>> possibility.limitPartToPitch(possibA2, {1: sopranoPitch})
True
'''
for (partNumber, partPitch) in partPitchLimits.items():
if not (possibA[partNumber - 1] == partPitch):
return False
return True
# CONSECUTIVE POSSIBILITY RULE-CHECKING METHODS
# ---------------------------------------------
#Speedup tables
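# These dictionaries memoize results of the pairwise checks below, keyed by pitch
# quartets, so repeated VoiceLeadingQuartet evaluations of the same pitch
# combinations are avoided.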
parallelFifthsTable = {}
parallelOctavesTable = {}
hiddenFifthsTable = {}
hiddenOctavesTable = {}
def parallelFifths(possibA, possibB):
'''
Returns True if there are parallel fifths between any
two shared parts of possibA and possibB.
If pitchA1 and pitchA2 in possibA are separated by
a simple interval of a perfect fifth, and they move
to a pitchB1 and pitchB2 in possibB also separated
by the simple interval of a perfect fifth, then this
constitutes parallel fifths between these two parts.
If the method returns False, then no two shared parts
have parallel fifths. The method returns True as soon
as two shared parts with parallel fifths are found.
>>> from music21 import pitch
>>> from music21.figuredBass import possibility
>>> C3 = pitch.Pitch('C3')
>>> D3 = pitch.Pitch('D3')
>>> G3 = pitch.Pitch('G3')
>>> A3 = pitch.Pitch('A3')
>>> A4 = pitch.Pitch('A4')
>>> B4 = pitch.Pitch('B4')
Here, the bass moves from C3 to D3 and the tenor moves
from G3 to A3. The interval between C3 and G3, as well
as between D3 and A3, is a perfect fifth. These two
parts, and therefore the two possibilities, have
parallel fifths.
>>> possibA1 = (B4, G3, C3)
>>> possibB1 = (A4, A3, D3)
>>> possibility.parallelFifths(possibA1, possibB1)
True
Now, the tenor moves instead to F3. The interval between
D3 and F3 is a minor third. The bass and tenor parts
don't form parallel fifths. The soprano part forms parallel
fifths with neither the bass nor tenor parts. The
two possibilities, therefore, have no parallel fifths.
>>> F3 = pitch.Pitch('F3')
>>> possibA2 = (B4, G3, C3)
>>> possibB2 = (A4, F3, D3)
>>> possibility.parallelFifths(possibA2, possibB2)
False
'''
hasParallelFifths = False
pairsList = partPairs(possibA, possibB)
for pair1Index in range(len(pairsList)):
(higherPitchA, higherPitchB) = pairsList[pair1Index]
for pair2Index in range(pair1Index + 1, len(pairsList)):
(lowerPitchA, lowerPitchB) = pairsList[pair2Index]
if not abs(higherPitchA.ps - lowerPitchA.ps) % 12 == 7:
continue
if not abs(higherPitchB.ps - lowerPitchB.ps) % 12 == 7:
continue
#Very high probability of ||5, but still not certain.
pitchQuartet = (lowerPitchA, lowerPitchB, higherPitchA, higherPitchB)
if pitchQuartet in parallelFifthsTable:
hasParallelFifths = parallelFifthsTable[pitchQuartet]
if hasParallelFifths:
return hasParallelFifths
vlq = voiceLeading.VoiceLeadingQuartet(*pitchQuartet)
if vlq.parallelFifth():
hasParallelFifths = True
parallelFifthsTable[pitchQuartet] = hasParallelFifths
if hasParallelFifths:
return hasParallelFifths
return hasParallelFifths
def parallelOctaves(possibA, possibB):
'''
Returns True if there are parallel octaves between any
two shared parts of possibA and possibB.
If pitchA1 and pitchA2 in possibA are separated by
a simple interval of a perfect octave, and they move
to a pitchB1 and pitchB2 in possibB also separated
by the simple interval of a perfect octave, then this
constitutes parallel octaves between these two parts.
If the method returns False, then no two shared parts
have parallel octaves. The method returns True as soon
as two shared parts with parallel octaves are found.
>>> from music21 import pitch
>>> from music21.figuredBass import possibility
>>> C3 = pitch.Pitch('C3')
>>> D3 = pitch.Pitch('D3')
>>> G3 = pitch.Pitch('G3')
>>> A3 = pitch.Pitch('A3')
>>> C4 = pitch.Pitch('C4')
>>> D4 = pitch.Pitch('D4')
Here, the soprano moves from C4 to D4 and the bass moves
from C3 to D3. The interval between C3 and C4, as well as
between D3 and D4, is a parallel octave. The two parts,
and therefore the two possibilities, have parallel octaves.
>>> possibA1 = (C4, G3, C3)
>>> possibB1 = (D4, A3, D3)
>>> possibility.parallelOctaves(possibA1, possibB1)
True
Now, the soprano moves down to B3. The interval between
D3 and B3 is a major sixth. The soprano and bass parts
no longer have parallel octaves. The tenor part forms
a parallel octave with neither the bass nor soprano,
so the two possibilities do not have parallel octaves.
(Notice, however, the parallel fifth between the bass
and tenor!)
>>> B3 = pitch.Pitch('B3')
>>> possibA2 = (C4, G3, C3)
>>> possibB2 = (B3, A3, D3)
>>> possibility.parallelOctaves(possibA2, possibB2)
False
'''
hasParallelOctaves = False
pairsList = partPairs(possibA, possibB)
for pair1Index in range(len(pairsList)):
(higherPitchA, higherPitchB) = pairsList[pair1Index]
for pair2Index in range(pair1Index + 1, len(pairsList)):
(lowerPitchA, lowerPitchB) = pairsList[pair2Index]
if not abs(higherPitchA.ps - lowerPitchA.ps) % 12 == 0:
continue
if not abs(higherPitchB.ps - lowerPitchB.ps) % 12 == 0:
continue
#Very high probability of ||8, but still not certain.
pitchQuartet = (lowerPitchA, lowerPitchB, higherPitchA, higherPitchB)
if pitchQuartet in parallelOctavesTable:
hasParallelOctaves = parallelOctavesTable[pitchQuartet]
if hasParallelOctaves:
return hasParallelOctaves
vlq = voiceLeading.VoiceLeadingQuartet(*pitchQuartet)
if vlq.parallelOctave():
hasParallelOctaves = True
parallelOctavesTable[pitchQuartet] = hasParallelOctaves
if hasParallelOctaves:
return hasParallelOctaves
return hasParallelOctaves
def hiddenFifth(possibA, possibB):
'''
Returns True if there is a hidden fifth between shared outer parts
of possibA and possibB. The outer parts here are the first and last
elements of each possibility.
If sopranoPitchA and bassPitchA in possibA move to a sopranoPitchB
and bassPitchB in possibB in similar motion, and the simple interval
between sopranoPitchB and bassPitchB is that of a perfect fifth,
then this constitutes a hidden fifth between the two possibilities.
>>> from music21 import pitch
>>> from music21.figuredBass import possibility
>>> C3 = pitch.Pitch('C3')
>>> D3 = pitch.Pitch('D3')
>>> E3 = pitch.Pitch('E3')
>>> F3 = pitch.Pitch('F3')
>>> E5 = pitch.Pitch('E5')
>>> A5 = pitch.Pitch('A5')
Here, the bass part moves up from C3 to D3 and the soprano part moves
up from E5 to A5. The simple interval between D3 and A5 is a perfect
fifth. Therefore, there is a hidden fifth between the two possibilities.
>>> possibA1 = (E5, E3, C3)
>>> possibB1 = (A5, F3, D3)
>>> possibility.hiddenFifth(possibA1, possibB1)
True
Here, the soprano and bass parts also move in similar motion, but the
simple interval between D3 and Ab5 is a diminished fifth. Consequently,
there is no hidden fifth.
>>> Ab5 = pitch.Pitch('A-5')
>>> possibA2 = (E5, E3, C3)
>>> possibB2 = (Ab5, F3, D3)
>>> possibility.hiddenFifth(possibA2, possibB2)
False
Now, we have the soprano and bass parts again moving to A5 and D3, whose
simple interval is a perfect fifth. However, the bass moves up while the
soprano moves down. Therefore, there is no hidden fifth.
>>> E6 = pitch.Pitch('E6')
>>> possibA3 = (E6, E3, C3)
>>> possibB3 = (A5, F3, D3)
>>> possibility.hiddenFifth(possibA3, possibB3)
False
'''
hasHiddenFifth = False
pairsList = partPairs(possibA, possibB)
(highestPitchA, highestPitchB) = pairsList[0]
(lowestPitchA, lowestPitchB) = pairsList[-1]
if abs(highestPitchB.ps - lowestPitchB.ps) % 12 == 7:
#Very high probability of hidden fifth, but still not certain.
pitchQuartet = (lowestPitchA, lowestPitchB, highestPitchA, highestPitchB)
if pitchQuartet in hiddenFifthsTable:
hasHiddenFifth = hiddenFifthsTable[pitchQuartet]
return hasHiddenFifth
vlq = voiceLeading.VoiceLeadingQuartet(*pitchQuartet)
if vlq.hiddenFifth():
hasHiddenFifth = True
hiddenFifthsTable[pitchQuartet] = hasHiddenFifth
return hasHiddenFifth
def hiddenOctave(possibA, possibB):
'''
Returns True if there is a hidden octave between shared outer parts
of possibA and possibB. The outer parts here are the first and last
elements of each possibility.
If sopranoPitchA and bassPitchA in possibA move to a sopranoPitchB
and bassPitchB in possibB in similar motion, and the simple interval
between sopranoPitchB and bassPitchB is that of a perfect octave,
then this constitutes a hidden octave between the two possibilities.
>>> from music21 import pitch
>>> from music21.figuredBass import possibility
>>> C3 = pitch.Pitch('C3')
>>> D3 = pitch.Pitch('D3')
>>> E3 = pitch.Pitch('E3')
>>> F3 = pitch.Pitch('F3')
>>> A5 = pitch.Pitch('A5')
>>> D6 = pitch.Pitch('D6')
Here, the bass part moves up from C3 to D3 and the soprano part moves
up from A5 to D6. The simple interval between D3 and D6 is a perfect
octave. Therefore, there is a hidden octave between the two possibilities.
>>> possibA1 = (A5, E3, C3)
>>> possibB1 = (D6, F3, D3) #Perfect octave between soprano and bass.
>>> possibility.hiddenOctave(possibA1, possibB1)
True
Here, the bass part moves up from C3 to D3 but the soprano part moves
down from A6 to D6. There is no hidden octave since the parts move in
contrary motion.
>>> A6 = pitch.Pitch('A6')
>>> possibA2 = (A6, E3, C3)
>>> possibB2 = (D6, F3, D3)
>>> possibility.hiddenOctave(possibA2, possibB2)
False
'''
hasHiddenOctave = False
pairsList = partPairs(possibA, possibB)
(highestPitchA, highestPitchB) = pairsList[0]
(lowestPitchA, lowestPitchB) = pairsList[-1]
if abs(highestPitchB.ps - lowestPitchB.ps) % 12 == 0:
#Very high probability of hidden octave, but still not certain.
pitchQuartet = (lowestPitchA, lowestPitchB, highestPitchA, highestPitchB)
if pitchQuartet in hiddenOctavesTable:
hasHiddenOctave = hiddenOctavesTable[pitchQuartet]
return hasHiddenOctave
vlq = voiceLeading.VoiceLeadingQuartet(*pitchQuartet)
if vlq.hiddenOctave():
hasHiddenOctave = True
hiddenOctavesTable[pitchQuartet] = hasHiddenOctave
return hasHiddenOctave
def voiceOverlap(possibA, possibB):
'''
Returns True if there is voice overlap between any two shared parts
of possibA and possibB.
Voice overlap can occur in two ways:
1) If a pitch in a lower part in possibB is higher than a pitch in
a higher part in possibA. This case is demonstrated below.
2) If a pitch in a higher part in possibB is lower than a pitch in
a lower part in possibA.
.. image:: images/figuredBass/fbPossib_voiceOverlap.*
:width: 75
In the above example, possibA has G4 in the bass and B4 in the soprano.
If the bass moves up to C5 in possibB, that would constitute voice overlap
because the bass in possibB would be higher than the soprano in possibA.
>>> from music21 import pitch
>>> from music21.figuredBass import possibility
>>> C4 = pitch.Pitch('C4')
>>> D4 = pitch.Pitch('D4')
>>> E4 = pitch.Pitch('E4')
>>> F4 = pitch.Pitch('F4')
>>> G4 = pitch.Pitch('G4')
>>> C5 = pitch.Pitch('C5')
Here, case #2 is demonstrated. There is overlap between the soprano and
alto parts, because F4 in the soprano in possibB1 is lower than the G4
in the alto in possibA1. Note that neither possibility has to have voice
crossing for voice overlap to occur, as shown.
>>> possibA1 = (C5, G4, E4, C4)
>>> possibB1 = (F4, F4, D4, D4)
>>> possibility.voiceOverlap(possibA1, possibB1)
True
>>> possibility.voiceCrossing(possibA1)
False
>>> possibility.voiceCrossing(possibB1)
False
Here is the same example as above, except the soprano of the second
possibility is now B4, which does not overlap the G4 of the first.
Now, there is no voice overlap.
>>> B4 = pitch.Pitch('B4')
>>> possibA2 = (C5, G4, E4, C4)
>>> possibB2 = (B4, F4, D4, D4)
>>> possibility.voiceOverlap(possibA2, possibB2)
False
'''
hasVoiceOverlap = False
pairsList = partPairs(possibA, possibB)
for pair1Index in range(len(pairsList)):
(higherPitchA, higherPitchB) = pairsList[pair1Index]
for pair2Index in range(pair1Index + 1, len(pairsList)):
(lowerPitchA, lowerPitchB) = pairsList[pair2Index]
if lowerPitchB > higherPitchA or higherPitchB < lowerPitchA:
hasVoiceOverlap = True
return hasVoiceOverlap
return hasVoiceOverlap
def partMovementsWithinLimits(possibA, possibB, partMovementLimits = []):
'''
Returns True if all movements between shared parts of possibA and possibB
are within limits, as specified by partMovementLimits, which consists of
(partNumber, maxSeparation) tuples.
* partNumber: Specified from 1 to n, where 1 is the soprano or highest part and n is the bass or lowest part.
* maxSeparation: For a given part, the maximum separation to allow between a pitch in possibA and a corresponding pitch in possibB, in semitones.
>>> from music21 import pitch
>>> from music21.figuredBass import possibility
>>> C4 = pitch.Pitch('C4')
>>> D4 = pitch.Pitch('D4')
>>> E4 = pitch.Pitch('E4')
>>> F4 = pitch.Pitch('F4')
>>> G4 = pitch.Pitch('G4')
>>> A4 = pitch.Pitch('A4')
>>> B4 = pitch.Pitch('B4')
>>> C5 = pitch.Pitch('C5')
Here, we limit the soprano part to motion of two semitones, enharmonically equivalent to a major second.
Moving from C5 to B4 is allowed because it constitutes stepwise motion, but moving to A4 is not allowed
because the distance between A4 and C5 is three semitones.
>>> partMovementLimits = [(1, 2)]
>>> possibA1 = (C5, G4, E4, C4)
>>> possibB1 = (B4, F4, D4, D4)
>>> possibility.partMovementsWithinLimits(possibA1, possibB1, partMovementLimits)
True
>>> possibB2 = (A4, F4, D4, D4)
>>> possibility.partMovementsWithinLimits(possibA1, possibB2, partMovementLimits)
False
'''
withinLimits = True
for (partNumber, maxSeparation) in partMovementLimits:
pitchA = possibA[partNumber - 1]
pitchB = possibB[partNumber - 1]
if abs(pitchB.ps - pitchA.ps) > maxSeparation:
withinLimits = False
return withinLimits
return withinLimits
def upperPartsSame(possibA, possibB):
'''
Returns True if the upper parts are the same.
False otherwise.
>>> from music21 import pitch
>>> from music21.figuredBass import possibility
>>> C4 = pitch.Pitch('C4')
>>> D4 = pitch.Pitch('D4')
>>> E4 = pitch.Pitch('E4')
>>> F4 = pitch.Pitch('F4')
>>> G4 = pitch.Pitch('G4')
>>> B4 = pitch.Pitch('B4')
>>> C5 = pitch.Pitch('C5')
>>> possibA1 = (C5, G4, E4, C4)
>>> possibB1 = (B4, F4, D4, D4)
>>> possibB2 = (C5, G4, E4, D4)
>>> possibility.upperPartsSame(possibA1, possibB1)
False
>>> possibility.upperPartsSame(possibA1, possibB2)
True
'''
pairsList = partPairs(possibA, possibB)
for (pitchA, pitchB) in pairsList[0:-1]:
if not (pitchA == pitchB):
return False
return True
def partsSame(possibA, possibB, partsToCheck = None):
'''
Takes in partsToCheck, a list of part numbers. Checks if pitches at those part numbers of
possibA and possibB are equal, determined by pitch space.
>>> from music21 import pitch
>>> from music21.figuredBass import possibility
>>> C4 = pitch.Pitch('C4')
>>> E4 = pitch.Pitch('E4')
>>> G4 = pitch.Pitch('G4')
>>> B4 = pitch.Pitch('B4')
>>> C5 = pitch.Pitch('C5')
>>> possibA1 = (C5, G4, E4, C4)
>>> possibB1 = (B4, G4, E4, C4)
>>> possibility.partsSame(possibA1, possibB1, [2,3,4])
True
'''
if partsToCheck == None:
return True
pairsList = partPairs(possibA, possibB)
for partIndex in partsToCheck:
(pitchA, pitchB) = pairsList[partIndex - 1]
if not (pitchA == pitchB):
return False
return True
def couldBeItalianA6Resolution(possibA, possibB, threePartChordInfo = None, restrictDoublings = True):
'''
Speed-enhanced but designed to stand alone. Returns True if possibA is an Italian A6 chord
and possibB could possibly be an acceptable resolution. If restrictDoublings is set to True,
only the tonic can be doubled. Setting restrictDoublings to False opens up the chance
that the root or the third can be doubled. Controlled in the :class:`~music21.figuredBass.rules.Rules`
object by :attr:`~music21.figuredBass.rules.Rules.restrictDoublingsInItalianA6Resolution`.
>>> from music21 import pitch
>>> from music21.figuredBass import possibility
>>> A2 = pitch.Pitch('A2')
>>> Bb2 = pitch.Pitch('B-2')
>>> Cs4 = pitch.Pitch('C#4')
>>> D4 = pitch.Pitch('D4')
>>> E4 = pitch.Pitch('E4')
>>> Fs4 = pitch.Pitch('F#4')
>>> Gs4 = pitch.Pitch('G#4')
>>> A4 = pitch.Pitch('A4')
>>> possibA1 = (Gs4, D4, D4, Bb2)
>>> possibB1 = (A4, Cs4, E4, A2)
>>> possibB2 = (A4, E4, Cs4, A2)
>>> possibB3 = (A4, D4, Fs4, A2)
>>> possibility.couldBeItalianA6Resolution(possibA1, possibB1)
True
    >>> possibility.couldBeItalianA6Resolution(possibA1, possibB2)
True
>>> possibility.couldBeItalianA6Resolution(possibA1, possibB3)
True
    A PossibilityException is raised if possibA is not an Italian A6 chord, but this
    applies only if threePartChordInfo = None, because otherwise the chord information is
coming from :class:`~music21.figuredBass.segment.Segment` and the fact that possibA is
an It+6 chord is assumed.
>>> possibA2 = (Gs4, E4, D4, Bb2)
>>> possibB2 = (A4, E4, Cs4, A2)
>>> possibility.couldBeItalianA6Resolution(possibA2, possibB2)
Traceback (most recent call last):
PossibilityException: possibA does not spell out an It+6 chord.
The method is called "couldBeItalianA6Resolution" as opposed
to "isItalianA6Resolution" because it is designed to work in
tandem with :meth:`~music21.figuredBass.possibility.parallelOctaves`
and :meth:`~music21.figuredBass.possibility.isIncomplete` in
a Segment. Consider the following examples with possibA1 above as the
augmented sixth chord to resolve.
>>> possibA1 = (Gs4, D4, D4, Bb2)
>>> possibB4 = (A4, D4, D4, A2) # No 3rd
>>> possibB5 = (A4, Cs4, Cs4, A2) # No 5th
>>> possibility.couldBeItalianA6Resolution(possibA1, possibB4)
True
>>> possibility.couldBeItalianA6Resolution(possibA1, possibB5) # parallel octaves
True
>>> possibA3 = (Gs4, Gs4, D4, Bb2)
>>> possibB6 = (A4, A4, Cs4, A2)
>>> possibility.couldBeItalianA6Resolution(possibA3, possibB6, restrictDoublings = True)
False
>>> possibility.couldBeItalianA6Resolution(possibA3, possibB6, restrictDoublings = False)
True
'''
if threePartChordInfo == None:
augSixthChord = chord.Chord(possibA)
if not augSixthChord.isItalianAugmentedSixth():
raise PossibilityException("possibA does not spell out an It+6 chord.")
bass = augSixthChord.bass()
root = augSixthChord.root()
third = augSixthChord.getChordStep(3)
fifth = augSixthChord.getChordStep(5)
threePartChordInfo = [bass, root, third, fifth]
allowedIntervalNames = ['M3','m3','M2','m-2']
rootResolved = False
[bass, root, third, fifth] = threePartChordInfo
for pitchIndex in range(len(possibA)):
pitchA = possibA[pitchIndex]
pitchB = possibB[pitchIndex]
if pitchA.name == fifth.name:
if pitchA == pitchB:
continue
if abs(pitchA.ps - pitchB.ps) > 4.0:
return False
tt = interval.Interval(pitchA, pitchB)
if not tt.directedSimpleName in allowedIntervalNames:
return False
elif pitchA.name == bass.name and pitchA == bass:
if not (pitchA.ps - pitchB.ps) == 1.0:
return False
i = interval.Interval(pitchA, pitchB)
if not i.directedName == 'm-2':
return False
elif pitchA.name == root.name:
if rootResolved == True and restrictDoublings:
# there can't be more than one root
return False
if not (pitchB.ps - pitchA.ps) == 1.0:
return False
i = interval.Interval(pitchA, pitchB)
if not i.directedName == 'm2':
return False
rootResolved = True
elif pitchA.name == third.name:
if restrictDoublings:
# there can't be more than one third, which is in the bass.
return False
if not (pitchA.ps - pitchB.ps) == 1.0:
return False
i = interval.Interval(pitchA, pitchB)
if not i.directedName == 'm-2':
return False
'''
# Part 1: Check if possibA is A6 chord, and if it is properly formed.
bass = possibA[-1]
root = None
rootIndex = 0
for pitchA in possibA[0:-1]:
if not (pitchA.ps - bass.ps) % 12 == 10:
rootIndex += 1
continue
br = interval.Interval(bass, pitchA)
isAugmentedSixth = (br.directedSimpleName == 'A6')
if isAugmentedSixth:
root = pitchA
break
tonic = bass.transpose('M3')
#Restrict doublings, It+6
for pitchIndex in range(len(possibA) - 1):
if pitchIndex == rootIndex:
continue
pitchA = possibA[pitchIndex]
if not pitchA.name == tonic.name:
return False
#Part 2: If possibA is Italian A6 chord, check that it resolves properly in possibB.
fifth = root.transpose('m2')
pairsList = partPairs(possibA, possibB)
(bassA, bassB) = pairsList[-1]
(rootA, rootB) = pairsList[rootIndex]
if not (bassB.name == fifth.name and rootB.name == fifth.name):
return False
if not (bassB.ps - bassA.ps == -1.0 and rootB.ps - rootA.ps == 1.0):
return False
allowedIntervalNames = ['M3','m3','M2','m-2']
for pitchIndex in range(len(pairsList) - 1):
if pitchIndex == rootIndex:
continue
(tonicA, tonicB) = pairsList[pitchIndex]
if tonicA == tonicB:
continue
tt = interval.Interval(tonicA, tonicB)
if not tt.directedSimpleName in allowedIntervalNames:
return False
'''
return True
# HELPER METHODS
# --------------
def partPairs(possibA, possibB):
'''
Groups together pitches of possibA and possibB which correspond to the same part,
constituting a shared part.
>>> from music21 import pitch
>>> from music21.figuredBass import possibility
>>> C4 = pitch.Pitch('C4')
>>> D4 = pitch.Pitch('D4')
>>> E4 = pitch.Pitch('E4')
>>> F4 = pitch.Pitch('F4')
>>> G4 = pitch.Pitch('G4')
>>> B4 = pitch.Pitch('B4')
>>> C5 = pitch.Pitch('C5')
>>> possibA1 = (C5, G4, E4, C4)
>>> possibB1 = (B4, F4, D4, D4)
>>> possibility.partPairs(possibA1, possibA1)
[(<music21.pitch.Pitch C5>, <music21.pitch.Pitch C5>),
(<music21.pitch.Pitch G4>, <music21.pitch.Pitch G4>),
(<music21.pitch.Pitch E4>, <music21.pitch.Pitch E4>),
(<music21.pitch.Pitch C4>, <music21.pitch.Pitch C4>)]
>>> possibility.partPairs(possibA1, possibB1)
[(<music21.pitch.Pitch C5>, <music21.pitch.Pitch B4>),
(<music21.pitch.Pitch G4>, <music21.pitch.Pitch F4>),
(<music21.pitch.Pitch E4>, <music21.pitch.Pitch D4>),
(<music21.pitch.Pitch C4>, <music21.pitch.Pitch D4>)]
'''
return list(izip(possibA, possibB))
# apply a function to one pitch of possibA at a time
# apply a function to two pitches of possibA at a time
# apply a function to one partPair of possibA, possibB at a time
# apply a function to two partPairs of possibA, possibB at a time
# use an iterator that fails when the first false is returned
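# Illustrative sketch of the last idea above (not part of the original module):
# all() over a generator expression short-circuits at the first False, e.g.
#   def allPartPairsSatisfy(possibA, possibB, rule):
#       return all(rule(pA, pB) for (pA, pB) in partPairs(possibA, possibB))
# where `rule` stands for any two-pitch predicate such as those defined above.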
singlePossibilityMethods = [voiceCrossing, isIncomplete, upperPartsWithinLimit, pitchesWithinLimit]
#singlePossibilityMethods.sort(None, lambda x: x.__name__)
consequentPossibilityMethods = [parallelFifths, parallelOctaves, hiddenFifth, hiddenOctave, voiceOverlap,
partMovementsWithinLimits, upperPartsSame, couldBeItalianA6Resolution]
#consequentPossibilityMethods.sort(None, lambda x: x.__name__)
_DOC_ORDER = singlePossibilityMethods + [partPairs] + consequentPossibilityMethods
class PossibilityException(exceptions21.Music21Exception):
pass
#-------------------------------------------------------------------------------
class Test(unittest.TestCase):
def runTest(self):
pass
if __name__ == "__main__":
import music21
music21.mainTest(Test)
#------------------------------------------------------------------------------
# eof |
the-stack_0_7150 | #!/usr/bin/env python
import pygame # pylint: disable=import-error
# Define some colors
BLACK = ( 0, 0, 0)
WHITE = ( 255, 255, 255)
# This is a simple class that will help us print to the screen
# It has nothing to do with the joysticks, just outputting the
# information.
class TextPrint:
def __init__(self):
self.reset()
self.font = pygame.font.Font(None, 20)
def printf(self, screen, textString):
textBitmap = self.font.render(textString, True, BLACK)
screen.blit(textBitmap, [self.x, self.y])
self.y += self.line_height
def reset(self):
self.x = 10
self.y = 10
self.line_height = 15
def indent(self):
self.x += 10
def unindent(self):
self.x -= 10
pygame.init()
# Set the width and height of the screen [width,height]
size = [500, 700]
screen = pygame.display.set_mode(size)
pygame.display.set_caption("My Game")
#Loop until the user clicks the close button.
done = False
# Used to manage how fast the screen updates
clock = pygame.time.Clock()
# Initialize the joysticks
pygame.joystick.init()
# Get ready to print
textPrint = TextPrint()
# -------- Main Program Loop -----------
while done==False:
# EVENT PROCESSING STEP
for event in pygame.event.get(): # User did something
if event.type == pygame.QUIT: # If user clicked close
done=True # Flag that we are done so we exit this loop
# Possible joystick actions: JOYAXISMOTION JOYBALLMOTION JOYBUTTONDOWN JOYBUTTONUP JOYHATMOTION
if event.type == pygame.JOYBUTTONDOWN:
print("Joystick button pressed.")
if event.type == pygame.JOYBUTTONUP:
print("Joystick button released.")
# DRAWING STEP
# First, clear the screen to white. Don't put other drawing commands
# above this, or they will be erased with this command.
screen.fill(WHITE)
textPrint.reset()
# Get count of joysticks
joystick_count = pygame.joystick.get_count()
textPrint.printf(screen, "Number of joysticks: {}".format(joystick_count) )
textPrint.indent()
    # Use the first joystick only (index 0)
joystick = pygame.joystick.Joystick(0)
joystick.init()
textPrint.printf(screen, "Joystick {}".format(0) )
textPrint.indent()
# Get the name from the OS for the controller/joystick
name = joystick.get_name()
textPrint.printf(screen, "Joystick name: {}".format(name) )
# Usually axis run in pairs, up/down for one, and left/right for
# the other.
axes = joystick.get_numaxes()
textPrint.printf(screen, "Number of axes: {}".format(axes) )
textPrint.indent()
for i in range( axes ):
axis = joystick.get_axis( i )
textPrint.printf(screen, "Axis {} value: {:>6.3f}".format(i, axis) )
textPrint.unindent()
buttons = joystick.get_numbuttons()
textPrint.printf(screen, "Number of buttons: {}".format(buttons) )
textPrint.indent()
for i in range( buttons ):
button = joystick.get_button( i )
textPrint.printf(screen, "Button {:>2} value: {}".format(i,button) )
textPrint.unindent()
textPrint.unindent()
# ALL CODE TO DRAW SHOULD GO ABOVE THIS COMMENT
# Go ahead and update the screen with what we've drawn.
pygame.display.flip()
# Limit to 20 frames per second
clock.tick(20)
# Close the window and quit.
# If you forget this line, the program will 'hang'
# on exit if running from IDLE.
pygame.quit()
|
the-stack_0_7153 | import math
from config.config import config
class Plot:
@staticmethod
def line(prices, size=(100, 100), position=(0, 0), draw=None, fill=None):
assert draw
max_price = max(prices)
min_price = min(prices)
normalised_prices = [(price - min_price) / (max_price - min_price) for price in prices]
plot_data = []
for i, element in enumerate(normalised_prices):
x = i * (size[0] / len(normalised_prices)) + position[0]
y = size[1] - (element * size[1]) + position[1]
plot_data.append((x, y))
draw.line(plot_data, fill=fill)
@staticmethod
def y_axis_labels(prices, font, position_first=(0, 0), position_last=(0, 0), draw=None, fill=None, labels_number=3):
def center_x(price):
area_width = position_last[0] - position_first[0]
text_width, _ = draw.textsize(price, font)
if area_width >= text_width:
return position_first[0] + (area_width - text_width) / 2
else:
return position_first[0]
max_price = max(prices)
min_price = min(prices)
price_step = (max_price - min_price) / (labels_number - 1)
y_step = (position_last[1] - position_first[1]) / (labels_number - 1)
for i in range(0, labels_number):
human_price = Plot.human_format(min_price + i * price_step, 5)
draw.text((center_x(human_price), position_last[1] - i * y_step), human_price, font=font, fill=fill)
@staticmethod
def percentage(prices, x_middle, y, font, draw, fill=None):
open = prices[0][0]
close = prices[len(prices) - 1][3]
percentage = ((1 - (close / open)) * -1) * 100
price_text = Plot.human_format(percentage, 4, 0)
price_text = price_text + "%"
if percentage > 0:
price_text = "+" + price_text
text_width, _ = draw.textsize(price_text, font)
price_position = ((x_middle - (text_width / 2)), y)
draw.text(price_position, price_text, font=font, fill=fill)
return text_width
@staticmethod #offset name #offset price
def caption(price, y, screen_width, font, draw, name, fill=None, currency_offset=4, price_offset=4):
#draw.text((currency_offset, y), config.currency[:3], font=font, fill=fill)
draw.text((currency_offset, y), name, font=font, fill=fill)
price_text = Plot.human_format(price, 2, 2)
text_width, _ = draw.textsize(price_text, font)
#price_position = (((screen_width - text_width - price_offset) / 2) + price_offset, y)
price_position = ((screen_width - text_width - price_offset), y)
draw.text(price_position, price_text, font=font, fill=fill)
@staticmethod
def candle(data, size=(100, 100), position=(0, 0), draw=None, fill_neg="#000000", fill_pos=None):
width = size[0]
height = size[1]
candle_width = 9
space = 1
num_of_candles = width // (candle_width + space)
leftover_space = width % (candle_width + space)
windows_per_candle = len(data) // num_of_candles
data_offset = len(data) % num_of_candles
candle_data = []
for i in range(data_offset, len(data), windows_per_candle):
window = data[i:i + windows_per_candle]
open = window[0][0]
close = window[len(window) - 1][3]
high = max([i[1] for i in window])
low = min([i[2] for i in window])
candle_data.append((open, high, low, close))
all_values = [item for sublist in candle_data for item in sublist]
max_price = max(all_values)
min_price = min(all_values)
normalised_data = []
for line in candle_data:
normalised_line = []
normalised_data.append(normalised_line)
for i in range(len(line)):
price = line[i]
normalised_line.append((price - min_price) / (max_price - min_price))
def y_flip(y):
return height - (y * height) + position[1]
for i, element in enumerate(normalised_data):
open = element[0]
close = element[3]
high = element[1]
low = element[2]
x = candle_width * i + space * i + leftover_space / 2 + position[0]
# high price
wick_x = x + (candle_width // 2)
draw.line([wick_x, y_flip(high), wick_x, y_flip(max(open, close))], fill=fill_pos)
# low price
draw.line([wick_x, y_flip(low), wick_x, y_flip(min(open, close))], fill=fill_pos)
open_y = math.floor(y_flip(open))
close_y = math.floor(y_flip(close))
if open_y == close_y:
draw.line([x, open_y, x + candle_width - 1, close_y], fill=fill_pos)
else:
if open < close:
draw.rectangle([x, open_y, x + candle_width - 1, close_y], fill=fill_pos)
else:
draw.rectangle([x, open_y, x + candle_width - 1, close_y], fill=fill_neg)
# TODO: Adapt for big numbers 1k, 1m, etc
@staticmethod
def human_format(number, length, fractional_minimal=0):
magnitude = 0
num = number
while abs(num) >= 10:
magnitude += 1
num /= 10.0
format_string = f'%.{fractional_minimal}f'
if length >= magnitude + fractional_minimal + 2:
fractional_length = length - magnitude - 2
format_string = f'%.{fractional_length}f'
return format_string % number
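
# Illustrative usage sketch (not part of the original module): how human_format
# fits a number into a fixed character budget. The sample values are made up,
# and running this file directly assumes the config import above resolves.
if __name__ == "__main__":
    print(Plot.human_format(1234.5678, 7))    # "1234.57" - trimmed to 7 characters
    print(Plot.human_format(-3.14159, 5, 2))  # keeps at least 2 fractional digits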
|
the-stack_0_7154 | """
Explores the kbase draft to see if any metabolic genes are present which are not present in iSG3
"""
import os
from settings import INTERMEDIATE_MODEL_ROOT
import pandas as pd
import re
import cobra as cb
df = pd.read_excel(os.path.join(INTERMEDIATE_MODEL_ROOT,'kbase-draft', 'draft_dsm.xls'),
sheet_name='ModelReactions')
df = df.replace(pd.np.nan, '', regex=True)
draft_genes = []
for ind, row in df.iterrows():
match = re.findall(r'(CLO1313_RS[0-9]+)', row['gpr'])
draft_genes.extend(match)
isg = cb.io.load_json_model(os.path.join(INTERMEDIATE_MODEL_ROOT, 'iSG_3.json'))
isg_genes = [gene.id for gene in isg.genes]
not_in_isg = set(draft_genes) - set(isg_genes)
print('The draft model contains {} metabolic genes which are not in iSG'.format(len(not_in_isg)))
pattern = '|'.join(list(not_in_isg))
df2 = df[df['gpr'].str.contains(pattern)]
df2.to_csv(os.path.join(INTERMEDIATE_MODEL_ROOT, 'kbase-draft', 'not_in_isg3.csv'),
index=False)
print('These genes span {} metabolic reactions'.format(len(df2))) |
the-stack_0_7155 | """xception in pytorch
[1] François Chollet
Xception: Deep Learning with Depthwise Separable Convolutions
https://arxiv.org/abs/1610.02357
"""
import torch
import torch.nn as nn
__all__ = ['xception']
class SeperableConv2d(nn.Module):
    # Figure 4. An “extreme” version of our Inception module,
    # with one spatial convolution per output channel of the 1x1
    # convolution.
def __init__(self, input_channels, output_channels, kernel_size, **kwargs):
super().__init__()
self.depthwise = nn.Conv2d(
input_channels,
input_channels,
kernel_size,
groups=input_channels,
bias=False,
**kwargs
)
self.pointwise = nn.Conv2d(input_channels, output_channels, 1, bias=False)
def forward(self, x):
x = self.depthwise(x)
x = self.pointwise(x)
return x
class EntryFlow(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(3, 32, 3, padding=1, bias=False),
nn.BatchNorm2d(32),
nn.ReLU(inplace=True)
)
self.conv2 = nn.Sequential(
nn.Conv2d(32, 64, 3, padding=1, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True)
)
self.conv3_residual = nn.Sequential(
SeperableConv2d(64, 128, 3, padding=1),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
SeperableConv2d(128, 128, 3, padding=1),
nn.BatchNorm2d(128),
nn.MaxPool2d(3, stride=2, padding=1),
)
self.conv3_shortcut = nn.Sequential(
nn.Conv2d(64, 128, 1, stride=2),
nn.BatchNorm2d(128),
)
self.conv4_residual = nn.Sequential(
nn.ReLU(inplace=True),
SeperableConv2d(128, 256, 3, padding=1),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
SeperableConv2d(256, 256, 3, padding=1),
nn.BatchNorm2d(256),
nn.MaxPool2d(3, stride=2, padding=1)
)
self.conv4_shortcut = nn.Sequential(
nn.Conv2d(128, 256, 1, stride=2),
nn.BatchNorm2d(256),
)
#no downsampling
self.conv5_residual = nn.Sequential(
nn.ReLU(inplace=True),
SeperableConv2d(256, 728, 3, padding=1),
nn.BatchNorm2d(728),
nn.ReLU(inplace=True),
SeperableConv2d(728, 728, 3, padding=1),
nn.BatchNorm2d(728),
nn.MaxPool2d(3, 1, padding=1)
)
#no downsampling
self.conv5_shortcut = nn.Sequential(
nn.Conv2d(256, 728, 1),
nn.BatchNorm2d(728)
)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
residual = self.conv3_residual(x)
shortcut = self.conv3_shortcut(x)
x = residual + shortcut
residual = self.conv4_residual(x)
shortcut = self.conv4_shortcut(x)
x = residual + shortcut
residual = self.conv5_residual(x)
shortcut = self.conv5_shortcut(x)
x = residual + shortcut
return x
class MiddleFLowBlock(nn.Module):
def __init__(self):
super().__init__()
self.shortcut = nn.Sequential()
self.conv1 = nn.Sequential(
nn.ReLU(inplace=True),
SeperableConv2d(728, 728, 3, padding=1),
nn.BatchNorm2d(728)
)
self.conv2 = nn.Sequential(
nn.ReLU(inplace=True),
SeperableConv2d(728, 728, 3, padding=1),
nn.BatchNorm2d(728)
)
self.conv3 = nn.Sequential(
nn.ReLU(inplace=True),
SeperableConv2d(728, 728, 3, padding=1),
nn.BatchNorm2d(728)
)
def forward(self, x):
residual = self.conv1(x)
residual = self.conv2(residual)
residual = self.conv3(residual)
shortcut = self.shortcut(x)
return shortcut + residual
class MiddleFlow(nn.Module):
def __init__(self, block):
super().__init__()
#"""then through the middle flow which is repeated eight times"""
self.middel_block = self._make_flow(block, 8)
def forward(self, x):
x = self.middel_block(x)
return x
def _make_flow(self, block, times):
flows = []
for i in range(times):
flows.append(block())
return nn.Sequential(*flows)
class ExitFLow(nn.Module):
def __init__(self):
super().__init__()
self.residual = nn.Sequential(
nn.ReLU(),
SeperableConv2d(728, 728, 3, padding=1),
nn.BatchNorm2d(728),
nn.ReLU(),
SeperableConv2d(728, 1024, 3, padding=1),
nn.BatchNorm2d(1024),
nn.MaxPool2d(3, stride=2, padding=1)
)
self.shortcut = nn.Sequential(
nn.Conv2d(728, 1024, 1, stride=2),
nn.BatchNorm2d(1024)
)
self.conv = nn.Sequential(
SeperableConv2d(1024, 1536, 3, padding=1),
nn.BatchNorm2d(1536),
nn.ReLU(inplace=True),
SeperableConv2d(1536, 2048, 3, padding=1),
nn.BatchNorm2d(2048),
nn.ReLU(inplace=True)
)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
def forward(self, x):
shortcut = self.shortcut(x)
residual = self.residual(x)
output = shortcut + residual
output = self.conv(output)
output = self.avgpool(output)
return output
class Xception(nn.Module):
def __init__(self, block, num_classes=100):
super().__init__()
self.entry_flow = EntryFlow()
self.middel_flow = MiddleFlow(block)
self.exit_flow = ExitFLow()
self.fc = nn.Linear(2048, num_classes)
def forward(self, x):
x = self.entry_flow(x)
x = self.middel_flow(x)
x = self.exit_flow(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def xception(num_classes=10):
return Xception(MiddleFLowBlock, num_classes=num_classes)
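
# Illustrative sketch (not part of the original file): a quick forward-pass
# shape check on a random batch. The 32x32 input size is an assumption based on
# the CIFAR-style context of this network definition.
if __name__ == '__main__':
    net = xception(num_classes=100)
    out = net(torch.randn(2, 3, 32, 32))
    print(out.shape)  # expected: torch.Size([2, 100])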
|
the-stack_0_7156 | import unittest
import numpy as np
import openmdao.api as om
import numpy.testing as npt
import wisdem.commonse.wind_wave_drag as wwd
from openmdao.utils.assert_utils import assert_check_partials
npts = 100
myones = np.ones((npts,))
class TestDrag(unittest.TestCase):
def setUp(self):
self.params = {}
self.unknowns = {}
self.resid = None
# variables
self.params["U"] = 2.0 * myones
self.params["A"] = 4.0 * myones
self.params["p"] = 3.0 * myones
self.params["cm"] = 1.0
self.params["d"] = 10.0 * myones
self.params["rho_water"] = 0.5
self.params["mu_water"] = 1e-3
self.params["z"] = -100.0 * myones
self.params["beta_wave"] = 0.0
self.params["cd_usr"] = -1.0
self.wave = wwd.CylinderWaveDrag(nPoints=npts)
def testRegular(self):
U = 2.0
A = 4.0
# cm = 1.0
r = 5.0
rho = 0.5
# mu = 1e-3
# Re = rho*U*2*r/mu
q = 0.5 * rho * U * U
cd = 1.11
area = 2 * r
D = q * area * cd
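        # Expected Morison-type load: inertia term rho*cm*(pi*r^2)*A with cm = 1,
        # plus the drag term D computed above.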
Fi = rho * A * np.pi * r * r
Fp = Fi + D
self.wave.compute(self.params, self.unknowns)
npt.assert_equal(self.unknowns["waveLoads_Px"], Fp)
npt.assert_equal(self.unknowns["waveLoads_Py"], 0.0)
npt.assert_equal(self.unknowns["waveLoads_Pz"], 0.0)
npt.assert_equal(self.unknowns["waveLoads_qdyn"], q)
npt.assert_equal(self.unknowns["waveLoads_pt"], q + 3.0)
npt.assert_equal(self.unknowns["waveLoads_z"], -100.0)
npt.assert_equal(self.unknowns["waveLoads_beta"], 0.0)
npt.assert_equal(self.unknowns["waveLoads_d"], 10.0)
def testCDset(self):
self.params["cd_usr"] = 2.0
U = 2.0
A = 4.0
r = 5.0
rho = 0.5
q = 0.5 * rho * U * U
area = 2 * r
D = q * area * 2.0
Fi = rho * A * np.pi * r * r
Fp = Fi + D
self.wave.compute(self.params, self.unknowns)
npt.assert_equal(self.unknowns["waveLoads_Px"], Fp)
def test_wave_derivs(self):
nPoints = 5
prob = om.Problem()
comp = wwd.CylinderWaveDrag(nPoints=nPoints)
prob.model.add_subsystem("comp", comp, promotes=["*"])
prob.setup(force_alloc_complex=True)
# Add some arbitrary inputs
prob.set_val("U", np.arange(nPoints), units="m/s")
prob.set_val("A", np.ones(nPoints), units="m/s**2")
prob.set_val("p", np.ones(nPoints) * 0.5, units="N/m**2")
prob.set_val("z", np.linspace(0.0, 10.0, nPoints), units="m")
prob.set_val("d", np.ones(nPoints), units="m")
prob.set_val("beta_wave", 1.2, units="deg")
prob.set_val("rho_water", 1.0, units="kg/m**3")
prob.set_val("mu_water", 0.001, units="kg/(m*s)")
prob.set_val("cm", 10.0)
prob.set_val("cd_usr", 0.01)
prob.run_model()
check = prob.check_partials(out_stream=None, compact_print=True, method="fd")
assert_check_partials(check, rtol=5e-5, atol=1e-1)
def test_wind_derivs(self):
nPoints = 5
prob = om.Problem()
comp = wwd.CylinderWindDrag(nPoints=nPoints)
prob.model.add_subsystem("comp", comp, promotes=["*"])
prob.setup(force_alloc_complex=True)
# Add some arbitrary inputs
prob.set_val("U", np.arange(nPoints), units="m/s")
prob.set_val("z", np.linspace(0.0, 10.0, nPoints), units="m")
prob.set_val("d", np.ones(nPoints), units="m")
prob.set_val("beta_wind", 1.2, units="deg")
prob.set_val("rho_air", 1.0, units="kg/m**3")
prob.set_val("mu_air", 0.001, units="kg/(m*s)")
prob.set_val("cd_usr", 0.01)
prob.run_model()
check = prob.check_partials(out_stream=None, compact_print=True, method="fd")
assert_check_partials(check, rtol=5e-5, atol=1e-1)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestDrag))
return suite
if __name__ == "__main__":
result = unittest.TextTestRunner().run(suite())
if result.wasSuccessful():
exit(0)
else:
exit(1)
|
the-stack_0_7158 | import sys
import pickle
import numpy as np
from scipy.stats import bernoulli
sys.path.append('./../')
sys.path.append('./../../')
from src.FullModel.model import Model as parent_model
from src.LocalGlobalAttentionModel.model import Model as super_model
from .vel_param import VelParam as vel_param
from src.HMC.hmc import HMC
delta = 10 ** -200
class Model(parent_model):
"""
This class implements the Fixed Choice model as described in the paper.
    It has the same local and global policies as the full model; the difference is in the calculation of rho.
Here rho has a fixed value.
"""
def __init__(self, saliencies, rho, epsilon, xi, cov_ratio=1):
# epsilon and xi should be the objects from parent_model, with fix_dist_ind = 0
# rho should be from this model
super_model.__init__(self, saliencies)
self.rho = rho
self.epsilon = epsilon
self.xi = xi
self.cov_ratio = cov_ratio
self.fix_dist_ind = 0
def calc_ros(self, *args):
return self.rho.value
# Methods for generating data
def generate_gamma(self, s_t):
"""
        This method generates gamma according to a Bernoulli distribution with p = rho
:param s_t: here just to be compatible with the parent class.
:return: gamma \sim Ber(rho)
"""
return bernoulli.rvs(self.rho.value)
# Methods for parameters inference via Gibbs sampling
def sample_gamma(self):
"""
        This method samples from the conditional posterior distribution of gamma.
For details see the paper.
:return: a sample \gamma_i for each data point
"""
BF = self.calc_BF()
gammas = []
for i, sal_ts in enumerate(self.saliencies_ts):
gammas.append([])
for s, subject in enumerate(sal_ts):
ros = self.rho.value / (self.rho.value + BF[i][s] * (1 - self.rho.value))
gammas[-1].append(bernoulli.rvs(ros))
return gammas
def sample(self, num_samples, save_steps, file_path, sample_gammas=True):
"""
        This method performs Gibbs sampling for the model parameters.
:param num_samples: number of samples in the chain
:param save_steps: whether to save the chains.
:param file_path: path to a file to save the chains
        :param sample_gammas: whether to sample gamma or not
        :return: arrays with samples for each of the model parameters - rho, epsilon, xi
"""
# initialize the arrays that will hold the samples.
samples_rho = np.zeros(num_samples)
samples_epsilon = np.zeros((num_samples, 2))
samples_xi = np.zeros((num_samples, 2))
# set variables needed for the HMC inference of epsilon and xi
vel_eps = vel_param([1, 1])
vel_xi = vel_param([1, 1])
delta_xi = 0.5
delta_eps = 0.03
n = 10
m = 1
hmc_eps = HMC(self.epsilon, vel_eps, delta_eps, n, m)
hmc_xi = HMC(self.xi, vel_xi, delta_xi, n, m)
if not sample_gammas:
self.remove_first_gamma()
for i in range(num_samples):
if sample_gammas:
self.gammas = self.sample_gamma()
if i == 0:
if not self.rho.is_fixed:
self.rho.set_num_time_steps(self.gammas)
rho_samp = self.rho.conditional_posterior(self.gammas)
self.rho.set_value(rho_samp)
if not self.epsilon.is_fixed:
hmc_eps.HMC(self.xi.value, self.cov_ratio, self.saliencies, self.gammas, self.fix_dists_2,
self.dist_mat_per_fix,
self.xi.alpha, self.xi.betta)
epsilon_samp = hmc_eps.state_param.value
if not self.xi.is_fixed:
hmc_xi.HMC(self.epsilon.value, self.cov_ratio, self.saliencies, self.gammas, self.fix_dists_2,
self.dist_mat_per_fix,
self.epsilon.alpha, self.epsilon.betta)
xi_samp = hmc_xi.state_param.value
samples_rho[i] = rho_samp
samples_epsilon[i] = epsilon_samp
samples_xi[i] = xi_samp
if save_steps and not i % 50:
with open(file_path, 'wb') as f:
pickle.dump([samples_rho[:i], samples_epsilon[:i], samples_xi[:i]], f)
if save_steps:
with open(file_path, 'wb') as f:
pickle.dump([samples_rho, samples_epsilon, samples_xi], f)
return samples_rho, samples_epsilon, samples_xi
|
the-stack_0_7164 | #!/usr/bin/env python3
import functools
import os.path
import numpy as np
class CExample(object):
def __init__(self, x, y, w, z=1):
self.x = x
self.y = y
self.w = w
self.z = z
def copy(self):
return CExample(self.x, self.y, self.w, self.z)
class CDataSet(object):
def __init__(self, all_data=None, train=None, test=None, log_data=None, online_data=None, r=None):
self.all_data = [] if all_data is None else [x.copy() for x in all_data]
self.train_data = [] if train is None else [x.copy() for x in train]
self.test_data = [] if test is None else [x.copy() for x in test]
self.log_data = None if log_data is None else [x.copy() for x in log_data]
self.online_data = None if online_data is None else [x.copy() for x in online_data]
def load_data(self, filename, handler):
self.all_data = []
with open(filename) as file:
if handler == data_parser_libsvm:
self.all_data = data_parser_libsvm([line for line in file])
else:
self.all_data = [handler(line.strip().split(',')) for line in file]
def copy_all(self):
return CDataSet(self.all_data, self.train_data, self.test_data)
def copy(self):
return CDataSet(self.all_data, self.train_data, self.test_data, self.log_data, self.online_data)
def random_split(self, prop, r):
self.train_data = [x for x in self.all_data]
r.shuffle(self.train_data)
cnt = int(len(self.all_data)*prop)
self.test_data = self.train_data[cnt:]
self.train_data = self.train_data[:cnt]
def split_log(self, prop):
cnt = int(len(self.train_data)*prop)
self.log_data = self.train_data[:cnt]
self.online_data = self.train_data[cnt:]
def to_binary_label(dataset, rule):
return CDataSet([CExample(d.x, rule(d.y), d.w, d.z) for d in dataset.all_data if rule(d.y)!=0])
def normalize(dataset):
lb = functools.reduce(np.minimum, [e.x for e in dataset.all_data])
ub = functools.reduce(np.maximum, [e.x for e in dataset.all_data])
mid = (lb+ub)/2
diff = np.array([x if x>0 else 1 for x in ub-lb])
return CDataSet([CExample((e.x-mid)/diff*2, e.y, e.w, e.z) for e in dataset.all_data])
def gen_synthetic_uniform(n, d, r):
w = r.rand(d) - r.rand(d)
X = r.rand(n, d) - r.rand(n, d)
return w, [CExample(x, (1 if np.inner(x,w)>=0 else -1)*(-1 if r.rand()<0.05 else 1), 1, 1) for x in X]
def gen_synthetic_bandit(data, Q, r):
prop = [Q(dp.x) for dp in data]
tmp = [CExample(dp[0].x, dp[0].y, 1.0/dp[1], 1 if r.rand()<dp[1] else 0) for dp in zip(data, prop)]
return [CExample(tmp[i].x, tmp[i].y, tmp[i].w, i+1) for i in range(0, len(tmp)) if tmp[i].z==1]
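# Sketch of how the two generators above compose (not part of the original file):
# a uniform logging policy Q(x) = 0.3 keeps each example with probability 0.3,
# assigns it importance weight w = 1/0.3, and records its 1-based position in z, e.g.
#   r = np.random.RandomState(0)
#   w, data = gen_synthetic_uniform(1000, 10, r)
#   logged = gen_synthetic_bandit(data, lambda x: 0.3, r)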
def data_parser_rear(l):
features = np.array([float(x) for x in l[:-1]])
return CExample(features, l[-1], 1, 1)
def data_parser_front(l):
features = np.array([float(x) for x in l[1:]])
return CExample(features, l[0], 1, 1)
def data_parser_libsvm(ls):
split_ls = [l.strip().split() for l in ls]
num_features = max([max([0] + [int(e.split(":")[0]) for e in l[1:]]) for l in split_ls])
examples = []
for l in split_ls:
f = [0]*num_features
for e in l[1:]:
idx, val = e.split(":")
f[int(idx)-1] = float(val)
examples.append(CExample(np.array(f), l[0].strip(), 1, 1))
return examples
DATA_COLLECTION_PATHS = ["../data/", "../../data/", "../../../data/", \
"/media/songbai/Files/research/observational/logged data/code/data/", \
"N:\\research\\observational\\logged data\\code\\data\\"]
LibsvmBinaryRule = lambda s: 1 if float(s) > 0.5 else -1
DatasetInfo = {"skin": ("skin.txt", data_parser_rear, lambda s: 1 if s=="1" else -1),\
"magic": ("magic04.data", data_parser_rear, lambda s: 1 if s=="g" else -1),\
"eeg": ("eeg.data", data_parser_rear, lambda s: 1 if s=="1" else -1),\
"covtype": ("covtype.data", data_parser_rear, lambda s: 1 if s=="1" else (-1 if s=="2" else 0)),\
"letter": ("letter.data", data_parser_front, lambda s: 1 if s=="U" else (-1 if s=="P" else 0)),\
"a9a": ("a9a.txt", data_parser_libsvm, LibsvmBinaryRule),\
"a5a": ("a5a", data_parser_libsvm, LibsvmBinaryRule),\
"cod-rna": ("cod-rna.txt", data_parser_libsvm, LibsvmBinaryRule),\
"german": ("german.numer_scale", data_parser_libsvm, LibsvmBinaryRule),\
"ijcnn1": ("ijcnn1.tr", data_parser_libsvm, LibsvmBinaryRule),\
"mushrooms": ("mushrooms.txt", data_parser_libsvm, lambda s: 1 if int(s)==1 else -1),\
"phishing": ("phishing.txt", data_parser_libsvm, LibsvmBinaryRule),\
"splice": ("splice.t", data_parser_libsvm, LibsvmBinaryRule),\
"svmguide1": ("svmguide1.t", data_parser_libsvm, LibsvmBinaryRule),\
"w7a": ("w7a", data_parser_libsvm, LibsvmBinaryRule),}
def load_data(dataset_name, r, max_sz = None):
if dataset_name == "synthetic":
return CDataSet(gen_synthetic_uniform(6000 if max_sz is None else max_sz, 30, r)[1])
if dataset_name not in DatasetInfo:
print("dataset " + dataset_name +" is unknown")
return None
dataset_path = None
info = DatasetInfo[dataset_name]
for path in DATA_COLLECTION_PATHS:
#print(path+"/"+dataset_name)
if os.path.isfile(path+"/"+info[0]):
dataset_path = path+"/"+info[0]
break
if dataset_path is None:
print("data file for " + dataset_name +" does not exist")
return None
dataset = CDataSet()
dataset.load_data(dataset_path, info[1])
dataset = normalize(to_binary_label(dataset, info[2]))
if max_sz != None:
r.shuffle(dataset.all_data)
dataset = CDataSet(dataset.all_data[:max_sz])
return dataset
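
# Illustrative sketch (not part of the original file): build the synthetic
# dataset and split it in half. numpy's RandomState matches the r.rand/r.shuffle
# calls used above; the seed and sizes are arbitrary.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    dataset = load_data("synthetic", rng, max_sz=2000)
    dataset.random_split(0.5, rng)
    print(len(dataset.train_data), len(dataset.test_data))  # 1000 1000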
|
the-stack_0_7168 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
import django.db.models.deletion
import utils.time
class Migration(migrations.Migration):
dependencies = [
('events', '0037_merge'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='HasVoted',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
],
options={
'verbose_name': 'deltagare i omröstningen',
'verbose_name_plural': 'deltagarna i omröstningen',
},
),
migrations.CreateModel(
name='Options',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
('name', models.CharField(verbose_name='alternativ', max_length=255)),
],
options={
'verbose_name': 'alternativ',
'verbose_name_plural': 'alternativen',
},
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
('name', models.CharField(verbose_name='namn', max_length=255)),
('body', models.TextField(verbose_name='utförlig information', help_text='Utförligare information till frågan.')),
('result', models.CharField(default='p', choices=[('d', 'Publik tillgång till detaljerad information om röstingen.'), ('l', 'Publik tillgång till begränsad information om röstningen.'), ('p', 'Privat åtkomst enbart för administratörer')], max_length=1)),
('question_status', models.CharField(default='c', choices=[('o', 'Öppen'), ('c', 'Stängd')], max_length=1)),
('nr_of_picks', models.IntegerField(default=1, verbose_name='Antal val en användare kan kryssa i på frågan.')),
('anonymous', models.BooleanField(default=True, verbose_name='namn')),
('modified_by', models.ForeignKey(help_text='Användaren som ändrat på frågan.', on_delete=django.db.models.deletion.SET_NULL, verbose_name='användare', to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'verbose_name': 'fråga',
'verbose_name_plural': 'frågor',
},
),
migrations.CreateModel(
name='QuestionGroup',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
('question_status', models.CharField(default='e', choices=[('e', 'Incheckade deltagare på ett event kan rösta.'), ('a', 'Alla medlemmar kan rösta')], max_length=1)),
('visible_from', models.DateTimeField(default=utils.time.now, verbose_name='publicering', help_text='Publiceringsdatum')),
('visible_to', models.DateTimeField(default=utils.time.now_plus_one_month, verbose_name='avpublicering', help_text='Avpubliceringsdatum')),
('event', models.ForeignKey(verbose_name='event', to='events.Event', blank=True, null=True)),
],
options={
'verbose_name': 'frågegrupp',
'verbose_name_plural': 'frågegrupper',
},
),
migrations.CreateModel(
name='Votes',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
('option', models.ForeignKey(verbose_name='alternativ', to='votings.Options')),
('question', models.ForeignKey(verbose_name='fråga', to='votings.Question')),
('user', models.ForeignKey(verbose_name='användare', to=settings.AUTH_USER_MODEL, blank=True, null=True)),
],
options={
'verbose_name': 'röst',
'verbose_name_plural': 'röster',
},
),
migrations.AddField(
model_name='question',
name='question_group',
field=models.ForeignKey(verbose_name='frågegrupp', to='votings.QuestionGroup'),
),
migrations.AddField(
model_name='options',
name='question',
field=models.ForeignKey(verbose_name='fråga', to='votings.Question'),
),
migrations.AddField(
model_name='hasvoted',
name='question',
field=models.ForeignKey(verbose_name='fråga', to='votings.Question'),
),
migrations.AddField(
model_name='hasvoted',
name='user',
field=models.ForeignKey(verbose_name='användare', to=settings.AUTH_USER_MODEL),
),
]
|
the-stack_0_7169 | #!/usr/bin/env python
"""This module serializes AFF4 objects in various ways."""
import yaml
from grr.lib import aff4
from grr.lib import rdfvalue
def YamlDumper(aff4object):
"""Dumps the given aff4object into a yaml representation."""
aff4object.Flush()
result = {}
for attribute, values in aff4object.synced_attributes.items():
result[attribute.predicate] = []
for value in values:
# This value is really a LazyDecoder() instance. We need to get at the
# real data here.
value = value.ToRDFValue()
result[attribute.predicate].append([
value.__class__.__name__, value.SerializeToString(), str(value.age)
])
return yaml.dump(
dict(
aff4_class=aff4object.__class__.__name__,
_urn=aff4object.urn.SerializeToString(),
attributes=result,
age_policy=aff4object.age_policy,))
def YamlLoader(string):
"""Load an AFF4 object from a serialized YAML representation."""
representation = yaml.load(string)
result_cls = aff4.FACTORY.AFF4Object(representation["aff4_class"])
aff4_attributes = {}
for predicate, values in representation["attributes"].items():
attribute = aff4.Attribute.PREDICATES[predicate]
tmp = aff4_attributes[attribute] = []
for rdfvalue_cls_name, value, age in values:
rdfvalue_cls = aff4.FACTORY.RDFValue(rdfvalue_cls_name)
value = rdfvalue_cls(value, age=rdfvalue.RDFDatetime(age))
tmp.append(value)
# Ensure the object is dirty so when we save it, it can be written to the data
# store.
result = result_cls(
urn=representation["_urn"],
clone=aff4_attributes,
mode="rw",
age=representation["age_policy"])
result.new_attributes, result.synced_attributes = result.synced_attributes, {}
result._dirty = True # pylint: disable=protected-access
return result
|
the-stack_0_7170 | import streamlit as st
import pandas as pd
import pickle
import numpy as np
st.write("""
## Forest Fires
""")
st.sidebar.header('User Input')
st.sidebar.subheader('Please enter your data:')
# -- Define function to display widgets and store data
def get_input():
# Display widgets and store their values in variables
v_X = st.sidebar.radio('X', ['1', '2', '3', '4', '5', '6', '7', '8', '9'])
v_Y = st.sidebar.radio('Y', ['2', '3', '4', '5', '6', '7', '8', '9'])
v_month = st.sidebar.radio('month', ['February','March','April','June','July','August','September','October','December'])
v_day = st.sidebar.radio('Day', ['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday','Sunday'])
    # slider(label, min, max, default, step); defaults start at the minimum
    v_FFMC = st.sidebar.slider('FFMC', 18.7, 96.2, 18.7, 0.1)
    v_DMC = st.sidebar.slider('DMC', 1.1, 291.3, 1.1, 0.1)
    v_DC = st.sidebar.slider('DC', 7.9, 860.6, 7.9, 0.1)
    v_ISI = st.sidebar.slider('ISI', 0.0, 56.1, 0.0, 0.1)
    v_temp = st.sidebar.slider('temp', 2.2, 33.3, 2.2, 0.1)
    v_RH = st.sidebar.slider('RH', 15, 100, 15, 1)
    v_wind = st.sidebar.slider('wind', 0.4, 9.4, 0.4, 0.1)
    v_rain = st.sidebar.slider('rain', 0.0, 6.4, 0.0, 0.1)
# Month
if v_month == 'February':
v_month = '2'
elif v_month == 'March':
v_month = '3'
elif v_month == 'April':
v_month = '4'
elif v_month == 'June':
v_month = '6'
elif v_month == 'July':
v_month = '7'
elif v_month == 'August':
v_month = '8'
elif v_month == 'September':
v_month = '9'
elif v_month == 'October':
v_month = '10'
elif v_month == 'December':
v_month = '12'
# Day
if v_day == 'Monday':
v_day = '1'
elif v_day == 'Tuesday':
v_day = '2'
elif v_day == 'Wednesday':
v_day = '3'
elif v_day == 'Thursday':
v_day = '4'
elif v_day == 'Friday':
v_day = '5'
elif v_day == 'Saturday':
v_day = '6'
elif v_day == 'Sunday':
v_day = '7'
# Store user input data in a dictionary
data = {'X': v_X,
'Y': v_Y,
'month': v_month,
'day': v_day,
'FFMC': v_FFMC,
'DMC': v_DMC,
'DC': v_DC,
'ISI': v_ISI,
'temp': v_temp,
'RH': v_RH,
'wind': v_wind,
'rain': v_rain,}
# Create a data frame from the above dictionary
data_df = pd.DataFrame(data, index=[0])
return data_df
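
# A more compact alternative to the if/elif chains above (sketch only, not used):
# the month and day names could be mapped through dictionaries, e.g.
#   MONTHS = {'February': '2', 'March': '3', 'April': '4', 'June': '6',
#             'July': '7', 'August': '8', 'September': '9',
#             'October': '10', 'December': '12'}
#   v_month = MONTHS[v_month]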
# -- Call function to display widgets and get data from user
df = get_input()
st.header('Application of Status Prediction:')
# -- Display new data from user inputs:
st.subheader('User Input:')
st.write(df)
# -- Data Pre-processing for New Data:
# Combines user input data with sample dataset
# The sample data contains unique values for each nominal feature
# This will be used for the One-hot encoding
data_sample = pd.read_csv('ML_A.csv')
df = pd.concat([df, data_sample],axis=0)
###Data Cleaning & Feature Engineering###
#drop
df = df.drop(columns=['area'])
df = df.drop(columns=['Unnamed: 0'])
df_num=df
# -- Display pre-processed new data:
st.subheader('Pre-Processed Input:')
st.write(df_num)
# -- Reads the saved normalization model
load_nor = pickle.load(open('normalization_ML1.pkl', 'rb'))
#Apply the normalization model to new data
x_new = load_nor.transform(df)
x_new = x_new[:1]
st.subheader('Normalization Input:')
st.write(x_new)
# -- Reads the saved classification model
load_LR = pickle.load(open('LR_ML1.pkl', 'rb'))
# Apply model for prediction
prediction = load_LR.predict(x_new)
prediction = prediction[:1]
st.subheader('Prediction:')
st.write(prediction)
|
the-stack_0_7172 | """Tests downloading and reading of the GO annotation file from NCBI Gene.
python test_NCBI_Entrez_annotations.py
"""
__copyright__ = "Copyright (C) 2016, DV Klopfenstein, H Tang. All rights reserved."
__author__ = "DV Klopfenstein"
import sys
from goatools.associations import get_assoc_ncbi_taxids
from collections import defaultdict
from goatools.test_data.genes_NCBI_9606_ProteinCoding import GeneID2nt as GeneID2nt_hsa
from goatools.test_data.genes_NCBI_7227_ProteinCoding import GeneID2nt as GeneID2nt_dme
def test_ncbi_gene2go(log=sys.stdout):
"""Return GO associations to Entrez GeneIDs. Download if necessary.
Example report generated with Feb 22, 2013 download of:
NCBI Gene tables and associations in gene2go
49672 items found in gene2go from NCBI's ftp server
taxid GOs GeneIDs Description
----- ------ ------- -----------
10090 16,807 18,971 all DNA items
7227 7,022 12,019 all DNA items
7227 6,956 10,590 76% GO coverage of 13,919 protein-coding genes
9606 16,299 18,680 all DNA items
9606 16,296 18,253 87% GO coverage of 20,913 protein-coding genes
"""
# Get associations for human(9606), mouse(10090), and fly(7227)
# (optional) multi-level dictionary separate associations by taxid
taxid2asscs = defaultdict(lambda: defaultdict(lambda: defaultdict(set)))
# Simple dictionary containing id2gos
id2gos = get_assoc_ncbi_taxids(taxids=[9606, 10090, 7227], taxid2asscs=taxid2asscs)
log.write(" {N} items found in gene2go from NCBI's ftp server\n".format(N=len(id2gos)))
taxid2pc = {9606:GeneID2nt_hsa, 7227:GeneID2nt_dme}
# Report findings
log.write(" taxid GOs GeneIDs Description\n")
log.write(" ----- ------ ------- -----------\n")
for taxid, asscs in taxid2asscs.items():
num_gene2gos_all = len(asscs['GeneID2GOs'])
num_go2genes_all = len(asscs['GO2GeneIDs'])
log.write(" {TAXID:>6} {N:>6,} {M:>7,} all DNA items\n".format(
TAXID=taxid, N=num_go2genes_all, M=num_gene2gos_all))
# Basic check to ensure gene2go was downloaded and data was returned.
assert num_gene2gos_all > 11000
assert num_go2genes_all > 6000
if taxid in taxid2pc.keys():
rpt_coverage(taxid, asscs, taxid2pc[taxid], log)
def rpt_coverage(taxid, asscs, pc2nt, log):
"""Calculate and report GO coverage on protein-coding genes.
Example report generated with Feb 22, 2013 download of:
NCBI Gene tables and associations in gene2go
taxid GOs GeneIDs Description
----- ------ ------- -----------
7227 6,956 10,590 76% GO coverage of 13,919 protein-coding genes
9606 16,296 18,253 87% GO coverage of 20,913 protein-coding genes
"""
# List of all protein-coding genes have GO terms associated with them
geneid2gos = asscs['GeneID2GOs']
pcgene_w_gos = set(geneid2gos.keys()).intersection(set(pc2nt.keys()))
num_pcgene_w_gos = len(pcgene_w_gos)
num_pc_genes = len(pc2nt)
perc_cov = 100.0*num_pcgene_w_gos/num_pc_genes
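    # e.g. for human (taxid 9606): 18,253 annotated of 20,913 coding genes ~ 87%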
# Get list of GOs associated with protein-coding genes
gos_pcgenes = set()
for geneid in pcgene_w_gos:
gos_pcgenes |= geneid2gos[geneid]
log.write(" {TAXID:>6} {N:>6,} {M:>7,} {COV:2.0f}% GO coverage of {TOT:,} protein-coding genes\n".format(
TAXID=taxid, N=len(gos_pcgenes), M=num_pcgene_w_gos, COV=perc_cov, TOT=num_pc_genes))
if __name__ == '__main__':
test_ncbi_gene2go()
|
the-stack_0_7173 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for CreateTensorboardExperiment
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_generated_aiplatform_v1_TensorboardService_CreateTensorboardExperiment_sync]
from google.cloud import aiplatform_v1
def sample_create_tensorboard_experiment():
# Create a client
client = aiplatform_v1.TensorboardServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.CreateTensorboardExperimentRequest(
parent="parent_value",
tensorboard_experiment_id="tensorboard_experiment_id_value",
)
# Make the request
response = client.create_tensorboard_experiment(request=request)
# Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_TensorboardService_CreateTensorboardExperiment_sync]
|
the-stack_0_7174 | # ==============================================================================
# Copyright 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import print_function
import pytest
import tensorflow as tf
import ngraph_bridge
# Test ngraph_bridge config options
def test_set_backend():
ngraph_bridge.enable()
backend_cpu = 'CPU'
backend_interpreter = 'INTERPRETER'
found_cpu = False
found_interpreter = False
# These will only print when running pytest with flag "-s"
print("Number of supported backends ", ngraph_bridge.backends_len())
supported_backends = ngraph_bridge.list_backends()
print(" ****** Supported Backends ****** ")
for backend_name in supported_backends:
print(backend_name)
if backend_name == backend_cpu:
found_cpu = True
if backend_name == backend_interpreter:
found_interpreter = True
print(" ******************************** ")
assert (found_cpu and found_interpreter) == True
# Create Graph
val = tf.placeholder(tf.float32)
out1 = tf.abs(val)
out2 = tf.abs(out1)
# set INTERPRETER backend
assert ngraph_bridge.is_supported_backend(backend_interpreter) == True
ngraph_bridge.set_backend(backend_interpreter)
currently_set_backend = ngraph_bridge.get_currently_set_backend_name()
assert currently_set_backend == backend_interpreter
# create new session to execute graph
# If you want to re-confirm which backend the graph was executed
# currently the only way is to enable NGRAPH_TF_VLOG_LEVEL=5
with tf.Session() as sess:
sess.run((out2,), feed_dict={val: ((1.4, -0.5, -1))})
currently_set_backend = ngraph_bridge.get_currently_set_backend_name()
assert currently_set_backend == backend_interpreter
# set CPU backend
assert ngraph_bridge.is_supported_backend(backend_cpu) == True
ngraph_bridge.set_backend(backend_cpu)
currently_set_backend = ngraph_bridge.get_currently_set_backend_name()
assert currently_set_backend == backend_cpu
# create new session to execute graph
with tf.Session() as sess:
sess.run((out2,), feed_dict={val: ((1.4, -0.5, -1))})
currently_set_backend = ngraph_bridge.get_currently_set_backend_name()
assert currently_set_backend == backend_cpu
|
the-stack_0_7177 | from collections import namedtuple
from itertools import chain
from django.conf.urls import url
from django.contrib.auth.models import User
from django.forms import ValidationError
from django.http import Http404, HttpResponse, HttpResponseNotFound
from django.urls import reverse
from django.utils.translation import ugettext_noop
from memoized import memoized_property
from tastypie import fields, http
from tastypie.authorization import ReadOnlyAuthorization
from tastypie.bundle import Bundle
from tastypie.exceptions import BadRequest, ImmediateHttpResponse, NotFound
from tastypie.http import HttpForbidden, HttpUnauthorized
from tastypie.resources import ModelResource, Resource, convert_post_to_patch
from tastypie.utils import dict_strip_unicode_keys
from casexml.apps.stock.models import StockTransaction
from corehq.apps.api.resources.serializers import ListToSingleObjectSerializer
from corehq.apps.sms.models import MessagingEvent
from phonelog.models import DeviceReportEntry
from corehq import privileges
from corehq.apps.accounting.utils import domain_has_privilege
from corehq.apps.api.odata.serializers import (
ODataCaseSerializer,
ODataFormSerializer,
)
from corehq.apps.api.odata.utils import record_feed_access_in_datadog
from corehq.apps.api.odata.views import (
add_odata_headers,
raise_odata_permissions_issues,
)
from corehq.apps.api.resources.auth import (
AdminAuthentication,
ODataAuthentication,
RequirePermissionAuthentication,
LoginAuthentication)
from corehq.apps.api.resources.meta import CustomResourceMeta
from corehq.apps.api.util import get_obj
from corehq.apps.app_manager.models import Application
from corehq.apps.domain.auth import HQApiKeyAuthentication
from corehq.apps.domain.forms import clean_password
from corehq.apps.domain.models import Domain
from corehq.apps.es import UserES
from corehq.apps.export.esaccessors import (
get_case_export_base_query,
get_form_export_base_query,
)
from corehq.apps.export.models import CaseExportInstance, FormExportInstance
from corehq.apps.groups.models import Group
from corehq.apps.locations.permissions import location_safe
from corehq.apps.reports.analytics.esaccessors import (
get_case_types_for_domain_es,
)
from corehq.apps.reports.standard.cases.utils import (
query_location_restricted_cases,
query_location_restricted_forms,
)
from corehq.apps.sms.util import strip_plus
from corehq.apps.userreports.columns import UCRExpandDatabaseSubcolumn
from corehq.apps.userreports.models import (
ReportConfiguration,
StaticReportConfiguration,
report_config_id_is_static,
)
from corehq.apps.userreports.reports.data_source import (
ConfigurableReportDataSource,
)
from corehq.apps.userreports.reports.view import (
get_filter_values,
query_dict_to_dict,
)
from corehq.apps.users.dbaccessors.all_commcare_users import (
get_all_user_id_username_pairs_by_domain,
)
from corehq.apps.users.models import (
CommCareUser,
CouchUser,
Permissions,
UserRole,
WebUser,
)
from corehq.apps.users.util import raw_username
from corehq.const import USER_CHANGE_VIA_API
from corehq.util import get_document_or_404
from corehq.util.couch import DocumentNotFound, get_document_or_not_found
from corehq.util.model_log import ModelAction, log_model_change
from corehq.util.timer import TimingContext
from . import (
CouchResourceMixin,
DomainSpecificResourceMixin,
HqBaseResource,
v0_1,
v0_4,
CorsResourceMixin)
from .pagination import DoesNothingPaginator, NoCountingPaginator
MOCK_BULK_USER_ES = None
def user_es_call(domain, q, fields, size, start_at):
query = (UserES()
.domain(domain)
.fields(fields)
.size(size)
.start(start_at))
if q is not None:
query.set_query({"query_string": {"query": q}})
return query.run().hits
def _set_role_for_bundle(kwargs, bundle):
# check for roles associated with the domain
domain_roles = UserRole.by_domain_and_name(kwargs['domain'], bundle.data.get('role'))
if domain_roles:
qualified_role_id = domain_roles[0].get_qualified_id()
bundle.obj.set_role(kwargs['domain'], qualified_role_id)
else:
# check for preset roles and now create them for the domain
permission_preset_name = UserRole.get_preset_permission_by_name(bundle.data.get('role'))
if permission_preset_name:
bundle.obj.set_role(kwargs['domain'], permission_preset_name)
class BulkUserResource(HqBaseResource, DomainSpecificResourceMixin):
"""
A read-only user data resource based on elasticsearch.
Supported Params: limit offset q fields
"""
type = "bulk-user"
id = fields.CharField(attribute='id', readonly=True, unique=True)
email = fields.CharField(attribute='email')
username = fields.CharField(attribute='username', unique=True)
first_name = fields.CharField(attribute='first_name', null=True)
last_name = fields.CharField(attribute='last_name', null=True)
phone_numbers = fields.ListField(attribute='phone_numbers', null=True)
@staticmethod
def to_obj(user):
'''
Takes a flat dict and returns an object
'''
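        # e.g. to_obj({'_id': 'abc', 'username': 'jdoe'}).username == 'jdoe'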
if '_id' in user:
user['id'] = user.pop('_id')
return namedtuple('user', list(user))(**user)
class Meta(CustomResourceMeta):
authentication = RequirePermissionAuthentication(Permissions.edit_commcare_users)
list_allowed_methods = ['get']
detail_allowed_methods = ['get']
object_class = object
resource_name = 'bulk-user'
def dehydrate(self, bundle):
fields = bundle.request.GET.getlist('fields')
data = {}
if not fields:
return bundle
for field in fields:
data[field] = bundle.data[field]
bundle.data = data
return bundle
def obj_get_list(self, bundle, **kwargs):
request_fields = bundle.request.GET.getlist('fields')
for field in request_fields:
if field not in self.fields:
raise BadRequest('{0} is not a valid field'.format(field))
params = bundle.request.GET
param = lambda p: params.get(p, None)
fields = list(self.fields)
fields.remove('id')
fields.append('_id')
fn = MOCK_BULK_USER_ES or user_es_call
users = fn(
domain=kwargs['domain'],
q=param('q'),
fields=fields,
size=param('limit'),
start_at=param('offset'),
)
return list(map(self.to_obj, users))
def detail_uri_kwargs(self, bundle_or_obj):
return {
'pk': get_obj(bundle_or_obj).id
}
class CommCareUserResource(v0_1.CommCareUserResource):
class Meta(v0_1.CommCareUserResource.Meta):
detail_allowed_methods = ['get', 'put', 'delete']
list_allowed_methods = ['get', 'post']
always_return_data = True
def serialize(self, request, data, format, options=None):
if not isinstance(data, dict) and request.method == 'POST':
data = {'id': data.obj._id}
return self._meta.serializer.serialize(data, format, options)
def get_resource_uri(self, bundle_or_obj=None, url_name='api_dispatch_detail'):
if bundle_or_obj is None:
return super(CommCareUserResource, self).get_resource_uri(bundle_or_obj, url_name)
elif isinstance(bundle_or_obj, Bundle):
obj = bundle_or_obj.obj
else:
obj = bundle_or_obj
return reverse('api_dispatch_detail', kwargs=dict(resource_name=self._meta.resource_name,
domain=obj.domain,
api_name=self._meta.api_name,
pk=obj._id))
def _update(self, bundle):
should_save = False
for key, value in bundle.data.items():
if getattr(bundle.obj, key, None) != value:
if key == 'phone_numbers':
bundle.obj.phone_numbers = []
for idx, phone_number in enumerate(bundle.data.get('phone_numbers', [])):
bundle.obj.add_phone_number(strip_plus(phone_number))
if idx == 0:
bundle.obj.set_default_phone_number(strip_plus(phone_number))
should_save = True
elif key == 'groups':
bundle.obj.set_groups(bundle.data.get("groups", []))
should_save = True
elif key in ['email', 'username']:
setattr(bundle.obj, key, value.lower())
should_save = True
elif key == 'password':
domain = Domain.get_by_name(bundle.obj.domain)
if domain.strong_mobile_passwords:
try:
clean_password(bundle.data.get("password"))
except ValidationError as e:
if not hasattr(bundle.obj, 'errors'):
bundle.obj.errors = []
bundle.obj.errors.append(str(e))
return False
bundle.obj.set_password(bundle.data.get("password"))
should_save = True
elif key == 'user_data':
try:
bundle.obj.update_metadata(value)
except ValueError as e:
raise BadRequest(str(e))
else:
setattr(bundle.obj, key, value)
should_save = True
return should_save
def obj_create(self, bundle, request=None, **kwargs):
try:
bundle.obj = CommCareUser.create(
domain=kwargs['domain'],
username=bundle.data['username'].lower(),
password=bundle.data['password'],
created_by=bundle.request.user,
created_via=USER_CHANGE_VIA_API,
email=bundle.data.get('email', '').lower(),
)
del bundle.data['password']
self._update(bundle)
bundle.obj.save()
except Exception:
if bundle.obj._id:
bundle.obj.retire(deleted_by=request.user, deleted_via=USER_CHANGE_VIA_API)
try:
django_user = bundle.obj.get_django_user()
except User.DoesNotExist:
pass
else:
django_user.delete()
                log_model_change(request.user, django_user, message=f"deleted_via: {USER_CHANGE_VIA_API}",
                                 action=ModelAction.DELETE)
            # Re-raise so the API reports the failure instead of returning a half-created user.
            raise
return bundle
def obj_update(self, bundle, **kwargs):
bundle.obj = CommCareUser.get(kwargs['pk'])
assert bundle.obj.domain == kwargs['domain']
if self._update(bundle):
assert bundle.obj.domain == kwargs['domain']
bundle.obj.save()
return bundle
else:
raise BadRequest(''.join(chain.from_iterable(bundle.obj.errors)))
def obj_delete(self, bundle, **kwargs):
user = CommCareUser.get(kwargs['pk'])
if user:
user.retire(deleted_by=bundle.request.user, deleted_via=USER_CHANGE_VIA_API)
return ImmediateHttpResponse(response=http.HttpAccepted())
class WebUserResource(v0_1.WebUserResource):
class Meta(v0_1.WebUserResource.Meta):
detail_allowed_methods = ['get', 'put', 'delete']
list_allowed_methods = ['get', 'post']
always_return_data = True
def serialize(self, request, data, format, options=None):
if not isinstance(data, dict) and request.method == 'POST':
data = {'id': data.obj._id}
return self._meta.serializer.serialize(data, format, options)
def dispatch(self, request_type, request, **kwargs):
"""
Override dispatch to check for proper params for user create : role and admin permissions
"""
if request.method == 'POST':
details = self._meta.serializer.deserialize(request.body)
if details.get('is_admin', False):
if self._admin_assigned_another_role(details):
raise BadRequest("An admin can have only one role : Admin")
else:
if not details.get('role', None):
raise BadRequest("Please assign role for non admin user")
elif self._invalid_user_role(request, details):
raise BadRequest("Invalid User Role %s" % details.get('role', None))
return super(WebUserResource, self).dispatch(request_type, request, **kwargs)
def get_resource_uri(self, bundle_or_obj=None, url_name='api_dispatch_detail'):
if isinstance(bundle_or_obj, Bundle):
domain = bundle_or_obj.request.domain
obj = bundle_or_obj.obj
elif bundle_or_obj is None:
return None
return reverse('api_dispatch_detail', kwargs=dict(resource_name=self._meta.resource_name,
domain=domain,
api_name=self._meta.api_name,
pk=obj._id))
def _update(self, bundle):
should_save = False
for key, value in bundle.data.items():
if getattr(bundle.obj, key, None) != value:
if key == 'phone_numbers':
bundle.obj.phone_numbers = []
for idx, phone_number in enumerate(bundle.data.get('phone_numbers', [])):
bundle.obj.add_phone_number(strip_plus(phone_number))
if idx == 0:
bundle.obj.set_default_phone_number(strip_plus(phone_number))
should_save = True
elif key in ['email', 'username']:
setattr(bundle.obj, key, value.lower())
should_save = True
else:
setattr(bundle.obj, key, value)
should_save = True
return should_save
def obj_create(self, bundle, request=None, **kwargs):
try:
self._meta.domain = kwargs['domain']
bundle.obj = WebUser.create(
domain=kwargs['domain'],
username=bundle.data['username'].lower(),
password=bundle.data['password'],
created_by=bundle.request.user,
created_via=USER_CHANGE_VIA_API,
email=bundle.data.get('email', '').lower(),
is_admin=bundle.data.get('is_admin', False)
)
del bundle.data['password']
self._update(bundle)
# is_admin takes priority over role
if not bundle.obj.is_admin and bundle.data.get('role'):
_set_role_for_bundle(kwargs, bundle)
bundle.obj.save()
except Exception:
            bundle.obj.delete()
            # Re-raise so the API reports the failure instead of returning a half-created user.
            raise
return bundle
def obj_update(self, bundle, **kwargs):
bundle.obj = WebUser.get(kwargs['pk'])
assert kwargs['domain'] in bundle.obj.domains
if self._update(bundle):
assert kwargs['domain'] in bundle.obj.domains
bundle.obj.save()
return bundle
def _invalid_user_role(self, request, details):
return details.get('role') not in UserRole.preset_and_domain_role_names(request.domain)
def _admin_assigned_another_role(self, details):
# default value Admin since that will be assigned later anyway since is_admin is True
return details.get('role', 'Admin') != 'Admin'
class AdminWebUserResource(v0_1.UserResource):
domains = fields.ListField(attribute='domains')
def obj_get(self, bundle, **kwargs):
return WebUser.get(kwargs['pk'])
def obj_get_list(self, bundle, **kwargs):
if 'username' in bundle.request.GET:
return [WebUser.get_by_username(bundle.request.GET['username'])]
return [WebUser.wrap(u) for u in UserES().web_users().run().hits]
class Meta(WebUserResource.Meta):
authentication = AdminAuthentication()
detail_allowed_methods = ['get']
list_allowed_methods = ['get']
class GroupResource(v0_4.GroupResource):
class Meta(v0_4.GroupResource.Meta):
detail_allowed_methods = ['get', 'put', 'delete']
list_allowed_methods = ['get', 'post', 'patch']
always_return_data = True
def serialize(self, request, data, format, options=None):
if not isinstance(data, dict):
if 'error_message' in data.data:
data = {'error_message': data.data['error_message']}
elif request.method == 'POST':
data = {'id': data.obj._id}
return self._meta.serializer.serialize(data, format, options)
def patch_list(self, request=None, **kwargs):
"""
Exactly copied from https://github.com/toastdriven/django-tastypie/blob/v0.9.14/tastypie/resources.py#L1466
(BSD licensed) and modified to pass the kwargs to `obj_create` and support only create method
"""
request = convert_post_to_patch(request)
deserialized = self.deserialize(request, request.body, format=request.META.get('CONTENT_TYPE', 'application/json'))
collection_name = self._meta.collection_name
if collection_name not in deserialized:
raise BadRequest("Invalid data sent: missing '%s'" % collection_name)
if len(deserialized[collection_name]) and 'put' not in self._meta.detail_allowed_methods:
raise ImmediateHttpResponse(response=http.HttpMethodNotAllowed())
bundles_seen = []
status = http.HttpAccepted
for data in deserialized[collection_name]:
data = self.alter_deserialized_detail_data(request, data)
bundle = self.build_bundle(data=dict_strip_unicode_keys(data), request=request)
try:
self.obj_create(bundle=bundle, **self.remove_api_resource_names(kwargs))
except AssertionError as e:
status = http.HttpBadRequest
bundle.data['_id'] = str(e)
bundles_seen.append(bundle)
to_be_serialized = [bundle.data['_id'] for bundle in bundles_seen]
return self.create_response(request, to_be_serialized, response_class=status)
def post_list(self, request, **kwargs):
"""
Exactly copied from https://github.com/toastdriven/django-tastypie/blob/v0.9.14/tastypie/resources.py#L1314
(BSD licensed) and modified to catch Exception and not returning traceback
"""
deserialized = self.deserialize(request, request.body, format=request.META.get('CONTENT_TYPE', 'application/json'))
deserialized = self.alter_deserialized_detail_data(request, deserialized)
bundle = self.build_bundle(data=dict_strip_unicode_keys(deserialized), request=request)
try:
updated_bundle = self.obj_create(bundle, **self.remove_api_resource_names(kwargs))
location = self.get_resource_uri(updated_bundle)
if not self._meta.always_return_data:
return http.HttpCreated(location=location)
else:
updated_bundle = self.full_dehydrate(updated_bundle)
updated_bundle = self.alter_detail_data_to_serialize(request, updated_bundle)
return self.create_response(request, updated_bundle, response_class=http.HttpCreated, location=location)
except AssertionError as e:
bundle.data['error_message'] = str(e)
return self.create_response(request, bundle, response_class=http.HttpBadRequest)
def _update(self, bundle):
should_save = False
for key, value in bundle.data.items():
if key == 'name' and getattr(bundle.obj, key, None) != value:
if not Group.by_name(bundle.obj.domain, value):
setattr(bundle.obj, key, value or '')
should_save = True
else:
raise Exception("A group with this name already exists")
if key == 'users' and getattr(bundle.obj, key, None) != value:
users_to_add = set(value) - set(bundle.obj.users)
users_to_remove = set(bundle.obj.users) - set(value)
for user in users_to_add:
bundle.obj.add_user(user)
should_save = True
for user in users_to_remove:
bundle.obj.remove_user(user)
should_save = True
elif getattr(bundle.obj, key, None) != value:
setattr(bundle.obj, key, value)
should_save = True
return should_save
def get_resource_uri(self, bundle_or_obj=None, url_name='api_dispatch_detail'):
if bundle_or_obj is None:
return super(GroupResource, self).get_resource_uri(bundle_or_obj, url_name)
elif isinstance(bundle_or_obj, Bundle):
obj = bundle_or_obj.obj
else:
obj = bundle_or_obj
return self._get_resource_uri(obj)
def _get_resource_uri(self, obj):
# This function is called up to 1000 times per request
# so build url from a known string template
# to avoid calling the expensive `reverse` function each time
return self._get_resource_uri_template.format(domain=obj.domain, pk=obj._id)
@memoized_property
def _get_resource_uri_template(self):
"""Returns the literal string "/a/{domain}/api/v0.5/group/{pk}/" in a DRY way"""
return reverse('api_dispatch_detail', kwargs=dict(
resource_name=self._meta.resource_name,
api_name=self._meta.api_name,
domain='__domain__',
pk='__pk__')).replace('__pk__', '{pk}').replace('__domain__', '{domain}')
def obj_create(self, bundle, request=None, **kwargs):
if not Group.by_name(kwargs['domain'], bundle.data.get("name")):
bundle.obj = Group(bundle.data)
bundle.obj.name = bundle.obj.name or ''
bundle.obj.domain = kwargs['domain']
bundle.obj.save()
for user in bundle.obj.users:
CommCareUser.get(user).set_groups([bundle.obj._id])
else:
raise AssertionError("A group with name %s already exists" % bundle.data.get("name"))
return bundle
def obj_update(self, bundle, **kwargs):
bundle.obj = Group.get(kwargs['pk'])
assert bundle.obj.domain == kwargs['domain']
if self._update(bundle):
assert bundle.obj.domain == kwargs['domain']
bundle.obj.save()
return bundle
def obj_delete(self, bundle, **kwargs):
group = self.obj_get(bundle, **kwargs)
group.soft_delete()
return bundle
class DomainAuthorization(ReadOnlyAuthorization):
def __init__(self, domain_key='domain', *args, **kwargs):
self.domain_key = domain_key
def read_list(self, object_list, bundle):
return object_list.filter(**{self.domain_key: bundle.request.domain})
class DeviceReportResource(HqBaseResource, ModelResource):
class Meta(object):
queryset = DeviceReportEntry.objects.all()
list_allowed_methods = ['get']
detail_allowed_methods = ['get']
resource_name = 'device-log'
authentication = RequirePermissionAuthentication(Permissions.edit_data)
authorization = DomainAuthorization()
paginator_class = NoCountingPaginator
filtering = {
# this is needed for the domain filtering but any values passed in via the URL get overridden
"domain": ('exact',),
"date": ('exact', 'gt', 'gte', 'lt', 'lte', 'range'),
"user_id": ('exact',),
"username": ('exact',),
"type": ('exact',),
"xform_id": ('exact',),
"device_id": ('exact',),
}
class StockTransactionResource(HqBaseResource, ModelResource):
class Meta(object):
queryset = StockTransaction.objects.all()
list_allowed_methods = ['get']
detail_allowed_methods = ['get']
resource_name = 'stock_transaction'
authentication = RequirePermissionAuthentication(Permissions.view_reports)
paginator_class = NoCountingPaginator
authorization = DomainAuthorization(domain_key='report__domain')
filtering = {
"case_id": ('exact',),
"section_id": ('exact'),
}
fields = ['case_id', 'product_id', 'type', 'section_id', 'quantity', 'stock_on_hand']
include_resource_uri = False
def build_filters(self, filters=None):
orm_filters = super(StockTransactionResource, self).build_filters(filters)
if 'start_date' in filters:
orm_filters['report__date__gte'] = filters['start_date']
if 'end_date' in filters:
orm_filters['report__date__lte'] = filters['end_date']
return orm_filters
def dehydrate(self, bundle):
bundle.data['product_name'] = bundle.obj.sql_product.name
bundle.data['transaction_date'] = bundle.obj.report.date
return bundle
ConfigurableReportData = namedtuple("ConfigurableReportData", [
"data", "columns", "id", "domain", "total_records", "get_params", "next_page"
])
class ConfigurableReportDataResource(HqBaseResource, DomainSpecificResourceMixin):
"""
A resource that replicates the behavior of the ajax part of the
ConfigurableReportView view.
"""
data = fields.ListField(attribute="data", readonly=True)
columns = fields.ListField(attribute="columns", readonly=True)
total_records = fields.IntegerField(attribute="total_records", readonly=True)
next_page = fields.CharField(attribute="next_page", readonly=True)
LIMIT_DEFAULT = 50
LIMIT_MAX = 50
def _get_start_param(self, bundle):
try:
start = int(bundle.request.GET.get('offset', 0))
if start < 0:
raise ValueError
except (ValueError, TypeError):
raise BadRequest("start must be a positive integer.")
return start
def _get_limit_param(self, bundle):
try:
limit = int(bundle.request.GET.get('limit', self.LIMIT_DEFAULT))
if limit < 0:
raise ValueError
except (ValueError, TypeError):
raise BadRequest("limit must be a positive integer.")
if limit > self.LIMIT_MAX:
raise BadRequest("Limit may not exceed {}.".format(self.LIMIT_MAX))
return limit
def _get_next_page(self, domain, id_, start, limit, total_records, get_query_dict):
if total_records > start + limit:
start += limit
new_get_params = get_query_dict.copy()
new_get_params["offset"] = start
# limit has not changed, but it may not have been present in get params before.
new_get_params["limit"] = limit
return reverse('api_dispatch_detail', kwargs=dict(
api_name=self._meta.api_name,
resource_name=self._meta.resource_name,
domain=domain,
pk=id_,
)) + "?" + new_get_params.urlencode()
else:
return ""
def _get_report_data(self, report_config, domain, start, limit, get_params):
report = ConfigurableReportDataSource.from_spec(report_config, include_prefilters=True)
string_type_params = [
filter.name
for filter in report_config.ui_filters
if getattr(filter, 'datatype', 'string') == "string"
]
filter_values = get_filter_values(
report_config.ui_filters,
query_dict_to_dict(get_params, domain, string_type_params)
)
report.set_filter_values(filter_values)
page = list(report.get_data(start=start, limit=limit))
columns = []
for column in report.columns:
simple_column = {
"header": column.header,
"slug": column.slug,
}
if isinstance(column, UCRExpandDatabaseSubcolumn):
simple_column['expand_column_value'] = column.expand_value
columns.append(simple_column)
total_records = report.get_total_records()
return page, columns, total_records
def obj_get(self, bundle, **kwargs):
domain = kwargs['domain']
pk = kwargs['pk']
start = self._get_start_param(bundle)
limit = self._get_limit_param(bundle)
report_config = self._get_report_configuration(pk, domain)
page, columns, total_records = self._get_report_data(
report_config, domain, start, limit, bundle.request.GET)
return ConfigurableReportData(
data=page,
columns=columns,
total_records=total_records,
id=report_config._id,
domain=domain,
get_params=bundle.request.GET,
next_page=self._get_next_page(
domain,
report_config._id,
start,
limit,
total_records,
bundle.request.GET,
)
)
def _get_report_configuration(self, id_, domain):
"""
Fetch the required ReportConfiguration object
:param id_: The id of the ReportConfiguration
:param domain: The domain of the ReportConfiguration
:return: A ReportConfiguration
"""
try:
if report_config_id_is_static(id_):
return StaticReportConfiguration.by_id(id_, domain=domain)
else:
return get_document_or_not_found(ReportConfiguration, domain, id_)
except DocumentNotFound:
raise NotFound
def detail_uri_kwargs(self, bundle_or_obj):
return {
'domain': get_obj(bundle_or_obj).domain,
'pk': get_obj(bundle_or_obj).id,
}
def get_resource_uri(self, bundle_or_obj=None, url_name='api_dispatch_list'):
uri = super(ConfigurableReportDataResource, self).get_resource_uri(bundle_or_obj, url_name)
if bundle_or_obj is not None and uri:
get_params = get_obj(bundle_or_obj).get_params.copy()
if "offset" not in get_params:
get_params["offset"] = 0
if "limit" not in get_params:
get_params["limit"] = self.LIMIT_DEFAULT
uri += "?{}".format(get_params.urlencode())
return uri
class Meta(CustomResourceMeta):
authentication = RequirePermissionAuthentication(Permissions.view_reports, allow_session_auth=True)
list_allowed_methods = []
detail_allowed_methods = ["get"]
class SimpleReportConfigurationResource(CouchResourceMixin, HqBaseResource, DomainSpecificResourceMixin):
id = fields.CharField(attribute='get_id', readonly=True, unique=True)
title = fields.CharField(readonly=True, attribute="title", null=True)
filters = fields.ListField(readonly=True)
columns = fields.ListField(readonly=True)
def dehydrate_filters(self, bundle):
obj_filters = bundle.obj.filters
return [{
"type": f["type"],
"datatype": f["datatype"],
"slug": f["slug"]
} for f in obj_filters]
def dehydrate_columns(self, bundle):
obj_columns = bundle.obj.columns
return [{
"column_id": c['column_id'],
"display": c['display'],
"type": c["type"],
} for c in obj_columns]
def obj_get(self, bundle, **kwargs):
domain = kwargs['domain']
pk = kwargs['pk']
try:
report_configuration = get_document_or_404(ReportConfiguration, domain, pk)
except Http404 as e:
raise NotFound(str(e))
return report_configuration
def obj_get_list(self, bundle, **kwargs):
domain = kwargs['domain']
return ReportConfiguration.by_domain(domain)
def detail_uri_kwargs(self, bundle_or_obj):
return {
'domain': get_obj(bundle_or_obj).domain,
'pk': get_obj(bundle_or_obj)._id,
}
class Meta(CustomResourceMeta):
list_allowed_methods = ["get"]
detail_allowed_methods = ["get"]
paginator_class = DoesNothingPaginator
UserDomain = namedtuple('UserDomain', 'domain_name project_name')
UserDomain.__new__.__defaults__ = ('', '')
class UserDomainsResource(CorsResourceMixin, Resource):
domain_name = fields.CharField(attribute='domain_name')
project_name = fields.CharField(attribute='project_name')
class Meta(object):
resource_name = 'user_domains'
authentication = LoginAuthentication(allow_session_auth=True)
object_class = UserDomain
include_resource_uri = False
def dispatch_list(self, request, **kwargs):
try:
return super(UserDomainsResource, self).dispatch_list(request, **kwargs)
except ImmediateHttpResponse as immediate_http_response:
if isinstance(immediate_http_response.response, HttpUnauthorized):
raise ImmediateHttpResponse(
response=HttpUnauthorized(
content='Username or API Key is incorrect', content_type='text/plain'
)
)
else:
raise
def obj_get_list(self, bundle, **kwargs):
return self.get_object_list(bundle.request)
def get_object_list(self, request):
couch_user = CouchUser.from_django_user(request.user)
results = []
for domain in couch_user.get_domains():
if not domain_has_privilege(domain, privileges.ZAPIER_INTEGRATION):
continue
domain_object = Domain.get_by_name(domain)
results.append(UserDomain(
domain_name=domain_object.name,
project_name=domain_object.hr_name or domain_object.name
))
return results
class IdentityResource(CorsResourceMixin, Resource):
id = fields.CharField(attribute='get_id', readonly=True)
username = fields.CharField(attribute='username', readonly=True)
first_name = fields.CharField(attribute='first_name', readonly=True)
last_name = fields.CharField(attribute='last_name', readonly=True)
email = fields.CharField(attribute='email', readonly=True)
def obj_get_list(self, bundle, **kwargs):
return [bundle.request.couch_user]
class Meta(object):
resource_name = 'identity'
authentication = LoginAuthentication()
serializer = ListToSingleObjectSerializer()
detail_allowed_methods = []
list_allowed_methods = ['get']
object_class = CouchUser
include_resource_uri = False
Form = namedtuple('Form', 'form_xmlns form_name')
Form.__new__.__defaults__ = ('', '')
class DomainForms(Resource):
"""
Returns: list of forms for a given domain with form name formatted for display in Zapier
"""
form_xmlns = fields.CharField(attribute='form_xmlns')
form_name = fields.CharField(attribute='form_name')
class Meta(object):
resource_name = 'domain_forms'
authentication = RequirePermissionAuthentication(Permissions.access_api)
object_class = Form
include_resource_uri = False
allowed_methods = ['get']
limit = 200
max_limit = 1000
def obj_get_list(self, bundle, **kwargs):
application_id = bundle.request.GET.get('application_id')
if not application_id:
raise NotFound('application_id parameter required')
results = []
application = Application.get(docid=application_id)
if not application:
return []
forms_objects = application.get_forms(bare=False)
for form_object in forms_objects:
form = form_object['form']
module = form_object['module']
form_name = '{} > {} > {}'.format(application.name, module.default_name(), form.default_name())
results.append(Form(form_xmlns=form.xmlns, form_name=form_name))
return results
# Zapier requires id and name; case_type has no obvious id, placeholder inserted instead.
CaseType = namedtuple('CaseType', 'case_type placeholder')
CaseType.__new__.__defaults__ = ('', '')
class DomainCases(Resource):
"""
Returns: list of case types for a domain
Note: only returns case types for which at least one case has been made
"""
placeholder = fields.CharField(attribute='placeholder')
case_type = fields.CharField(attribute='case_type')
class Meta(object):
resource_name = 'domain_cases'
authentication = RequirePermissionAuthentication(Permissions.access_api)
object_class = CaseType
include_resource_uri = False
allowed_methods = ['get']
limit = 100
max_limit = 1000
def obj_get_list(self, bundle, **kwargs):
domain = kwargs['domain']
case_types = get_case_types_for_domain_es(domain)
results = [CaseType(case_type=case_type) for case_type in case_types]
return results
UserInfo = namedtuple('UserInfo', 'user_id user_name')
UserInfo.__new__.__defaults__ = ('', '')
class DomainUsernames(Resource):
"""
Returns: list of usernames for a domain.
"""
user_id = fields.CharField(attribute='user_id')
user_name = fields.CharField(attribute='user_name')
class Meta(object):
resource_name = 'domain_usernames'
authentication = RequirePermissionAuthentication(Permissions.view_commcare_users)
object_class = User
include_resource_uri = False
allowed_methods = ['get']
def obj_get_list(self, bundle, **kwargs):
domain = kwargs['domain']
user_ids_username_pairs = get_all_user_id_username_pairs_by_domain(domain)
results = [UserInfo(user_id=user_pair[0], user_name=raw_username(user_pair[1]))
for user_pair in user_ids_username_pairs]
return results
class BaseODataResource(HqBaseResource, DomainSpecificResourceMixin):
config_id = None
table_id = None
def dispatch(self, request_type, request, **kwargs):
if not domain_has_privilege(request.domain, privileges.ODATA_FEED):
raise ImmediateHttpResponse(
response=HttpResponseNotFound('Feature flag not enabled.')
)
self.config_id = kwargs['config_id']
self.table_id = int(kwargs.get('table_id', 0))
with TimingContext() as timer:
response = super(BaseODataResource, self).dispatch(
request_type, request, **kwargs
)
record_feed_access_in_datadog(request, self.config_id, timer.duration, response)
return response
def create_response(self, request, data, response_class=HttpResponse,
**response_kwargs):
data['domain'] = request.domain
data['config_id'] = self.config_id
data['api_path'] = request.path
data['table_id'] = self.table_id
response = super(BaseODataResource, self).create_response(
request, data, response_class, **response_kwargs)
return add_odata_headers(response)
def detail_uri_kwargs(self, bundle_or_obj):
# Not sure why this is required but the feed 500s without it
return {
'pk': get_obj(bundle_or_obj)['_id']
}
def determine_format(self, request):
# Results should be sent as JSON
return 'application/json'
@location_safe
class ODataCaseResource(BaseODataResource):
def obj_get_list(self, bundle, domain, **kwargs):
config = get_document_or_404(CaseExportInstance, domain, self.config_id)
if raise_odata_permissions_issues(bundle.request.couch_user, domain, config):
raise ImmediateHttpResponse(
HttpForbidden(ugettext_noop(
"You do not have permission to view this feed."
))
)
query = get_case_export_base_query(domain, config.case_type)
for filter in config.get_filters():
query = query.filter(filter.to_es_filter())
if not bundle.request.couch_user.has_permission(
domain, 'access_all_locations'
):
query = query_location_restricted_cases(query, bundle.request)
return query
class Meta(v0_4.CommCareCaseResource.Meta):
authentication = ODataAuthentication()
resource_name = 'odata/cases'
serializer = ODataCaseSerializer()
limit = 2000
max_limit = 10000
def prepend_urls(self):
return [
url(r"^(?P<resource_name>{})/(?P<config_id>[\w\d_.-]+)/(?P<table_id>[\d]+)/feed".format(
self._meta.resource_name), self.wrap_view('dispatch_list')),
url(r"^(?P<resource_name>{})/(?P<config_id>[\w\d_.-]+)/feed".format(
self._meta.resource_name), self.wrap_view('dispatch_list')),
]
@location_safe
class ODataFormResource(BaseODataResource):
def obj_get_list(self, bundle, domain, **kwargs):
config = get_document_or_404(FormExportInstance, domain, self.config_id)
if raise_odata_permissions_issues(bundle.request.couch_user, domain, config):
raise ImmediateHttpResponse(
HttpForbidden(ugettext_noop(
"You do not have permission to view this feed."
))
)
query = get_form_export_base_query(domain, config.app_id, config.xmlns, include_errors=False)
for filter in config.get_filters():
query = query.filter(filter.to_es_filter())
if not bundle.request.couch_user.has_permission(
domain, 'access_all_locations'
):
query = query_location_restricted_forms(query, bundle.request)
return query
class Meta(v0_4.XFormInstanceResource.Meta):
authentication = ODataAuthentication()
resource_name = 'odata/forms'
serializer = ODataFormSerializer()
limit = 2000
max_limit = 10000
def prepend_urls(self):
return [
url(r"^(?P<resource_name>{})/(?P<config_id>[\w\d_.-]+)/(?P<table_id>[\d]+)/feed".format(
self._meta.resource_name), self.wrap_view('dispatch_list')),
url(r"^(?P<resource_name>{})/(?P<config_id>[\w\d_.-]+)/feed".format(
self._meta.resource_name), self.wrap_view('dispatch_list')),
]
class MessagingEventResource(HqBaseResource, ModelResource):
class Meta(object):
queryset = MessagingEvent.objects.all()
list_allowed_methods = ['get']
detail_allowed_methods = ['get']
resource_name = 'messaging-event'
authentication = RequirePermissionAuthentication(Permissions.edit_data)
authorization = DomainAuthorization()
paginator_class = NoCountingPaginator
filtering = {
# this is needed for the domain filtering but any values passed in via the URL get overridden
"domain": ('exact',),
"date": ('exact', 'gt', 'gte', 'lt', 'lte', 'range'),
"source": ('exact',),
"content_type": ('exact',),
"status": ('exact',),
}
ordering = [
'date',
]
|
the-stack_0_7178 | from shared import readAssets
def doExtract(args):
print("Larkstongue v0.0.1-alpha")
def readGfx():
readLine = line.strip("\n")
if len(readLine) > 0:
areaGfx.append(readLine)
def readGff():
readLine = line.strip("\n")
if len(readLine) > 0:
areaGff.append(readLine)
def readMap():
readLine = line.strip("\n")
if len(readLine) > 0:
areaMap.append(readLine)
def readSfx():
readLine = line.strip("\n")
if len(readLine) > 0:
encodedLine = ""
header = readLine[:8]
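            # Each note in the __sfx__ text is 5 hex digits (2 pitch, 1 waveform,
            # 1 volume, 1 effect); the loop below repacks those fields into a single
            # 16-bit value per note, emitted as 4 hex digits.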
for i in range(8, len(readLine), 5):
pitchHex = readLine[i : i + 2]
waveformHex = readLine[i + 2]
volumeHex = readLine[i + 3]
effectHex = readLine[i + 4]
pitchDec = int(pitchHex, 16)
pitchBinary = format(pitchDec, "06b")
waveformDec = int(waveformHex, 16)
waveformBinary = format(waveformDec, "04b")
instrumentBit = waveformBinary[0]
waveformBinary = waveformBinary[1:]
volumeDec = int(volumeHex, 16)
volumeBinary = format(volumeDec, "03b")
effectDec = int(effectHex, 16)
effectBinary = format(effectDec, "03b")
noteBinary = waveformBinary[1:] + pitchBinary + instrumentBit + effectBinary + volumeBinary + waveformBinary[0]
noteDec = int(noteBinary, 2)
noteHex = format(noteDec, "04x")
encodedLine = encodedLine + noteHex
encodedLine = encodedLine + header
areaSfx.append(encodedLine)
def readMusic():
readLine = line.strip("\n")
if len(readLine) > 0:
flagHex = readLine[1]
flagDec = int(flagHex, 16)
flagBinary = format(flagDec, "04b")
channelsHex = []
for i in range(3, len(readLine), 2):
channelsHex.append(readLine[i : i+2])
encodedBinary = []
for i in range(3, -1, -1):
encodedBinary.append(flagBinary[i])
encodedLine = ""
for i in range(0, 4):
channelDec = int(channelsHex[i], 16)
channelBinary = format(channelDec, "07b")
encodedBinary[i] = encodedBinary[i] + channelBinary
encodedDec = int(encodedBinary[i], 2)
encodedHex = format(encodedDec, "02x")
encodedLine = encodedLine + encodedHex
areaMusic.append(encodedLine)
def cropBitmap():
if len(areaGfx) == 0:
print("Bitmap not found on cart!")
quit()
cropScanline = ""
while len(cropScanline) < 128:
cropScanline += args.bgcolor
while areaGfx[0] == cropScanline:
areaGfx.pop(0)
while areaGfx[-1] == cropScanline:
areaGfx.pop(-1)
marginFound = False
for leftMargin in range(0, 127):
for y in range(0, len(areaGfx)):
if areaGfx[y][leftMargin] != args.bgcolor:
marginFound = True
break
if marginFound == True:
break
marginFound = False
for rightMargin in range(128, 0, -1):
for y in range(0, len(areaGfx)):
if areaGfx[y][rightMargin - 1] != args.bgcolor:
marginFound = True
break
if marginFound == True:
break
cropWidth = rightMargin - leftMargin
if cropWidth % 2 != 0:
if rightMargin < 128:
rightMargin += 1
else:
leftMargin -= 1
for i in range(0, len(areaGfx)):
areaGfx[i] = areaGfx[i][leftMargin : rightMargin]
def swapGfxNibbles():
for i in range(0, len(areaGfx)):
line = areaGfx[i]
swappedLine = ""
for j in range(0, len(line), 2):
swappedLine += line[j + 1] + line[j]
areaGfx[i] = swappedLine
def writeBitmap():
for line in areaGfx:
outputFile.write("bitmap=" + line + "\n")
def areaToString(areaID):
if areaID == "gfx":
readArea = areaGfx
areaLength = 16384
elif areaID == "gff":
readArea = areaGff
areaLength = 512
elif areaID == "map":
readArea = areaMap
areaLength = 8192
elif areaID == "sfx":
readArea = areaSfx
areaLength = 8704
elif areaID == "music":
readArea = areaMusic
areaLength = 512
outputString = ""
for line in readArea:
outputString += line
while len(outputString) < areaLength:
if readArea == areaMusic:
outputString += "40"
else:
outputString += "0"
return outputString
def writeSoundtrack():
fullString = ""
fullString += areaToString("music")
fullString += areaToString("sfx")
outputFile.write("data=" + fullString + "\n")
def writeAllData():
fullString = ""
fullString += areaToString("gfx")
fullString += areaToString("map")
fullString += areaToString("gff")
fullString += areaToString("music")
fullString += areaToString("sfx")
outputFile.write("data=" + fullString + "\n")
areaGfx = []
areaGff = []
areaMap = []
areaSfx = []
areaMusic = []
try:
file = open(args.input, "r")
except FileNotFoundError:
print(args.input + " not found!")
quit()
cartContent = file.readlines()
file.close()
if args.source == "bitmap":
acceptedBgColorInputs = "0123456789abcdef"
if len(args.bgcolor) != 1 or args.bgcolor not in acceptedBgColorInputs:
print("Error: Background color input must be a single hexadecimal digit in lowercase!")
quit()
readMode = 0
readModes = { 1: readGfx,
2: readGff,
3: readMap,
4: readSfx,
5: readMusic
}
for line in cartContent:
if len(line) > 1:
if line.startswith("__gfx__") and args.source in ["bitmap", "gfx", "all"]:
readMode = 1
elif line.startswith("__gff__") and args.source in ["gff", "all"]:
readMode = 2
elif line.startswith("__map__") and args.source in ["map", "all"]:
readMode = 3
elif line.startswith("__sfx__") and args.source in ["soundtrack", "sfx", "all"]:
readMode = 4
elif line.startswith("__music__") and args.source in ["soundtrack", "music", "all"]:
readMode = 5
elif line.startswith("__label__"):
readMode = 0
elif readMode != 0:
readModes[readMode]()
if args.source == "bitmap":
cropBitmap()
elif len(areaGfx) > 0:
swapGfxNibbles()
assetList = readAssets(args.output, False)
dupeFound = False
for asset in assetList:
if asset.name == args.assetname:
dupeFound = True
if dupeFound:
print("An asset named \"" + args.assetname + "\" already found in " + args.output + ", overwrite?")
while True:
userInput = input("(y/n): ")
if userInput == "y":
break
if userInput == "n":
print("Extract cancelled!")
quit()
assetList = list(filter(lambda a: a.name != args.assetname, assetList))
try:
outputFile = open(args.output, "w")
except PermissionError:
print("Error! Cannot write to " + args.output + " due to a permission error")
for a in assetList:
outputFile.write("name=" + a.name + "\n")
if a.bitmap != None:
for line in a.bitmap:
outputFile.write("bitmap=" + line + "\n")
if a.data != None:
outputFile.write("data=" + a.data + "\n")
outputFile.write("-\n")
outputFile.write("name=" + args.assetname + "\n")
if args.source == "bitmap":
writeBitmap()
elif args.source == "soundtrack":
writeSoundtrack()
elif args.source == "all":
writeAllData()
else:
outputString = areaToString(args.source)
outputFile.write("data=" + outputString + "\n")
outputFile.write("-\n")
    outputFile.close()
print("Asset extracted successfully!") |
the-stack_0_7179 | import logging
import pandas as pd
from bots import imps
from openbb_terminal.decorators import log_start_end
from openbb_terminal.economy import wsj_model
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def futures_coms_command():
"""Futures and commodities overview [Wall St. Journal]"""
# Debug user input
if imps.DEBUG:
logger.debug("econ-futures")
# Retrieve data
df = wsj_model.top_commodities()
# Check for argument
if df.empty:
raise Exception("No available data found")
df["Last Price"] = pd.to_numeric(df["Price"].astype(float))
df["Change"] = pd.to_numeric(df["Chg"].astype(float))
df["%Chg"] = pd.to_numeric(df["%Chg"].astype(float))
# Debug user output
if imps.DEBUG:
logger.debug(df.to_string())
formats = {
"Last Price": "${:.2f}",
"Change": "${:.2f}",
"%Chg": "<b>{:.2f}%</b>",
}
for col, value in formats.items():
df[col] = df[col].map(lambda x: value.format(x)) # pylint: disable=W0640
df["Change"] = df.apply(lambda x: f"{x['Change']} (<b>{x['%Chg']}</b>)", axis=1)
df = df.fillna("")
df.set_index(" ", inplace=True)
font_color = ["white"] * 2 + [
["#e4003a" if boolv else "#00ACFF" for boolv in df["%Chg"].str.contains("-")]
]
df = df.drop(columns=["Price", "Chg", "%Chg"])
fig = imps.plot_df(
df,
fig_size=(620, (40 + (40 * len(df.index)))),
col_width=[4, 2.4, 3],
tbl_header=imps.PLT_TBL_HEADER,
tbl_cells=imps.PLT_TBL_CELLS,
font=imps.PLT_TBL_FONT,
row_fill_color=imps.PLT_TBL_ROW_COLORS,
paper_bgcolor="rgba(0, 0, 0, 0)",
)
fig.update_traces(
cells=(
dict(
align=["center", "right"],
font=dict(color=font_color),
)
)
)
imagefile = imps.save_image("econ-futures.png", fig)
return {"title": "Economy: [WSJ] Futures/Commodities", "imagefile": imagefile}
|
the-stack_0_7180 | """Support for Tasmota lights."""
from hatasmota.light import (
LIGHT_TYPE_COLDWARM,
LIGHT_TYPE_NONE,
LIGHT_TYPE_RGB,
LIGHT_TYPE_RGBCW,
LIGHT_TYPE_RGBW,
)
from homeassistant.components import light
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_EFFECT,
ATTR_HS_COLOR,
ATTR_TRANSITION,
ATTR_WHITE_VALUE,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
SUPPORT_EFFECT,
SUPPORT_TRANSITION,
SUPPORT_WHITE_VALUE,
LightEntity,
)
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
import homeassistant.util.color as color_util
from .const import DATA_REMOVE_DISCOVER_COMPONENT, DOMAIN as TASMOTA_DOMAIN
from .discovery import TASMOTA_DISCOVERY_ENTITY_NEW
from .mixins import TasmotaAvailability, TasmotaDiscoveryUpdate
DEFAULT_BRIGHTNESS_MAX = 255
TASMOTA_BRIGHTNESS_MAX = 100
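# Home Assistant expresses brightness and white value on a 0..255 scale while
# Tasmota expects 0..100, so values are rescaled before sending; e.g. an HA
# brightness of 128 maps to round(128 / 255 * 100) = 50 on the device.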
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Tasmota light dynamically through discovery."""
@callback
def async_discover(tasmota_entity, discovery_hash):
"""Discover and add a Tasmota light."""
async_add_entities(
[TasmotaLight(tasmota_entity=tasmota_entity, discovery_hash=discovery_hash)]
)
hass.data[
DATA_REMOVE_DISCOVER_COMPONENT.format(light.DOMAIN)
] = async_dispatcher_connect(
hass,
TASMOTA_DISCOVERY_ENTITY_NEW.format(light.DOMAIN, TASMOTA_DOMAIN),
async_discover,
)
class TasmotaLight(
TasmotaAvailability,
TasmotaDiscoveryUpdate,
LightEntity,
):
"""Representation of a Tasmota light."""
def __init__(self, **kwds):
"""Initialize Tasmota light."""
self._state = False
self._supported_features = 0
self._brightness = None
self._color_temp = None
self._effect = None
self._hs = None
self._white_value = None
self._flash_times = None
super().__init__(
discovery_update=self.discovery_update,
**kwds,
)
self._setup_from_entity()
async def discovery_update(self, update, write_state=True):
"""Handle updated discovery message."""
await super().discovery_update(update, write_state=False)
self._setup_from_entity()
self.async_write_ha_state()
def _setup_from_entity(self):
"""(Re)Setup the entity."""
supported_features = 0
light_type = self._tasmota_entity.light_type
if light_type != LIGHT_TYPE_NONE:
supported_features |= SUPPORT_BRIGHTNESS
supported_features |= SUPPORT_TRANSITION
if light_type in [LIGHT_TYPE_COLDWARM, LIGHT_TYPE_RGBCW]:
supported_features |= SUPPORT_COLOR_TEMP
if light_type in [LIGHT_TYPE_RGB, LIGHT_TYPE_RGBW, LIGHT_TYPE_RGBCW]:
supported_features |= SUPPORT_COLOR
supported_features |= SUPPORT_EFFECT
if light_type in [LIGHT_TYPE_RGBW, LIGHT_TYPE_RGBCW]:
supported_features |= SUPPORT_WHITE_VALUE
self._supported_features = supported_features
@callback
def state_updated(self, state, **kwargs):
"""Handle state updates."""
self._state = state
attributes = kwargs.get("attributes")
if attributes:
if "brightness" in attributes:
brightness = float(attributes["brightness"])
percent_bright = brightness / TASMOTA_BRIGHTNESS_MAX
self._brightness = percent_bright * 255
if "color" in attributes:
color = attributes["color"]
self._hs = color_util.color_RGB_to_hs(*color)
if "color_temp" in attributes:
self._color_temp = attributes["color_temp"]
if "effect" in attributes:
self._effect = attributes["effect"]
if "white_value" in attributes:
white_value = float(attributes["white_value"])
percent_white = white_value / TASMOTA_BRIGHTNESS_MAX
self._white_value = percent_white * 255
self.async_write_ha_state()
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return self._brightness
@property
def color_temp(self):
"""Return the color temperature in mired."""
return self._color_temp
@property
def min_mireds(self):
"""Return the coldest color_temp that this light supports."""
return self._tasmota_entity.min_mireds
@property
def max_mireds(self):
"""Return the warmest color_temp that this light supports."""
return self._tasmota_entity.max_mireds
@property
def effect(self):
"""Return the current effect."""
return self._effect
@property
def effect_list(self):
"""Return the list of supported effects."""
return self._tasmota_entity.effect_list
@property
def hs_color(self):
"""Return the hs color value."""
return self._hs
@property
def white_value(self):
"""Return the white property."""
return self._white_value
@property
def is_on(self):
"""Return true if device is on."""
return self._state
@property
def supported_features(self):
"""Flag supported features."""
return self._supported_features
async def async_turn_on(self, **kwargs):
"""Turn the entity on."""
supported_features = self._supported_features
attributes = {}
if ATTR_HS_COLOR in kwargs and supported_features & SUPPORT_COLOR:
hs_color = kwargs[ATTR_HS_COLOR]
attributes["color"] = {}
rgb = color_util.color_hsv_to_RGB(hs_color[0], hs_color[1], 100)
attributes["color"] = [rgb[0], rgb[1], rgb[2]]
if ATTR_TRANSITION in kwargs:
attributes["transition"] = kwargs[ATTR_TRANSITION]
if ATTR_BRIGHTNESS in kwargs and supported_features & SUPPORT_BRIGHTNESS:
brightness_normalized = kwargs[ATTR_BRIGHTNESS] / DEFAULT_BRIGHTNESS_MAX
device_brightness = min(
round(brightness_normalized * TASMOTA_BRIGHTNESS_MAX),
TASMOTA_BRIGHTNESS_MAX,
)
# Make sure the brightness is not rounded down to 0
device_brightness = max(device_brightness, 1)
attributes["brightness"] = device_brightness
if ATTR_COLOR_TEMP in kwargs and supported_features & SUPPORT_COLOR_TEMP:
attributes["color_temp"] = int(kwargs[ATTR_COLOR_TEMP])
if ATTR_EFFECT in kwargs:
attributes["effect"] = kwargs[ATTR_EFFECT]
if ATTR_WHITE_VALUE in kwargs:
white_value_normalized = kwargs[ATTR_WHITE_VALUE] / DEFAULT_BRIGHTNESS_MAX
device_white_value = min(
round(white_value_normalized * TASMOTA_BRIGHTNESS_MAX),
TASMOTA_BRIGHTNESS_MAX,
)
attributes["white_value"] = device_white_value
self._tasmota_entity.set_state(True, attributes)
async def async_turn_off(self, **kwargs):
"""Turn the entity off."""
attributes = {"state": "OFF"}
if ATTR_TRANSITION in kwargs:
attributes["transition"] = kwargs[ATTR_TRANSITION]
self._tasmota_entity.set_state(False, attributes)
|
the-stack_0_7181 | #!/usr/bin/env python
# This is a helper used by `update-pdfjs` to update the Mustache template for
# serving PDFs with PDFJS with the local dev server.
import os
import sys
# Header to insert at the top of the generated PDF.js viewer template
FILE_HEADER = """
<!-- AUTO-GENERATED BY {}. DO NOT EDIT. -->
""".format(
sys.argv[0]
)
# Header to insert after the original `<title>` tag in the PDF viewer HTML
# mustache template.
#
# This header is responsible for:
#
# - Adding a `<base>` tag so that relative URLs in the pre-built viewer HTML
# resolve to the right URL.
# - Injecting custom PDF.js viewer configuration
# - Injecting the Hypothesis client entry point and configuration
#
# The header needs to be inserted after the `<title>` tag so we can override it,
# but before any relative asset links which will be affected by the `<base>`
# tag.
#
HYPOTHESIS_HEADER = """
<!-- Begin Hypothesis modifications -->
<base href="/scripts/pdfjs/web/">
<title>via Hypothesis</title>
<!--
It's worth noting that this link tag is *not* currently used by the
Hypothesis client to determine the URL of this page. For consistency with
how these pages are served on via, however, we serve it with the PDF.js
viewer application.
-->
<link rel="canonical" href="{{{ documentUrl }}}"/>
<script>
window.DOCUMENT_URL = '{{{documentUrl}}}';
window.PDF_URL = '{{{ url }}}';
window.CLIENT_URL = '{{{clientUrl}}}'.replace('{current_host}', document.location.hostname);
</script>
<script src="/scripts/pdfjs-init.js"></script>
<!-- Configure Hypothesis client. -->
{{{hypothesisConfig}}}
<!-- End Hypothesis modifications -->
"""
def insert_after(str_, search_str, insert_str):
return str_.replace(search_str, search_str + insert_str)
input_file_path = sys.argv[1]
output_file_path = sys.argv[2]
input_file = open(input_file_path, "r")
output_file = open(output_file_path, "w")
base_dir = os.path.dirname(input_file_path)
viewer_html = input_file.read()
viewer_html = insert_after(viewer_html, "<!DOCTYPE html>", FILE_HEADER)
viewer_html = insert_after(
viewer_html, "</title>", HYPOTHESIS_HEADER.replace("$BASEDIR", base_dir)
)
output_file.write(viewer_html)
output_file.close()
input_file.close() |
the-stack_0_7182 | """ Simple Python class to access the JLR Remote Car API
https://github.com/ardevd/jlrpy
"""
from urllib.request import Request, build_opener
import json
import datetime
import calendar
import uuid
import time
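# Minimal usage sketch (credentials are placeholders; an active InControl account is required):
#   con = Connection('user@example.com', 'secret')
#   vehicle = con.vehicles[0]
#   print(vehicle.get_status())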
class Connection(object):
"""Connection to the JLR Remote Car API"""
def __init__(self,
email='',
password='',
device_id='', ):
"""Init the connection object
The email address and password associated with your Jaguar InControl account is required.
"""
self.email = email
if device_id:
self.device_id = device_id
else:
self.device_id = str(uuid.uuid4())
self.oauth = {
"grant_type": "password",
"username": email,
"password": password}
self.expiration = 0 # force credential refresh
self.connect()
self.vehicles = []
try:
for v in self.get_vehicles(self.head)['vehicles']:
self.vehicles.append(Vehicle(v, self))
except TypeError:
print("[-] No vehicles associated with this account")
def get(self, command, url, headers):
"""GET data from API"""
return self.post(command, url, headers, None)
def post(self, command, url, headers, data=None):
"""POST data to API"""
now = calendar.timegm(datetime.datetime.now().timetuple())
if now > self.expiration:
# Auth expired, reconnect
self.connect()
return self.__open("%s/%s" % (url, command), headers=headers, data=data)
def connect(self):
print("[*] Connecting...")
auth = self.__authenticate(data=self.oauth)
self.__register_auth(auth)
print("[*] 1/3 authenticated")
self.__setheader(auth['access_token'], auth['expires_in'])
self.__register_device(self.head)
print("[*] 2/3 device id registered")
self.__login_user(self.head)
print("[*] 3/3 user logged in, user id retrieved")
def __open(self, url, headers=None, data=None):
req = Request(url, headers=headers)
if data:
req.data = bytes(json.dumps(data), encoding="utf8")
opener = build_opener()
resp = opener.open(req)
charset = resp.info().get('charset', 'utf-8')
return json.loads(resp.read().decode(charset))
def __register_auth(self, auth):
self.access_token = auth['access_token']
now = calendar.timegm(datetime.datetime.now().timetuple())
self.expiration = now + int(auth['expires_in'])
self.auth_token = auth['authorization_token']
self.refresh_token = auth['refresh_token']
def __setheader(self, access_token, expiration=float('inf')):
"""Set HTTP header fields"""
self.head = {
"Authorization": "Bearer %s" % access_token,
"X-Device-Id": self.device_id,
"Content-Type": "application/json"}
def __authenticate(self, data=None):
"""Raw urlopen command to the auth url"""
url = "https://jlp-ifas.wirelesscar.net/ifas/jlr/tokens"
auth_headers = {
"Authorization": "Basic YXM6YXNwYXNz",
"Content-Type": "application/json",
"X-Device-Id": self.device_id}
req = Request(url, headers=auth_headers)
# Convert data to json
req.data = bytes(json.dumps(data), encoding="utf8")
opener = build_opener()
resp = opener.open(req)
charset = resp.info().get('charset', 'utf-8')
return json.loads(resp.read().decode(charset))
def __register_device(self, headers=None):
"""Register the device Id"""
url = "https://jlp-ifop.wirelesscar.net/ifop/jlr/users/%s/clients" % self.email
data = {
"access_token": self.access_token,
"authorization_token": self.auth_token,
"expires_in": "86400",
"deviceID": self.device_id
}
req = Request(url, headers=headers)
req.data = bytes(json.dumps(data), encoding="utf8")
opener = build_opener()
resp = opener.open(req)
# TODO: Check for response code
def __login_user(self, headers=None):
"""Login the user"""
url = "https://jlp-ifoa.wirelesscar.net/if9/jlr/users?loginName=%s" % self.email
user_login_header = headers.copy()
user_login_header["Accept"] = "application/vnd.wirelesscar.ngtp.if9.User-v3+json"
req = Request(url, headers=user_login_header)
opener = build_opener()
resp = opener.open(req)
charset = resp.info().get('charset', 'utf-8')
"""Register user id"""
userdata = json.loads(resp.read().decode(charset))
self.user_id = userdata['userId']
return userdata
def get_vehicles(self, headers):
"""Get vehicles for user"""
url = "https://jlp-ifoa.wirelesscar.net/if9/jlr/users/%s/vehicles?primaryOnly=true" % self.user_id
req = Request(url, headers=headers)
opener = build_opener()
resp = opener.open(req)
charset = resp.info().get('charset', 'utf-8')
return json.loads(resp.read().decode(charset))
def get_user_info(self):
"""Get user information"""
return self.get(self.user_id, "https://jlp-ifoa.wirelesscar.net/if9/jlr/users", self.head)
class Vehicle(dict):
"""Vehicle class.
You can request data or send commands to vehicle. Consult the JLR API documentation for details
"""
def __init__(self, data, connection):
"""Initialize the vehicle class."""
super().__init__(data)
self.connection = connection
self.vin = data['vin']
# Authentiate to VHS
self.__authenticate_vhs()
def get_attributes(self):
"""Get vehicle attributes"""
headers = self.connection.head.copy()
headers["Accept"] = "application/vnd.ngtp.org.VehicleAttributes-v3+json"
result = self.get('attributes', headers)
return result
def get_status(self):
"""Get vehicle status"""
headers = self.connection.head.copy()
headers["Accept"] = "application/vnd.ngtp.org.if9.healthstatus-v2+json"
result = self.get('status', headers)
return result
def get_health_status(self):
"""Get vehicle health status"""
headers = self.connection.head.copy()
headers["Accept"] = "application/vnd.wirelesscar.ngtp.if9.ServiceStatus-v4+json"
headers["Content-Type"] = "application/vnd.wirelesscar.ngtp.if9.StartServiceConfiguration-v3+json; charset=utf-8"
return self.post('healthstatus', headers, self.vhs_data)
def get_departure_timers(self):
"""Get vehicle departure timers"""
headers = self.connection.head.copy()
headers["Accept"] = "application/vnd.wirelesscar.ngtp.if9.DepartureTimerSettings-v1+json"
return self.get("departuretimers", headers)
def get_wakeup_time(self):
"""Get configured wakeup time for vehicle"""
headers = self.connection.head.copy()
headers["Accept"] = "application/vnd.wirelesscar.ngtp.if9.VehicleWakeupTime-v2+json"
return self.get("wakeuptime", headers)
def get_subscription_packages(self):
"""Get vehicle status"""
result = self.get('subscriptionpackages', self.connection.head)
return result
def get_trips(self):
"""Get the last 1000 trips associated with vehicle"""
return self.get('trips?count=1000', self.connection.head)
def get_position(self):
"""Get current vehicle position"""
return self.get('position', self.connection.head)
def honk_blink(self):
"""Sound the horn and blink lights"""
headers = self.connection.head.copy()
headers["Accept"] = "application/vnd.wirelesscar.ngtp.if9.ServiceStatus-v4+json"
headers["Content-Type"] = "application/vnd.wirelesscar.ngtp.if9.StartServiceConfiguration-v3+json; charset=utf-8"
return self.post("honkBlink", headers, self.vhs_data)
def __authenticate_vhs(self):
"""Authenticate to vhs and get token"""
data = {
"serviceName": "VHS",
"pin": ""}
headers = self.connection.head.copy()
headers["Content-Type"] = "application/vnd.wirelesscar.ngtp.if9.AuthenticateRequest-v2+json; charset=utf-8"
vhs_auth_data = self.post("users/%s/authenticate" % self.connection.user_id, headers, data)
self.vhs_data = {
"token": vhs_auth_data['token']}
def post(self, command, headers, data):
"""Utility command to post data to VHS"""
return self.connection.post(command, 'https://jlp-ifoa.wirelesscar.net/if9/jlr/vehicles/%s' % self.vin,
headers, data)
def get(self, command, headers):
"""Utility command to get vehicle data from API"""
return self.connection.get(command, 'https://jlp-ifoa.wirelesscar.net/if9/jlr/vehicles/%s' % self.vin, headers)
|
the-stack_0_7183 | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""BC networks definition."""
import dataclasses
from typing import Optional, Tuple
from acme import specs
from acme.agents.jax import actor_core, actors
from acme.jax import networks as networks_lib
from acme.jax import utils
import gin
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
from jrl.utils.networks import procgen_networks
distributional = networks_lib.distributional
atari = networks_lib.atari
@dataclasses.dataclass
class BCNetworks:
"""Network and pure functions for the BC agent."""
policy_network: networks_lib.FeedForwardNetwork
log_prob: networks_lib.LogProbFn
sample: networks_lib.SampleFn
sample_eval: Optional[networks_lib.SampleFn] = None
img_encoder: Optional[networks_lib.FeedForwardNetwork] = None
def apply_policy_and_sample(
networks, eval_mode = False):
"""Returns a function that computes actions."""
sample_fn = networks.sample if not eval_mode else networks.sample_eval
if not sample_fn:
raise ValueError('sample function is not provided')
def apply_and_sample(params, key, obs):
return sample_fn(networks.policy_network.apply(params, obs), key)
return actor_core.batched_feed_forward_to_actor_core(apply_and_sample)
def apply_policy_and_sample_with_img_encoder(
networks, eval_mode = False):
"""Returns a function that computes actions."""
sample_fn = networks.sample if not eval_mode else networks.sample_eval
if not sample_fn:
raise ValueError('sample function is not provided')
def apply_and_sample(params, key, obs):
img = obs['state_image']
img_embedding = networks.img_encoder.apply(params[1], img)
x = dict(state_image=img_embedding, state_dense=obs['state_dense'])
return sample_fn(networks.policy_network.apply(params[0], x), key)
return actor_core.batched_feed_forward_to_actor_core(apply_and_sample)
w_init = hk.initializers.VarianceScaling(1.0, "fan_avg", "truncated_normal")
b_init = jnp.zeros
dist_w_init = hk.initializers.VarianceScaling(1.0, "fan_avg", "truncated_normal")
dist_b_init = jnp.zeros
@gin.register
def build_standard_actor_fn(
num_dimensions,
actor_hidden_layer_sizes = (256, 256, 256),):
def _actor_fn(obs):
# # for matching Ilya's codebase
# relu_orthogonal = hk.initializers.Orthogonal(scale=2.0**0.5)
# near_zero_orthogonal = hk.initializers.Orthogonal(1e-2)
# x = obs
# for hid_dim in actor_hidden_layer_sizes:
# x = hk.Linear(hid_dim, w_init=relu_orthogonal, b_init=jnp.zeros)(x)
# x = jax.nn.relu(x)
# dist = networks_lib.NormalTanhDistribution(
# num_dimensions,
# w_init=near_zero_orthogonal,
# b_init=jnp.zeros)(x)
# return dist
network = hk.Sequential([
hk.nets.MLP(
list(actor_hidden_layer_sizes),
# w_init=hk.initializers.VarianceScaling(1.0, 'fan_in', 'uniform'),
# w_init=hk.initializers.VarianceScaling(1.0, "fan_avg", "truncated_normal"),
w_init=w_init,
b_init=b_init,
activation=jax.nn.relu,
activate_final=True),
# networks_lib.NormalTanhDistribution(num_dimensions),
networks_lib.NormalTanhDistribution(
num_dimensions,
w_init=dist_w_init,
b_init=dist_b_init,
min_scale=1e-2,
),
])
return network(obs)
return _actor_fn
def make_networks(
spec,
build_actor_fn=build_standard_actor_fn,
img_encoder_fn=None,
):
"""Creates networks used by the agent."""
# Create dummy observations and actions to create network parameters.
dummy_action = utils.zeros_like(spec.actions)
dummy_obs = utils.zeros_like(spec.observations)
dummy_action = utils.add_batch_dim(dummy_action)
dummy_obs = utils.add_batch_dim(dummy_obs)
if isinstance(spec.actions, specs.DiscreteArray):
num_dimensions = spec.actions.num_values
# _actor_fn = procgen_networks.build_procgen_actor_fn(num_dimensions)
else:
num_dimensions = np.prod(spec.actions.shape, dtype=int)
_actor_fn = build_actor_fn(num_dimensions)
if img_encoder_fn is not None:
img_encoder = hk.without_apply_rng(
hk.transform(img_encoder_fn, apply_rng=True))
key = jax.random.PRNGKey(seed=42)
temp_encoder_params = img_encoder.init(key, dummy_obs['state_image'])
dummy_hidden = img_encoder.apply(temp_encoder_params, dummy_obs['state_image'])
img_encoder_network = networks_lib.FeedForwardNetwork(
lambda key: img_encoder.init(key, dummy_hidden), img_encoder.apply)
dummy_policy_input = dict(
state_image=dummy_hidden,
state_dense=dummy_obs['state_dense'],)
else:
img_encoder_fn = None
dummy_policy_input = dummy_obs
img_encoder_network = None
policy = hk.without_apply_rng(hk.transform(_actor_fn, apply_rng=True))
return BCNetworks(
policy_network=networks_lib.FeedForwardNetwork(
lambda key: policy.init(key, dummy_policy_input), policy.apply),
log_prob=lambda params, actions: params.log_prob(actions),
sample=lambda params, key: params.sample(seed=key),
sample_eval=lambda params, key: params.mode(),
img_encoder=img_encoder_network,)
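# Minimal usage sketch (assumes `spec` is the acme EnvironmentSpec of the task):
#   networks = make_networks(spec)
#   policy_actor_core = apply_policy_and_sample(networks, eval_mode=True)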
|
the-stack_0_7184 | # Copyright (c) 2013 Tencent Inc.
# All rights reserved.
#
# Author: LI Yi <[email protected]>
# Created: September 27, 2013
"""
This module defines cu_library, cu_binary and cu_test rules
for cuda development.
"""
from __future__ import absolute_import
import os
from blade import build_manager
from blade import build_rules
from blade import config
from blade.blade_util import var_to_list
from blade.cc_targets import CcTarget
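# Illustrative BUILD file usage (target and source names are hypothetical):
#   cu_library(
#       name='gpu_kernels',
#       srcs=['kernels.cu'],
#   )
#   cu_binary(
#       name='gpu_app',
#       srcs=['main.cu'],
#       deps=[':gpu_kernels'],
#   )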
class CuTarget(CcTarget):
"""This class is derived from CcTarget and is the base class
of cu_library, cu_binary etc.
"""
def __init__(self,
name,
target_type,
srcs,
deps,
warning,
defs,
incs,
extra_cppflags,
extra_linkflags,
blade,
kwargs):
srcs = var_to_list(srcs)
deps = var_to_list(deps)
extra_cppflags = var_to_list(extra_cppflags)
extra_linkflags = var_to_list(extra_linkflags)
CcTarget.__init__(self,
name,
target_type,
srcs,
deps,
None,
warning,
defs,
incs,
[], [],
extra_cppflags,
extra_linkflags,
blade,
kwargs)
def _get_cu_flags(self):
"""Return the nvcc flags according to the BUILD file and other configs. """
nvcc_flags = []
# Warnings
if self.data.get('warning', '') == 'no':
nvcc_flags.append('-w')
# Defs
defs = self.data.get('defs', [])
nvcc_flags += [('-D' + macro) for macro in defs]
# Optimize flags
if (self.blade.get_options().profile == 'release' or
self.data.get('always_optimize')):
nvcc_flags += self._get_optimize_flags()
# Incs
incs = self._get_incs_list()
return nvcc_flags, incs
def _cu_objects_rules(self):
env_name = self._env_name()
flags_from_option, incs_list = self._get_cu_flags()
incs_string = " -I".join(incs_list)
flags_string = " ".join(flags_from_option)
objs = []
for src in self.srcs:
obj = 'obj_%s' % self._var_name_of(src)
target_path = os.path.join(
self.build_path, self.path, '%s.objs' % self.name, src)
self._write_rule(
'%s = %s.NvccObject(NVCCFLAGS="-I%s %s", target="%s" + top_env["OBJSUFFIX"]'
', source="%s")' % (obj,
env_name,
incs_string,
flags_string,
target_path,
self._target_file_path(src)))
objs.append(obj)
self._write_rule('%s = [%s]' % (self._objs_name(), ','.join(objs)))
class CuLibrary(CuTarget):
"""This class is derived from CuTarget and generates the cu_library
rules according to user options.
"""
def __init__(self,
name,
srcs,
deps,
warning,
defs,
incs,
extra_cppflags,
extra_linkflags,
blade,
kwargs):
CuTarget.__init__(self,
name,
'cu_library',
srcs,
deps,
warning,
defs,
incs,
extra_cppflags,
extra_linkflags,
blade,
kwargs)
def scons_rules(self):
"""Generate scons rules according to user options. """
self._prepare_to_generate_rule()
self._cu_objects_rules()
self._cc_library()
def cu_library(name,
srcs=[],
deps=[],
warning='yes',
defs=[],
incs=[],
extra_cppflags=[],
extra_linkflags=[],
**kwargs):
target = CuLibrary(name,
srcs,
deps,
warning,
defs,
incs,
extra_cppflags,
extra_linkflags,
build_manager.instance,
kwargs)
build_manager.instance.register_target(target)
build_rules.register_function(cu_library)
class CuBinary(CuTarget):
"""This class is derived from CuTarget and generates the cu_binary
rules according to user options.
"""
def __init__(self,
name,
srcs,
deps,
warning,
defs,
incs,
extra_cppflags,
extra_linkflags,
blade,
kwargs):
CuTarget.__init__(self,
name,
'cu_binary',
srcs,
deps,
warning,
defs,
incs,
extra_cppflags,
extra_linkflags,
blade,
kwargs)
def _cc_binary(self):
env_name = self._env_name()
var_name = self._var_name()
(link_all_symbols_lib_list,
lib_str,
whole_link_flags) = self._get_static_deps_lib_list()
if whole_link_flags:
self._write_rule(
'%s.Append(LINKFLAGS=[%s])' % (env_name, whole_link_flags))
if self.data.get('export_dynamic'):
self._write_rule(
'%s.Append(LINKFLAGS="-rdynamic")' % env_name)
self._setup_link_flags()
self._write_rule('{0}.Replace('
'CC={0}["NVCC"], '
'CPP={0}["NVCC"], '
'CXX={0}["NVCC"], '
'LINK={0}["NVCC"])'.format(env_name))
self._write_rule('%s = %s.Program("%s", %s, %s)' % (
var_name,
env_name,
self._target_file_path(),
self._objs_name(),
lib_str))
self._write_rule('%s.Depends(%s, %s)' % (
env_name,
var_name,
self._objs_name()))
if link_all_symbols_lib_list:
self._write_rule('%s.Depends(%s, [%s])' % (
env_name, var_name, ', '.join(link_all_symbols_lib_list)))
# self._write_rule('%s.Append(LINKFLAGS=str(version_obj[0]))' % env_name)
self._write_rule('%s.Requires(%s, version_obj)' % (
env_name, var_name))
def scons_rules(self):
"""Generate scons rules according to user options. """
self._prepare_to_generate_rule()
self._cu_objects_rules()
self._cc_binary()
def cu_binary(name,
srcs=[],
deps=[],
warning='yes',
defs=[],
incs=[],
extra_cppflags=[],
extra_linkflags=[],
**kwargs):
target = CuBinary(name,
srcs,
deps,
warning,
defs,
incs,
extra_cppflags,
extra_linkflags,
build_manager.instance,
kwargs)
build_manager.instance.register_target(target)
build_rules.register_function(cu_binary)
class CuTest(CuBinary):
"""This class is derived from CuBinary and generates the cu_test
rules according to user options.
"""
def __init__(self,
name,
srcs,
deps,
warning,
defs,
incs,
extra_cppflags,
extra_linkflags,
testdata,
always_run,
exclusive,
blade,
kwargs):
# pylint: disable=too-many-locals
CuBinary.__init__(self,
name,
srcs,
deps,
warning,
defs,
incs,
extra_cppflags,
extra_linkflags,
blade,
kwargs)
self.type = 'cu_test'
self.data['testdata'] = var_to_list(testdata)
self.data['always_run'] = always_run
self.data['exclusive'] = exclusive
cc_test_config = config.get_section('cc_test_config')
gtest_lib = var_to_list(cc_test_config['gtest_libs'])
gtest_main_lib = var_to_list(cc_test_config['gtest_main_libs'])
# Hardcode deps rule to thirdparty gtest main lib.
self._add_hardcode_library(gtest_lib)
self._add_hardcode_library(gtest_main_lib)
def cu_test(name,
srcs=[],
deps=[],
warning='yes',
defs=[],
incs=[],
extra_cppflags=[],
extra_linkflags=[],
testdata=[],
always_run=False,
exclusive=False,
**kwargs):
target = CuTest(name,
srcs,
deps,
warning,
defs,
incs,
extra_cppflags,
extra_linkflags,
testdata,
always_run,
exclusive,
build_manager.instance,
kwargs)
build_manager.instance.register_target(target)
build_rules.register_function(cu_test)
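# --- Hedged usage sketch (illustrative only, not part of the original file) ---
# In a Blade BUILD file the rules registered above could be used roughly like
# this; all target names, sources and deps below are made up:
#
#     cu_library(
#         name='matrix_kernels',
#         srcs=['matmul.cu'],
#         defs=['USE_FAST_MATH'])
#
#     cu_binary(
#         name='matmul_tool',
#         srcs=['main.cu'],
#         deps=[':matrix_kernels'])
#
#     cu_test(
#         name='matmul_test',
#         srcs=['matmul_test.cu'],
#         deps=[':matrix_kernels'],
#         testdata=['testdata/small_matrix.txt'])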
|
the-stack_0_7186 | # Working test of textblob
# https://www.geeksforgeeks.org/spelling-checker-in-python/
from textblob import TextBlob
message = "Hello confsion houes"
print("entered: "+str(message))
corrected = TextBlob(message)
# prints the corrected spelling
print("corrected: "+str(corrected.correct()))
|
the-stack_0_7187 | import argparse
import os
import random
import shutil
import time
import warnings
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import torch.nn.functional as F
from datasets import dataset
from networks import nets
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
model_names += ['resnext50_32x4d_reslt','resnet10_reslt', 'resnext101_32x4d_reslt']
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('--root_path', type=str, default='data')
parser.add_argument('-dataset', type=str,
help='path to dataset')
parser.add_argument('--data_path', type=str, default=None)
parser.add_argument('-a', '--arch', metavar='ARCH', default='ResNeXt152',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet18)')
parser.add_argument('-j', '--workers', default=32, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=90, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=None, type=int,
metavar='N',
help='mini-batch size (default: 256), this is the total '
'batch size of all GPUs on the current node when '
'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning-rate', default=0.2, type=float,
metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--wd', '--weight-decay', default=5e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)',
dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=100, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--world-size', default=-1, type=int,
help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
help='GPU id to use.')
parser.add_argument('--multiprocessing-distributed', action='store_true',
help='Use multi-processing distributed training to launch '
'N processes per node, which has N GPUs. This is the '
'fastest way to use PyTorch for either single node or '
'multi node data parallel training')
# ResLT
parser.add_argument('--mark', default=None, type=str, help='prefix of log file')
parser.add_argument('--beta', default=None, type=float)
parser.add_argument('--num_works', default=None, type=int)
parser.add_argument('--dropout', default=False, type=bool)
parser.add_argument('--lsm', default=0, type=float)
parser.add_argument('--warmup_epochs', default=5, type=int)
parser.add_argument('--after_1x1conv', action='store_true')
parser.add_argument('--gamma', default=0.5, type=float)
parser.add_argument('--num_classes', default=1000, type=int)
best_acc1 = 0
args = parser.parse_args()
args.root_model = f'{args.root_path}/{args.dataset}/{args.mark}'
os.makedirs(args.root_model, exist_ok=True)
def crossEntropy(softmax, logit, label, weight, num_classes):
label = F.one_hot(label, num_classes=num_classes)
target = label_smoothing(label, num_classes, delta=args.lsm)
loss = - (weight * (target * torch.log(softmax(logit)+1e-7)).sum(dim=1)).sum()
return loss
def disable_conv(model):
for module in model.modules():
if isinstance(module, nn.Conv2d):
module.weight.requires_grad=False
def label_smoothing(y_batch_tensor, num_classes, delta):
y_batch_smooth = (1 - delta - delta / (num_classes - 1)) * y_batch_tensor + delta / (num_classes - 1)
return y_batch_smooth
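# Worked example (added for clarity): with num_classes=5 and delta=0.1, a
# one-hot row [0, 1, 0, 0, 0] becomes
#   off-target entries: delta / (num_classes - 1) = 0.025
#   target entry:       (1 - 0.1 - 0.025) * 1 + 0.025 = 0.9
# i.e. [0.025, 0.9, 0.025, 0.025, 0.025], which still sums to 1.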
def main():
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
if args.dist_url == "env://" and args.world_size == -1:
args.world_size = int(os.environ["WORLD_SIZE"])
args.distributed = args.world_size > 1 or args.multiprocessing_distributed
ngpus_per_node = torch.cuda.device_count()
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.world_size
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
# Simply call main_worker function
main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
global best_acc1
args.gpu = gpu
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
# For multiprocessing distributed training, rank needs to be the
# global rank among all the processes
args.rank = args.rank * ngpus_per_node + gpu
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
# create model
if args.pretrained:
print("=> using pre-trained model '{}'".format(args.arch))
model = models.__dict__[args.arch](pretrained=True)
else:
print("=> creating model '{}'".format(args.arch))
model = getattr(nets, args.arch)(dropout=args.dropout, after_1x1conv=args.after_1x1conv, gamma=args.gamma)
if args.distributed:
# For multiprocessing distributed, DistributedDataParallel constructor
# should always set the single device scope, otherwise,
# DistributedDataParallel will use all available devices.
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model.cuda(args.gpu)
# When using a single GPU per process and per
# DistributedDataParallel, we need to divide the batch size
# ourselves based on the total number of GPUs we have
args.batch_size = int(args.batch_size / ngpus_per_node)
args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
else:
model.cuda()
# DistributedDataParallel will divide and allocate batch_size to all
# available GPUs if device_ids are not set
model = torch.nn.parallel.DistributedDataParallel(model)
elif args.gpu is not None:
torch.cuda.set_device(args.gpu)
model = model.cuda(args.gpu)
else:
# DataParallel will divide and allocate batch_size to all available GPUs
if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
model.features = torch.nn.DataParallel(model.features)
model.cuda()
else:
model = torch.nn.DataParallel(model).cuda()
filename = "checkpoints/ImageNet/"+args.mark+'_checkpoint.pth.tar'
if os.path.exists(filename):
args.resume = filename
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda(args.gpu)
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
if args.gpu is None:
checkpoint = torch.load(args.resume)
else:
# Map model to be loaded to specified single gpu.
loc = 'cuda:{}'.format(args.gpu)
checkpoint = torch.load(args.resume, map_location=loc)
args.start_epoch = checkpoint['epoch']
best_acc1 = checkpoint['best_acc1']
if args.gpu is not None:
# best_acc1 may be from a checkpoint from a different GPU
best_acc1 = best_acc1.to(args.gpu)
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
# Data loading code
print(args.dataset," ",type(args.dataset))
data=getattr(dataset,args.dataset)(batch_size=args.batch_size, num_works=args.num_works, root=args.data_path)
train_loader=data.train
val_loader=data.test
if args.evaluate:
validate(val_loader, model, criterion, args)
return
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
adjust_learning_rate(optimizer, epoch, args)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch, args)
# evaluate on validation set
acc1 = validate(val_loader, model, criterion, args)
# remember best acc@1 and save checkpoint
is_best = acc1 > best_acc1
best_acc1 = max(acc1, best_acc1)
if not args.multiprocessing_distributed or (args.multiprocessing_distributed
and args.rank % ngpus_per_node == 0):
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_acc1': best_acc1,
'optimizer' : optimizer.state_dict(),
}, is_best)
def train(train_loader, model, criterion, optimizer, epoch, args):
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
F_losses = AverageMeter('F_Loss', ':.4e')
I_losses = AverageMeter('I_Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(train_loader),
[batch_time, data_time, F_losses, I_losses, top1, top5],
prefix="Epoch: [{}]".format(epoch))
# switch to train mode
model.train()
softmax = nn.Softmax(dim=1)
end = time.time()
for i, (images, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# compute output
logitH, logitM, logitT = model(images)
######## ResLT
labelH=F.one_hot(target, num_classes=args.num_classes).sum(dim=1)
labelM=F.one_hot(target, num_classes=args.num_classes)[:,:825].sum(dim=1)
labelT=F.one_hot(target, num_classes=args.num_classes)[:,:220].sum(dim=1)
I_loss=(crossEntropy(softmax, logitH, target, labelH, args.num_classes) + crossEntropy(softmax, logitM, target, labelM, args.num_classes) \
+ crossEntropy(softmax, logitT, target, labelT, args.num_classes)) / (labelH.sum() + labelM.sum() + labelT.sum())
logit = (logitH + logitM + logitT)
F_loss = crossEntropy(softmax, logit, target, labelH, args.num_classes) / labelH.sum()
loss= (1-args.beta) * F_loss + args.beta * I_loss
# measure accuracy and record loss
acc1, acc5 = accuracy(logit, target, topk=(1, 5))
F_losses.update(F_loss.detach().item(), images.size(0))
I_losses.update(I_loss.detach().item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i, args)
def validate(val_loader, model, criterion, args):
batch_time = AverageMeter('Time', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('All_Acc@1', ':6.2f')
top5 = AverageMeter('All_Acc@5', ':6.2f')
progress = ProgressMeter(
len(val_loader),
[batch_time, losses, top1, top5],
prefix='Test: ')
# switch to evaluate mode
model.eval()
class_num=torch.zeros(1000).cuda()
correct=torch.zeros(1000).cuda()
with torch.no_grad():
end = time.time()
for i, (images, target) in enumerate(val_loader):
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# compute output
logitH, logitM, logitT = model(images)
output = logitH + logitM + logitT
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
_, predicted = output.max(1)
target_one_hot = F.one_hot(target, num_classes=args.num_classes)
predict_one_hot = F.one_hot(predicted, num_classes=args.num_classes)
class_num = class_num + target_one_hot.sum(dim=0).to(torch.float)
correct=correct + (target_one_hot + predict_one_hot==2).sum(dim=0).to(torch.float)
if i % args.print_freq == 0:
progress.display(i, args)
# TODO: this should also be done with the ProgressMeter
acc_classes = correct / class_num
head_acc = acc_classes[610:].mean()
medium_acc = acc_classes[165:610].mean()
tail_acc = acc_classes[:165].mean()
open(args.root_model+"/"+"train.log","a+").write((' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f} HAcc {head_acc:.3f} MAcc {medium_acc:.3f} TAcc {tail_acc:.3f} \n').format(top1=top1, top5=top5, head_acc=head_acc, medium_acc=medium_acc, tail_acc=tail_acc))
return top1.avg
def save_checkpoint(state, is_best, filename=args.root_model+'/checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, args.root_model+'/model_best.pth.tar')
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
class ProgressMeter(object):
def __init__(self, num_batches, meters, prefix=""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def display(self, batch, args):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
open(args.root_model+"/train.log","a+").write('\t'.join(entries)+"\n")
def _get_batch_fmtstr(self, num_batches):
num_digits = len(str(num_batches // 1))
fmt = '{:' + str(num_digits) + 'd}'
return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def adjust_learning_rate(optimizer, epoch, args):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr_min = 0
lr_max = args.lr
if epoch < args.warmup_epochs:
lr = args.lr / args.warmup_epochs * (epoch+1)
else:
lr= lr_min + 0.5 * (lr_max - lr_min) * (1 + math.cos( (epoch - args.warmup_epochs + 1) / (args.epochs - args.warmup_epochs + 1) * 3.1415926))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
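# Worked example (added for clarity), using the defaults in this file
# (lr=0.2, warmup_epochs=5, epochs=90):
#   epoch 0  -> 0.2 / 5 * 1                    = 0.04      (linear warmup)
#   epoch 4  -> 0.2 / 5 * 5                    = 0.2       (end of warmup)
#   epoch 5  -> 0.1 * (1 + cos(pi * 1 / 86))  ~= 0.19993   (cosine decay starts)
#   epoch 89 -> 0.1 * (1 + cos(pi * 85 / 86)) ~= 6.7e-5    (end of training)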
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == '__main__':
main()
|
the-stack_0_7188 | #!/usr/bin/env python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script from the root of the repository to update all translations from
transifex.
It will do the following automatically:
- fetch all translations using the tx tool
- post-process them into valid and committable format
- remove invalid control characters
- remove location tags (makes diffs less noisy)
TODO:
- auto-add new translations to the build system according to the translation process
'''
from __future__ import division, print_function
import subprocess
import re
import sys
import os
import io
import xml.etree.ElementTree as ET
# Name of transifex tool
TX = 'tx'
# Name of source language file
SOURCE_LANG = 'sheet_en.ts'
# Directory with locale files
LOCALE_DIR = 'src/qt/locale'
# Minimum number of messages for translation to be considered at all
MIN_NUM_MESSAGES = 10
def check_at_repository_root():
if not os.path.exists('.git'):
print('No .git directory found')
print('Execute this script at the root of the repository', file=sys.stderr)
sys.exit(1)
def fetch_all_translations():
if subprocess.call([TX, 'pull', '-f', '-a']):
print('Error while fetching translations', file=sys.stderr)
sys.exit(1)
def find_format_specifiers(s):
'''Find all format specifiers in a string.'''
pos = 0
specifiers = []
while True:
percent = s.find('%', pos)
if percent < 0:
break
specifiers.append(s[percent+1])
pos = percent+2
return specifiers
def split_format_specifiers(specifiers):
'''Split format specifiers between numeric (Qt) and others (strprintf)'''
numeric = []
other = []
for s in specifiers:
if s in {'1','2','3','4','5','6','7','8','9'}:
numeric.append(s)
else:
other.append(s)
# If both numeric format specifiers and "others" are used, assume we're dealing
# with a Qt-formatted message. In the case of Qt formatting (see https://doc.qt.io/qt-5/qstring.html#arg)
# only numeric formats are replaced at all. This means "(percentage: %1%)" is valid, without needing
# any kind of escaping that would be necessary for strprintf. Without this, this function
# would wrongly detect '%)' as a printf format specifier.
if numeric:
other = []
# numeric (Qt) can be present in any order, others (strprintf) must be in specified order
return set(numeric),other
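# Worked examples (added for clarity):
#   find_format_specifiers('Error: %s (%d)')     -> ['s', 'd']
#   split_format_specifiers(['s', 'd'])          -> (set(), ['s', 'd'])   # strprintf style
#   find_format_specifiers('(percentage: %1%)')  -> ['1', ')']
#   split_format_specifiers(['1', ')'])          -> ({'1'}, [])           # Qt style, '%)' ignored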
def sanitize_string(s):
'''Sanitize string for printing'''
return s.replace('\n',' ')
def check_format_specifiers(source, translation, errors, numerus):
source_f = split_format_specifiers(find_format_specifiers(source))
# assert that no source messages contain both Qt and strprintf format specifiers
# if this fails, go change the source as this is hacky and confusing!
assert(not(source_f[0] and source_f[1]))
try:
translation_f = split_format_specifiers(find_format_specifiers(translation))
except IndexError:
errors.append("Parse error in translation for '%s': '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
else:
if source_f != translation_f:
if numerus and source_f == (set(), ['n']) and translation_f == (set(), []) and translation.find('%') == -1:
# Allow numerus translations to omit %n specifier (usually when it only has one possible value)
return True
errors.append("Mismatch between '%s' and '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
return True
def all_ts_files(suffix=''):
for filename in os.listdir(LOCALE_DIR):
# process only language files, and do not process source language
if not filename.endswith('.ts'+suffix) or filename == SOURCE_LANG+suffix:
continue
if suffix: # remove provided suffix
filename = filename[0:-len(suffix)]
filepath = os.path.join(LOCALE_DIR, filename)
yield(filename, filepath)
FIX_RE = re.compile(b'[\x00-\x09\x0b\x0c\x0e-\x1f]')
def remove_invalid_characters(s):
'''Remove invalid characters from translation string'''
return FIX_RE.sub(b'', s)
# Override cdata escape function to make our output match Qt's (optional, just for cleaner diffs for
# comparison, disable by default)
_orig_escape_cdata = None
def escape_cdata(text):
text = _orig_escape_cdata(text)
text = text.replace("'", ''')
text = text.replace('"', '"')
return text
def postprocess_translations(reduce_diff_hacks=False):
print('Checking and postprocessing...')
if reduce_diff_hacks:
global _orig_escape_cdata
_orig_escape_cdata = ET._escape_cdata
ET._escape_cdata = escape_cdata
for (filename,filepath) in all_ts_files():
os.rename(filepath, filepath+'.orig')
have_errors = False
for (filename,filepath) in all_ts_files('.orig'):
# pre-fixups to cope with transifex output
parser = ET.XMLParser(encoding='utf-8') # need to override encoding because 'utf8' is not understood only 'utf-8'
with open(filepath + '.orig', 'rb') as f:
data = f.read()
# remove control characters; this must be done over the entire file otherwise the XML parser will fail
data = remove_invalid_characters(data)
tree = ET.parse(io.BytesIO(data), parser=parser)
# iterate over all messages in file
root = tree.getroot()
for context in root.findall('context'):
for message in context.findall('message'):
numerus = message.get('numerus') == 'yes'
source = message.find('source').text
translation_node = message.find('translation')
# pick all numerusforms
if numerus:
translations = [i.text for i in translation_node.findall('numerusform')]
else:
translations = [translation_node.text]
for translation in translations:
if translation is None:
continue
errors = []
valid = check_format_specifiers(source, translation, errors, numerus)
for error in errors:
print('%s: %s' % (filename, error))
if not valid: # set type to unfinished and clear string if invalid
translation_node.clear()
translation_node.set('type', 'unfinished')
have_errors = True
# Remove location tags
for location in message.findall('location'):
message.remove(location)
# Remove entire message if it is an unfinished translation
if translation_node.get('type') == 'unfinished':
context.remove(message)
# check if document is (virtually) empty, and remove it if so
num_messages = 0
for context in root.findall('context'):
for message in context.findall('message'):
num_messages += 1
if num_messages < MIN_NUM_MESSAGES:
print('Removing %s, as it contains only %i messages' % (filepath, num_messages))
continue
# write fixed-up tree
# if diff reduction requested, replace some XML to 'sanitize' to qt formatting
if reduce_diff_hacks:
out = io.BytesIO()
tree.write(out, encoding='utf-8')
out = out.getvalue()
out = out.replace(b' />', b'/>')
with open(filepath, 'wb') as f:
f.write(out)
else:
tree.write(filepath, encoding='utf-8')
return have_errors
if __name__ == '__main__':
check_at_repository_root()
fetch_all_translations()
postprocess_translations()
|
the-stack_0_7189 | # Copyright 2018 The Exoplanet ML Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Postprocessing utility functions for CLIF."""
# CLIF postprocessor for a C++ function with signature:
# bool MyFunc(input_arg1, ..., *output_arg1, *output_arg2, ..., *error)
#
# If MyFunc returns True, returns (output_arg1, output_arg2, ...)
# If MyFunc returns False, raises ValueError(error).
def ValueErrorOnFalse(ok, *output_args):
"""Raises ValueError if not ok, otherwise returns the output arguments."""
n_outputs = len(output_args)
if n_outputs < 2:
raise ValueError(
"Expected 2 or more output_args. Got: {}".format(n_outputs))
if not ok:
error = output_args[-1]
raise ValueError(error)
if n_outputs == 2:
output = output_args[0]
else:
output = output_args[0:-1]
return output
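# --- Hedged usage sketch (illustrative; the function name is made up) ---
# A CLIF-wrapped C++ function such as
#   bool ParseConfig(const string& text, Config* out, string* error);
# surfaces in Python as returning the tuple (ok, out, error), so:
#   ValueErrorOnFalse(True, {'threads': 4}, '')        # -> {'threads': 4}
#   ValueErrorOnFalse(False, None, 'bad key, line 3')  # raises ValueError('bad key, line 3')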
# CLIF postprocessor for a C++ function with signature:
# *result MyFactory(input_arg1, ..., *error)
#
# If result is not null, returns result.
# If result is null, raises ValueError(error).
def ValueErrorOnNull(result, error):
"""Raises ValueError(error) if result is None, otherwise returns result."""
if result is None:
raise ValueError(error)
return result
|
the-stack_0_7191 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq_mod.data.encoders import register_tokenizer
@register_tokenizer('moses')
class MosesTokenizer(object):
@staticmethod
def add_args(parser):
# fmt: off
parser.add_argument('--moses-source-lang', metavar='SRC',
help='source language')
parser.add_argument('--moses-target-lang', metavar='TARGET',
help='target language')
parser.add_argument('--moses-no-dash-splits', action='store_true', default=False,
help='don\'t apply dash split rules')
parser.add_argument('--moses-no-escape', action='store_true', default=False,
                            help='don\'t perform HTML escaping on apostrophe, quotes, etc.')
# fmt: on
def __init__(self, args):
self.args = args
if getattr(args, 'moses_source_lang', None) is None:
args.moses_source_lang = getattr(args, 'source_lang', 'en')
if getattr(args, 'moses_target_lang', None) is None:
args.moses_target_lang = getattr(args, 'target_lang', 'en')
try:
from sacremoses import MosesTokenizer, MosesDetokenizer
self.tok = MosesTokenizer(args.moses_source_lang)
self.detok = MosesDetokenizer(args.moses_target_lang)
except ImportError:
raise ImportError('Please install Moses tokenizer with: pip install sacremoses')
def encode(self, x: str) -> str:
return self.tok.tokenize(
x,
aggressive_dash_splits=(not self.args.moses_no_dash_splits),
return_str=True,
escape=(not self.args.moses_no_escape),
)
def decode(self, x: str) -> str:
return self.detok.detokenize(x.split())
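# --- Hedged usage sketch (not part of the original module) ---
# fairseq normally builds this tokenizer from parsed CLI args; a plain
# argparse.Namespace works for a quick check. The language codes are
# illustrative.
#   import argparse
#   args = argparse.Namespace(moses_source_lang='en', moses_target_lang='en',
#                             moses_no_dash_splits=False, moses_no_escape=True)
#   tok = MosesTokenizer(args)
#   tok.encode('Hello, world!')    # -> 'Hello , world !'
#   tok.decode('Hello , world !')  # -> 'Hello, world!'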
|
the-stack_0_7193 | """
Default exit plugin
"""
import shutil
import logging
import os
class ExitPlugin(object):
""" Removes temporary files and exits the program """
def __init__(self, skye):
self.skye = skye
def close_program(self):
""" Closes the program """
self.skye.speak("Goodbye")
logging.debug("Removing temporary folders")
if os.path.exists("temp"):
shutil.rmtree("temp", ignore_errors=True)
logging.info("Exiting")
quit()
def setup(skye):
"""Called when the plugin is set up. Used to register commands and other
initializations
Arguments:
skye {Skye} -- The singleton Skye instance
"""
exit_plugin = ExitPlugin(skye)
skye.register_command(("exit", "leave", "quit", "stop"),
exit_plugin.close_program)
|
the-stack_0_7194 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torch
from seq2seq.models.decoder import Classifier
class Stage3(torch.nn.Module):
def __init__(self):
super(Stage3, self).__init__()
self.layer5 = torch.nn.LSTM(2048, 1024)
self.layer8 = Classifier(1024, 32320)
def forward(self, input1, input3, input0):
input2 = [None]
out0 = input0.clone()
out1 = input1.clone()
out2 = input2[0]
out3 = input3.clone()
out4 = torch.cat([out0, out1], 2)
out5 = self.layer5(out4, out2)
out6 = out5[0]
out6 = out6 + out3
out8 = self.layer8(out6)
return out8
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, torch.nn.Conv2d):
torch.nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
torch.nn.init.constant_(m.bias, 0)
elif isinstance(m, torch.nn.BatchNorm2d):
torch.nn.init.constant_(m.weight, 1)
torch.nn.init.constant_(m.bias, 0)
elif isinstance(m, torch.nn.Linear):
torch.nn.init.normal_(m.weight, 0, 0.01)
torch.nn.init.constant_(m.bias, 0)
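# --- Hedged shape sketch (illustrative only, not part of the original module) ---
# Stage3 is one stage of a pipeline-parallel GNMT decoder. The shapes below are
# assumptions inferred from the layer sizes above (two 1024-dim activations
# concatenated into the 2048-dim LSTM input, 32320 = target vocabulary size);
# which argument carries which activation in the real pipeline is not asserted here.
def _example_stage3_shapes(seq_len=4, batch=2):
    stage = Stage3()
    input1 = torch.randn(seq_len, batch, 1024)
    input3 = torch.randn(seq_len, batch, 1024)
    input0 = torch.randn(seq_len, batch, 1024)
    logits = stage(input1, input3, input0)
    return logits.shape  # expected: (seq_len, batch, 32320)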
|
the-stack_0_7196 | # chainer is used here because chainerrl provides an ACER implementation
#https://github.com/chainer/chainerrl
import numpy as np
import gym
import h4rm0ny
import chainer
import chainer.functions as F
import chainer.links as L
import chainerrl
from chainerrl.action_value import DiscreteActionValue
from chainerrl import links
from chainerrl.agents import acer
from chainerrl.distribution import SoftmaxDistribution
from chainerrl import misc
from chainerrl.optimizers import rmsprop_async
from chainerrl import policies
from chainerrl import q_functions
from chainerrl.replay_buffer import EpisodicReplayBuffer
from chainerrl import v_functions
from chainerrl.initializers import LeCunNormal
from tqdm import tqdm
#creates an ACER agent
def create_acer_agent(env):
#our observation space dimension of malware
obs_dim = env.observation_space.shape[0]
#the list of actions that we can perform on the malware
n_actions = env.action_space.n
#our acer network
#consists of pi (our policy) and our q (our q function)
model = acer.ACERSeparateModel(
pi=links.Sequence(
L.Linear( obs_dim, 1024, initialW=LeCunNormal(1e-3)),
F.relu,
L.Linear( 1024, 512, initialW=LeCunNormal(1e-3)),
F.relu,
L.Linear( 512, n_actions, initialW=LeCunNormal(1e-3)),
SoftmaxDistribution),
q=links.Sequence(
L.Linear( obs_dim, 1024, initialW=LeCunNormal(1e-3)),
F.relu,
L.Linear( 1024, 512, initialW=LeCunNormal(1e-3)),
F.relu,
L.Linear( 512, n_actions, initialW=LeCunNormal(1e-3)),
DiscreteActionValue),
)
#optimizer for the acer
opt = rmsprop_async.RMSpropAsync( lr=7e-4, eps=1e-2, alpha=0.99)
opt.setup( model )
#hook to the chainer model
opt.add_hook( chainer.optimizer.GradientClipping(40) )
replay_buffer = EpisodicReplayBuffer( 128 )
#the agent itself, params from original file
agent = acer.ACER( model, opt,
gamma=0.95, # reward discount factor
t_max=32, # update the model after this many local steps
replay_buffer=replay_buffer,
n_times_replay=4, # number of times experience replay is repeated for each update
replay_start_size=64, # don't start replay unless we have this many experiences in the buffer
disable_online_update=True, # rely only on experience buffer
use_trust_region=True, # enable trust region policy optimiztion
trust_region_delta=0.1, # a parameter for TRPO
truncation_threshold=5.0, # truncate large importance weights
beta=1e-2, # entropy regularization parameter
phi= lambda obs: obs.astype(np.float32, copy=False) )
return agent
class QFunction(chainer.Chain):
def __init__(self, obs_size, n_actions, n_hidden_channels=[1024,256]):
super(QFunction,self).__init__()
net = []
inpdim = obs_size
for i,n_hid in enumerate(n_hidden_channels):
net += [ ('l{}'.format(i), L.Linear( inpdim, n_hid ) ) ]
net += [ ('norm{}'.format(i), L.BatchNormalization( n_hid ) ) ]
net += [ ('_act{}'.format(i), F.relu ) ]
inpdim = n_hid
net += [('output', L.Linear( inpdim, n_actions) )]
with self.init_scope():
for n in net:
if not n[0].startswith('_'):
setattr(self, n[0], n[1])
self.forward = net
def __call__(self, x, test=False):
"""
Args:
x (ndarray or chainer.Variable): An observation
test (bool): a flag indicating whether it is in test mode
"""
for n, f in self.forward:
if not n.startswith('_'):
x = getattr(self, n)(x)
else:
x = f(x)
return chainerrl.action_value.DiscreteActionValue(x)
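# --- Hedged usage sketch (not part of the original module) ---
# A quick shape check of the Q-network; the observation size and batch size
# below are illustrative assumptions.
#   q_func = QFunction(obs_size=2350, n_actions=10)
#   obs = np.zeros((4, 2350), dtype=np.float32)
#   q_values = q_func(obs)            # chainerrl DiscreteActionValue
#   q_values.greedy_actions.shape     # -> (4,)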
def create_ddqn_agent(env):
obs_dim = env.observation_space.shape[0]
n_actions = env.action_space.n
q_func = QFunction(obs_dim, n_actions)
optimizer = chainer.optimizers.Adam(eps=1e-2)
optimizer.setup(q_func)
# Set the discount factor that discounts future rewards.
gamma = 0.95
# Use epsilon-greedy for exploration
explorer = chainerrl.explorers.Boltzmann()
# DQN uses Experience Replay.
# Specify a replay buffer and its capacity.
replay_buffer = chainerrl.replay_buffer.ReplayBuffer(capacity=1000)
    # Chainer only accepts numpy.float32 by default, so supply a converter
    # as the feature extractor function phi.
phi = lambda x: x.astype(np.float32, copy=False)
# Now create an agent that will interact with the environment.
# DQN agent as described in Mnih (2013) and Mnih (2015).
# http://arxiv.org/pdf/1312.5602.pdf
# http://arxiv.org/abs/1509.06461
agent = chainerrl.agents.DoubleDQN(
q_func, optimizer, replay_buffer, gamma, explorer,
replay_start_size=32, update_interval=1,
target_update_interval=100, phi=phi)
return agent
import os
def get_latest_model_from(basedir):
dirs = os.listdir(basedir)
lastmodel = -1
for d in dirs:
try:
if int(d) > lastmodel:
lastmodel = int(d)
except ValueError:
continue
assert lastmodel >= 0, "No saved models!"
return os.path.join(basedir, str(lastmodel))
from h4rm0ny.envs.utils import interface, malconv
from h4rm0ny.envs.controls import modifier
import random
ACTION_LOOKUP = {
i: act for i, act in enumerate(modifier.ACTION_TABLE.keys())
}
def get_latest_model_from(basedir):
dirs = os.listdir(basedir)
lastmodel = -1
for d in dirs:
try:
if int(d) > lastmodel:
lastmodel = int(d)
except ValueError:
continue
assert lastmodel >= 0, "No saved models!"
return os.path.join(basedir, str(lastmodel))
def gen_dataset(train_path, test_path, agent=None):
if not os.path.exists(test_path):
os.makedirs(test_path)
if not os.path.exists(train_path):
os.makedirs(train_path)
mc = malconv.MalConv()
sha256_train = interface.get_available_sha256('/home/jovyan/Research/malware_rl/rl_train_exp.csv')[:700]
sha256_test = interface.get_available_sha256('/home/jovyan/Research/malware_rl/rl_test_exp.csv')[:300]
print(sha256_train)
def __gen(sha, ouput_path):
for s in tqdm(sha):
if not agent:
action = random.choice(ACTION_LOOKUP)
else:
mal = np.array(mc.extract(interface.fetch_file(s)))
action = ACTION_LOOKUP[agent.act(mal)]
bytez = interface.fetch_file(s)
bytez = modifier.modify_sample(bytez, action)
evade_path = os.path.join(ouput_path, os.path.basename(s))
with open(evade_path, 'wb') as out:
out.write(bytez)
__gen(sha256_train, train_path, )
__gen(sha256_test, test_path, )
#training the ACER agent
def train_agent(rounds=10000, use_score=False, name='result_dir', test_set = "/home/jovyan/Research/malware_rl/sets/test_set", train_set = "/home/jovyan/Research/malware_rl/sets/train_set", create_agent=create_acer_agent, gym_env = "malconv-train-v0", train=True):
if(train):
print("inside train")
if(name == "random"):
gen_dataset(train_set, test_set, agent=None)
return 1
#we are training on the malconv gym
env = gym.make( gym_env )
#setting random seeds so we can reproduce results
np.random.seed(41)
env.seed(41)
#creating our agent
agent = create_agent(env)
#run through training, evaluate and give reward based on outcome
chainerrl.experiments.train_agent_with_evaluation(
agent, env,
steps=rounds, # Train the agent for this many rounds steps
train_max_episode_len=600, # Maximum length of each episodes
eval_interval=10, # Evaluate the agent after every step
eval_n_episodes = 10, #eval every episode
eval_n_steps = None,
save_best_so_far_agent = False,
outdir=name) # Save everything to 'result' directory
gen_dataset(train_set, test_set, agent)
else:
print("not in train")
env = gym.make(gym_env)
agent = create_acer_agent(env)
# pull latest stored model
#last_model_dir = get_latest_model_from("/home/jovyan/Research/malware_rl/" +name + "_1/500_finish")
agent.load( "/home/jovyan/Research/malware_rl/" +name + "_1/500_finish" )
gen_dataset(train_set, test_set, agent)
#training the ACER agent
def test(rounds=10, use_score=False, name='result_dir', create_agent=create_acer_agent, gym_env = "malconv-train-v0"):
#we are training on the malconv gym
env = gym.make( gym_env )
#setting random seeds so we can reproduce results
np.random.seed(42)
env.seed(42)
#creating our agent
agent = create_agent(env)
# pull latest stored model
last_model_dir = get_latest_model_from('models/')
agent.load( last_model_dir )
chainerrl.experiments.collect_demonstrations(agent,env,steps = rounds,episodes = 1,outdir = name)
# Save everything to 'result' directory
if __name__ == '__main__':
print("We go")
agent_score = train_agent(rounds=50000, use_score=True, name='models/', create_agent=create_acer_agent) # allow agent to see scores
# models are automatically saved
print("done score")
#use this model if you want to see if the RL can learn against a black box model
    agent_blackbox = train_agent( rounds=50000, use_score=False, name='models/acer_chainer', create_agent=create_acer_agent) # black box
# models are automatically saved
|
the-stack_0_7198 |
import logging, itertools, os
from datetime import date
import astropy.io.ascii as at
import matplotlib.pyplot as plt
from k2spin.config import *
from k2spin import plot
today = date.today().isoformat()
def plot_list(results_list):
"""
"""
res = at.read(base_path+"tables/"+results_list)
f = open("/home/stephanie/my_papers/hyadesk2/figure_sets/f8.tbl","w")
count = 1
for i, epic in enumerate(res["EPIC"]):
logging.info(epic)
outfilename = "ktwo{0}-c04_lc_analysis.png".format(epic)
plot.paper_lcs(epic,res[i])
plt.savefig(base_path+"plot_outputs/"+outfilename,bbox_inches="tight")
if ((epic==2107361051) or (epic==2107361050) or
(epic==210963067) or (epic==2109630670) or
(epic==210675409)):
# Use lc from smaller centroiding box for 210735105
# but daofind lc for 210963067
# 210675409 is too bright but still in my list somehow
# note I never ran 211037886
continue
elif epic==2109630671:
save_epic = 210963067
else:
save_epic = epic
figsetname = "f8_{0}.eps".format(count)
f.write("{0} & EPIC {1}\n".format(figsetname,save_epic))
plt.savefig("/home/stephanie/my_papers/hyadesk2/figure_sets/"+figsetname,bbox_inches="tight")
plt.close("all")
count += 1
f.close()
if __name__=="__main__":
plot_list("c4_lcs_aps_results_2015-12-18_comments.csv")
"""
lc_file = "ktwo210408563-c04.csv"
epic = "210408563"
ap = 5
res = at.read(base_path+"tables/c4_lcs_aps_results_2015-12-18.csv")
plot.paper_lcs(epic,res[4])
plt.savefig("/home/stephanie/my_papers/hyadesk2/sample_lc.eps",
bbox_inches="tight")
plt.savefig("/home/stephanie/Dropbox/plots_for_sharing/sample_lc.png",
bbox_inches="tight")
"""
|
the-stack_0_7200 | # -*- coding: utf-8 -*-
import pickle
from os import path
import jieba
import matplotlib.pyplot as plt
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
comment = []
with open('quan.txt', mode = 'r', encoding='utf-8') as f:
lines = f.readlines()
for line in lines:
arr = line.split(',')
if len(arr) == 5:
comment.append(arr[4].replace('\n',''))
# comment_after_split = jieba.cut(str(comment),cut_all=False)
# wl_space_split = ''.join(comment_after_split)
# wordcloud=WordCloud(font_path="fyuan.ttf",background_color="black",width=600,height=300,max_words=50).generate(wl_space_split)
# # 3. generate the image
# image=wordcloud.to_image()
# # 4. display the image
# image.show()
comment_after_split = jieba.cut(str(comment),cut_all=False)
wl_space_split = ' '.join(comment_after_split)
# # print(wl_space_split)
backgroud_Image = plt.imread('IMG_3246.JPG')
# set up the stop words (words to exclude from the cloud)
stopwords = STOPWORDS.copy()
stopwords.add('电影')
stopwords.add('一部')
stopwords.add('里面')
stopwords.add('讲')
stopwords.add('是')
stopwords.add('有点')
stopwords.add('还是')
stopwords.add('这部')
stopwords.add('真的')
stopwords.add('也许')
stopwords.add('可能')
stopwords.add('之后')
# set the word cloud font, background color, maximum word size and mask image
wc = WordCloud(width=1024, height=768,
background_color='white',
mask=backgroud_Image,
font_path='fyuan.ttf',
stopwords=stopwords,
max_font_size = 400,
random_state = 50
)
wc.generate_from_text(wl_space_split)
img_colors = ImageColorGenerator(backgroud_Image)
wc.recolor(color_func=img_colors)
plt.imshow(wc)
plt.axis('off')
plt.show()
wc.to_file('./image.jpg')
|
the-stack_0_7201 | from setuptools import find_packages, setup
def readme():
with open("README.md") as f:
return f.read()
# read version file
exec(open("alibi_detect/version.py").read())
extras_require = {"examples": ["seaborn>=0.9.0", "tqdm>=4.28.1", "nlp>=0.3.0"],
"prophet": ["fbprophet>=0.5, <0.7", "holidays==0.9.11", "pystan<3.0"],
"torch": ["torch>=1.0"]}
setup(
name="alibi-detect",
author="Seldon Technologies Ltd.",
author_email="[email protected]",
version=__version__, # type: ignore # noqa F821
description="Algorithms for outlier detection, concept drift and metrics.",
long_description=readme(),
long_description_content_type="text/markdown",
url="https://github.com/SeldonIO/alibi-detect",
license="Apache 2.0",
packages=find_packages(),
include_package_data=True,
python_requires=">=3.6",
# lower bounds based on Debian Stable versions where available
install_requires=[
"matplotlib>=3.0.0, <4.0.0",
"numpy>=1.16.2, <2.0.0",
"pandas>=0.23.3, <2.0.0",
"Pillow>=5.4.1, <9.0.0",
"opencv-python>=3.2.0, <5.0.0",
"scipy>=1.3.0, <2.0.0",
'scikit-image>=0.14.2, !=0.17.1, <0.19', # https://github.com/SeldonIO/alibi/issues/215
"scikit-learn>=0.20.2, <0.25.0",
"tensorflow>=2.0.0, <2.5.0",
"tensorflow_probability>=0.8.0, <0.13.0",
"transformers>=2.10.0, <5.0.0"
],
extras_require=extras_require,
test_suite="tests",
zip_safe=False,
)
|
the-stack_0_7202 | from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.models import load_model
import numpy as np
import cv2
import os
prototxtPath = os.path.sep.join(["Res10Face_Detector", "deploy.prototxt"])
weightsPath = os.path.sep.join(["Res10Face_Detector",
"res10_300x300_ssd_iter_140000.caffemodel"])
net = cv2.dnn.readNet(prototxtPath, weightsPath)
model = load_model("Models/mask_detector.model")
# classify: detect faces in a BGR image and label each one as 'Mask' or 'No Mask';
# returns (number of faces detected, list of labels, annotated image).
def classify(image):
(h, w) = image.shape[:2]
blob = cv2.dnn.blobFromImage(image, 1.0, (300, 300),
(104.0, 177.0, 123.0))
net.setInput(blob)
detections = net.forward()
count= 0
labels = []
for i in range(0, detections.shape[2]):
confidence = detections[0, 0, i, 2]
if confidence > 0.5:
count+=1
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
(startX, startY) = (max(0, startX), max(0, startY))
(endX, endY) = (min(w - 1, endX), min(h - 1, endY))
face = image[startY:endY, startX:endX]
face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
face = cv2.resize(face, (224, 224))
face = img_to_array(face)
face = preprocess_input(face)
face = np.expand_dims(face, axis=0)
(mask, withoutMask) = model.predict(face)[0]
label = "Mask" if mask > withoutMask else "No Mask"
labels.append(label)
color = (0, 255, 0) if label == "Mask" else (0, 0, 255)
label = "{}: {:.2f}%".format(label, max(mask, withoutMask) * 100)
cv2.putText(image, label, (startX, startY - 10),cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
cv2.rectangle(image, (startX, startY), (endX, endY), color, 2)
return (count, labels, image) |
the-stack_0_7203 | import sys
import numpy as np
def coadd_cameras(flux_cam, wave_cam, ivar_cam, mask_cam=None):
"""Adds spectra from the three cameras as long as they have the same number of wavelength bins.
    This is not a replacement for desispec.coaddition.coadd_cameras,
    but a simpler (more versatile and faster) implementation that uses only numpy.
    It also assumes the input spectral grids are already aligned
    (i.e. the same wavelength grid in the overlapping regions),
    which is likely the case if the spectra are from the official data releases.
Parameters
----------
flux_cam : dict
Dictionary containing the flux values from the three cameras
wave_cam : dict
Dictionary containing the wavelength values from the three cameras
ivar_cam : dict
Dictionary containing the inverse variance values from the three cameras
mask_cam : dict, optional
Dictionary containing the mask values from the three cameras
Returns
-------
Tuple
returns the combined flux, wavelength and inverse variance grids.
"""
sbands = np.array(["b", "r", "z"]) # bands sorted by inc. wavelength
# create wavelength array
wave = None
    tolerance = 0.0001  # wavelength matching tolerance, in Angstrom
shifts = {}
for b in sbands:
wave_camera = np.atleast_2d(wave_cam[b].copy())
if wave is None:
wave = wave_camera
else:
shifts[b] = np.sum(
np.all((wave + tolerance) < wave_camera[:, 0][:, None], axis=0)
)
wave = np.append(
wave,
wave_camera[
:, np.all(wave_camera > (wave[:, -1][:, None] + tolerance), axis=0)
],
axis=1,
)
nwave = wave.shape[1]
blue = sbands[0]
ntarget = len(flux_cam[blue])
flux = None
ivar = None
mask = None
for b in sbands:
flux_camera = np.atleast_2d(flux_cam[b].copy())
ivar_camera = np.atleast_2d(ivar_cam[b].copy())
ivar_camera[ivar_camera <= 0] = 0
if mask_cam is not None:
mask_camera = np.atleast_2d(mask_cam[b].astype(bool))
ivar_camera[mask_camera] = 0
if flux is None:
flux = np.zeros((ntarget, nwave), dtype=flux_cam[blue].dtype)
flux[:, : flux_camera.shape[1]] += flux_camera * ivar_camera
ivar = np.zeros((ntarget, nwave), dtype=flux_cam[blue].dtype)
ivar[:, : ivar_camera.shape[1]] += ivar_camera
if mask is not None:
mask = np.ones((ntarget, nwave), dtype=mask_cam[blue].dtype)
mask[:, : mask_camera.shape[1]] &= mask_camera
else:
flux[:, shifts[b] : (shifts[b] + flux_camera.shape[1])] += (
flux_camera * ivar_camera
)
ivar[:, shifts[b] : (shifts[b] + ivar_camera.shape[1])] += ivar_camera
if mask is not None:
mask[:, shifts[b] : (shifts[b] + mask_camera.shape[1])] &= mask_camera
flux = flux / ivar
flux[~np.isfinite(flux)] = 0
ivar[~np.isfinite(ivar)] = 0
if wave_cam[blue].ndim == 1:
wave = np.squeeze(wave)
if mask_cam is not None:
return flux, wave, ivar, mask
else:
return flux, wave, ivar |
the-stack_0_7204 | #!/usr/bin/env python3
"""Make rhyming words"""
import argparse
import re
import string
# --------------------------------------------------
def get_args():
"""get command-line arguments"""
parser = argparse.ArgumentParser(
description='Make rhyming "words"',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('word', metavar='str', help='A word to rhyme')
return parser.parse_args()
# --------------------------------------------------
def main():
"""Make a jazz noise here"""
args = get_args()
prefixes = list('bcdfghjklmnpqrstvwxyz') + (
'bl br ch cl cr dr fl fr gl gr pl pr sc '
'sh sk sl sm sn sp st sw th tr tw thw wh wr '
'sch scr shr sph spl spr squ str thr').split()
start, rest = stemmer(args.word)
if rest:
print('\n'.join(sorted([p + rest for p in prefixes if p != start])))
else:
print(f'Cannot rhyme "{args.word}"')
# --------------------------------------------------
def stemmer(word):
"""Return leading consonants (if any), and 'stem' of word"""
vowels = 'aeiou'
consonants = ''.join(
[c for c in string.ascii_lowercase if c not in vowels])
pattern = (
'([' + consonants + ']+)?' # capture one or more, optional
'(' # start capture
'[' + vowels + ']' # at least one vowel
'.*' # zero or more of anything else
')?') # end capture, optional group
match = re.match(pattern, word.lower())
return (match.group(1) or '', match.group(2) or '') if match else ('', '')
# --------------------------------------------------
def test_stemmer():
"""test the stemmer"""
assert stemmer('') == ('', '')
assert stemmer('cake') == ('c', 'ake')
assert stemmer('chair') == ('ch', 'air')
assert stemmer('APPLE') == ('', 'apple')
assert stemmer('RDNZL') == ('rdnzl', '')
# --------------------------------------------------
if __name__ == '__main__':
main()
|
the-stack_0_7205 | from sys import argv
from Bio import SeqIO, Seq, AlignIO
import pandas as pd
# user input:
aligned_fasta_path = argv[1]
outfile_path = argv[2]
regions_table_path = argv[3] # tables of regions of the genome, to determine translation reading frame in translation.
excel_mutations_table_path = argv[4] # TODO: pipeline - add as argument
def highlight_row(row):
"""
    Highlight the mutation cells in Excel, row by row.
    :param row: row whose cell colors should be returned
:return: colors list matching row indices.
"""
colors_list = [""] * 7 + ["background-color: silver"] * 2 # color of the fixed part of the table
    # (the fixed mutation-table part, first 9 columns)
mut = row["mut"]
ref = row["REF"]
for samp in row[9:]: # now color all other cells (each column belongs to a different sample)
if samp != ref: # highlight cell if it has a mutation.
if samp == mut: # highlight only if the mutation is matching the mutation in table
color = "background-color: yellow"
else: # sample == 'X' -> do not color.
color = ''
else:
color = ''
colors_list.append(color)
return colors_list
codon_map = {
"TTT": "F", "TTC": "F", "TTA": "L", "TTG": "L",
"TCT": "S", "TCC": "S", "TCA": "S", "TCG": "S",
"TAT": "Y", "TAC": "Y", "TAA": "*", "TAG": "*",
"TGT": "C", "TGC": "C", "TGA": "*", "TGG": "W",
"CTT": "L", "CTC": "L", "CTA": "L", "CTG": "L",
"CCT": "P", "CCC": "P", "CCA": "P", "CCG": "P",
"CAT": "H", "CAC": "H", "CAA": "Q", "CAG": "Q",
"CGT": "R", "CGC": "R", "CGA": "R", "CGG": "R",
"ATT": "I", "ATC": "I", "ATA": "I", "ATG": "M",
"ACT": "T", "ACC": "T", "ACA": "T", "ACG": "T",
"AAT": "N", "AAC": "N", "AAA": "K", "AAG": "K",
"AGT": "S", "AGC": "S", "AGA": "R", "AGG": "R",
"GTT": "V", "GTC": "V", "GTA": "V", "GTG": "V",
"GCT": "A", "GCC": "A", "GCA": "A", "GCG": "A",
"GAT": "D", "GAC": "D", "GAA": "E", "GAG": "E",
"GGT": "G", "GGC": "G", "GGA": "G", "GGG": "G"
}
def full_codon_gaps(sequence, start, end, gap='-'):
"""
avoid partial gaps in codon and convert to whole gaps codon
exmple: from A-- to ---
:param sequence: Seq object (Biopython)
:param start: start of reading frame
:param end: end of reading frame
:param gap: gap character. default: '-'
:return: new sequence, with full codons
"""
old_seq = str(sequence)
new_seq = old_seq[:start]
for i in range(start-1, end, 3):
codon = old_seq[i: i+3]
if '-' in codon:
codon = '---'
new_seq += codon
new_seq += old_seq[end:]
return Seq.Seq(new_seq)
def translate(sequence, start, end, codon_table):
"""
    translate a nucleotide sequence in the given region to an amino acid sequence, reading codons from start to end
    :param sequence: nucleotide sequence as str
:param start: position of first nucleotide
:param end: position of last nucleotide
:param codon_table: dictionary of codons as keys and AA name as values, for translation.
:return: translated sequence (aa seq)
"""
tranlsated = []
for i in range(start, end, 3):
codon = sequence[i:i+3]
if codon in codon_table:
aa = codon_table[codon] # get the codon's matching amino acid by codon table dictionary
else:
aa = 'X' # ignore frameshifts
tranlsated.append(aa)
return tranlsated
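# Worked example (added for clarity): translating a 9-nt toy region starting at
# 0-based position 0 (the calls below pass region_start-1 as `start`):
#   translate("ATGGCCTAA", 0, 9, codon_map)   # -> ['M', 'A', '*']
#   translate("ATG-CCTAA", 0, 9, codon_map)   # -> ['M', 'X', '*']  (gapped codon -> 'X')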
# 1. load sequences and tables
regionsTable = pd.read_csv(regions_table_path)
multifasta = SeqIO.to_dict(SeqIO.parse(aligned_fasta_path, 'fasta'))
multifasta.pop('NC_045512.2', None) # remove refseq sequence from alignment file if exists.
multifasta.pop('REF_NC_045512.2', None)
mutTable_excel = pd.read_excel(excel_mutations_table_path, sheet_name=None, engine='openpyxl')
for name in mutTable_excel:
mutTable_excel[name]['lineage'] = name # add a lineage column to all variant's tables
mutTable = pd.concat(mutTable_excel.values(), ignore_index=True)
# select only part of the columns:
mutTable = mutTable[['Position', 'Reference', 'Mutation', 'protein',
'variant', 'Mutation type', 'lineage', 'annotation']]
# compress identical mutations into one line and concat lineage names in the lineage column:
# mutTable = mutTable.groupby( # to create compressed table:
# ['Position', 'Reference', 'Mutation', 'protein', 'variant', 'Mutation type', 'annotation'], as_index=False).agg(
# {'lineage': ';'.join}
# )
# 2. keep only non-synonymous mutations
# compare in lower case so that variants such as SNP_Stop and SNP_stop are both caught
mutTable = mutTable[(mutTable['Mutation type'].str.lower() == 'snp') | (mutTable['Mutation type'].str.lower() == 'snp_stop')]
finalTable = mutTable
# 3. iterate over mutations and create final table.
for sample, record in multifasta.items():
# each sample found in fasta will create a column of final table next to mutations info.
seq = record.seq # 1 fasta sequence as string
sample_muts = [] # will aggregate translated value of each mutation to this list that will be added as column.
for mut in mutTable.iterrows():
pos = mut[1][0]
gene = mut[1][3]
aa = mut[1][4]
aa_number = int(aa[1:-1]) # strip letters of both sides (snp mutations example: Q57H) letter-number-letter
aa_from = aa[0] # original AA
aa_to = aa[-1] # mutated AA
# get start and end regions sequence location from regions table.
region_start = int(regionsTable[regionsTable.id == gene].start.values[0])
region_end = int(regionsTable[regionsTable.id == gene].end.values[0])
# translate region (with translate() function in the top of the page)
region_translated = translate(str(seq), region_start-1, region_end, codon_map)
alt = region_translated[aa_number-1] # get the specific aa by its number (in its name)
sample_muts.append(alt) # add to mutations list of the specific mutation to create a column to the final table
finalTable[sample] = sample_muts # add the column of the sample.
varcol = finalTable.apply(lambda row: row[8:].unique(), axis=1) # add a 'var' column to list unique values of row
finalTable.insert(6, 'var', varcol) # insert var column
finalTable = finalTable.sort_values(by=["lineage", "protein"], ascending=[True, False]) # sort by lineage and then gene
finalTable = finalTable.rename(columns={'Position': 'nuc pos', 'Mutation type': 'type', 'protein': 'gene',
'variant': 'name', 'Reference': 'REF', 'Mutation': 'mut'}) # rename columns (old: new)
sorted_cols = ['nuc pos', 'type', 'gene', 'var', 'name', 'lineage', 'annotation', 'REF', 'mut'] # re-order columns
finalTable = finalTable[sorted_cols + [col for col in finalTable.columns if col not in sorted_cols]] # re-order columns
# write to file
# add highlights with designated function in the top of the page
finalTable.style.apply(lambda row: highlight_row(row), axis=1).to_excel(outfile_path, index=False) # add highlights
|
the-stack_0_7206 | # -*- coding: utf-8 -*-
import pandas as pd
import os
file_path = os.path.dirname(os.path.realpath(__file__))
# File uploads - Extended Data Figure 5
other = pd.read_excel(file_path + "/../../data/other_category.xlsx")
# Plot colors
c = ['#725843', '#9f7f65', '#7c7b78', '#bbbbbb', '#90b493']
# Plotting
ax = other.plot(x='Year', kind='area', color = c, legend='reverse', xlim=(1900, 2014),ylim=(0, 22), xticks=[1910, 1930, 1950, 1970, 1990, 2010], yticks=[0, 4, 8, 12, 16, 20], lw=0)
ax.set_xticklabels([1910, 1930, 1950, 1970, 1990, 2010], rotation=0, fontsize=6)
ax.set_yticklabels([0, 4, 8, 12, 16, 20], rotation=0, fontsize=6)
ax.set_xlabel('year', fontsize=7)
ax.set_ylabel('dry weight (Gigatonnes)', fontsize=7)
handles, labels = ax.get_legend_handles_labels()
ax.legend(reversed(handles), reversed(labels), prop={'size': 6}, bbox_to_anchor=(0, 1.680/1.750), loc="upper left",frameon=False)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.figure.set_figheight(2.3)
ax.figure.set_figwidth(3.5)
file_out_name = file_path + '/../output/extended_data_figure5'
ax.figure.savefig(file_out_name+'.png', bbox_inches='tight', pad_inches = 0.05, dpi = 600)
ax.figure.savefig(file_out_name+'.eps', bbox_inches='tight', pad_inches = 0.05)
ax.figure.savefig(file_out_name+'.svg', bbox_inches='tight', pad_inches = 0.05) |
the-stack_0_7208 | import math
'''
isReceiving returns true if a transaction was a return
Integer transactionAmount
'''
def isReceiving(transactionAmount):
if transactionAmount == 0:
return None # should not happen
else:
return transactionAmount > 0
'''
isPaying returns true is a transaction was a payment
Integer transactionAmount
'''
def isPaying(transactionAmount):
if transactionAmount == 0:
return None # should not happen
return transactionAmount < 0
'''
getAbsoluteAmount returns the absolute value of a relative transaction amount
Integer transactionAmount
'''
def getAbsoluteAmount(transactionAmount):
return math.fabs(transactionAmount)
'''
checks if a String represents a Fractional or Integral
'''
def isNumber(s):
    if not s:
        return False  # empty strings are not numbers (also avoids an IndexError below)
    if s[0] == '.' or s[-1] == '.':
        return False
    foundFloatingPoint = False
    for digit in s:
        if not digit.isdigit():
            if digit == '.':
                if foundFloatingPoint:
                    return False
                foundFloatingPoint = True
            else:
                return False
    return True
'''
accepted characters: A-z (case-insensitive), 0-9 and underscores.
length: 5-32 characters.
'''
def isValidTelegramUsername(string):
length = len(string)
validLength = length >= 5 and length <= 32
if validLength:
for char in string:
if not(char.isalpha() or char.isdigit() or char == '_'):
return False
return True
else:
return False
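# Illustrative expectations for the two validators above (sketch, not an exhaustive test):
#   isNumber("42")     -> True      isNumber("3.14")   -> True
#   isNumber(".5")     -> False     isNumber("3.1.4")  -> False
#   isValidTelegramUsername("user_01") -> True
#   isValidTelegramUsername("ab")      -> False   (shorter than 5 characters)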
'''
tests
'''
def main():
    print(isPaying(-1), isPaying(1), isReceiving(-1), isReceiving(1), getAbsoluteAmount(-1), getAbsoluteAmount(1))
if __name__ == '__main__':
main()
|
the-stack_0_7209 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from h._compat import xrange
from unittest.mock import Mock
import datetime
import pytest
from h.activity import bucketing
from tests.common import factories
UTCNOW = datetime.datetime(year=1970, month=2, day=21, hour=19, minute=30)
FIVE_MINS_AGO = UTCNOW - datetime.timedelta(minutes=5)
YESTERDAY = UTCNOW - datetime.timedelta(days=1)
THIRD_MARCH_1968 = datetime.datetime(year=1968, month=3, day=3)
FIFTH_NOVEMBER_1969 = datetime.datetime(year=1969, month=11, day=5)
class timeframe_with: # noqa: N801
def __init__(self, label, document_buckets):
self.label = label
self.document_buckets = document_buckets
def __eq__(self, timeframe):
return (
self.label == timeframe.label
and self.document_buckets == timeframe.document_buckets
)
def __repr__(self):
return '{class_} "{label}" with {n} document buckets'.format(
class_=self.__class__, label=self.label, n=len(self.document_buckets)
)
@pytest.mark.usefixtures("factories")
class TestDocumentBucket:
def test_init_sets_the_document_title(self, db_session, document):
title_meta = factories.DocumentMeta(
type="title", value=["The Document Title"], document=document
)
document.title = "The Document Title"
db_session.add(title_meta)
db_session.flush()
bucket = bucketing.DocumentBucket(document)
assert bucket.title == "The Document Title"
def test_init_uses_the_document_web_uri(self, db_session, document):
document.web_uri = "http://example.com"
bucket = bucketing.DocumentBucket(document)
assert bucket.uri == "http://example.com"
def test_init_sets_None_uri_when_no_http_or_https_can_be_found(
self, db_session, document
):
document.web_uri = None
bucket = bucketing.DocumentBucket(document)
assert bucket.uri is None
def test_init_sets_the_domain_from_the_extracted_uri(self, db_session, document):
document.web_uri = "https://www.example.com/foobar.html"
bucket = bucketing.DocumentBucket(document)
assert bucket.domain == "www.example.com"
def test_init_sets_domain_to_local_file_when_no_uri_is_set(
self, db_session, document
):
docuri_pdf = factories.DocumentURI(
uri="urn:x-pdf:fingerprint", document=document
)
db_session.add(docuri_pdf)
db_session.flush()
bucket = bucketing.DocumentBucket(document)
assert bucket.domain == "Local file"
def test_annotations_count_returns_count_of_annotations(self, db_session, document):
bucket = bucketing.DocumentBucket(document)
for _ in xrange(7):
annotation = factories.Annotation()
bucket.append(annotation)
assert bucket.annotations_count == 7
def test_append_appends_the_annotation(self, document):
bucket = bucketing.DocumentBucket(document)
annotations = []
for _ in xrange(7):
annotation = factories.Annotation()
annotations.append(annotation)
bucket.append(annotation)
assert bucket.annotations == annotations
def test_append_adds_unique_annotation_tag_to_bucket(self, document):
ann_1 = factories.Annotation(tags=["foo", "bar"])
ann_2 = factories.Annotation(tags=["foo", "baz"])
bucket = bucketing.DocumentBucket(document)
bucket.append(ann_1)
bucket.append(ann_2)
assert bucket.tags == set(["foo", "bar", "baz"])
def test_append_adds_unique_annotation_user_to_bucket(self, document):
ann_1 = factories.Annotation(userid="luke")
ann_2 = factories.Annotation(userid="alice")
ann_3 = factories.Annotation(userid="luke")
bucket = bucketing.DocumentBucket(document)
bucket.append(ann_1)
bucket.append(ann_2)
bucket.append(ann_3)
assert bucket.users == set(["luke", "alice"])
def test_eq(self, document):
bucket_1 = bucketing.DocumentBucket(document)
bucket_2 = bucketing.DocumentBucket(document)
for _ in xrange(5):
annotation = factories.Annotation()
bucket_1.append(annotation)
bucket_2.append(annotation)
assert bucket_1 == bucket_2
def test_eq_annotations_mismatch(self, document):
bucket_1 = bucketing.DocumentBucket(document)
bucket_2 = bucketing.DocumentBucket(document)
bucket_1.annotations = [1, 2, 3]
bucket_2.annotations = [2, 3, 4]
assert not bucket_1 == bucket_2
def test_eq_tags_mismatch(self, document):
bucket_1 = bucketing.DocumentBucket(document)
bucket_2 = bucketing.DocumentBucket(document)
bucket_1.tags.update(["foo", "bar"])
bucket_2.tags.update(["foo", "baz"])
assert not bucket_1 == bucket_2
def test_eq_users_mismatch(self, document):
bucket_1 = bucketing.DocumentBucket(document)
bucket_2 = bucketing.DocumentBucket(document)
bucket_1.users.update(["alice", "luke"])
bucket_2.users.update(["luke", "paula"])
assert not bucket_1 == bucket_2
def test_eq_uri_mismatch(self, document):
bucket_1 = bucketing.DocumentBucket(document)
bucket_2 = bucketing.DocumentBucket(document)
bucket_1.uri = "http://example.com"
bucket_2.uri = "http://example.org"
assert not bucket_1 == bucket_2
def test_eq_domain_mismatch(self, document):
bucket_1 = bucketing.DocumentBucket(document)
bucket_2 = bucketing.DocumentBucket(document)
bucket_1.domain = "example.com"
bucket_2.domain = "example.org"
assert not bucket_1 == bucket_2
def test_eq_title_mismatch(self, document):
bucket_1 = bucketing.DocumentBucket(document)
bucket_2 = bucketing.DocumentBucket(document)
bucket_1.title = "First Title"
bucket_2.title = "Second Title"
assert not bucket_1 == bucket_2
def test_incontext_link_returns_link_to_first_annotation(self, document, patch):
incontext_link = patch("h.links.incontext_link")
bucket = bucketing.DocumentBucket(document)
ann = factories.Annotation()
bucket.append(ann)
request = Mock()
assert bucket.incontext_link(request) == incontext_link.return_value
def test_incontext_link_returns_none_if_bucket_empty(self, document, patch):
patch("h.links.incontext_link")
bucket = bucketing.DocumentBucket(document)
request = Mock()
assert bucket.incontext_link(request) is None
@pytest.fixture
def document(self, db_session):
document = factories.Document()
db_session.add(document)
db_session.flush()
return document
@pytest.mark.usefixtures("factories", "utcnow")
class TestBucket:
def test_no_annotations(self):
assert bucketing.bucket([]) == []
@pytest.mark.parametrize(
"annotation_datetime,timeframe_label",
[(FIVE_MINS_AGO, "Last 7 days"), (THIRD_MARCH_1968, "Mar 1968")],
)
def test_one_annotation(self, annotation_datetime, timeframe_label):
annotation = factories.Annotation(
document=factories.Document(), updated=annotation_datetime
)
timeframes = bucketing.bucket([annotation])
assert timeframes == [
timeframe_with(
timeframe_label,
{
annotation.document: bucketing.DocumentBucket(
annotation.document, [annotation]
)
},
)
]
@pytest.mark.parametrize(
"annotation_datetime,timeframe_label",
[(FIVE_MINS_AGO, "Last 7 days"), (THIRD_MARCH_1968, "Mar 1968")],
)
def test_multiple_annotations_of_one_document_in_one_timeframe(
self, annotation_datetime, timeframe_label
):
results = [
factories.Annotation(
target_uri="https://example.com", updated=annotation_datetime
)
for _ in range(3)
]
timeframes = bucketing.bucket(results)
document = results[0].document
assert timeframes == [
timeframe_with(
timeframe_label, {document: bucketing.DocumentBucket(document, results)}
)
]
@pytest.mark.parametrize(
"annotation_datetime,timeframe_label",
[(YESTERDAY, "Last 7 days"), (THIRD_MARCH_1968, "Mar 1968")],
)
def test_annotations_of_multiple_documents_in_one_timeframe(
self, annotation_datetime, timeframe_label
):
annotation_1 = factories.Annotation(
target_uri="http://example1.com", updated=annotation_datetime
)
annotation_2 = factories.Annotation(
target_uri="http://example2.com", updated=annotation_datetime
)
annotation_3 = factories.Annotation(
target_uri="http://example3.com", updated=annotation_datetime
)
timeframes = bucketing.bucket([annotation_1, annotation_2, annotation_3])
assert timeframes == [
timeframe_with(
timeframe_label,
{
annotation_1.document: bucketing.DocumentBucket(
annotation_1.document, [annotation_1]
),
annotation_2.document: bucketing.DocumentBucket(
annotation_2.document, [annotation_2]
),
annotation_3.document: bucketing.DocumentBucket(
annotation_3.document, [annotation_3]
),
},
)
]
def test_annotations_of_the_same_document_in_different_timeframes(self):
results = [
factories.Annotation(),
factories.Annotation(updated=FIFTH_NOVEMBER_1969),
factories.Annotation(updated=THIRD_MARCH_1968),
]
document = factories.Document()
for annotation in results:
annotation.document = document
timeframes = bucketing.bucket(results)
expected_bucket_1 = bucketing.DocumentBucket(document, [results[0]])
expected_bucket_2 = bucketing.DocumentBucket(document, [results[1]])
expected_bucket_3 = bucketing.DocumentBucket(document, [results[2]])
assert timeframes == [
timeframe_with("Last 7 days", {document: expected_bucket_1}),
timeframe_with("Nov 1969", {document: expected_bucket_2}),
timeframe_with("Mar 1968", {document: expected_bucket_3}),
]
def test_recent_and_older_annotations_together(self):
results = [
factories.Annotation(target_uri="http://example1.com"),
factories.Annotation(target_uri="http://example2.com"),
factories.Annotation(target_uri="http://example3.com"),
factories.Annotation(
target_uri="http://example4.com", updated=THIRD_MARCH_1968
),
factories.Annotation(
target_uri="http://example5.com", updated=THIRD_MARCH_1968
),
factories.Annotation(
target_uri="http://example6.com", updated=THIRD_MARCH_1968
),
]
timeframes = bucketing.bucket(results)
expected_bucket_1 = bucketing.DocumentBucket(results[0].document, [results[0]])
expected_bucket_2 = bucketing.DocumentBucket(results[1].document, [results[1]])
expected_bucket_3 = bucketing.DocumentBucket(results[2].document, [results[2]])
expected_bucket_4 = bucketing.DocumentBucket(results[3].document, [results[3]])
expected_bucket_5 = bucketing.DocumentBucket(results[4].document, [results[4]])
expected_bucket_6 = bucketing.DocumentBucket(results[5].document, [results[5]])
assert timeframes == [
timeframe_with(
"Last 7 days",
{
results[0].document: expected_bucket_1,
results[1].document: expected_bucket_2,
results[2].document: expected_bucket_3,
},
),
timeframe_with(
"Mar 1968",
{
results[3].document: expected_bucket_4,
results[4].document: expected_bucket_5,
results[5].document: expected_bucket_6,
},
),
]
def test_annotations_from_different_days_in_same_month(self):
"""
Test bucketing multiple annotations from different days of same month.
Annotations from different days of the same month should go into one
bucket.
"""
one_month_ago = UTCNOW - datetime.timedelta(days=30)
annotations = [
factories.Annotation(
target_uri="http://example.com", updated=one_month_ago
),
factories.Annotation(
target_uri="http://example.com",
updated=one_month_ago - datetime.timedelta(days=1),
),
factories.Annotation(
target_uri="http://example.com",
updated=one_month_ago - datetime.timedelta(days=2),
),
]
timeframes = bucketing.bucket(annotations)
expected_bucket = bucketing.DocumentBucket(annotations[0].document)
expected_bucket.update(annotations)
assert timeframes == [
timeframe_with("Jan 1970", {annotations[0].document: expected_bucket})
]
@pytest.fixture
def utcnow(self, patch):
utcnow = patch("h.activity.bucketing.utcnow")
utcnow.return_value = UTCNOW
return utcnow
|
the-stack_0_7210 | from django import forms
class SearchForm(forms.Form):
CHOICES = [
(u'ISBN', u'ISBN'),
(u'书名', u'书名'),
(u'作者', u'作者')
]
search_by = forms.ChoiceField(
label='',
choices=CHOICES,
widget=forms.RadioSelect(),
initial=u'书名',
)
keyword = forms.CharField(
label='',
max_length=32,
widget=forms.TextInput(attrs={
'class': 'form-control input-lg',
'placeholder': u'请输入需要检索的图书信息',
'name': 'keyword',
})
) |
the-stack_0_7212 | """
(C) IBM Corporation 2021
Description:
Creates new config files within the default config file dir.
 Uses both user input and an authentication file for auth information.
Repository:
https://github.com/IBM/spectrum-protect-sppmon
Author:
Niels Korschinsky
"""
import argparse
import json
import logging
import os
import re
import signal
import subprocess
import sys
from os.path import dirname, isfile, join, realpath
from typing import Any, Dict, List
from utils import Utils
LOGGER: logging.Logger
class ConfigFileSetup:
"""
Creates new config files within the default config file dir.
    Uses both user input and an authentication file for auth information.
Functions:
addSshClient - Asks for ssh-login information for a certain ssh-client type.
createServerDict - Asks SPP-REST-Server information from user and returns them.
createInfluxDict - Reads InfluxDB config from authfile or asks user.
main - See description above.
"""
@staticmethod
def addSshClient(ssh_type: str) -> List[Dict[str, Any]]:
"""Asks for ssh-login information for a certain ssh-client type.
Args:
ssh_type (str): Type of ssh-client.
Returns:
List[Dict[str, Any]]: List of added ssh-clients.
"""
ssh_clients: List[Dict[str, Any]] = []
Utils.printRow()
LOGGER.info(
f"> Collecting {ssh_type} ssh information")
# counter for naming like: vsnap-1 / vsnap-2
counter: int = 1
while(Utils.confirm(f"Do you want to add (another) {ssh_type}-client?")):
try:
ssh_client: Dict[str, Any] = {}
print(
"> Test the requested logins by logging into" +
f"the {ssh_type}-client via ssh yourself.")
ssh_client["name"] = Utils.prompt_string(
f"Please enter the name of the {ssh_type}-client (display only)",
f"{ssh_type}-{counter}")
                counter += 1  # reset on next ssh_type
ssh_client["srv_address"] = Utils.prompt_string(
f"Please enter the server address of the {ssh_type}-client")
ssh_client["srv_port"] = int(
Utils.prompt_string(
f"Please enter the port of the {ssh_type}-client",
"22",
filter=(lambda x: x.isdigit())))
ssh_client["username"] = Utils.prompt_string(
f"Please enter the {ssh_type}-client username (equal to login via ssh)")
ssh_client["password"] = Utils.prompt_string(
f"Please enter the {ssh_type}-client user password (equal to login via ssh)",
is_password=True)
ssh_client["type"] = ssh_type
# Saving config
ssh_clients.append(ssh_client)
Utils.printRow()
except ValueError as err:
LOGGER.error(err)
LOGGER.info(
"Aborted adding this ssh client. Continuing with next client")
return ssh_clients
@staticmethod
def createServerDict() -> Dict[str, Any]:
"""
        Asks the user for SPP REST-Server information and returns it.
        Returns:
            Dict[str, Any]: All information for SPP REST access
"""
spp_server: Dict[str, Any] = {}
spp_server["username"] = Utils.prompt_string(
"Please enter the SPP REST-API Username (equal to login via website)")
spp_server["password"] = Utils.prompt_string(
"Please enter the REST-API Users Password (equal to login via website)", is_password=True)
spp_server["srv_address"] = Utils.prompt_string(
"Please enter the SPP server address")
spp_server["srv_port"] = int(
Utils.prompt_string(
"Please enter the SPP server port",
"443",
filter=(lambda x: x.isdigit())))
spp_server["jobLog_retention"] = Utils.prompt_string(
"How long are the JobLogs saved within the Server? (Format: 48h, 60d, 2w)",
"60d",
filter=(lambda x: bool(re.match(r"^[0-9]+[hdw]$", x))))
return spp_server
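    # Illustrative shape of the dict returned above (values are placeholders; the real values
    # come from the interactive prompts):
    #   {"username": "sppadmin", "password": "***", "srv_address": "spp.example.com",
    #    "srv_port": 443, "jobLog_retention": "60d"}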
@staticmethod
def createInfluxDict(server_name: str) -> Dict[str, Any]:
"""
Reads InfluxDB config from authfile or asks user.
Args:
server_name (str): Name of SPP server to set influxDB-name
Returns:
            Dict[str, Any]: All information for InfluxDB access
"""
influxDB: Dict[str, Any] = {}
influxDB["username"] = Utils.readAuthOrInput(
"influxAdminName",
"Please enter the influxAdmin username",
"influxAdmin"
)
influxDB["password"] = Utils.readAuthOrInput(
"influxAdminPassword",
"Please enter the influxAdmin user password",
is_password=True
)
influxDB["ssl"] = bool(Utils.readAuthOrInput(
"sslEnabled",
"Please enter whether ssl is enabled (True/False)",
"True",
filter=(lambda x: bool(re.match(r"^(True)|(False)$", x)))
))
# Only check this if ssl is enabled
# Note: verify_ssl is the logical opposite of unsafeSsl
influxDB["verify_ssl"] = False if (not influxDB["ssl"]) else not bool(Utils.readAuthOrInput(
"unsafeSsl",
"Please enter whether the ssl certificate is selfsigned (True/False)",
filter=(lambda x: bool(re.match(r"^(True)|(False)$", x)))
))
influxDB["srv_address"] = Utils.readAuthOrInput(
"influxAddress",
"Please enter the influx server address"
)
influxDB["srv_port"] = int(Utils.readAuthOrInput(
"influxPort",
"Please enter the influx server port",
"8086",
filter=(lambda x: x.isdigit())
))
# Need to remove any illegal characters from the db name. For now, we will limit the characters
# to letters and numbers
dbName = ''.join(filter(str.isalnum, server_name))
LOGGER.info(
f"> Your influxDB database name for this server is \"{dbName}\"")
influxDB["dbName"] = dbName
return influxDB
def main(self, config_path: str, auth_file: str, auto_confirm: bool):
"""
Creates new config files within the default config file dir.
        Uses both user input and an authentication file for auth information.
        Args:
            config_path (str): Config file DIR
            auth_file (str): File with pairs of authentication data
auto_confirm (bool): Skip any confirm messages
"""
fileDirPath = dirname(sys.argv[0])
logPath = join(fileDirPath, "logs", "installLog.txt")
global LOGGER_NAME
LOGGER_NAME = 'configFileLogger'
global LOGGER
LOGGER = Utils.setupLogger(LOGGER_NAME, logPath)
Utils.printRow()
Utils.auto_confirm = auto_confirm
Utils.LOGGER = LOGGER
signal.signal(signal.SIGINT, Utils.signalHandler)
LOGGER.info("> Checking for sudo rights")
# Only works on Linux, therefore error here.
if os.name == 'posix':
if os.geteuid() == 0:
print("Already root")
else:
print("Root rights required to run script.")
subprocess.call(['sudo', 'python3', *sys.argv])
sys.exit()
LOGGER.info("> Generating new Config files")
# ### Config dir setup
config_path = realpath(config_path)
LOGGER.info(
f"> All new configurations files will be written into the directory:\n {config_path}")
# ### authFile setup
try:
if(not auth_file):
LOGGER.info("> No authentification file specifed")
Utils.setupAuthFile(None)
else: # take none if not exists, otherwise take auth path
Utils.setupAuthFile(auth_file)
except Exception as error:
LOGGER.info(f"> Setup of auth-file failed due error: {error}")
# ########## EXECUTION ################
LOGGER.info("> You may add multiple SPP-Server now.")
print("> Each server requires it's own config file")
try:
while(Utils.confirm("\nDo you want to to add a new SPP-Server now?")):
config_file_path: str = ""
server_name: str = ""
while(not config_file_path or not server_name):
# Servername for filename and config
server_name = Utils.prompt_string(
"What is the name of the SPP-Server? (Human Readable, no Spaces)",
filter=(lambda x: not " " in x))
# Replace spaces
config_file_path = join(
realpath(config_path), server_name + ".conf")
if(isfile(config_file_path)):
LOGGER.info(
f"> There is already a file at {config_file_path}.")
if(not Utils.confirm("Do you want to replace it?")):
LOGGER.info(
"> Please re-enter a different server name")
# remove content to allow loop to continue
config_file_path = ""
server_name = ""
else:
LOGGER.info("> Overwriting old config file")
os.system("touch " + config_file_path)
os.system("sudo chmod 600 " + config_file_path)
LOGGER.info(f"> Created config file under {config_file_path}")
# Overwrite existing file
with open(config_file_path, "w") as config_file:
LOGGER.info(
f"> Accessed config file under {config_file_path}")
# Structure of the config file
configs: Dict[str, Any] = {}
# #################### SERVER ###############################
Utils.printRow()
LOGGER.info("> collecting server information")
# Saving config
configs["sppServer"] = ConfigFileSetup.createServerDict()
LOGGER.info("> finished collecting server information")
# #################### influxDB ###############################
Utils.printRow()
LOGGER.info("> collecting influxDB information")
# Saving config
configs["influxDB"] = ConfigFileSetup.createInfluxDict(
server_name)
LOGGER.info("> finished collecting influxdb information")
# #################### ssh clients ###############################
Utils.printRow()
LOGGER.info("> collecting ssh client information")
ssh_clients: List[Dict[str, Any]] = []
print("")
print("> NOTE: You will now be asked for multiple ssh logins")
print(
"> You may test all these logins yourself by logging in via ssh")
print("> Following categories will be asked:")
# server excluded here
ssh_types: List[str] = [
"vsnap", "vadp", "cloudproxy", "other"]
LOGGER.info("> server, " + ", ".join(ssh_types))
print("> Please add all clients accordingly.")
print()
print(
"> If you misstyped anything you may edit the config file manually afterwards")
print(
"> NOTE: It is highly recommended to add at least one vSnap client")
if(not Utils.confirm("Do you want to continue now?")):
json.dump(configs, config_file, indent=4)
LOGGER.info(
f"> saved all information into file {config_file_path}")
LOGGER.info("> finished setup for this server.")
                    continue # Continuing to the next server config file loop
# #################### ssh clients: SERVER ###############################
Utils.printRow()
LOGGER.info("> Collecting SPP-Server ssh information")
ssh_server: Dict[str, Any] = {}
print(
"> Test the requested logins by logging into the SPP-Server via ssh yourself.")
ssh_server["name"] = server_name
spp_server_dict: Dict[str, Any] = configs["sppServer"]
ssh_server["srv_address"] = spp_server_dict["srv_address"]
ssh_server["srv_port"] = int(
Utils.prompt_string(
f"Please enter the SSH port of the SPP server",
"22",
filter=(lambda x: x.isdigit())))
ssh_server["username"] = Utils.prompt_string(
"Please enter the SPP-Server SSH username (equal to login via ssh)")
ssh_server["password"] = Utils.prompt_string(
"Please enter the SPP-Server SSH user password (equal to login via ssh)",
is_password=True)
ssh_server["type"] = "server"
# Saving config
ssh_clients.append(ssh_server)
# #################### ssh clients all other ###############################
for ssh_type in ssh_types:
try:
ssh_clients.extend(ConfigFileSetup.addSshClient(ssh_type))
except ValueError as err:
LOGGER.error(err)
LOGGER.info(
"Skipped this type of SSH-Client. Continuing with next type.")
# save all ssh-clients
configs["sshclients"] = ssh_clients
print("> Finished setting up SSH Clients")
# #################### SAVE & EXIT ###############################
LOGGER.info("> Writing into config file")
json.dump(configs, config_file, indent=4)
LOGGER.info(
f"> Configuraton saved into the file:\n{config_file_path}")
Utils.printRow()
                continue # Continuing to the next server config file loop
except ValueError as err:
LOGGER.error(err)
LOGGER.info("> Finished config file creation")
if __name__ == "__main__":
fileDirPath = dirname(sys.argv[0])
configPathDefault = realpath(join(fileDirPath, "..", "config_files"))
authPathDefault = realpath(join(fileDirPath, "delete_me_auth.txt"))
parser = argparse.ArgumentParser(
"Support agent to create new server configuration files for SPPMon")
parser.add_argument("--configPath", dest="config_path",
default=configPathDefault,
help=f"Path to folder containing the config files (default: `{configPathDefault}`)")
parser.add_argument("--authFile", dest="auth_file",
required=False,
default=authPathDefault,
help=f"Path to authentification file (default: `{authPathDefault}`)")
parser.add_argument("--autoConfirm", dest="auto_confirm",
action="store_true",
help="Autoconfirm most confirm prompts")
args = parser.parse_args()
ConfigFileSetup().main(args.config_path, args.auth_file, args.auto_confirm)
|
the-stack_0_7213 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2016, Anaconda, Inc. All rights reserved.
#
# Licensed under the terms of the BSD 3-Clause License.
# The full license is in the file LICENSE.txt, distributed with this software.
# -----------------------------------------------------------------------------
from __future__ import absolute_import
from copy import deepcopy
import os
import platform
import pytest
import subprocess
import sys
from anaconda_project.test.environ_utils import minimal_environ, strip_environ
from anaconda_project.test.project_utils import project_no_dedicated_env
from anaconda_project.internal.test.tmpfile_utils import (with_directory_contents,
with_directory_contents_completing_project_file)
from anaconda_project.internal import conda_api
from anaconda_project.prepare import (prepare_without_interaction, unprepare, prepare_in_stages, PrepareSuccess,
PrepareFailure, _after_stage_success, _FunctionPrepareStage)
from anaconda_project.project import Project
from anaconda_project.project_file import DEFAULT_PROJECT_FILENAME
from anaconda_project.project_commands import ProjectCommand
from anaconda_project.requirements_registry.requirement import UserConfigOverrides
from anaconda_project.conda_manager import (push_conda_manager_class, pop_conda_manager_class, CondaManager,
CondaEnvironmentDeviations, CondaLockSet)
@pytest.mark.slow
def test_prepare_empty_directory():
def prepare_empty(dirname):
project = Project(dirname)
environ = minimal_environ()
result = prepare_without_interaction(project, environ=environ)
assert result.errors == []
assert result
assert result.env_prefix is not None
assert dict(PROJECT_DIR=project.directory_path) == strip_environ(result.environ)
assert dict() == strip_environ(environ)
assert result.command_exec_info is None
with_directory_contents(dict(), prepare_empty)
def test_prepare_bad_provide_mode():
def prepare_bad_provide_mode(dirname):
with pytest.raises(ValueError) as excinfo:
project = project_no_dedicated_env(dirname)
environ = minimal_environ()
prepare_in_stages(project, mode="BAD_PROVIDE_MODE", environ=environ)
assert "invalid provide mode" in repr(excinfo.value)
with_directory_contents(dict(), prepare_bad_provide_mode)
@pytest.mark.slow
@pytest.mark.skipif(platform.system() == 'Windows' and
not (sys.version_info.major == 3 and sys.version_info.minor == 4),
reason="on Windows, can't delete env dir except on python 3.4, don't know why")
def test_unprepare_empty_directory():
def unprepare_empty(dirname):
project = Project(dirname)
environ = minimal_environ()
result = prepare_without_interaction(project, environ=environ)
assert result.errors == []
assert result
status = unprepare(project, result)
assert status.errors == []
assert status
with_directory_contents(dict(), unprepare_empty)
def test_unprepare_problem_project():
def unprepare_problems(dirname):
project = project_no_dedicated_env(dirname)
environ = minimal_environ()
result = prepare_without_interaction(project, environ=environ)
assert not result
assert result.env_prefix is None
status = unprepare(project, result)
assert not status
assert status.status_description == 'Unable to load the project.'
assert status.errors == [('%s: variables section contains wrong value type 42, ' +
'should be dict or list of requirements') % project.project_file.basename]
with_directory_contents_completing_project_file({DEFAULT_PROJECT_FILENAME: "variables:\n 42"}, unprepare_problems)
@pytest.mark.slow
def test_unprepare_nothing_to_do():
def unprepare_nothing(dirname):
project = Project(dirname)
environ = minimal_environ()
result = prepare_without_interaction(project, environ=environ)
assert result.errors == []
assert result
status = unprepare(project, result, whitelist=[])
assert status.errors == []
assert status
assert status.status_description == 'Nothing to clean up.'
with_directory_contents(dict(), unprepare_nothing)
def test_default_to_system_environ():
def prepare_system_environ(dirname):
project = project_no_dedicated_env(dirname)
os_environ_copy = deepcopy(os.environ)
result = prepare_without_interaction(project)
assert project.directory_path == strip_environ(result.environ)['PROJECT_DIR']
# os.environ wasn't modified
assert os_environ_copy == os.environ
# result.environ inherits everything in os.environ
for key in os_environ_copy:
if key == 'PATH' and platform.system() == 'Windows' and result.environ[key] != os.environ[key]:
print("prepare changed PATH on Windows and ideally it would not.")
else:
if key == 'PATH' and result.environ[key] != os.environ[key]:
original = os.environ[key].split(os.pathsep)
updated = result.environ[key].split(os.pathsep)
print("ORIGINAL PATH: " + repr(original))
print("UPDATED PATH: " + repr(updated))
assert original == updated
assert result.errors == []
assert result
assert result.environ.get(key) == os.environ.get(key)
with_directory_contents_completing_project_file(
{
DEFAULT_PROJECT_FILENAME: """
packages: []
"""
}, prepare_system_environ)
def test_prepare_some_env_var_already_set():
def prepare_some_env_var(dirname):
project = project_no_dedicated_env(dirname)
environ = minimal_environ(FOO='bar')
result = prepare_without_interaction(project, environ=environ)
assert result.errors == []
assert result
assert dict(FOO='bar', PROJECT_DIR=project.directory_path) == strip_environ(result.environ)
assert dict(FOO='bar') == strip_environ(environ)
with_directory_contents_completing_project_file(
{DEFAULT_PROJECT_FILENAME: """
variables:
FOO: {}
"""}, prepare_some_env_var)
def test_prepare_some_env_var_not_set():
def prepare_some_env_var(dirname):
project = project_no_dedicated_env(dirname)
environ = minimal_environ(BAR='bar')
result = prepare_without_interaction(project, environ=environ)
assert not result
assert result.env_prefix is not None
assert dict(BAR='bar') == strip_environ(environ)
with_directory_contents_completing_project_file(
{DEFAULT_PROJECT_FILENAME: """
variables:
FOO: {}
"""}, prepare_some_env_var)
def test_prepare_some_env_var_not_set_keep_going():
def prepare_some_env_var_keep_going(dirname):
project = project_no_dedicated_env(dirname)
environ = minimal_environ(BAR='bar')
stage = prepare_in_stages(project, environ=environ, keep_going_until_success=True)
assert "Set up project." == stage.description_of_action
assert ['FOO', 'CONDA_PREFIX'] == [status.requirement.env_var for status in stage.statuses_before_execute]
# there's an initial stage to set the conda env
next_stage = stage.execute()
assert ['FOO', 'CONDA_PREFIX'] == [status.requirement.env_var for status in stage.statuses_after_execute]
assert not stage.failed
assert stage.environ['PROJECT_DIR'] == dirname
assert "Set up project." == next_stage.description_of_action
assert ['FOO', 'CONDA_PREFIX'] == [status.requirement.env_var for status in next_stage.statuses_before_execute]
stage = next_stage
for i in range(1, 10):
next_stage = stage.execute()
assert next_stage is not None
assert stage.failed
assert stage.environ['PROJECT_DIR'] == dirname
stage = next_stage
assert dict(BAR='bar') == strip_environ(environ)
with_directory_contents_completing_project_file(
{DEFAULT_PROJECT_FILENAME: """
variables:
FOO: {}
"""}, prepare_some_env_var_keep_going)
def test_prepare_with_app_entry():
def prepare_with_app_entry(dirname):
project = project_no_dedicated_env(dirname)
environ = minimal_environ(FOO='bar')
env_path = conda_api.environ_get_prefix(environ)
result = prepare_without_interaction(project, environ=environ)
assert result
command = result.command_exec_info
assert 'FOO' in command.env
assert command.cwd == project.directory_path
if platform.system() == 'Windows':
commandpath = os.path.join(env_path, "python.exe")
else:
commandpath = os.path.join(env_path, "bin", "python")
assert command.args == [commandpath, 'echo.py', env_path, 'foo', 'bar']
p = command.popen(stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = p.communicate()
# strip is to pull off the platform-specific newline
assert out.decode().strip() == ("['echo.py', '%s', 'foo', 'bar']" % (env_path.replace("\\", "\\\\")))
assert err.decode() == ""
with_directory_contents_completing_project_file(
{DEFAULT_PROJECT_FILENAME: """
variables:
FOO: {}
commands:
default:
conda_app_entry: python echo.py ${PREFIX} foo bar
""",
"echo.py": """
from __future__ import print_function
import sys
print(repr(sys.argv))
"""}, prepare_with_app_entry)
def test_prepare_choose_command():
def check(dirname):
project = project_no_dedicated_env(dirname)
environ = minimal_environ()
result = prepare_without_interaction(project, environ=environ, command_name='foo')
assert result.errors == []
assert result
assert os.path.join(project.directory_path, 'foo.py') in result.command_exec_info.args
environ = minimal_environ()
result = prepare_without_interaction(project, environ=environ, command_name='bar')
assert result.errors == []
assert result
assert os.path.join(project.directory_path, 'bar.py') in result.command_exec_info.args
with_directory_contents_completing_project_file(
{DEFAULT_PROJECT_FILENAME: """
commands:
foo:
bokeh_app: foo.py
bar:
bokeh_app: bar.py
packages:
- bokeh
""",
"foo.py": "# foo",
"bar.py": "# bar"}, check)
def test_prepare_command_not_in_project():
def check(dirname):
# create a command that isn't in the Project
project = project_no_dedicated_env(dirname)
command = ProjectCommand(name="foo",
attributes=dict(bokeh_app="foo.py",
env_spec=project.default_env_spec_name))
environ = minimal_environ()
result = prepare_without_interaction(project, environ=environ, command=command)
assert result.errors == []
assert result
assert os.path.join(project.directory_path, 'foo.py') in result.command_exec_info.args
with_directory_contents_completing_project_file(
{DEFAULT_PROJECT_FILENAME: """
commands:
decoy:
description: "do not use me"
unix: foobar
windows: foobar
""",
"foo.py": "# foo"}, check)
def test_prepare_bad_command_name():
def check(dirname):
project = project_no_dedicated_env(dirname)
environ = minimal_environ(BAR='bar')
result = prepare_without_interaction(project, environ=environ, command_name="blah")
assert not result
assert result.env_prefix is None
assert result.errors
assert "Command name 'blah' is not in" in result.errors[0]
with_directory_contents_completing_project_file({DEFAULT_PROJECT_FILENAME: """
"""}, check)
def _push_fake_env_creator():
class HappyCondaManager(CondaManager):
def __init__(self, frontend):
pass
def resolve_dependencies(self, package_specs, channels, platforms):
return CondaLockSet({})
def find_environment_deviations(self, prefix, spec):
return CondaEnvironmentDeviations(summary="all good",
missing_packages=(),
wrong_version_packages=(),
missing_pip_packages=(),
wrong_version_pip_packages=())
def fix_environment_deviations(self, prefix, spec, deviations=None, create=True):
pass
def remove_packages(self, prefix, packages):
pass
push_conda_manager_class(HappyCondaManager)
def _pop_fake_env_creator():
pop_conda_manager_class()
def test_prepare_choose_environment():
def check(dirname):
env_var = conda_api.conda_prefix_variable()
try:
_push_fake_env_creator()
project = Project(dirname)
environ = minimal_environ()
result = prepare_without_interaction(project, environ=environ, env_spec_name='foo')
expected_path = project.env_specs['foo'].path(project.directory_path)
assert result.environ[env_var] == expected_path
environ = minimal_environ()
result = prepare_without_interaction(project, environ=environ, env_spec_name='bar')
assert result.errors == []
assert result
expected_path = project.env_specs['bar'].path(project.directory_path)
assert result.environ[env_var] == expected_path
finally:
_pop_fake_env_creator()
with_directory_contents(
{DEFAULT_PROJECT_FILENAME: """
name: blah
platforms: [linux-32,linux-64,osx-64,win-32,win-64]
env_specs:
foo: {}
bar: {}
"""}, check)
def test_prepare_use_command_specified_env_spec():
def check(dirname):
env_var = conda_api.conda_prefix_variable()
try:
_push_fake_env_creator()
project = Project(dirname)
environ = minimal_environ()
# we specify the command name but not the
# env_spec_name but it should imply the proper env
# spec name.
result = prepare_without_interaction(project, environ=environ, command_name='hello')
expected_path = project.env_specs['foo'].path(project.directory_path)
assert result.environ[env_var] == expected_path
finally:
_pop_fake_env_creator()
with_directory_contents(
{DEFAULT_PROJECT_FILENAME: """
name: blah
platforms: [linux-32,linux-64,osx-64,win-32,win-64]
env_specs:
default: {}
foo: {}
bar: {}
commands:
hello:
env_spec: foo
unix: echo hello
windows: echo hello
"""}, check)
def test_update_environ():
def prepare_then_update_environ(dirname):
project = project_no_dedicated_env(dirname)
environ = minimal_environ(FOO='bar')
result = prepare_without_interaction(project, environ=environ)
assert result.errors == []
assert result
other = minimal_environ(BAR='baz')
result.update_environ(other)
assert dict(FOO='bar', BAR='baz', PROJECT_DIR=dirname) == strip_environ(other)
with_directory_contents_completing_project_file(
{DEFAULT_PROJECT_FILENAME: """
variables:
FOO: {}
"""}, prepare_then_update_environ)
def test_attempt_to_grab_result_early():
def early_result_grab(dirname):
project = project_no_dedicated_env(dirname)
first_stage = prepare_in_stages(project)
with pytest.raises(RuntimeError) as excinfo:
first_stage.result
assert "result property isn't available" in repr(excinfo.value)
with_directory_contents(dict(), early_result_grab)
def test_attempt_to_grab_statuses_early():
def early_status_grab(dirname):
project = project_no_dedicated_env(dirname)
first_stage = prepare_in_stages(project)
with pytest.raises(RuntimeError) as excinfo:
first_stage.statuses_after_execute
assert "statuses_after_execute isn't available" in repr(excinfo.value)
with_directory_contents(dict(), early_status_grab)
def test_skip_after_success_function_when_second_stage_fails():
state = {'state': 'start'}
def do_first(stage):
assert state['state'] == 'start'
state['state'] = 'first'
stage.set_result(
PrepareSuccess(statuses=(),
command_exec_info=None,
environ=dict(),
overrides=UserConfigOverrides(),
env_spec_name='first'),
[])
def last(stage):
assert state['state'] == 'first'
state['state'] = 'second'
stage.set_result(
PrepareFailure(statuses=(),
errors=[],
environ=dict(),
overrides=UserConfigOverrides(),
env_spec_name='last'),
[])
return None
return _FunctionPrepareStage(dict(), UserConfigOverrides(), "second", [], last)
first_stage = _FunctionPrepareStage(dict(), UserConfigOverrides(), "first", [], do_first)
def after(updated_statuses):
raise RuntimeError("should not have been called")
stage = _after_stage_success(first_stage, after)
assert stage.overrides is first_stage.overrides
assert isinstance(stage.environ, dict)
while stage is not None:
next_stage = stage.execute()
result = stage.result
if result.failed:
assert stage.failed
break
else:
assert not stage.failed
stage = next_stage
assert result.failed
assert state['state'] == 'second'
def test_run_after_success_function_when_second_stage_succeeds():
state = {'state': 'start'}
def do_first(stage):
assert state['state'] == 'start'
state['state'] = 'first'
stage.set_result(
PrepareSuccess(statuses=(),
command_exec_info=None,
environ=dict(),
overrides=UserConfigOverrides(),
env_spec_name='foo'),
[])
def last(stage):
assert state['state'] == 'first'
state['state'] = 'second'
stage.set_result(
PrepareSuccess(statuses=(),
command_exec_info=None,
environ=dict(),
overrides=UserConfigOverrides(),
env_spec_name='bar'),
[])
return None
return _FunctionPrepareStage(dict(), UserConfigOverrides(), "second", [], last)
first_stage = _FunctionPrepareStage(dict(), UserConfigOverrides(), "first", [], do_first)
def after(updated_statuses):
assert state['state'] == 'second'
state['state'] = 'after'
stage = _after_stage_success(first_stage, after)
assert stage.overrides is first_stage.overrides
assert stage.description_of_action == first_stage.description_of_action
assert stage.environ == first_stage.environ
assert stage.statuses_before_execute is first_stage.statuses_before_execute
stage.configure() # checking it doesn't raise
while stage is not None:
next_stage = stage.execute()
if hasattr(stage, '_stage'):
assert stage.statuses_after_execute is stage._stage.statuses_after_execute
assert stage.failed is stage._stage.failed
result = stage.result
if result.failed:
assert stage.failed
break
else:
assert not stage.failed
stage = next_stage
assert not result.failed
assert state['state'] == 'after'
def _monkeypatch_download_file(monkeypatch, dirname, filename='MYDATA', checksum=None):
from tornado import gen
@gen.coroutine
def mock_downloader_run(self, loop):
class Res:
pass
res = Res()
res.code = 200
with open(os.path.join(dirname, filename), 'w') as out:
out.write('data')
if checksum:
self._hash = checksum
raise gen.Return(res)
monkeypatch.setattr("anaconda_project.internal.http_client.FileDownloader.run", mock_downloader_run)
def test_provide_whitelist(monkeypatch):
def check(dirname):
from anaconda_project.requirements_registry.requirements.conda_env import CondaEnvRequirement
_monkeypatch_download_file(monkeypatch, dirname, filename="nope")
no_foo = [('missing requirement to run this project: A downloaded file which is ' + 'referenced by FOO.'),
' Environment variable FOO is not set.']
# whitelist only the env req by class
project = project_no_dedicated_env(dirname)
environ = minimal_environ()
result = prepare_without_interaction(project, provide_whitelist=(CondaEnvRequirement, ), environ=environ)
assert result.errors == no_foo
# whitelist by instance
env_req = [req for req in project.requirements(None) if isinstance(req, CondaEnvRequirement)][0]
result = prepare_without_interaction(project, provide_whitelist=(env_req, ), environ=environ)
assert result.errors == no_foo
# whitelist by variable name
result = prepare_without_interaction(project, provide_whitelist=(env_req.env_var, ), environ=environ)
assert result.errors == no_foo
# whitelist the download
result = prepare_without_interaction(project,
provide_whitelist=(env_req, project.download_requirements(None)[0]),
environ=environ)
assert result.errors == []
assert 'FOO' in result.environ
with_directory_contents_completing_project_file(
{DEFAULT_PROJECT_FILENAME: """
downloads:
FOO: "http://example.com/nope"
"""}, check)
|
the-stack_0_7214 | # clone
import os
import repo
# フォルダを削除
def rmtree(top):
for root, dirs, files in os.walk(top, topdown=False):
for name in files:
filename = os.path.join(root, name)
os.chmod(filename, 0o777)
os.remove(filename)
for name in dirs:
os.rmdir(os.path.join(root, name))
os.rmdir(top)
def run(first_flag):
root_path = "project" # レポジトリのパス
print("解析するレポジトリのURLを入力してください")
url = input(">>> ")
# 最初だけ確認
if os.path.isdir(root_path):
if first_flag == 0:
rmtree(root_path) # projectフォルダが存在していれば削除
clone_flag = True
while clone_flag:
try:
# "://"でURLか判定
if url.find("://"):
file_name = url[url.rfind('/') + 1:] # 一番後ろのファイル名を抽出
path = root_path + "/" + file_name # project/..
# 既にクローン済みの場合、pathの最後に(i)をつける
i = 1
while os.path.isdir(path):
path = root_path + "/" + file_name + "({:d})".format(i)
i += 1
repo.clone_repo(url, path) # レポジトリをクローン
print("レポジトリをクローンしました\n")
clone_flag = False
except Exception as err:
print("レポジトリをクローン出来ませんでした\nもう一度、入力してください")
url = input(">>> ") |
the-stack_0_7215 | from django.contrib import admin
from blogs.models import Post, Category_post, Comment
from django_summernote.admin import SummernoteModelAdmin
# Register your models here.
class PostAdmin(SummernoteModelAdmin):
summernote_fields = ('content',)
    list_display = ('title', 'slug', 'short_desciption', 'status', 'created_on')
list_filter = ("status",)
search_fields = ['title', 'content']
prepopulated_fields = {'slug': ('title',)}
class CommentAdmin(admin.ModelAdmin):
list_display = ('name', 'email', 'body')
admin.site.register(Post, PostAdmin)
admin.site.register(Comment, CommentAdmin)
admin.site.register(Category_post)
|
the-stack_0_7216 | from scipy.spatial import ConvexHull, Delaunay
import numpy as np
class WeightedDelaunay:
def __init__(self, points, weights):
self.points = points
self.weights = weights
self.complete = False
self.tri = None
def triangulation(self):
if not self.complete:
num, dim = np.shape(self.points)
lifted = np.zeros((num, dim + 1))
for i in range(num):
p = self.points[i, :]
lifted[i, :] = np.append(p, np.sum(p ** 2) - self.weights[i] ** 2)
lifted = np.vstack((lifted, np.append(np.zeros((1, dim)), 1e12)))
hull = ConvexHull(lifted)
delaunay = []
for simplex in hull.simplices:
if num not in simplex:
delaunay.append(simplex.tolist())
self.tri = delaunay
self.complete = True
return self.tri
def add_point(self, point, weight):
num, dim = np.shape(self.points)
tmp = np.ndarray((num + 1, dim))
for i in range(num):
tmp[i] = self.points[i]
tmp[num] = point
self.points = tmp
self.weights.append(weight)
self.complete = False
|
the-stack_0_7218 | # coding=utf-8
import tensorflow as tf
import tensorflow_compression as tfc
import os
import sys
import math
import numpy as np
# tf.enable_eager_execution()
from collections import namedtuple
# BASE_DIR = os.path.dirname(os.path.abspath(__file__))
BASE_DIR = '/home/wenxuanzheng/pc_compression/pc_compression'
sys.path.append(os.path.join(BASE_DIR, 'utils'))
import part_dataset
from transform_nets import input_transform_net
import tf_utils
def py_func_decorator(output_types=None, output_shapes=None, stateful=True, name=None):
def decorator(func):
def call(*args, **kwargs):
return tf.contrib.framework.py_func(
func=func,
args=args, kwargs=kwargs,
output_types=output_types, output_shapes=output_shapes,
stateful=stateful, name=name
)
return call
return decorator
def from_indexable(iterator, output_types, output_shapes=None, num_parallel_calls=None, stateful=True, name=None):
ds = tf.data.Dataset.range(len(iterator))
@py_func_decorator(output_types, output_shapes, stateful=stateful, name=name)
def index_to_entry(index):
return iterator[index]
return ds.map(index_to_entry, num_parallel_calls=num_parallel_calls)
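# Illustrative usage (sketch): any object with __len__ and __getitem__ that returns one
# (2048, 3) float32 point cloud per index can be wrapped into a tf.data pipeline, which is
# exactly what input_fn below does:
#   ds = from_indexable(dataset, output_types=tf.float32, output_shapes=[2048, 3],
#                       num_parallel_calls=8)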
def get_learning_rate(batch):
learning_rate = tf.train.exponential_decay(
0.0001,
                        batch , # global_step: current iteration count
10000,
0.7,
                        staircase = True) # global_step / decay_steps is always truncated to an integer
learning_rate = tf.maximum(learning_rate, 0.00001) # CLIP THE LEARNING RATE!
return learning_rate
def get_bn_decay(batch):
bn_momentum = tf.train.exponential_decay(
0.5,
batch,
20000,
0.5,
staircase=True)
bn_decay = tf.minimum(0.99, 1 - bn_momentum)
return bn_decay
def input_fn(features, batch_size, preprocess_threads, repeat=True, prefetch_size=1):
with tf.device('/cpu:0'):
        # needs an indexable object
# dataset = tf.data.Dataset.from_tensor_slices(features)
dataset = from_indexable(features, output_types=tf.float32,output_shapes=[2048, 3], num_parallel_calls=preprocess_threads)
if repeat:
dataset = dataset.shuffle(buffer_size=len(features))
dataset = dataset.repeat()
dataset = dataset.batch(batch_size, drop_remainder=True)
        # pipeline: prefetch the next batches while the current ones are consumed
dataset = dataset.prefetch(buffer_size=prefetch_size)
return dataset
def model_fn(features, labels, mode, params):
'''
:param features: batch_features from input_fn
:param labels: batch_labels from input_fn
:param mode: An instance of tf.estimator.ModeKeys
:param params: Additional configuration
:return:
'''
#del para
del labels
if params.get('decompress') is None:
params['decompress'] = False
# if params.decompression:
# assert mode == tf.estimator.ModeKeys.PREDICT, 'Decompression must use prediction mode'
params = namedtuple('Struct', params.keys())(*params.values())
training = (mode == tf.estimator.ModeKeys.TRAIN)
num_points = (params.batch_size * params.num_points)
pc = features
bn_decay = get_bn_decay(tf.train.get_global_step())
learning_rate = get_learning_rate(tf.train.get_global_step())
tf.summary.scalar('bn_decay', bn_decay)
tf.summary.scalar('learning_rate', learning_rate)
# ============= encoder =============
    nsamples = params.knn
    y = pc_encoder(pc, nsamples, is_training=training, bn_decay=bn_decay)
entropy_bottleneck = tfc.EntropyBottleneck()
y_tilde, likelihoods = entropy_bottleneck(y, training=True)
# ============= decoder =============
x_tilde = pc_decoder(y_tilde, is_training=training, bn_decay=bn_decay)
# number of bits divided by number of points
train_bpp = tf.reduce_sum(tf.log(likelihoods)) / (-np.log(2) * num_points)
    # prediction mode: return the results directly
if mode == tf.estimator.ModeKeys.PREDICT:
string = entropy_bottleneck.compress(y)
predictions = {
'x_tilde': x_tilde,
'likelihoods': likelihoods,
'y_tilde': y_tilde
}
        return tf.estimator.EstimatorSpec(mode, predictions=predictions) # build the predictions dict
    # training and evaluation
losses = tf_utils.get_loss(x_tilde, pc)
rd_loss = losses + params.lmbda * train_bpp
# tf.summary.scalar('likelihoods',likelihoods)
tf.summary.scalar('loss', losses)
tf.summary.scalar('rd_loss', rd_loss)
tf.summary.scalar('bpp', train_bpp)
if mode == tf.estimator.ModeKeys.TRAIN:
main_optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
main_step = main_optimizer.minimize(rd_loss, global_step=tf.train.get_global_step())
aux_optimizer = tf.train.AdamOptimizer(learning_rate=1e-3)
aux_step = aux_optimizer.minimize(entropy_bottleneck.losses[0])
train_op = tf.group(main_step, aux_step, entropy_bottleneck.updates[0])
return tf.estimator.EstimatorSpec(mode, loss=rd_loss, train_op=train_op)
if mode == tf.estimator.ModeKeys.EVAL:
summary_hook = tf.train.SummarySaverHook(
save_steps=5,
output_dir=os.path.join(params.checkpoint_dir, 'eval'),
summary_op=tf.summary.merge_all())
return tf.estimator.EstimatorSpec(mode, loss=rd_loss, evaluation_hooks=[summary_hook])
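# Illustrative wiring (sketch, not part of this module): model_fn and input_fn are written for
# tf.estimator. The params keys mirror what model_fn reads above (lmbda, knn, batch_size,
# num_points, checkpoint_dir); the concrete values below are placeholders:
#   est = tf.estimator.Estimator(model_fn=model_fn, model_dir='ckpts',
#                                params={'lmbda': 0.01, 'knn': 12, 'batch_size': 8,
#                                        'num_points': 2048, 'checkpoint_dir': 'ckpts'})
#   est.train(lambda: input_fn(train_dataset, batch_size=8, preprocess_threads=8), steps=1000)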
def pc_encoder(point_cloud, nsamples, is_training, bn_decay=None):
    nn_dis, idx_batch = tf_utils.get_knn(point_cloud, nsamples)
batch_size = point_cloud.get_shape()[0].value
num_point = point_cloud.get_shape()[1].value
point_dim = point_cloud.get_shape()[2].value
idx_batch = tf.cast(idx_batch, dtype=tf.int32)
latent_feature = {}
# con_point = tf.concat([point_cloud, cov_batch], axis=2)
# encoder_input = tf.expand_dims(con_point, -1) # (32 2048 3 1)
encoder_input = tf.expand_dims(point_cloud, -1) # (32 2048 3 1)
# (32, 2048, 1, 64)
net = tf_utils.conv2d(encoder_input, 64, [1, 3],
padding='VALID', stride=[1, 1],
bn=True, is_training=is_training,
scope='mlp_1', bn_decay=bn_decay)
# (32, 2048, 1, 64)
net = tf_utils.conv2d(net, 64, [1, 1],
padding='VALID', stride=[1, 1],
bn=True, is_training=is_training,
scope='mlp_2', bn_decay=bn_decay)
# (32, 2048, 1, 64)
net = tf_utils.conv2d(net, 64, [1, 1],
padding='VALID', stride=[1, 1],
bn=True, is_training=is_training,
scope='mlp_3', bn_decay=bn_decay)
net = tf_utils.conv2d(net, 128, [1, 1],
padding='VALID', stride=[1, 1],
bn=True, is_training=is_training,
scope='mlp_4', bn_decay=bn_decay)
net = tf_utils.conv2d(net, 1024, [1, 1],
padding='VALID', stride=[1, 1],
bn=True, is_training=is_training,
scope='mlp_5', bn_decay=bn_decay)
global_feat = tf_utils.max_pool2d(net, [num_point, 1],
padding='VALID', scope='maxpool')
net = tf.reshape(global_feat, [batch_size, -1])
return net
def pc_decoder(y_tilde, is_training, bn_decay):
# UPCONV Decoder
batch_size = y_tilde.get_shape()[0].value
net = tf_utils.fully_connected(y_tilde, 1024, bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
net = tf_utils.fully_connected(net, 1024, bn=True, is_training=is_training, scope='fc2', bn_decay=bn_decay)
net = tf_utils.fully_connected(net, 2048 * 3, activation_fn=None, scope='fc3')
net = tf.reshape(net, (batch_size, 2048, 3))
return net
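# Shape note (illustrative): pc_decoder maps a (batch_size, latent_dim) code back to a
# (batch_size, 2048, 3) point cloud, matching the 2048-point clouds produced by input_fn, e.g.:
#   rec = pc_decoder(tf.zeros((2, 1024)), is_training=False, bn_decay=None)  # -> (2, 2048, 3)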
if __name__=='__main__':
tf.enable_eager_execution()
TRAIN_DATASET = part_dataset.PartDataset(
root='/data/dataset/shapenetcore_partanno_segmentation_benchmark_v0', npoints=2048,
classification=False, class_choice=None, split='trainval')
print('=============')
print(input_fn(TRAIN_DATASET,2,8,repeat=True,prefetch_size=6))
|
the-stack_0_7221 | # -*- coding: utf-8 -*-
#
# Dataverse Documentation build configuration file, created by
# sphinx-quickstart on Wed Apr 16 09:34:18 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
from datetime import datetime
sys.path.insert(0, os.path.abspath('../../'))
import sphinx_bootstrap_theme
# Activate the theme.
# html_theme = 'bootstrap'
# html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.graphviz'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Dataverse'
copyright = u'%d, The President & Fellows of Harvard College' % datetime.now().year
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '5.6'
# The full version, including alpha/beta/rc tags.
release = '5.6'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bootstrap'
# Custom Setup: add the CSS file to the app's theme.
# def setup(app):
# app.add_stylesheet( "docsdataverse_org.css" )
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
html_theme_options = {
# Navigation bar title. (Default: ``project`` value)
# 'navbar_title': "Demo",
# Tab name for entire site. (Default: "Site")
# 'navbar_site_name': "Site",
# A list of tuples containing pages or urls to link to.
# Valid tuples should be in the following forms:
# (name, page) # a link to a page
# (name, "/aa/bb", 1) # a link to an arbitrary relative url
# (name, "http://example.com", True) # arbitrary absolute url
# Note the "1" or "True" value above as the third argument to indicate
# an arbitrary url.
# 'navbar_links': [
# ("Examples", "examples"),
# ("Link", "http://example.com", True),
# ],
'navbar_links': [
("View 3.6.2 Guides", "http://docs.dataverse.org/en/3.6.2/", True),
],
# Global TOC depth for "site" navbar tab. (Default: 1)
# Switching to -1 shows all levels.
'globaltoc_depth': -1,
# Include hidden TOCs in Site navbar?
#
# Note: If this is "false", you cannot have mixed ``:hidden:`` and
# non-hidden ``toctree`` directives in the same page, or else the build
# will break.
#
# Values: "true" (default) or "false"
'globaltoc_includehidden': "false",
# HTML navbar class (Default: "navbar") to attach to <div> element.
# For black navbar, do "navbar navbar-inverse"
'navbar_class': "navbar",
# Fix navigation bar to top of page?
# Values: "true" (default) or "false"
#'navbar_fixed_top': "true",
# Location of link to source.
# Options are "nav" (default), "footer" or anything else to exclude.
#'source_link_position': "nav",
# Bootswatch (http://bootswatch.com/) theme.
#
# Options are nothing with "" (default) or the name of a valid theme such
# as "amelia" or "cosmo".
#
# Themes:
# * amelia
# * cerulean
# * cosmo
# * cyborg
# * cupid (v3 only)
# * flatly
# * journal
# * lumen (v3 only)
# * readable
# * simplex
# * slate
# * spacelab
# * spruce (v2 only)
# * superhero
# * united
# * yeti (v3 only)
#'bootswatch_theme': "cupid",
# Choose Bootstrap version.
# Values: "3" (default) or "2" (in quotes)
'bootstrap_version': "3",
}
# Add any paths that contain custom themes here, relative to this directory.
# ``get_html_theme_path`` returns a list, so you can concatenate with
# any other theme directories you would like.
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> Documentation".
html_title = 'Dataverse.org'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static', '_static/fontcustom-preview.html']
#html_js_files = []
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
#html_sidebars = {'sidebar': ['localtoc.html', 'sourcelink.html', 'searchbox.html']}
html_sidebars = {'**': ['searchbox.html', 'sidebartoc.html']}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
htmlhelp_basename = 'Dataversedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Dataverse.tex', u'Dataverse Documentation',
u'Dataverse Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
latex_logo = "_static/logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'dataverse40', u'Dataverse 4.0 Documentation',
[u'Dataverse Team'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Dataverse40', u'Dataverse 4.0 Documentation',
u'Dataverse Team', 'Dataverse40', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'Dataverse'
epub_author = u'Dataverse Team'
epub_publisher = u'Dataverse Team'
epub_copyright = u'%d, The President & Fellows of Harvard College' % datetime.now().year
# The basename for the epub file. It defaults to the project name.
#epub_basename = u'Consilience Documentation'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
# Suppress "WARNING: unknown mimetype for ..." https://github.com/IQSS/dataverse/issues/3391
suppress_warnings = ['epub.unknown_project_files']
rst_prolog = """
.. |toctitle| replace:: Contents:
.. |anotherSub| replace:: Yes, there can be multiple.
"""
|
the-stack_0_7222 | #!/usr/bin/env python
#
# Copyright 2014 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, with_statement
from contextlib import closing
import os
import socket
from tornado.concurrent import Future
from tornado.netutil import bind_sockets, Resolver
from tornado.tcpclient import TCPClient, _Connector
from tornado.tcpserver import TCPServer
from tornado.testing import AsyncTestCase, gen_test
from tornado.test.util import skipIfNoIPv6, unittest, refusing_port
# Fake address families for testing. Used in place of AF_INET
# and AF_INET6 because some installations do not have AF_INET6.
AF1, AF2 = 1, 2
class TestTCPServer(TCPServer):
def __init__(self, family):
super(TestTCPServer, self).__init__()
self.streams = []
sockets = bind_sockets(None, 'localhost', family)
self.add_sockets(sockets)
self.port = sockets[0].getsockname()[1]
def handle_stream(self, stream, address):
self.streams.append(stream)
def stop(self):
super(TestTCPServer, self).stop()
for stream in self.streams:
stream.close()
class TCPClientTest(AsyncTestCase):
def setUp(self):
super(TCPClientTest, self).setUp()
self.server = None
self.client = TCPClient()
def start_server(self, family):
if family == socket.AF_UNSPEC and 'TRAVIS' in os.environ:
self.skipTest("dual-stack servers often have port conflicts on travis")
self.server = TestTCPServer(family)
return self.server.port
def stop_server(self):
if self.server is not None:
self.server.stop()
self.server = None
def tearDown(self):
self.client.close()
self.stop_server()
super(TCPClientTest, self).tearDown()
def skipIfLocalhostV4(self):
# The port used here doesn't matter, but some systems require it
# to be non-zero if we do not also pass AI_PASSIVE.
Resolver().resolve('localhost', 80, callback=self.stop)
addrinfo = self.wait()
families = set(addr[0] for addr in addrinfo)
if socket.AF_INET6 not in families:
self.skipTest("localhost does not resolve to ipv6")
@gen_test
def do_test_connect(self, family, host):
port = self.start_server(family)
stream = yield self.client.connect(host, port)
with closing(stream):
stream.write(b"hello")
data = yield self.server.streams[0].read_bytes(5)
self.assertEqual(data, b"hello")
def test_connect_ipv4_ipv4(self):
self.do_test_connect(socket.AF_INET, '127.0.0.1')
def test_connect_ipv4_dual(self):
self.do_test_connect(socket.AF_INET, 'localhost')
@skipIfNoIPv6
def test_connect_ipv6_ipv6(self):
self.skipIfLocalhostV4()
self.do_test_connect(socket.AF_INET6, '::1')
@skipIfNoIPv6
def test_connect_ipv6_dual(self):
self.skipIfLocalhostV4()
if Resolver.configured_class().__name__.endswith('TwistedResolver'):
self.skipTest('TwistedResolver does not support multiple addresses')
self.do_test_connect(socket.AF_INET6, 'localhost')
def test_connect_unspec_ipv4(self):
self.do_test_connect(socket.AF_UNSPEC, '127.0.0.1')
@skipIfNoIPv6
def test_connect_unspec_ipv6(self):
self.skipIfLocalhostV4()
self.do_test_connect(socket.AF_UNSPEC, '::1')
def test_connect_unspec_dual(self):
self.do_test_connect(socket.AF_UNSPEC, 'localhost')
@gen_test
def test_refused_ipv4(self):
cleanup_func, port = refusing_port()
self.addCleanup(cleanup_func)
with self.assertRaises(IOError):
yield self.client.connect('127.0.0.1', port)
class TestConnectorSplit(unittest.TestCase):
def test_one_family(self):
# These addresses aren't in the right format, but split doesn't care.
primary, secondary = _Connector.split(
[(AF1, 'a'),
(AF1, 'b')])
self.assertEqual(primary, [(AF1, 'a'),
(AF1, 'b')])
self.assertEqual(secondary, [])
def test_mixed(self):
primary, secondary = _Connector.split(
[(AF1, 'a'),
(AF2, 'b'),
(AF1, 'c'),
(AF2, 'd')])
self.assertEqual(primary, [(AF1, 'a'), (AF1, 'c')])
self.assertEqual(secondary, [(AF2, 'b'), (AF2, 'd')])
class ConnectorTest(AsyncTestCase):
class FakeStream(object):
def __init__(self):
self.closed = False
def close(self):
self.closed = True
def setUp(self):
super(ConnectorTest, self).setUp()
self.connect_futures = {}
self.streams = {}
self.addrinfo = [(AF1, 'a'), (AF1, 'b'),
(AF2, 'c'), (AF2, 'd')]
def tearDown(self):
# Unless explicitly checked (and popped) in the test, we shouldn't
# be closing any streams
for stream in self.streams.values():
self.assertFalse(stream.closed)
super(ConnectorTest, self).tearDown()
def create_stream(self, af, addr):
future = Future()
self.connect_futures[(af, addr)] = future
return future
def assert_pending(self, *keys):
self.assertEqual(sorted(self.connect_futures.keys()), sorted(keys))
def resolve_connect(self, af, addr, success):
future = self.connect_futures.pop((af, addr))
if success:
self.streams[addr] = ConnectorTest.FakeStream()
future.set_result(self.streams[addr])
else:
future.set_exception(IOError())
def start_connect(self, addrinfo):
conn = _Connector(addrinfo, self.io_loop, self.create_stream)
# Give it a huge timeout; we'll trigger timeouts manually.
future = conn.start(3600)
return conn, future
def test_immediate_success(self):
conn, future = self.start_connect(self.addrinfo)
self.assertEqual(list(self.connect_futures.keys()),
[(AF1, 'a')])
self.resolve_connect(AF1, 'a', True)
self.assertEqual(future.result(), (AF1, 'a', self.streams['a']))
def test_immediate_failure(self):
# Fail with just one address.
conn, future = self.start_connect([(AF1, 'a')])
self.assert_pending((AF1, 'a'))
self.resolve_connect(AF1, 'a', False)
self.assertRaises(IOError, future.result)
def test_one_family_second_try(self):
conn, future = self.start_connect([(AF1, 'a'), (AF1, 'b')])
self.assert_pending((AF1, 'a'))
self.resolve_connect(AF1, 'a', False)
self.assert_pending((AF1, 'b'))
self.resolve_connect(AF1, 'b', True)
self.assertEqual(future.result(), (AF1, 'b', self.streams['b']))
def test_one_family_second_try_failure(self):
conn, future = self.start_connect([(AF1, 'a'), (AF1, 'b')])
self.assert_pending((AF1, 'a'))
self.resolve_connect(AF1, 'a', False)
self.assert_pending((AF1, 'b'))
self.resolve_connect(AF1, 'b', False)
self.assertRaises(IOError, future.result)
def test_one_family_second_try_timeout(self):
conn, future = self.start_connect([(AF1, 'a'), (AF1, 'b')])
self.assert_pending((AF1, 'a'))
# trigger the timeout while the first lookup is pending;
# nothing happens.
conn.on_timeout()
self.assert_pending((AF1, 'a'))
self.resolve_connect(AF1, 'a', False)
self.assert_pending((AF1, 'b'))
self.resolve_connect(AF1, 'b', True)
self.assertEqual(future.result(), (AF1, 'b', self.streams['b']))
def test_two_families_immediate_failure(self):
conn, future = self.start_connect(self.addrinfo)
self.assert_pending((AF1, 'a'))
self.resolve_connect(AF1, 'a', False)
self.assert_pending((AF1, 'b'), (AF2, 'c'))
self.resolve_connect(AF1, 'b', False)
self.resolve_connect(AF2, 'c', True)
self.assertEqual(future.result(), (AF2, 'c', self.streams['c']))
def test_two_families_timeout(self):
conn, future = self.start_connect(self.addrinfo)
self.assert_pending((AF1, 'a'))
conn.on_timeout()
self.assert_pending((AF1, 'a'), (AF2, 'c'))
self.resolve_connect(AF2, 'c', True)
self.assertEqual(future.result(), (AF2, 'c', self.streams['c']))
# resolving 'a' after the connection has completed doesn't start 'b'
self.resolve_connect(AF1, 'a', False)
self.assert_pending()
def test_success_after_timeout(self):
conn, future = self.start_connect(self.addrinfo)
self.assert_pending((AF1, 'a'))
conn.on_timeout()
self.assert_pending((AF1, 'a'), (AF2, 'c'))
self.resolve_connect(AF1, 'a', True)
self.assertEqual(future.result(), (AF1, 'a', self.streams['a']))
# resolving 'c' after completion closes the connection.
self.resolve_connect(AF2, 'c', True)
self.assertTrue(self.streams.pop('c').closed)
def test_all_fail(self):
conn, future = self.start_connect(self.addrinfo)
self.assert_pending((AF1, 'a'))
conn.on_timeout()
self.assert_pending((AF1, 'a'), (AF2, 'c'))
self.resolve_connect(AF2, 'c', False)
self.assert_pending((AF1, 'a'), (AF2, 'd'))
self.resolve_connect(AF2, 'd', False)
# one queue is now empty
self.assert_pending((AF1, 'a'))
self.resolve_connect(AF1, 'a', False)
self.assert_pending((AF1, 'b'))
self.assertFalse(future.done())
self.resolve_connect(AF1, 'b', False)
self.assertRaises(IOError, future.result)
|
the-stack_0_7223 | import os
import paddle
import math
from paddle.optimizer.optimizer import Optimizer
from collections import defaultdict
from paddle.fluid import core
from paddle.fluid import framework
from paddle.fluid.framework import Variable
from paddle.fluid import layers
from paddle.fluid import unique_name
from paddle.fluid.framework import in_dygraph_mode, _dygraph_tracer
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.dygraph import base as imperative_base
import numpy as np
from paddle.utils.cpp_extension import get_build_directory
# op
build_dir = get_build_directory()
op_lib = os.path.join(build_dir, "ranger_op/ranger_op.so")
if op_lib is not None and os.path.isfile(op_lib):
# Maybe it has been loadad by `ext_utils.load`
paddle.utils.cpp_extension.load_op_meta_info_and_register_op(
op_lib)
class Ranger(Optimizer):
_moment1_acc_str = "moment1"
_moment2_acc_str = "moment2"
_beta1_pow_acc_str = "beta1_pow_acc"
_beta2_pow_acc_str = "beta2_pow_acc"
_slow_str = "slow"
#_inf_norm_acc_str = "inf_norm"
def __init__(self,
learning_rate=0.001,
alpha=0.5, k=6, # Look Ahead
beta1=0.95, beta2=0.999, epsilon=1e-5, weight_decay=0.0, N_sma_threshhold=5., # RAdam
use_gc=True,gc_conv_only=False, # gradient centralization
parameters=None,
name=None):
if not isinstance(beta1, Variable):
if not 0 <= beta1 < 1:
raise ValueError(
"Invaild value of beta1, expect beta1 in [0,1).")
if not isinstance(beta2, Variable):
if not 0 <= beta2 < 1:
raise ValueError(
"Invaild value of beta2, expect beta2 in [0,1).")
if not isinstance(epsilon, Variable):
if not 0 <= epsilon:
raise ValueError(
"Invaild value of epsilon, expect epsilon >= 0.")
assert (
0.0 <= alpha <= 1.0
), "alpha should be larger or equal to 0.0, and less or equal than 1.0"
assert (isinstance(k, int) and k > 0), "k should be a positive integer"
super(Ranger, self).__init__(
learning_rate=learning_rate,
parameters=parameters,
weight_decay=None,
name=name)
self.type = "ranger"
self._beta1 = beta1
self._beta2 = beta2
self._epsilon = epsilon
self._weight_decay = weight_decay
self._N_sma_threshhold = N_sma_threshhold
        self._N_sma_max = 2 / (1 - beta2) - 1  # rho_infinity, the maximum length of the approximated SMA
self.use_gc = use_gc
self.gc_gradient_threshold = 3 if gc_conv_only else 1
self.alpha = alpha
self.k = k
self.helper = LayerHelper(self.__class__.__name__)
#self._k_var = None
#self._global_step_var = None
#self._step_size_var = None
#self._sma_flag_var = None
self._global_step = 0
self._step_size = None
self._sma_flag = None
def _get_accumulator(self, name, param):
if self._name is not None:
name = self._name + "_" + name
if (name not in self._accumulators or
param.name not in self._accumulators[name]):
raise Exception("Accumulator {} does not exist for parameter {}".
format(name, param.name))
return self._accumulators[name][param.name]
def _add_moments_pows(self, p):
self._add_accumulator(self._moment1_acc_str, p)
self._add_accumulator(self._moment2_acc_str, p)
#self._add_accumulator(self._inf_norm_acc_str, p)
self._add_accumulator(
name=self._beta1_pow_acc_str,
param=p,
fill_value=self._beta1,
shape=[1])
self._add_accumulator(
name=self._beta2_pow_acc_str,
param=p,
fill_value=self._beta2,
shape=[1])
"""
def _increment_global_var(self):
        # If this does not work, move the increment into the C++ op source instead
if self._global_step_var is None:
self._global_step_var = layers.create_global_var(
name=unique_name.generate("lookhead_step"),
shape=[1],
value=0,
dtype='int32',
persistable=True
)
self.helper.append_op(
type='increment',
inputs={'X': [self._global_step_var]},
outputs={'Out':[self._global_step_var]},
attrs={'step': 1.0}
)
"""
def _cal_sma(self):
"""
beta2_pow = self._beta2**self._global_step_var
beta1_pow = self._beta1**self._global_step_var
N_sma = self._N_sma_max - 2. * self._global_step_var * beta2_pow / (1. - beta2_pow)
sma_flag = N_sma > self._N_sma_threshhold
if sma_flag:
step_size = paddle.sqrt( (1.-beta2_pow) * (N_sma-4.) / (self._N_sma_max-4.) * (N_sma-2.) / N_sma * self._N_sma_max /(self._N_sma_max-2.)) / (1.-beta1_pow)
else:
step_size = 1./(1. - beta1_pow)
if self._step_size_var is None:
self._step_size_var = layers.create_global_var(
name=unique_name.generate("radam_step_size"),
shape=[1],
value=step_size,
dtype='int32',
persistable=True
)
if self._sma_flag_var is None:
self._sma_flag_var = layers.create_global_var(
name=unique_name.generate("radam_sma_flag"),
shape=[1],
value=sma_flag,
dtype='bool',
persistable=True
)
paddle.assign(step_size, self._step_size_var)
paddle.assign(sma_flag, self._sma_flag_var)
"""
beta2_pow = self._beta2**self._global_step
beta1_pow = self._beta1**self._global_step
N_sma = self._N_sma_max - 2. * self._global_step * beta2_pow / (1. - beta2_pow)
sma_flag = N_sma > self._N_sma_threshhold
if sma_flag:
step_size = math.sqrt( (1.-beta2_pow) * (N_sma-4.) / (self._N_sma_max-4.) * (N_sma-2.) / N_sma * self._N_sma_max /(self._N_sma_max-2.)) / (1.-beta1_pow)
else:
step_size = 1./(1. - beta1_pow)
self._step_size = step_size
self._sma_flag = sma_flag
def _append_optimize_op(self, block, param_and_grad):
        # Gradient centralization is applied to the gradient, not to the parameter.
        # GC operation for Conv layers and FC layers
        # GC can be viewed as projected gradient descent with a constrained loss function. The
        # constrained loss and its gradient have better Lipschitzness, so training becomes more
        # efficient and stable.
g_tmp = param_and_grad[1]
if self.use_gc and g_tmp.dim() > self.gc_gradient_threshold:
#print("grad before gc:", g_tmp)
g_tmp = g_tmp - g_tmp.mean(axis=tuple(range(1, g_tmp.dim())), keepdim=True)
#print("grad after gc:",g_tmp)
"""
moment = self._get_accumulator(self._moment1_acc_str, param_and_grad[0])
inf_norm = self._get_accumulator(self._inf_norm_acc_str,
param_and_grad[0])
beta1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str,
param_and_grad[0])
# create the adamax optimize op
adamax_op = block.append_op(
type="adamax",
inputs={
"Param": param_and_grad[0],
"Grad": param_and_grad[1],
"LearningRate": self._create_param_lr(param_and_grad),
"Moment": moment,
"InfNorm": inf_norm,
"Beta1Pow": beta1_pow_acc
},
outputs={
"ParamOut": param_and_grad[0],
"MomentOut": moment,
"InfNormOut": inf_norm
},
attrs={
"beta1": self._beta1,
"beta2": self._beta2,
"epsilon": self._epsilon
},
stop_gradient=True)"""
# RAdam
assert isinstance(block, framework.Block)
moment1 = self._get_accumulator(self._moment1_acc_str,
param_and_grad[0])
moment2 = self._get_accumulator(self._moment2_acc_str,
param_and_grad[0])
beta1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str,
param_and_grad[0])
beta2_pow_acc = self._get_accumulator(self._beta2_pow_acc_str,
param_and_grad[0])
block.append_op(
type=self.type,
inputs={
"Param": param_and_grad[0],
"Grad": g_tmp,
"LearningRate": self._create_param_lr(param_and_grad),
"Moment1": moment1,
"Moment2": moment2,
"Beta1Pow": beta1_pow_acc,
"Beta2Pow": beta2_pow_acc,
#"StepSize": [self._step_size_var],
#"SmaFlag": [self._sma_flag_var]
},
outputs={
"ParamOut": param_and_grad[0],
"Moment1Out": moment1,
"Moment2Out": moment2,
"Beta1PowOut": beta1_pow_acc,
"Beta2PowOut": beta2_pow_acc
},
attrs={
"beta1": self._beta1,
"beta2": self._beta2,
"epsilon": self._epsilon,
"weight_decay": self._weight_decay,
"step_size": self._step_size,
"sma_flag": self._sma_flag
},
stop_gradient=True)
#print("after radam, param:", param_and_grad[0])
#print("after radam, grad:", param_and_grad[1])
# Look ahead
"""
one_var = paddle.ones(shape=[1], dtype='int32', name='lookahead_ones')
zero_var = paddle.zeros(shape=[1], dtype='int32', name='lookhead_zeros')
k_var = layers.create_global_var(
name=unique_name.generate("lookahead_k"),
shape=[1],
value=self.k,
dtype='int32',
persistable=True
)
        # Could paddle.mod be used instead? https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/mod_cn.html#mod
mod = paddle.mod(self._global_step_var, k_var)
        cond_1 = paddle.equal(self._global_step_var, one_var)  # is global step equal to 1?
cond_1 = paddle.cast(cond_1, dtype='float32')
        cond_2 = paddle.equal(mod, zero_var)  # is global_step % k equal to 0?
cond_2 = paddle.cast(cond_2, dtype='float32')
        slow_var = self._get_accumulator(self._slow_str, param_and_grad[0])  # cached slow weights
        # initialize slow_var
tmp_var = cond_1 * param_and_grad[0] + (1 - cond_1) * slow_var
paddle.assign(tmp_var, slow_var)
        # blend the model parameters with the slow weights
tmp_var = self.alpha * param_and_grad[0] + (1.0 - self.alpha) * slow_var
tmp_var_1 = cond_2 * tmp_var + (1 - cond_2) * param_and_grad[0]
paddle.assign(tmp_var_1, param_and_grad[0])
tmp_var_1 = cond_2 * tmp_var + (1 - cond_2) * slow_var
paddle.assign(tmp_var_1, slow_var)
"""
        # if-based implementation of the Look Ahead update
mod = self._global_step % self.k
        slow_var = self._get_accumulator(self._slow_str, param_and_grad[0])  # cached slow weights
if self._global_step == 1:
paddle.assign(param_and_grad[0], slow_var)
if mod == 0:
tmp_var = self.alpha * param_and_grad[0] + (1.0 - self.alpha) * slow_var
paddle.assign(tmp_var, slow_var)
paddle.assign(tmp_var, param_and_grad[0])
return None
def _create_accumulators(self, block, parameters):
assert isinstance(block, framework.Block)
for p in parameters:
self._add_accumulator(self._slow_str, p)
self._add_moments_pows(p)
@imperative_base.no_grad
@framework.dygraph_only
def step(self):
# Look Ahead global_step+1
#self._increment_global_var()
self._global_step += 1
        # RAdam: compute N_sma and step_size, then pass the boolean N_sma > N_sma_threshhold and step_size to the op
self._cal_sma()
if not isinstance(self._parameter_list[0], dict):
params_grads = []
for param in self._parameter_list:
if param.stop_gradient:
continue
if param._grad_ivar() is not None:
grad_var = param._grad_ivar()
if hasattr(grad_var, "_is_sparse") and grad_var._is_sparse(
) and self.regularization is not None:
raise RuntimeError(
"Ranger don't support weight_decay with sparse parameters, please set it to None."
)
params_grads.append((param, grad_var))
#print(params_grads[0])
#print(params_grads[1])
optimize_ops = self._apply_optimize(
loss=None, startup_program=None, params_grads=params_grads)
else:
# optimize parameters in groups
for param_group in self._param_groups:
params_grads = defaultdict(lambda: list())
for param in param_group['params']:
if param.stop_gradient:
continue
if param._grad_ivar() is not None:
grad_var = param._grad_ivar()
params_grads['params'].append((param, grad_var))
params_grads.update(
{k: v
for k, v in param_group.items() if k != 'params'})
#print(params_grads[0])
#print(params_grads[1])
self._apply_optimize(
loss=None, startup_program=None, params_grads=params_grads)
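
# Minimal usage sketch (an assumption, not part of the original file): it presumes the custom
# ranger_op extension above has been compiled to ranger_op.so and that paddle runs in dygraph
# mode. The tiny Linear layer and random batch below are purely illustrative.
if __name__ == '__main__':
    import paddle.nn as nn
    layer = nn.Linear(4, 1)
    opt = Ranger(learning_rate=1e-3, parameters=layer.parameters())
    x = paddle.rand([8, 4])
    loss = paddle.mean(layer(x))
    loss.backward()
    opt.step()        # RAdam update + gradient centralization + Look Ahead slow weights
    opt.clear_grad()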
|
the-stack_0_7225 | from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score, balanced_accuracy_score
def get_commonscores(y_true, y_pred):
"""
Calculate the precision, recall, f1, accuracy, balanced_accuracy scores
Args:
y_true (pd.Series): y_true (with limited set of index)
y_pred (pd.Series): y_pred (with limited set of index)
Returns:
dict : scores calculated on intersection of index. Keys Precision, recall, f1, accuracy, balanced_accuracy
"""
commonindex = y_true.index.intersection(y_pred.index)
myscores = dict()
y2_true = y_true.loc[commonindex]
y2_pred = y_pred.loc[commonindex]
myscores['precision'] = precision_score(y_true=y2_true, y_pred=y2_pred)
myscores['recall'] = recall_score(y_true=y2_true, y_pred=y2_pred)
myscores['f1'] = f1_score(y_true=y2_true, y_pred=y2_pred)
myscores['accuracy'] = accuracy_score(y_true=y2_true, y_pred=y2_pred)
myscores['balanced_accuracy'] = balanced_accuracy_score(y_true=y2_true, y_pred=y2_pred)
return myscores
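
# Minimal usage sketch (an assumption, not part of the original module): the two Series below
# are made up for illustration; only the intersection of their indexes (0, 1, 2) is scored.
if __name__ == '__main__':
    import pandas as pd
    y_true_demo = pd.Series([1, 0, 1, 1], index=[0, 1, 2, 3])
    y_pred_demo = pd.Series([1, 0, 0], index=[0, 1, 2])  # index 3 is dropped by the intersection
    print(get_commonscores(y_true_demo, y_pred_demo))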
suffixexact = 'exact'
suffixtoken = 'token'
suffixfuzzy = 'simple'
name_freetext = 'FreeText'
name_exact = 'Exact'
name_pruning_threshold = 'threshold'
name_usescores = 'use_scores'
name_stop_words = 'stop_words'
navalue_score = 0
|
the-stack_0_7228 | from opytimizer.spaces import HyperComplexSpace
# Defines the number of agents, decision variables,
# and search space dimensions
n_agents = 2
n_variables = 5
n_dimensions = 4
# Creates the HyperComplexSpace
s = HyperComplexSpace(n_agents=n_agents, n_variables=n_variables, n_dimensions=n_dimensions)
# Prints out some properties
print(s.n_agents, s.n_variables, s.n_dimensions)
print(s.agents, s.best_agent)
print(s.best_agent.position)
|
the-stack_0_7229 | #!/usr/bin/env python2
from Tkinter import *
import Tkinter as tk
import ttk
import tkFileDialog
import tkMessageBox
from tkFileDialog import askdirectory
import six
from pkg_resources import resource_stream
import os
from os import listdir
from os.path import isfile, join
from os import walk
from subprocess import Popen
from subprocess import PIPE
import keyword
import re
from multiprocessing import Process
import paramiko
from access_ssh import access_ssh
from method_dialog import method_dialog
from editor import EditorClass
from find_and_replace_dialog import find_and_replace_dialog
from remote_file_chooser import remote_file_chooser
from new_dialog import new_dialog
from new_folder_dialog import new_folder_dialog
from open_file_dialog import open_file_dialog
from change_color import change_color
from interface import Paramiko_Interface
from create_config import create_config
from run_script import run_script_python_2
from run_script import run_script_python_3
from about_dialog import about_dialog
from project_opener import project_opener
from project_manager import project_manager
class App:
def open_file(self, path):
if isfile(path):
if not path in self.tab_names:
pane = PanedWindow(self.n, orient=HORIZONTAL, opaqueresize=True)
ed = EditorClass(self.root, path, self)
pane.add(ed.frame)
self.n.add(pane, text=path)
self.n.grid(row=0, column=1, rowspan=40, sticky=N+S+E+W)
w = self.root.winfo_width()
h = self.root.winfo_height()
self.tab_names.append(path)
ed.text.config(insertbackground='white')
ed.text.config(background=self.background)
ed.text.config(foreground=self.foreground)
with open(path, 'r') as f_in:
text = f_in.read()
lines = text.split('\n')
for line in lines:
ed.text.insert(END, line+'\n')
ed.lnText.config(foreground=self.line_num_color)
ed.lnText.config(background=self.line_num_background_color)
ed.syntax_coloring(None)
self.eds.append(ed)
self.n.select(self.tab_names.index(path))
def change_ed_colors(self):
for ed in self.eds:
ed.text.config(insertbackground='white')
ed.text.config(background=self.background)
ed.text.config(foreground=self.foreground)
ed.text.tag_configure('Token.Keyword', foreground=self.token_keyword)
ed.text.tag_configure('Token.Keyword.Constant', foreground=self.token_keyword)
ed.text.tag_configure('Token.Keyword.Declaration', foreground=self.token_keyword)
ed.text.tag_configure('Token.Keyword.Namespace', foreground=self.token_keyword)
ed.text.tag_configure('Token.Keyword.Pseudo', foreground=self.token_keyword)
ed.text.tag_configure('Token.Keyword.Reserved', foreground=self.token_keyword)
ed.text.tag_configure('Token.Keyword.Type', foreground=self.token_keyword)
ed.text.tag_configure('Token.Name', foreground=self.token_name)
ed.text.tag_configure('Token.Name.Attribute', foreground=self.token_name)
ed.text.tag_configure('Token.Name.Builtin', foreground=self.token_name)
ed.text.tag_configure('Token.Name.Builtin.Pseudo', foreground=self.token_name)
ed.text.tag_configure('Token.Name.Class', foreground=self.token_name)
ed.text.tag_configure('Token.Name.Constant', foreground=self.token_name)
ed.text.tag_configure('Token.Name.Decorator', foreground=self.token_name)
ed.text.tag_configure('Token.Name.Entity', foreground=self.token_name)
ed.text.tag_configure('Token.Name.Exception', foreground=self.token_name)
ed.text.tag_configure('Token.Name.Function', foreground=self.token_name)
ed.text.tag_configure('Token.Name.Label', foreground=self.token_name)
ed.text.tag_configure('Token.Name.Namespace', foreground=self.token_name)
ed.text.tag_configure('Token.Name.Other', foreground=self.token_name)
ed.text.tag_configure('Token.Name.Tag', foreground=self.token_name)
ed.text.tag_configure('Token.Name.Variable', foreground=self.token_name)
ed.text.tag_configure('Token.Name.Variable.Class', foreground=self.token_name)
ed.text.tag_configure('Token.Name.Variable.Global', foreground=self.token_name)
ed.text.tag_configure('Token.Name.Variable.Instance', foreground=self.token_name)
ed.text.tag_configure('Token.Literal', foreground=self.token_literal)
ed.text.tag_configure('Token.Literal.Date', foreground=self.token_literal)
ed.text.tag_configure('Token.Literal.String', foreground=self.token_string)
ed.text.tag_configure('Token.Literal.String.Backtick', foreground=self.token_string)
ed.text.tag_configure('Token.Literal.String.Char', foreground=self.token_string)
ed.text.tag_configure('Token.Literal.String.Doc', foreground=self.token_string)
ed.text.tag_configure('Token.Literal.String.Double', foreground=self.token_string)
ed.text.tag_configure('Token.Literal.String.Escape', foreground=self.token_string)
ed.text.tag_configure('Token.Literal.String.Heredoc', foreground=self.token_string)
ed.text.tag_configure('Token.Literal.String.Interpol', foreground=self.token_string)
ed.text.tag_configure('Token.Literal.String.Other', foreground=self.token_string)
ed.text.tag_configure('Token.Literal.String.Regex', foreground=self.token_string)
ed.text.tag_configure('Token.Literal.String.Single', foreground=self.token_string)
ed.text.tag_configure('Token.Literal.String.Symbol', foreground=self.token_string)
ed.text.tag_configure('Token.Literal.Number', foreground=self.token_number)
ed.text.tag_configure('Token.Literal.Number.Bin', foreground=self.token_number)
ed.text.tag_configure('Token.Literal.Number.Float', foreground=self.token_number)
ed.text.tag_configure('Token.Literal.Number.Hex', foreground=self.token_number)
ed.text.tag_configure('Token.Literal.Number.Integer', foreground=self.token_number)
ed.text.tag_configure('Token.Literal.Number.Integer.Long', foreground=self.token_number)
ed.text.tag_configure('Token.Literal.Number.Oct', foreground=self.token_number)
ed.text.tag_configure('Token.Operator', foreground=self.token_operators)
ed.text.tag_configure('Token.Operator.Word', foreground=self.token_operators)
ed.text.tag_configure('Token.Punctuation', foreground=self.token_punctuation)
ed.text.tag_configure('Token.Comment', foreground=self.token_comments)
ed.text.tag_configure('Token.Comment.Hashbang', foreground=self.token_comments)
ed.text.tag_configure('Token.Comment.Multiline', foreground=self.token_comments)
ed.text.tag_configure('Token.Comment.Preproc', foreground=self.token_comments)
ed.text.tag_configure('Token.Comment.Single', foreground=self.token_comments)
ed.text.tag_configure('Token.Comment.Special', foreground=self.token_comments)
ed.text.tag_configure('Token.Generic', foreground=self.token_generic)
ed.text.tag_configure('Token.Generic.Deleted', foreground=self.token_generic)
ed.text.tag_configure('Token.Generic.Emph', foreground=self.token_generic)
ed.text.tag_configure('Token.Generic.Error', foreground=self.token_generic)
ed.text.tag_configure('Token.Generic.Heading', foreground=self.token_generic)
ed.text.tag_configure('Token.Generic.Inserted', foreground=self.token_generic)
ed.text.tag_configure('Token.Generic.Output', foreground=self.token_generic)
ed.text.tag_configure('Token.Generic.Prompt', foreground=self.token_generic)
ed.text.tag_configure('Token.Generic.Strong', foreground=self.token_generic)
ed.text.tag_configure('Token.Generic.Subheading', foreground=self.token_generic)
ed.text.tag_configure('Token.Generic.Traceback', foreground=self.token_generic)
self.tree.tag_configure('directory', background=self.background, foreground=self.dir_color)
self.tree.tag_configure('file', background=self.background, foreground=self.file_color)
self.menubar.config(background=self.file_bar_color)
self.root.configure(background=self.background)
self.menubar.config(background=self.file_bar_color, foreground=self.file_bar_text_color)
ttk.Style().configure("TNotebook", background=self.notebook_background)
def copy_click(self):
index = self.n.tabs().index(self.n.select())
self.eds[index].text.clipboard_clear()
text = self.eds[index].text.get(tk.SEL_FIRST, tk.SEL_LAST)
self.eds[index].text.clipboard_append(text)
def cut_click(self):
index = self.n.tabs().index(self.n.select())
self.copy_click()
self.eds[index].text.delete(tk.SEL_FIRST, tk.SEL_LAST)
def paste_click(self):
index = self.n.tabs().index(self.n.select())
text = self.eds[index].text.selection_get(selection='CLIPBOARD')
self.eds[index].text.insert('insert', text)
def recursive_find_nodes(self, rootDir, parent, indent):
for lists in os.listdir(rootDir):
path = os.path.join(rootDir, lists)
#self.files.append(path)
if os.name == 'posix':
if os.path.isdir(path):
parent.children.append(node(path[path.rfind('/'):], parent, indent, path, 'folder', self.top, self))
                    self.recursive_find_nodes(path, parent.children[len(parent.children) - 1], indent + 1)
else:
parent.children.append(node(path[path.rfind('/'):], parent, indent, path, 'file', self.top, self))
else:
if os.path.isdir(path):
parent.children.append(node(path[path.rfind('\\'):], parent, indent, path, 'folder', self.top, self))
                    self.recursive_find_nodes(path, parent.children[len(parent.children) - 1], indent + 1)
else:
parent.children.append(node(path[path.rfind('\\'):], parent, indent, path, 'file', self.top, self))
return parent
def recursive_print_nodes(self, parent):
for child in parent.children:
            self.recursive_print_nodes(child)
print(('-' * parent.indent) + parent.name)
def list_nodes(self, path, tree, parent, full_path):
self.root = node(os.getcwd()[os.getcwd().rfind('\\'):], None, 0, os.getcwd(), 'folder', self.top, self)
        self.root = self.recursive_find_nodes(os.getcwd(), self.root, 1)
        self.recursive_print_nodes(self.root)
def draw_structure(self, parent, height):
parent.name_label.place(anchor=NW, x=parent.indent*10 + 20, y=height)
parent.image_label.place(anchor=NW, x=parent.indent*10, y=height)
self.name_labels.append(parent.name_label)
self.image_labels.append(parent.image_label)
if parent.display_children:
for child in parent.children:
height = height + 20
height = self.draw_structure(child, height)
return height
def recursive_find(self, rootDir):
for lists in os.listdir(rootDir):
path = os.path.join(rootDir, lists)
self.files.append(path)
if os.path.isdir(path):
self.recursive_find(path)
def list_files(self, path, tree, parent, full_path):
self.files = [os.getcwd()]
self.recursive_find(os.getcwd())
counter = 0
for f in self.files:
if counter != 0:
if os.name == 'posix':
if(isfile(f)):
tree.insert(f[:f.rfind('/')], 0, f, text=f[f.rfind('/') + 1:], tags = ('file',))
else:
tree.insert(f[:f.rfind('/')], 0, f, text=f[f.rfind('/') + 1:], tags = ('directory',))
else:
if(isfile(f)):
tree.insert(f[:f.rfind('\\')], 0, f, text=f[f.rfind('\\') + 1:], tags = ('file',))
else:
tree.insert(f[:f.rfind('\\')], 0, f, text=f[f.rfind('\\') + 1:], tags = ('directory',))
else:
if os.name == 'posix':
tree.insert('', 3, f, text=f[f.rfind('/') + 1:], tags = ('directory',))
else:
tree.insert('', 3, f, text=f[f.rfind('\\') + 1:], tags = ('directory',))
counter = counter + 1
return tree
def on_double_click(self, event):
item = self.tree.selection()[0]
self.open_file(item)
def close_all_tabs(self):
val = tkMessageBox.askokcancel('Open New Folder', "This will close all current tabs, continue?")
if val:
for i in range(0, len(self.n.tabs())):
self.n.forget(0)
del(self.tab_names[0])
del(self.eds[0])
return val
def close_tab(self):
index = self.n.tabs().index(self.n.select())
self.n.forget(self.n.select())
del(self.tab_names[index])
del(self.eds[index])
def close_tab_event(self, event):
index = self.n.tabs().index(self.n.select())
self.n.forget(self.n.select())
del(self.tab_names[index])
del(self.eds[index])
def open_click(self):
of = open_file_dialog(self.root, self, os.getcwd().replace('\\', '/'))
def save_click(self):
path = self.n.tab(self.n.select())['text']
index = self.n.tabs().index(self.n.select())
with open(path, 'w') as f_out:
f_out.write(self.eds[index].text.get("1.0",END))
self.tree.delete(*self.tree.get_children())
self.tree = self.list_files('.', self.tree, "", '.')
self.tree.item(os.getcwd().replace('\\', '/'), open=True)
if self.editing_pi:
transport = paramiko.Transport((self.ip, 22))
transport.connect(username=self.username, password=self.password)
sftp = paramiko.SFTPClient.from_transport(transport)
try:
sftp.put(path,path[path.find(self.meringue_path + '/local/') + len(self.meringue_path + '/local/'):])
except:
print('Could not push for some reason')
def save_type(self, event):
path = self.n.tab(self.n.select())['text']
index = self.n.tabs().index(self.n.select())
with open(path, 'w') as f_out:
f_out.write(self.eds[index].text.get("1.0",END))
self.tree.delete(*self.tree.get_children())
self.tree = self.list_files('.', self.tree, "", '.')
self.tree.item(os.getcwd(), open=True)
if self.editing_pi:
transport = paramiko.Transport((self.ip, 22))
transport.connect(username=self.username, password=self.password)
sftp = paramiko.SFTPClient.from_transport(transport)
try:
sftp.put(path,path[path.find(self.meringue_path + '/local/') + len(self.meringue_path + '/local/'):])
except:
print('Could not push for some reason')
def exit_click(self):
sys.exit()
def keyPressed(self, event):
print("--")
if event.keysym == 's':
            self.save_click()
def open_folder_click(self):
val = self.close_all_tabs()
if val:
folder = askdirectory().replace('\\', '/')
os.chdir(folder)
self.tree.delete(*self.tree.get_children())
self.tree = self.list_files('.', self.tree, "", '.')
self.tree.item(os.getcwd(), open=True)
self.folder = folder
self.lines[19] = self.lines[19][:self.lines[19].find('=')+1]+self.folder
self.write_config()
self.editing_pi = False
def open_folder(self, folder):
val = self.close_all_tabs()
if val:
os.chdir(folder)
self.tree.delete(*self.tree.get_children())
self.tree = self.list_files('.', self.tree, "", '.')
self.tree.item(os.getcwd(), open=True)
self.folder = folder
self.lines[19] = self.lines[19][:self.lines[19].find('=')+1]+self.folder
self.write_config()
self.editing_pi = False
def find_text_dialog(self):
temp = find_and_replace_dialog(self.root, self)
self.root.wait_window(temp.top)
def find(self, f):
index = self.n.tabs().index(self.n.select())
ed = self.eds[index]
ed.highlight_pattern(f, "highlight")
def find_one(self, f):
index = self.n.tabs().index(self.n.select())
ed = self.eds[index]
text = ed.text.get("1.0",END)
count = text.count(f)
if self.find_counter >= count:
self.find_counter = 0
ed.highlight_one(f, "highlight", self.find_counter)
self.find_counter = self.find_counter + 1
def replace(self, f, r):
index = self.n.tabs().index(self.n.select())
text = self.eds[index].text.get("1.0",END)
self.eds[index].text.delete("1.0",END)
text = text.replace(f, r, 1)
self.eds[index].text.insert(END, text[:-1])
def replace_all(self, f, r):
index = self.n.tabs().index(self.n.select())
text = self.eds[index].text.get("1.0",END)
self.eds[index].text.delete("1.0",END)
text = text.replace(f, r)
self.eds[index].text.insert(END, text[:-1])
def undo_command(self):
index = self.n.tabs().index(self.n.select())
self.eds[index].undo(None)
def redo_command(self):
index = self.n.tabs().index(self.n.select())
self.eds[index].redo(None)
def reset_counters(self):
self.find_counter = 0
def find_type(self, event):
path = self.n.tab(self.n.select())['text']
self.find_text_dialog()
def tree_rename(self):
item = self.tree.selection()[0]
path = item
found = True
if found:
args = ['python2', self.meringue_path + '/' + 'rename.py', 'test']
p = Popen(args, stdin=PIPE, stdout=PIPE, shell=False)
p.wait()
out = p.stdout.read().replace('\n', '')
if not out == '!!DO NOT RENAME!!':
i = path.replace('\\', '/').rfind('/')
try:
if i != -1:
os.rename(path, path[:path.rfind('/')]+'/'+out)
else:
os.rename(path, out)
except:
print('file does not exist, not renaming anything but the tab')
self.tree.delete(*self.tree.get_children())
self.tree = self.list_files('.', self.tree, "", '.')
self.tree.item(os.getcwd().replace('\\', '/'), open=True)
if self.editing_pi:
new_name = path[:path.rfind('/')]+'/'+out
new_name = new_name[new_name.find(self.meringue_path + '/local/') + len(self.meringue_path + '/local/'):]
transport = paramiko.Transport((self.ip, 22))
transport.connect(username=self.username, password=self.password)
sftp = paramiko.SFTPClient.from_transport(transport)
try:
sftp.rename(item[item.find(self.meringue_path + '/local/') + len(self.meringue_path + '/local/'):], new_name)
except:
print('not a file')
try:
sftp.rmdir(item[item.find(self.meringue_path + '/local/') + len(self.meringue_path + '/local/'):], new_name)
except:
print('not a directory')
def delete(self):
if tkMessageBox.askyesno("Delete", "Delte this file or folder?"):
item = self.tree.selection()[0]
try:
os.remove(item)
if self.editing_pi:
transport = paramiko.Transport((self.ip, 22))
transport.connect(username=self.username, password=self.password)
sftp = paramiko.SFTPClient.from_transport(transport)
try:
sftp.remove(item[item.find(self.meringue_path + '/local/') + len(self.meringue_path + '/local/'):])
except:
print('not a file')
except:
print('Not a file')
try:
self.delete_file(item)
except:
print('Not a directory')
self.tree.delete(*self.tree.get_children())
self.tree = self.list_files('.', self.tree, "", '.')
self.tree.item(os.getcwd(), open=True)
def delete_file(self, path):
dirs = [f for f in listdir(path) if not isfile(join(path, f))]
files = [f for f in listdir(path) if isfile(join(path, f))]
for f in files:
os.remove(path+'/'+f)
if self.editing_pi:
transport = paramiko.Transport((self.ip, 22))
transport.connect(username=self.username, password=self.password)
sftp = paramiko.SFTPClient.from_transport(transport)
try:
sftp.remove(path[path.find(self.meringue_path + '/local/') + len(self.meringue_path + '/local/'):]+'/'+f)
except:
print('not a file')
for d in dirs:
self.delete_file(path+'/'+d)
os.rmdir(path)
if self.editing_pi:
transport = paramiko.Transport((self.ip, 22))
transport.connect(username=self.username, password=self.password)
sftp = paramiko.SFTPClient.from_transport(transport)
try:
                sftp.rmdir(path[path.find(self.meringue_path + '/local/') + len(self.meringue_path + '/local/'):])
except:
print('not a directory')
def show_menu(self, event):
self.directory_menu.post(event.x_root, event.y_root)
def on_right_click(self, event):
if len(self.tree.selection()) > 0:
self.selected_file_dir = self.tree.selection()[0]
self.show_menu(event)
def save_project(self):
with open(self.meringue_path + '/data/projects.txt', 'w') as f_out:
f_out.write('TEMP;{}'.format(self.folder))
def tab_rename(self, event):
path = self.n.tab(self.n.select())['text']
if os.name == 'nt':
print(self.meringue_path)
args = ['python', self.meringue_path + 'rename.py', path[path.rfind('\\')+1:]]
else:
args = ['python2', self.meringue_path + 'rename.py', path[path.rfind('/')+1:]]
p = Popen(args, stdin=PIPE, stdout=PIPE, shell=False)
p.wait()
out = p.stdout.read().replace('\n', '')
if not out == '!!DO NOT RENAME!!':
self.n.tab(self.n.select(), text=out)
def end_find(self, event):
for ed in self.eds:
ed.remove_highlight(None)
def function_dialog(self, event):
dialog = method_dialog(self.root, self)
def ssh(self, event=None):
dialog = access_ssh(self.root, self)
def open_terminal(self):
if sys.platform == "linux" or sys.platform == "linux2":
os.system('gnome-terminal')
if sys.platform == 'darwin':
os.system('open Terminal')
if sys.platform == 'win32':
os.system('start cmd')
def open_project(self):
project_opener(self.root, self)
def manage_projects(self):
project_manager(self.root, self)
def start(self, noOfEditors, noOfLines):
'''
scroll_style = ttk.Style()
scroll_style.element_create("My.Scrollbar.trough", "from", "default")
scroll_style.element_create("My.Scrollbar.bg", "from", "default")
scroll_style.element_create("My.Scrollbar.activebackground", "from", "default")
# Redefine the horizontal scrollbar layout to use the custom trough.
# This one is appropriate for the 'vista' theme.
scroll_style.layout("My.TScrollbar",
[('My.Scrollbar.trough', {'children':
[('Horizontal.Scrollbar.leftarrow', {'side': 'left', 'sticky': ''}),
('Horizontal.Scrollbar.rightarrow', {'side': 'right', 'sticky': ''}),
('Horizontal.Scrollbar.thumb', {'unit': '1', 'children':
[('Horizontal.Scrollbar.grip', {'sticky': ''})],
'sticky': 'nswe'})],
'sticky': 'we'})])
# Copy original style configuration and add our new custom configuration option.
scroll_style.configure("My.TScrollbar", *scroll_style.configure("Horizontal.TScrollbar"))
scroll_style.configure("My.TScrollbar", troughcolor="black")
'''
#s.configure('Tab_Style', background='cyan')
try:
self.read_config()
except:
create_config(self.meringue_path)
self.read_config()
'''
self.pane = PanedWindow(self.n, orient=HORIZONTAL, opaqueresize=True)
ed = EditorClass(self.root, 'untitled')
ed.text.config(insertbackground='white')
ed.text.config(background=self.background)
ed.text.config(foreground=self.foreground)
#ed.vScrollbar.config(style="My.TScrollbar")
ed.text.tag_configure("highlight", background=self.highlight_background, foreground=self.highlight_foreground)
ed.text.tag_configure("keyword", foreground=self.highlight_keyword)
ed.text.tag_configure("function_name", foreground=self.highlight_function_name)
ed.text.tag_configure("function", foreground=self.highlight_function)
ed.text.tag_configure("boolean", foreground=self.highlight_boolean)
ed.text.tag_configure("string", foreground=self.highlight_string)
ed.text.tag_configure("number", foreground=self.highlight_number)
ed.text.tag_configure("operator", foreground=self.highlight_operator)
#ed.text.tag_configure('normal', foreground=self.highlight_normal)
ed.text.tag_configure('comment', foreground=self.highlight_comment)
ed.lnText.config(foreground=self.line_num_color)
ed.lnText.config(background=self.line_num_background_color)
self.pane.add(ed.frame)
self.eds.append(ed)
'''
ttk.Style().configure('TFrame', fieldbackground=self.background, background=self.background)
self.tree_frame = Frame(self.root, bg=self.background, width=200, height=10000)
#ttk.Style().configure('TFrame', fieldbackground=self.background, background=self.background)
#self.tree_frame = Frame(self.root, bg=self.background, width=200, height=10000)
self.bg_frame = Frame(self.tree_frame, width=200, height=10000, bg=self.background)
#self.display_frame = Frame(self.root, width=150, height=10000, bg=self.background)
self.tree = ttk.Treeview(self.tree_frame)
#self.tree["columns"]=("Files_and_Folders")
self.tree = self.list_files('.', self.tree, "", '.')
self.tree.item(os.getcwd(), open=True)
if os.name != 'nt':
self.tree.tag_configure('directory', background=self.background, foreground=self.dir_color)
self.tree.tag_configure('file', background=self.background, foreground=self.file_color)
ttk.Style().configure("Treeview", fieldbackground=self.background, background=self.background)
self.treeScroll = ttk.Scrollbar(self.tree_frame, orient=VERTICAL)
self.treeScroll.configure(command=self.tree.yview)
self.treeScroll.grid(row=0, column=1, rowspan=40, sticky=N+S)
self.tree.configure(xscrollcommand=self.treeScroll.set)
self.tree.bind("<3>", self.on_right_click)
self.tree.bind("<2>", self.on_right_click)
self.tree.bind("<Double-1>", self.on_double_click)
self.tree.grid(row=0, column=0, rowspan=40, sticky=N+S)
self.tree_frame.grid(row=0, column=0, rowspan=40, sticky=N+S)
#self.display_frame.pack(side=RIGHT, fill=Y, expand=0)
#self.pane.pack(fill='both', expand=1)
#self.n.add(self.pane, text='untitled')
self.n.bind("<Double-1>", self.tab_rename)
self.n.bind('<3>', self.close_tab_event)
self.n.bind('<2>', self.close_tab_event)
#self.n.bind("<1>", self.reset_display_text)
self.n.grid(row=0, column=1, rowspan=40, columnspan=60, sticky=N+S+E+W)
ttk.Style().configure("TNotebook", background=self.notebook_background)
#ttk.Style().configure("TPanedwindow", background=self.pane_color, foreground=self.notebook_foreground)
#self.tab_names.append('untitled')
filemenu = Menu(self.menubar, tearoff=0)
filemenu.add_command(label="Open", command=self.open_click)
filemenu.add_command(label="Open Folder", command=self.open_folder_click)
filemenu.add_command(label="Save", command=self.save_click)
filemenu.add_command(label="Close Tab", command=self.close_tab)
filemenu.add_separator()
filemenu.add_command(label='Open Project', command = self.open_project)
filemenu.add_command(label='Save Project', command = self.save_project)
filemenu.add_command(label='Manage Projects', command = self.manage_projects)
filemenu.add_separator()
filemenu.add_command(label="Exit", command=self.exit_click)
self.menubar.add_cascade(label="File", menu=filemenu)
editmenu = Menu(self.menubar, tearoff=0)
editmenu.add_command(label="Undo", command=self.undo_command)
editmenu.add_command(label="Redo", command=self.redo_command)
editmenu.add_separator()
editmenu.add_command(label="Change Editor Colors", command=self.color_config)
self.menubar.add_cascade(label="Edit", menu=editmenu)
viewmenu = Menu(self.menubar, tearoff=0)
viewmenu.add_command(label="Toggle Menubar", command=self.hide_show_menubar_command)
viewmenu.add_command(label="Toggle File Explorer", command=self.hide_show_tree_command)
self.menubar.add_cascade(label="View", menu=viewmenu)
#optionsmenu = Menu(self.menubar, tearoff=0)
#optionsmenu.add_command(label="Change Colors", command=self.color_config)
#self.menubar.add_cascade(label="Options", menu=optionsmenu)
helpmenu = Menu(self.menubar, tearoff=0)
helpmenu.add_command(label="About", command=self.open_about)
self.menubar.add_cascade(label="Help", menu=helpmenu)
#self.menubar.add_command(label="Close Tab", command=self.close_tab)
terminalmenu = Menu(self.menubar, tearoff=0)
terminalmenu.add_command(label="Local Terminal", command=self.open_terminal)
terminalmenu.add_command(label="Remote Terminal", command=self.open_remote_terminal)
self.menubar.add_cascade(label="Open Terminal", menu=terminalmenu)
remotemenu = Menu(self.menubar, tearoff=0)
remotemenu.add_command(label='Connect to Remote', command=self.ssh)
#remotemenu.add_command(label='Edit Directory', command=self.remote_folder_choose)
remotemenu.add_command(label="Open Explorer", command=self.paramiko_interface_open)
self.menubar.add_cascade(label="Remote Actions", menu=remotemenu)
#runmenu = Menu(self.menubar, tearoff=0)
runmenu = Menu(self.menubar, tearoff=0)
runmenu.add_command(label='Python 2', command=self.run_file_python_2)
runmenu.add_command(label='Python 3', command=self.run_file_python_3)
self.menubar.add_cascade(label="Run File", menu=runmenu)
#self.menubar.add_command(label="Open Terminal", command=self.open_terminal)
self.menubar.config(background=self.file_bar_color, foreground=self.file_bar_text_color)
self.root.configure(background=self.background)
self.root.title("meringue")
self.root.bind('<Control-s>', self.save_type)
self.root.bind('<Control-f>', self.find_type)
#self.root.bind('<Control-Shift-p>', self.git_commands)
self.root.bind('<Escape>', self.end_find)
#self.root.bind('<Control-r>', self.function_dialog)
self.root.bind('<Control-h>', self.ssh)
self.root.bind('<Alt_R>', self.hide_show_menubar);
self.root.bind('<Control-e>', self.hide_show_tree);
#self.root.bind("<Configure>", self.configure)
self.root['bg'] = 'black'
self.root.geometry('{}x{}'.format(600, 400))
self.root.config(menu=self.menubar)
if os.name == 'nt':
ttk.Style().theme_use('default')
for x in range(60):
Grid.columnconfigure(self.n, x, weight=1)
for y in range(30):
Grid.rowconfigure(self.n, y, weight=1)
for x in range(60):
Grid.columnconfigure(self.tree, x, weight=1)
for y in range(30):
Grid.rowconfigure(self.tree, y, weight=1)
for x in range(60):
Grid.columnconfigure(self.tree_frame, x, weight=1)
for y in range(30):
Grid.rowconfigure(self.tree_frame, y, weight=1)
for x in range(60):
Grid.columnconfigure(self.root, x, weight=1)
for y in range(30):
Grid.rowconfigure(self.root, y, weight=1)
self.hide_menubar = True;
self.hide_tree = True;
self.emptyMenu = Menu(self.root)
def open_about(self):
about_dialog(self)
def hide_show_menubar(self, event):
if self.hide_menubar:
self.root.config(menu=self.emptyMenu)
self.hide_menubar = False;
else:
self.root.config(menu=self.menubar)
self.hide_menubar = True
def hide_show_tree(self, event):
if self.hide_tree:
self.tree_frame.grid_forget()
self.hide_tree = False
else:
self.tree_frame.grid(row=0, column=0, rowspan=40, sticky=N+S)
self.hide_tree = True
def hide_show_menubar_command(self):
self.hide_show_menubar(None)
def hide_show_tree_command(self):
self.hide_show_tree(None)
def run_file_python_2(self):
index = self.n.tabs().index(self.n.select())
print(self.tab_names[index])
run_script_python_2(self.tab_names[index], self.root)
def run_file_python_3(self):
index = self.n.tabs().index(self.n.select())
print(self.tab_names[index])
run_script_python_3(self.tab_names[index], self.root)
def paramiko_interface_open(self):
Paramiko_Interface(self, self.username, self.password, self.ip, self.port)
def remote_folder_choose(self):
#We're going to store the directory tree here
self.remote_tree_array = []
#Let's ssh into the remote machine
try:
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(self.ip, username=self.username, password=self.password, port=int(self.port))
#Capture the directory output
print('Running and capturing directories')
tkMessageBox.showwarning("SSH Connect", "Pulling the directory structure -- please wait")
stdin, stdout, stderr = ssh.exec_command('tree -f -i -l -d')
stdin.close()
#Extract the name of all of the directories from the tree and store them
for line in stdout.read().splitlines():
if ' -> ' in line:
self.remote_tree_array.append(line[:line.find(' -> ')])
else:
self.remote_tree_array.append(line)
            #Eliminate the top directory as it is not needed
self.remote_tree_array = self.remote_tree_array[:-1]
#Go to letting the user select the directory that they want
rfc = remote_file_chooser(self, self, self.username, self.ip, self.password, ssh, int(self.port))
except:
#If something failed throw an error message
tkMessageBox.showwarning("SSH Connect", "Something failed -- Please try again")
ssh.close()
def open_remote_terminal(self):
self.current_directory = '.'
if sys.platform == "win32":
try:
#os.system('start python ' + self.meringue_path + 'paramiko_terminal.py "{}" {} {} {} {}'.format(self.current_directory, self.ip, self.username, self.password, self.port))
os.system('start python "' + self.meringue_path + 'paramiko_terminal.py" "{}" {} {} {} {}'.format(self.current_directory, self.ip, self.username, self.password, self.port))
except:
# try:
# os.system('start python2 paramiko_terminal.py {} {} {} {} {}'.format(self.current_directory, self.ip, self.username, self.password, self.port))
# except:
pass
if sys.platform == "darwin":
#try:
# os.system('open python paramiko_terminal.py {} {} {} {} {}'.format(self.current_directory, self.ip, self.username, self.password, self.port))
#except:
try:
os.system('open python2 "' + self.meringue_path + 'paramiko_terminal.py" "{}" {} {} {} {}'.format(self.current_directory, self.ip, self.username, self.password, self.port))
except:
pass
if sys.platform == "linux" or sys.platform == "linux2":
#try:
# os.system('xterm -hold -e python paramiko_terminal.py {} {} {} {} {}'.format(self.current_directory, self.ip, self.username, self.password, self.port))
#except:
try:
os.system('xterm -e python2 "' + self.meringue_path + 'paramiko_terminal.py" "{}" {} {} {} {}'.format(self.current_directory, self.ip, self.username, self.password, self.port))
except:
pass
def copy_file(self):
if len(self.tree.selection()) > 0:
item = self.tree.selection()[0]
self.copy_path = item
def paste_file(self):
if self.copy_path != '':
if len(self.tree.selection()) > 0:
item = self.tree.selection()[0]
dirs = [item+'/'+f for f in listdir(item) if not isfile(join(item, f))]
files = [item+'/'+f for f in listdir(item) if isfile(join(item, f))]
if not isfile(item):
f_name = self.copy_path[self.copy_path.rfind('/')+1:]
write_path = item+'/'+f_name
if isfile(self.copy_path):
counter = 1
temp_write_path = write_path
while temp_write_path in files:
temp_write_path = write_path+'.'+str(counter)
counter = counter + 1
write_path = temp_write_path
with open(write_path, 'w') as f_out:
with open(self.copy_path, 'r') as f_in:
text = f_in.read()
f_out.write(text)
else:
counter = 1
temp_write_path = write_path
while temp_write_path in dirs:
temp_write_path = write_path+'.'+str(counter)
counter = counter + 1
write_path = temp_write_path
self.recursive_paste(write_path)
                self.copy_path = ''
if self.editing_pi:
transport = paramiko.Transport((self.ip, 22))
transport.connect(username=self.username, password=self.password)
sftp = paramiko.SFTPClient.from_transport(transport)
try:
sftp.put(write_path, write_path[write_path.find(self.meringue_path + '/local/') + len(self.meringue_path + '/local/'):])
except:
print('not a file')
self.recursive_paste_sftp(write_path, sftp)
self.tree.delete(*self.tree.get_children())
self.tree = self.list_files('.', self.tree, "", '.')
self.tree.item(os.getcwd(), open=True)
def recursive_paste(self, path):
os.mkdir(path)
dirs = [f for f in listdir(self.copy_path) if not isfile(join(self.copy_path, f))]
files = [f for f in listdir(self.copy_path) if isfile(join(self.copy_path, f))]
for f in files:
with open(path+'/'+f, 'w') as f_out:
with open(self.copy_path+'/'+f, 'r') as f_in:
text = f_in.read()
f_out.write(text)
for d in dirs:
self.recursive_paste(path+'/'+d)
def recursive_paste_sftp(self, path, sftp):
sftp.mkdir(path[path.find(self.meringue_path + '/local/') + len(self.meringue_path + '/local/'):])
dirs = [path+'/'+f for f in listdir(path) if not isfile(join(path, f))]
files = [path+'/'+f for f in listdir(path) if isfile(join(path, f))]
print(dirs)
print(files)
for f in files:
sftp.put(f, f[f.find(self.meringue_path + '/local/') + len(self.meringue_path + '/local/'):])
for d in dirs:
            self.recursive_paste_sftp(d, sftp)
def read_config(self):
with open(self.meringue_path + '/data/meringue_config.ini', 'r') as f_in:
self.lines = f_in.read().split('\n')
self.foreground = self.lines[0].split('=')[1]
self.foreground = self.foreground[:7]
self.background = self.lines[1].split('=')[1]
self.background = self.background[:7]
self.file_color = self.lines[2].split('=')[1]
self.file_color = self.file_color[:7]
self.dir_color = self.lines[3].split('=')[1]
self.dir_color = self.dir_color[:7]
self.line_num_color = self.lines[4].split('=')[1]
self.line_num_color = self.line_num_color[:7]
self.line_num_background_color = self.lines[5].split('=')[1]
self.line_num_background_color = self.line_num_background_color[:7]
self.file_bar_color = self.lines[6].split('=')[1]
self.file_bar_color = self.file_bar_color[:7]
self.file_bar_text_color = self.lines[7].split('=')[1]
self.file_bar_text_color = self.file_bar_text_color[:7]
self.notebook_background = self.lines[8].split('=')[1]
self.notebook_background = self.notebook_background[:7]
self.highlight_foreground = self.lines[9].split('=')[1]
self.highlight_foreground = self.highlight_foreground[:7]
self.highlight_background = self.lines[10].split('=')[1]
self.highlight_background = self.highlight_background[:7]
self.token_keyword = self.lines[11].split('=')[1]
self.token_keyword = self.token_keyword[:7]
self.token_name = self.lines[12].split('=')[1]
self.token_name = self.token_name[:7]
self.token_literal = self.lines[13].split('=')[1]
self.token_literal = self.token_literal[:7]
self.token_string = self.lines[14].split('=')[1]
self.token_string = self.token_string[:7]
self.token_number = self.lines[15].split('=')[1]
self.token_number = self.token_number[:7]
self.token_operators = self.lines[16].split('=')[1]
self.token_operators = self.token_operators[:7]
self.token_punctuation = self.lines[17].split('=')[1]
self.token_punctuation = self.token_punctuation[:7]
self.token_comments = self.lines[18].split('=')[1]
self.token_comments = self.token_comments[:7]
self.token_generic = self.lines[19].split('=')[1]
self.token_generic = self.token_generic[:7]
self.folder = self.lines[20].split('=')[1]
if not self.folder:
self.folder = askdirectory()
self.lines[20] = self.lines[20][:self.lines[20].find('=')+1]+self.folder
self.write_config()
try:
os.chdir(self.folder)
except:
self.folder = askdirectory()
self.lines[20] = self.lines[20][:self.lines[20].find('=')+1]+self.folder
self.write_config()
os.chdir(self.folder)
def new_file(self):
nd = new_dialog(self.root, self)
def new_file_func(self, name):
item = self.tree.selection()[0]
if not isfile(item):
with open(item+'/'+name, 'w') as f_out:
f_out.write('')
self.tree.delete(*self.tree.get_children())
self.tree = self.list_files('.', self.tree, "", '.')
self.tree.item(os.getcwd(), open=True)
if self.editing_pi:
transport = paramiko.Transport((self.ip, 22))
transport.connect(username=self.username, password=self.password)
sftp = paramiko.SFTPClient.from_transport(transport)
try:
sftp.put(item+'/'+name,item[item.find(self.meringue_path + '/local/') + len(self.meringue_path + '/local/'):]+'/'+name)
except:
print('Could not push for some reason')
else:
tkMessageBox.showwarning("File Creation", "Please select the parent folder for the new file and then try creating it again")
def new_folder(self):
nfd = new_folder_dialog(self.root, self)
def new_folder_func(self, name):
item = self.tree.selection()[0]
if not isfile(item):
#with open(item+'/'+name, 'w') as f_out:
# f_out.write('')
os.mkdir(item+'/'+name)
self.tree.delete(*self.tree.get_children())
self.tree = self.list_files('.', self.tree, "", '.')
self.tree.item(os.getcwd(), open=True)
if self.editing_pi:
transport = paramiko.Transport((self.ip, 22))
transport.connect(username=self.username, password=self.password)
sftp = paramiko.SFTPClient.from_transport(transport)
try:
sftp.mkdir(item[item.find(self.meringue_path + '/local/') + len(self.meringue_path + '/local/'):]+'/'+name)
except:
print('Could not push for some reason')
else:
tkMessageBox.showwarning("File Creation", "Please select the parent folder for the new file and then try creating it again")
def color_config(self):
cc = change_color(self.root, self)
def make_directory_menu(self, w):
self.directory_menu = Menu(self.root, tearoff=0)
self.directory_menu.add_command(label="Delete", command=self.delete)
self.directory_menu.add_command(label="Rename", command=self.tree_rename)
self.directory_menu.add_command(label="Copy", command=self.copy_file)
self.directory_menu.add_command(label="Paste", command=self.paste_file)
self.directory_menu.add_command(label='New File', command=self.new_file)
self.directory_menu.add_command(label='New Folder', command=self.new_folder)
#self.directory.menu.add_command(label='Copy', command=self.copy_item)
#self.directory.menu.add_command(label='Paste', command=self.paste_item)
def write_config(self):
print('writing')
with open(self.meringue_path + '/data/meringue_config.ini', 'w') as f_out:
for line in self.lines:
f_out.write(line + '\n')
f_out.flush()
def __init__(self):
self.meringue_path = os.path.realpath(__file__)
if os.name == 'nt':
self.meringue_path = self.meringue_path[:self.meringue_path.rfind('\\') + 1]
else:
self.meringue_path = self.meringue_path[:self.meringue_path.rfind('/') + 1]
print(self.meringue_path)
sys.stdout.flush()
#os.chdir(os.path.join(os.path.expanduser('~'), 'Documents'))
self.root = Tk()
img = PhotoImage(file=self.meringue_path + 'icon.gif')
self.root.tk.call('wm', 'iconphoto', self.root._w, img)
#self.root.iconbitmap(self.meringue_path + '/' + 'icon.gif')
self.eds = []
self.n = ttk.Notebook(self.root)
self.menubar = Menu(self.root)
self.tab_names = []
self.find_string = ''
self.find_counter = 0
self.copy_path = ''
self.selected_file_dir = ''
self.tree_array = []
self.remote_tree_array = []
self.remote_tree_file_array = []
self.editing_pi = False
self.username = ''
self.password = ''
self.ip = ''
self.port = 22
self.new_file_or_folder_name = ''
self.folder = ''
        self.highlight_foreground = ''
self.highlight_background = ''
self.highlight_keyword = ''
self.highlight_function_name = ''
self.highlight_function = ''
self.highlight_boolean = ''
self.highlight_string = ''
self.highlight_number = ''
self.highlight_operator = ''
#self.highlight_normal = ''
self.foreground = ''
self.background = ''
self.start(1, 9999)
self.make_directory_menu(self.root)
self.jump_counter = 0
self.find_counter = 0
try:
if os.name == 'posix':
os.makedirs(self.meringue_path+'local')
else:
os.makedirs(self.meringue_path.replace('\\', '/')+'local')
except:
pass
if os.name == 'posix':
self.recursive_delete(self.meringue_path+'local')
else:
self.recursive_delete(self.meringue_path.replace('\\', '/')+'local')
self.sftp_stem = ''
mainloop()
def recursive_delete(self, rootDir):
for lists in os.listdir(rootDir):
path = os.path.join(rootDir, lists)
if os.path.isdir(path):
self.recursive_delete(path)
else:
try:
os.remove(path)
except:
pass
try:
os.rmdir(path)
except:
pass
def __main__(self):
App()
def main():
App()
if __name__ == '__main__':
main()
|
the-stack_0_7230 | import random
import httpx
from utils.log import logger
'''
The API response has the following format:
Field       Type      Description
pid         int       artwork pid
p           int       page index within the artwork
uid         int       author uid
title       string    artwork title
author      string    author name (anything from @ onwards is filtered out when stored)
r18         boolean   whether it is R18 (the classification in the library, not necessarily the artwork's own R18 flag)
width       int       original image width in px
height      int       original image height in px
tags        string[]  artwork tags, including Chinese translations of the tags (where available)
ext         string    image file extension
uploadDate  number    artwork upload date; timestamp in milliseconds
urls        object    image URLs for every requested size
'''
_url = "https://api.lolicon.app/setu/v2"
async def fetch_lolicon_random_img():
"""
从lolicon接口获取一张随机色图,并按照规范输出
"""
j = httpx.get(_url).json()
error = j['error']
logger.info(f'请求结果: {j}')
if len(error) > 0:
raise Exception(f'接口异常: {error}')
# 返回图片列表的随机一张图
data = j['data']
data_len = len(data)
if data_len == 0:
raise Exception(f'返回数据为空')
# 随机获取一张图片对象
random_idx = random.randint(0, data_len - 1)
logger.info(f'随机位置: {random_idx}\n图片列表: {data}')
item = data[random_idx]
return item['title'], item["author"], item["urls"]["original"] |
the-stack_0_7231 | import random
import string
from django.db import transaction
from django.db.models import fields
from rest_framework import serializers
from rest_framework.exceptions import ValidationError
from vbb_backend.users.models import Teacher, User, UserTypeEnum
def random_char(y):
return "".join(random.choice(string.ascii_letters) for x in range(y))
class TeacherUserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = (
"first_name",
"last_name",
"date_of_birth",
"time_zone",
"initials",
"personal_email",
"phone",
"city",
"notes",
)
def validate(self, attrs):
attrs["user_type"] = UserTypeEnum.TEACHER.value
return super().validate(attrs)
class TeacherSerializer(serializers.ModelSerializer):
id = serializers.UUIDField(source="external_id", read_only=True)
user = TeacherUserSerializer(required=True)
class Meta:
model = Teacher
exclude = ("deleted", "external_id")
def validate(self, attrs):
user = attrs["user"]
with transaction.atomic():
if self.instance:
user_obj = self.instance.user
user = TeacherUserSerializer(user_obj, data=user)
user.is_valid(raise_exception=True)
instance = user.save()
attrs["user"] = instance
else:
user = TeacherUserSerializer(data=user)
user.is_valid(raise_exception=True)
instance = user.save(email=random_char(20) + "@vbb.com")
attrs["user"] = instance
return super().validate(attrs) |
the-stack_0_7232 | import math, logging
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from ..tokenization import WordTokenizer
class GRUEncoder(nn.Module):
def __init__(self,
token2id,
max_length=128,
hidden_size=230,
word_size=50,
blank_padding=True,
word2vec=None,
bidirectional=True,
dropout=0,
activation_function=F.tanh,
mask_entity=False):
"""
Args:
token2id: dictionary of token->idx mapping
max_length: max length of sentence
hidden_size: hidden size
word_size: size of word embedding
blank_padding: padding for RNN
word2vec: pretrained word2vec numpy
bidirectional: if it is a bidirectional RNN
activation_function: the activation function of RNN, tanh/relu
"""
# Hyperparameters
super(GRUEncoder, self).__init__()
self.token2id = token2id
self.max_length = max_length + 4 # 4 == take into account PIs
self.num_token = len(token2id)
self.num_position = max_length * 2
self.bidirectional = bidirectional
self.mask_entity = mask_entity
if word2vec is None:
self.word_size = word_size
else:
self.word_size = word2vec.shape[-1]
self.hidden_size = hidden_size
self.input_size = word_size
self.blank_padding = blank_padding
# Position Indicators (PI)
if not '<head>' in self.token2id:
self.token2id['<head>'] = len(self.token2id)
self.num_token += 1
if not '</head>' in self.token2id:
self.token2id['</head>'] = len(self.token2id)
self.num_token += 1
if not '<tail>' in self.token2id:
self.token2id['<tail>'] = len(self.token2id)
self.num_token += 1
if not '</tail>' in self.token2id:
self.token2id['</tail>'] = len(self.token2id)
self.num_token += 1
# add [UNK] and [PAD] tokens
if not '[UNK]' in self.token2id:
self.token2id['[UNK]'] = len(self.token2id)
self.num_token += 1
if not '[PAD]' in self.token2id:
self.token2id['[PAD]'] = len(self.token2id)
self.num_token += 1
# Word embedding
self.word_embedding = nn.Embedding(self.num_token, self.word_size)
if word2vec is not None:
logging.info("Initializing word embedding with word2vec.")
word2vec = torch.from_numpy(word2vec)
if self.num_token == len(word2vec) + 6: # 6 == <head>, </head>, <tail>, </tail>, [UNK], [PAD]
hsp = torch.randn(1, self.word_size) / math.sqrt(self.word_size)
hep = torch.randn(1, self.word_size) / math.sqrt(self.word_size)
tsp = torch.randn(1, self.word_size) / math.sqrt(self.word_size)
tep = torch.randn(1, self.word_size) / math.sqrt(self.word_size)
unk = torch.randn(1, self.word_size) / math.sqrt(self.word_size)
pad = torch.zeros(1, self.word_size)
self.word_embedding.weight.data.copy_(torch.cat([word2vec, hsp, hep, tsp, tep, unk, pad], 0))
else:
self.word_embedding.weight.data.copy_(word2vec)
self.tokenizer = WordTokenizer(vocab=self.token2id, unk_token="[UNK]")
self.drop = nn.Dropout(dropout)
self.act = activation_function
self.gru_fw = nn.GRU(self.input_size, self.hidden_size, batch_first=True)
if self.bidirectional:
self.gru_bw = nn.GRU(self.input_size, self.hidden_size, batch_first=True)
self.pool = nn.MaxPool1d(self.max_length)
def forward(self, token):
"""
Args:
token: (B, L), index of tokens
Return:
(B, H), representations for sentences
"""
# Check size of tensors
if len(token.size()) != 2:
raise Exception("Size of token should be (B, L)")
# Get non padding mask and sentence lengths (B,)
non_pad_mask, length = self.non_padding_mask(token)
x = self.word_embedding(token) # (B, L, EMBED)
out_fw, _ = self.gru_fw(x)
out = non_pad_mask * out_fw
if self.bidirectional:
x_bw = self.reverse_padded_sequence(x, length, batch_first=True)
out_bw, _ = self.gru_bw(x_bw)
out_bw = non_pad_mask * out_bw
out_bw = self.reverse_padded_sequence(out_bw, length, batch_first=True)
out = torch.add(out, out_bw) # (B, L, H)
out = out.transpose(1, 2) # (B, H, L)
out = self.pool(out).squeeze(-1) # (B, H)
out = self.act(out)
out = self.drop(out)
return out
def tokenize(self, item):
"""
Args:
item: input instance, including sentence, entity positions, etc.
Return:
index number of tokens and positions
"""
if 'text' in item:
sentence = item['text']
is_token = False
else:
sentence = item['token']
is_token = True
pos_head = item['h']['pos']
pos_tail = item['t']['pos']
# Sentence -> token
if not is_token:
if pos_head[0] > pos_tail[0]:
pos_min, pos_max = [pos_tail, pos_head]
rev = True
else:
pos_min, pos_max = [pos_head, pos_tail]
rev = False
sent_0 = self.tokenizer.tokenize(sentence[:pos_min[0]])
sent_1 = self.tokenizer.tokenize(sentence[pos_min[1]:pos_max[0]])
sent_2 = self.tokenizer.tokenize(sentence[pos_max[1]:])
ent_0 = self.tokenizer.tokenize(sentence[pos_min[0]:pos_min[1]])
ent_1 = self.tokenizer.tokenize(sentence[pos_max[0]:pos_max[1]])
if self.mask_entity:
ent_0 = ['[UNK]']
ent_1 = ['[UNK]']
if rev:
ent_0 = ['<tail>'] + ent_0 + ['</tail>']
ent_1 = ['<head>'] + ent_1 + ['</head>']
else:
ent_0 = ['<head>'] + ent_0 + ['</head>']
ent_1 = ['<tail>'] + ent_1 + ['</tail>']
tokens = sent_0 + ent_0 + sent_1 + ent_1 + sent_2
else:
tokens = sentence
# Token -> index
if self.blank_padding:
indexed_tokens = self.tokenizer.convert_tokens_to_ids(tokens, self.max_length, self.token2id['[PAD]'], self.token2id['[UNK]'])
else:
indexed_tokens = self.tokenizer.convert_tokens_to_ids(tokens, unk_id = self.token2id['[UNK]'])
if self.blank_padding:
indexed_tokens = indexed_tokens[:self.max_length]
indexed_tokens = torch.tensor(indexed_tokens).long().unsqueeze(0) # (1, L)
return indexed_tokens
def reverse_padded_sequence(self, x, lengths, batch_first=True):
"""Reverses sequences according to their lengths.
Inputs should have size ``T x B x *`` if ``batch_first`` is False, or
``B x T x *`` if True. T is the length of the longest sequence (or larger),
B is the batch size, and * is any number of dimensions (including 0).
Arguments:
x (tensor): padded batch of variable length sequences.
lengths (list[int]): list of sequence lengths
batch_first (bool, optional): if True, inputs should be B x T x *.
Returns:
A tensor with the same size as inputs, but with each sequence
reversed according to its length.
"""
if not batch_first:
x = x.transpose(0, 1)
if x.size(0) != len(lengths):
raise ValueError('inputs incompatible with lengths.')
reversed_indices = [list(range(x.size(1))) for _ in range(x.size(0))]
for i, length in enumerate(lengths):
if length > 0:
reversed_indices[i][:length] = reversed_indices[i][length - 1::-1]
reversed_indices = (torch.LongTensor(reversed_indices).unsqueeze(2).expand_as(x))
reversed_indices = reversed_indices.to(x.device)
reversed_x = torch.gather(x, 1, reversed_indices)
if not batch_first:
reversed_x = reversed_x.transpose(0, 1)
return reversed_x
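    # Worked example (illustrative): with batch_first=True and lengths = [3, 2],
    #   x = [[a, b, c, PAD], [d, e, PAD, PAD]]
    # comes back as [[c, b, a, PAD], [e, d, PAD, PAD]]; only the non-padded
    # prefix of each row is reversed, trailing padding stays in place.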
def non_padding_mask(self, token):
non_pad_mask = token.ne(self.token2id['[PAD]']).type(torch.float)
length = torch.count_nonzero(non_pad_mask, dim=1)
return non_pad_mask.unsqueeze(-1), length
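# Hedged usage sketch (illustrative, not part of the original module): the toy
# vocabulary, random word2vec matrix and sample instance below are assumptions
# chosen only to show how tokenize() and forward() fit together.
if __name__ == '__main__':
    toy_vocab = {'paris': 0, 'is': 1, 'the': 2, 'capital': 3, 'of': 4, 'france': 5}
    toy_word2vec = np.random.randn(len(toy_vocab), 50).astype('float32')
    encoder = GRUEncoder(toy_vocab, max_length=16, hidden_size=32, word2vec=toy_word2vec)
    sample = {'token': ['paris', 'is', 'the', 'capital', 'of', 'france'],
              'h': {'pos': (0, 1)}, 't': {'pos': (5, 6)}}
    token_ids = encoder.tokenize(sample)  # (1, L) tensor of token indices
    sentence_rep = encoder(token_ids)     # (1, hidden_size) sentence representation
    print(sentence_rep.shape)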
|
the-stack_0_7233 | # Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import datetime
import faulthandler
import logging
import os
import signal
import sys
import threading
import traceback
from contextlib import contextmanager
from typing import Callable, Iterator, Optional
import setproctitle
from pants.base.exiter import Exiter
from pants.util.dirutil import safe_mkdir, safe_open
from pants.util.osutil import Pid
logger = logging.getLogger(__name__)
class SignalHandler:
"""A specification for how to handle a fixed set of nonfatal signals.
This is subclassed and registered with ExceptionSink.reset_signal_handler() whenever the signal
handling behavior is modified for different pants processes, for example in the remote client when
pantsd is enabled. The default behavior is to exit "gracefully" by leaving a detailed log of which
signal was received, then exiting with failure.
Note that the terminal will convert a ctrl-c from the user into a SIGINT.
"""
@property
def signal_handler_mapping(self):
"""A dict mapping (signal number) -> (a method handling the signal)."""
# Could use an enum here, but we never end up doing any matching on the specific signal value,
# instead just iterating over the registered signals to set handlers, so a dict is probably
# better.
return {
signal.SIGINT: self._handle_sigint_if_enabled,
signal.SIGQUIT: self.handle_sigquit,
signal.SIGTERM: self.handle_sigterm,
}
def __init__(self):
self._ignore_sigint_lock = threading.Lock()
self._threads_ignoring_sigint = 0
self._ignoring_sigint_v2_engine = False
def _check_sigint_gate_is_correct(self):
assert self._threads_ignoring_sigint >= 0, \
"This should never happen, someone must have modified the counter outside of SignalHandler."
def _handle_sigint_if_enabled(self, signum, _frame):
with self._ignore_sigint_lock:
self._check_sigint_gate_is_correct()
threads_ignoring_sigint = self._threads_ignoring_sigint
ignoring_sigint_v2_engine = self._ignoring_sigint_v2_engine
if threads_ignoring_sigint == 0 and not ignoring_sigint_v2_engine:
self.handle_sigint(signum, _frame)
def _toggle_ignoring_sigint_v2_engine(self, toggle: bool):
with self._ignore_sigint_lock:
self._ignoring_sigint_v2_engine = toggle
@contextmanager
def _ignoring_sigint(self):
with self._ignore_sigint_lock:
self._check_sigint_gate_is_correct()
self._threads_ignoring_sigint += 1
try:
yield
finally:
with self._ignore_sigint_lock:
self._threads_ignoring_sigint -= 1
self._check_sigint_gate_is_correct()
def handle_sigint(self, signum, _frame):
raise KeyboardInterrupt('User interrupted execution with control-c!')
# TODO(#7406): figure out how to let sys.exit work in a signal handler instead of having to raise
# this exception!
class SignalHandledNonLocalExit(Exception):
"""Raised in handlers for non-fatal signals to overcome Python limitations.
When waiting on a subprocess and in a signal handler, sys.exit appears to be ignored, and causes
the signal handler to return. We want to (eventually) exit after these signals, not ignore them,
so we raise this exception instead and check it in our sys.excepthook override.
"""
def __init__(self, signum, signame):
self.signum = signum
self.signame = signame
self.traceback_lines = traceback.format_stack()
super(SignalHandler.SignalHandledNonLocalExit, self).__init__()
def handle_sigquit(self, signum, _frame):
raise self.SignalHandledNonLocalExit(signum, 'SIGQUIT')
def handle_sigterm(self, signum, _frame):
raise self.SignalHandledNonLocalExit(signum, 'SIGTERM')
class ExceptionSink:
"""A mutable singleton object representing where exceptions should be logged to."""
# NB: see the bottom of this file where we call reset_log_location() and other mutators in order
# to properly setup global state.
_log_dir = None
# We need an exiter in order to know what to do after we log a fatal exception or handle a
# catchable signal.
_exiter: Optional[Exiter] = None
# Where to log stacktraces to in a SIGUSR2 handler.
_interactive_output_stream = None
# Whether to print a stacktrace in any fatal error message printed to the terminal.
_should_print_backtrace_to_terminal = True
# An instance of `SignalHandler` which is invoked to handle a static set of specific
# nonfatal signals (these signal handlers are allowed to make pants exit, but unlike SIGSEGV they
# don't need to exit immediately).
_signal_handler: Optional[SignalHandler] = None
# These persistent open file descriptors are kept so the signal handler can do almost no work
# (and lets faulthandler figure out signal safety).
_pid_specific_error_fileobj = None
_shared_error_fileobj = None
def __new__(cls, *args, **kwargs):
raise TypeError('Instances of {} are not allowed to be constructed!'
.format(cls.__name__))
class ExceptionSinkError(Exception): pass
@classmethod
def reset_should_print_backtrace_to_terminal(cls, should_print_backtrace):
"""Set whether a backtrace gets printed to the terminal error stream on a fatal error.
Class state:
- Overwrites `cls._should_print_backtrace_to_terminal`.
"""
cls._should_print_backtrace_to_terminal = should_print_backtrace
# All reset_* methods are ~idempotent!
@classmethod
def reset_log_location(cls, new_log_location):
"""Re-acquire file handles to error logs based in the new location.
Class state:
- Overwrites `cls._log_dir`, `cls._pid_specific_error_fileobj`, and
`cls._shared_error_fileobj`.
OS state:
- May create a new directory.
- Overwrites signal handlers for many fatal and non-fatal signals (but not SIGUSR2).
:raises: :class:`ExceptionSink.ExceptionSinkError` if the directory does not exist or is not
writable.
"""
# We could no-op here if the log locations are the same, but there's no reason not to have the
# additional safety of re-acquiring file descriptors each time (and erroring out early if the
# location is no longer writable).
# Create the directory if possible, or raise if not writable.
cls._check_or_create_new_destination(new_log_location)
pid_specific_error_stream, shared_error_stream = cls._recapture_fatal_error_log_streams(
new_log_location)
# NB: mutate process-global state!
if faulthandler.is_enabled():
logger.debug('re-enabling faulthandler')
# Call Py_CLEAR() on the previous error stream:
# https://github.com/vstinner/faulthandler/blob/master/faulthandler.c
faulthandler.disable()
# Send a stacktrace to this file if interrupted by a fatal error.
faulthandler.enable(file=pid_specific_error_stream, all_threads=True)
# NB: mutate the class variables!
cls._log_dir = new_log_location
cls._pid_specific_error_fileobj = pid_specific_error_stream
cls._shared_error_fileobj = shared_error_stream
class AccessGlobalExiterMixin:
@property
def _exiter(self) -> Optional[Exiter]:
return ExceptionSink.get_global_exiter()
@classmethod
def get_global_exiter(cls) -> Optional[Exiter]:
return cls._exiter
@classmethod
@contextmanager
def exiter_as(cls, new_exiter_fun: Callable[[Optional[Exiter]], Exiter]) -> Iterator[None]:
"""Temporarily override the global exiter.
NB: We don't want to try/finally here, because we want exceptions to propagate
with the most recent exiter installed in sys.excepthook.
If we wrap this in a try:finally, exceptions will be caught and exiters unset.
"""
previous_exiter = cls._exiter
new_exiter = new_exiter_fun(previous_exiter)
cls._reset_exiter(new_exiter)
yield
cls._reset_exiter(previous_exiter)
@classmethod
@contextmanager
def exiter_as_until_exception(cls, new_exiter_fun: Callable[[Optional[Exiter]], Exiter]) -> Iterator[None]:
"""Temporarily override the global exiter, except this will unset it when an exception happens."""
previous_exiter = cls._exiter
new_exiter = new_exiter_fun(previous_exiter)
try:
cls._reset_exiter(new_exiter)
yield
finally:
cls._reset_exiter(previous_exiter)
@classmethod
def _reset_exiter(cls, exiter: Optional[Exiter]) -> None:
"""
Class state:
- Overwrites `cls._exiter`.
Python state:
- Overwrites sys.excepthook.
"""
assert(isinstance(exiter, Exiter))
logger.debug(f"overriding the global exiter with {exiter} (from {cls._exiter})")
# NB: mutate the class variables! This is done before mutating the exception hook, because the
# uncaught exception handler uses cls._exiter to exit.
cls._exiter = exiter
# NB: mutate process-global state!
sys.excepthook = cls._log_unhandled_exception_and_exit
@classmethod
def reset_interactive_output_stream(
cls,
interactive_output_stream,
override_faulthandler_destination=True
):
"""
Class state:
- Overwrites `cls._interactive_output_stream`.
OS state:
- Overwrites the SIGUSR2 handler.
This method registers a SIGUSR2 handler, which permits a non-fatal `kill -31 <pants pid>` for
    stacktrace retrieval. This is also where the error message on a fatal exit will be printed.
"""
try:
# NB: mutate process-global state!
# This permits a non-fatal `kill -31 <pants pid>` for stacktrace retrieval.
if override_faulthandler_destination:
faulthandler.register(signal.SIGUSR2, interactive_output_stream,
all_threads=True, chain=False)
# NB: mutate the class variables!
cls._interactive_output_stream = interactive_output_stream
except ValueError:
# Warn about "ValueError: IO on closed file" when the stream is closed.
cls.log_exception(
"Cannot reset interactive_output_stream -- stream (probably stderr) is closed")
@classmethod
def exceptions_log_path(cls, for_pid=None, in_dir=None):
"""Get the path to either the shared or pid-specific fatal errors log file."""
if for_pid is None:
intermediate_filename_component = ''
else:
assert(isinstance(for_pid, Pid))
intermediate_filename_component = '.{}'.format(for_pid)
in_dir = in_dir or cls._log_dir
return os.path.join(
in_dir,
'logs',
'exceptions{}.log'.format(intermediate_filename_component))
@classmethod
def log_exception(cls, msg):
"""Try to log an error message to this process's error log and the shared error log.
NB: Doesn't raise (logs an error instead).
"""
pid = os.getpid()
fatal_error_log_entry = cls._format_exception_message(msg, pid)
# We care more about this log than the shared log, so write to it first.
try:
cls._try_write_with_flush(cls._pid_specific_error_fileobj, fatal_error_log_entry)
except Exception as e:
logger.error(
"Error logging the message '{}' to the pid-specific file handle for {} at pid {}:\n{}"
.format(msg, cls._log_dir, pid, e))
# Write to the shared log.
try:
# TODO: we should probably guard this against concurrent modification by other pants
# subprocesses somehow.
cls._try_write_with_flush(cls._shared_error_fileobj, fatal_error_log_entry)
except Exception as e:
logger.error(
"Error logging the message '{}' to the shared file handle for {} at pid {}:\n{}"
.format(msg, cls._log_dir, pid, e))
@classmethod
def _try_write_with_flush(cls, fileobj, payload):
"""This method is here so that it can be patched to simulate write errors.
This is because mock can't patch primitive objects like file objects.
"""
fileobj.write(payload)
fileobj.flush()
@classmethod
def _check_or_create_new_destination(cls, destination):
try:
safe_mkdir(destination)
except Exception as e:
raise cls.ExceptionSinkError(
"The provided exception sink path at '{}' is not writable or could not be created: {}."
.format(destination, str(e)),
e)
@classmethod
def _recapture_fatal_error_log_streams(cls, new_log_location):
# NB: We do not close old file descriptors under the assumption their lifetimes are managed
# elsewhere.
# We recapture both log streams each time.
pid = os.getpid()
pid_specific_log_path = cls.exceptions_log_path(for_pid=pid, in_dir=new_log_location)
shared_log_path = cls.exceptions_log_path(in_dir=new_log_location)
assert(pid_specific_log_path != shared_log_path)
try:
# Truncate the pid-specific error log file.
pid_specific_error_stream = safe_open(pid_specific_log_path, mode='w')
# Append to the shared error file.
shared_error_stream = safe_open(shared_log_path, mode='a')
except Exception as e:
raise cls.ExceptionSinkError(
"Error opening fatal error log streams for log location '{}': {}"
.format(new_log_location, str(e)))
return (pid_specific_error_stream, shared_error_stream)
@classmethod
def reset_signal_handler(cls, signal_handler):
"""
Class state:
- Overwrites `cls._signal_handler`.
OS state:
- Overwrites signal handlers for SIGINT, SIGQUIT, and SIGTERM.
NB: This method calls signal.signal(), which will crash if not called from the main thread!
:returns: The :class:`SignalHandler` that was previously registered, or None if this is
the first time this method was called.
"""
assert(isinstance(signal_handler, SignalHandler))
# NB: Modify process-global state!
for signum, handler in signal_handler.signal_handler_mapping.items():
signal.signal(signum, handler)
# Retry any system calls interrupted by any of the signals we just installed handlers for
# (instead of having them raise EINTR). siginterrupt(3) says this is the default behavior on
# Linux and OSX.
signal.siginterrupt(signum, False)
previous_signal_handler = cls._signal_handler
# NB: Mutate the class variables!
cls._signal_handler = signal_handler
return previous_signal_handler
@classmethod
@contextmanager
def trapped_signals(cls, new_signal_handler):
"""
A contextmanager which temporarily overrides signal handling.
NB: This method calls signal.signal(), which will crash if not called from the main thread!
"""
previous_signal_handler = cls.reset_signal_handler(new_signal_handler)
try:
yield
finally:
cls.reset_signal_handler(previous_signal_handler)
@classmethod
@contextmanager
def ignoring_sigint(cls):
"""
A contextmanager which disables handling sigint in the current signal handler.
This allows threads that are not the main thread to ignore sigint.
NB: Only use this if you can't use ExceptionSink.trapped_signals().
Class state:
- Toggles `self._ignore_sigint` in `cls._signal_handler`.
"""
with cls._signal_handler._ignoring_sigint():
yield
@classmethod
def toggle_ignoring_sigint_v2_engine(cls, toggle: bool) -> None:
assert cls._signal_handler is not None
cls._signal_handler._toggle_ignoring_sigint_v2_engine(toggle)
@classmethod
def _iso_timestamp_for_now(cls):
return datetime.datetime.now().isoformat()
# NB: This includes a trailing newline, but no leading newline.
_EXCEPTION_LOG_FORMAT = """\
timestamp: {timestamp}
process title: {process_title}
sys.argv: {args}
pid: {pid}
{message}
"""
@classmethod
def _format_exception_message(cls, msg, pid):
return cls._EXCEPTION_LOG_FORMAT.format(
timestamp=cls._iso_timestamp_for_now(),
process_title=setproctitle.getproctitle(),
args=sys.argv,
pid=pid,
message=msg)
_traceback_omitted_default_text = '(backtrace omitted)'
@classmethod
def _format_traceback(cls, traceback_lines, should_print_backtrace):
if should_print_backtrace:
traceback_string = '\n{}'.format(''.join(traceback_lines))
else:
traceback_string = ' {}'.format(cls._traceback_omitted_default_text)
return traceback_string
_UNHANDLED_EXCEPTION_LOG_FORMAT = """\
Exception caught: ({exception_type}){backtrace}
Exception message: {exception_message}{maybe_newline}
"""
@classmethod
def _format_unhandled_exception_log(cls, exc, tb, add_newline, should_print_backtrace):
exc_type = type(exc)
exception_full_name = '{}.{}'.format(exc_type.__module__, exc_type.__name__)
exception_message = str(exc) if exc else '(no message)'
maybe_newline = '\n' if add_newline else ''
return cls._UNHANDLED_EXCEPTION_LOG_FORMAT.format(
exception_type=exception_full_name,
backtrace=cls._format_traceback(traceback_lines=traceback.format_tb(tb),
should_print_backtrace=should_print_backtrace),
exception_message=exception_message,
maybe_newline=maybe_newline)
_EXIT_FAILURE_TERMINAL_MESSAGE_FORMAT = """\
{timestamp_msg}{terminal_msg}{details_msg}
"""
@classmethod
def _exit_with_failure(cls, terminal_msg):
timestamp_msg = (f'timestamp: {cls._iso_timestamp_for_now()}\n'
if cls._should_print_backtrace_to_terminal else '')
details_msg = ('' if cls._should_print_backtrace_to_terminal
else '\n\n(Use --print-exception-stacktrace to see more error details.)')
terminal_msg = terminal_msg or '<no exit reason provided>'
formatted_terminal_msg = cls._EXIT_FAILURE_TERMINAL_MESSAGE_FORMAT.format(
timestamp_msg=timestamp_msg, terminal_msg=terminal_msg, details_msg=details_msg)
# Exit with failure, printing a message to the terminal (or whatever the interactive stream is).
cls._exiter.exit_and_fail(msg=formatted_terminal_msg, out=cls._interactive_output_stream)
@classmethod
def _log_unhandled_exception_and_exit(cls, exc_class=None, exc=None, tb=None, add_newline=False):
"""A sys.excepthook implementation which logs the error and exits with failure."""
exc_class = exc_class or sys.exc_info()[0]
exc = exc or sys.exc_info()[1]
tb = tb or sys.exc_info()[2]
# This exception was raised by a signal handler with the intent to exit the program.
if exc_class == SignalHandler.SignalHandledNonLocalExit:
return cls._handle_signal_gracefully(exc.signum, exc.signame, exc.traceback_lines)
extra_err_msg = None
try:
# Always output the unhandled exception details into a log file, including the traceback.
exception_log_entry = cls._format_unhandled_exception_log(exc, tb, add_newline,
should_print_backtrace=True)
cls.log_exception(exception_log_entry)
except Exception as e:
extra_err_msg = 'Additional error logging unhandled exception {}: {}'.format(exc, e)
logger.error(extra_err_msg)
# Generate an unhandled exception report fit to be printed to the terminal (respecting the
# Exiter's should_print_backtrace field).
if cls._should_print_backtrace_to_terminal:
stderr_printed_error = cls._format_unhandled_exception_log(
exc, tb, add_newline,
should_print_backtrace=cls._should_print_backtrace_to_terminal)
if extra_err_msg:
stderr_printed_error = '{}\n{}'.format(stderr_printed_error, extra_err_msg)
else:
# If the user didn't ask for a backtrace, show a succinct error message without
# all the exception-related preamble. A power-user/pants developer can still
# get all the preamble info along with the backtrace, but the end user shouldn't
# see that boilerplate by default.
error_msgs = getattr(exc, 'end_user_messages', lambda: [str(exc)])()
stderr_printed_error = '\n' + '\n'.join(f'ERROR: {msg}' for msg in error_msgs)
cls._exit_with_failure(stderr_printed_error)
_CATCHABLE_SIGNAL_ERROR_LOG_FORMAT = """\
Signal {signum} ({signame}) was raised. Exiting with failure.{formatted_traceback}
"""
@classmethod
def _handle_signal_gracefully(cls, signum, signame, traceback_lines):
"""Signal handler for non-fatal signals which raises or logs an error and exits with failure."""
# Extract the stack, and format an entry to be written to the exception log.
formatted_traceback = cls._format_traceback(traceback_lines=traceback_lines,
should_print_backtrace=True)
signal_error_log_entry = cls._CATCHABLE_SIGNAL_ERROR_LOG_FORMAT.format(
signum=signum,
signame=signame,
formatted_traceback=formatted_traceback)
# TODO: determine the appropriate signal-safe behavior here (to avoid writing to our file
# descriptors re-entrantly, which raises an IOError).
# This method catches any exceptions raised within it.
cls.log_exception(signal_error_log_entry)
# Create a potentially-abbreviated traceback for the terminal or other interactive stream.
formatted_traceback_for_terminal = cls._format_traceback(
traceback_lines=traceback_lines,
should_print_backtrace=cls._should_print_backtrace_to_terminal)
terminal_log_entry = cls._CATCHABLE_SIGNAL_ERROR_LOG_FORMAT.format(
signum=signum,
signame=signame,
formatted_traceback=formatted_traceback_for_terminal)
# Exit, printing the output to the terminal.
cls._exit_with_failure(terminal_log_entry)
# Setup global state such as signal handlers and sys.excepthook with probably-safe values at module
# import time.
# Set the log location for writing logs before bootstrap options are parsed.
ExceptionSink.reset_log_location(os.getcwd())
# Sets except hook for exceptions at import time.
ExceptionSink._reset_exiter(Exiter(exiter=sys.exit))
# Sets a SIGUSR2 handler.
ExceptionSink.reset_interactive_output_stream(sys.stderr.buffer)
# Sets a handler that logs nonfatal signals to the exception sink before exiting.
ExceptionSink.reset_signal_handler(SignalHandler())
# Set whether to print stacktraces on exceptions or signals during import time.
# NB: This will be overridden by bootstrap options in PantsRunner, so we avoid printing out a full
# stacktrace when a user presses control-c during import time unless the environment variable is set
# to explicitly request it. The exception log will have any stacktraces regardless so this should
# not hamper debugging.
ExceptionSink.reset_should_print_backtrace_to_terminal(
should_print_backtrace=os.environ.get('PANTS_PRINT_EXCEPTION_STACKTRACE', 'True') == 'True')
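# Hedged usage sketch (illustrative, not part of the original module): shows how a
# caller on a POSIX main thread might temporarily install a custom handler via
# trapped_signals(); the subclass name and log messages below are assumptions.
if __name__ == '__main__':
  class _QuietSigintHandler(SignalHandler):
    """Illustrative handler that logs control-c instead of raising."""
    def handle_sigint(self, signum, _frame):
      logger.info('Ignoring control-c inside a critical section.')
  with ExceptionSink.trapped_signals(_QuietSigintHandler()):
    logger.info('Custom signal handling installed for this block.')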
|
the-stack_0_7234 | import pytest
from world.layer import Layer
from data import TileType
@pytest.fixture(name="tilemap")
def _tilemap(origin, a, b):
layer = Layer()
layer[origin] = TileType.GROUND
layer[a] = TileType.SPACE
layer[b] = TileType.WATER
return layer
def test_serializable(tilemap, origin, a, b):
data = tilemap.json
print(data)
assert data["__TYPE__"] == "Layer"
assert data["0,0,0"] == 1
assert data["1,2,-3"] == 0
assert data["-4,1,3"] == 2
reserialized = Layer.load(data)
assert tilemap[origin] == reserialized[origin]
assert tilemap[a] == reserialized[a]
assert tilemap[b] == reserialized[b]
assert tilemap[a] == TileType.SPACE
assert tilemap[b] == TileType.WATER
|
the-stack_0_7236 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import time
import urlparse
from telemetry.core import exceptions
from telemetry.internal.actions.drag import DragAction
from telemetry.internal.actions.javascript_click import ClickElementAction
from telemetry.internal.actions.key_event import KeyPressAction
from telemetry.internal.actions.load_media import LoadMediaAction
from telemetry.internal.actions.loop import LoopAction
from telemetry.internal.actions.mouse_click import MouseClickAction
from telemetry.internal.actions.navigate import NavigateAction
from telemetry.internal.actions.page_action import GESTURE_SOURCE_DEFAULT
from telemetry.internal.actions.page_action import SUPPORTED_GESTURE_SOURCES
from telemetry.internal.actions.pinch import PinchAction
from telemetry.internal.actions.play import PlayAction
from telemetry.internal.actions.repaint_continuously import (
RepaintContinuouslyAction)
from telemetry.internal.actions.repeatable_scroll import RepeatableScrollAction
from telemetry.internal.actions.scroll import ScrollAction
from telemetry.internal.actions.scroll_bounce import ScrollBounceAction
from telemetry.internal.actions.scroll_to_element import ScrollToElementAction
from telemetry.internal.actions.seek import SeekAction
from telemetry.internal.actions.swipe import SwipeAction
from telemetry.internal.actions.tap import TapAction
from telemetry.internal.actions.wait import WaitForElementAction
from telemetry.web_perf import timeline_interaction_record
from py_trace_event import trace_event
import py_utils
_DUMP_WAIT_TIME = 3
class ActionRunner(object):
__metaclass__ = trace_event.TracedMetaClass
def __init__(self, tab, skip_waits=False):
self._tab = tab
self._skip_waits = skip_waits
@property
def tab(self):
"""Returns the tab on which actions are performed."""
return self._tab
def _RunAction(self, action):
action.WillRunAction(self._tab)
action.RunAction(self._tab)
def CreateInteraction(self, label, repeatable=False):
""" Create an action.Interaction object that issues interaction record.
An interaction record is a labeled time period containing
interaction that developers care about. Each set of metrics
specified in flags will be calculated for this time period.
To mark the start of interaction record, call Begin() method on the returned
object. To mark the finish of interaction record, call End() method on
it. Or better yet, use the with statement to create an
interaction record that covers the actions in the with block.
e.g:
with action_runner.CreateInteraction('Animation-1'):
action_runner.TapElement(...)
action_runner.WaitForJavaScriptCondition(...)
Args:
label: A label for this particular interaction. This can be any
user-defined string, but must not contain '/'.
repeatable: Whether other interactions may use the same logical name
as this interaction. All interactions with the same logical name must
have the same flags.
Returns:
An instance of action_runner.Interaction
"""
flags = []
if repeatable:
flags.append(timeline_interaction_record.REPEATABLE)
return Interaction(self, label, flags)
def CreateGestureInteraction(self, label, repeatable=False):
""" Create an action.Interaction object that issues gesture-based
interaction record.
This is similar to normal interaction record, but it will
auto-narrow the interaction time period to only include the
    synthetic gesture event output by Chrome. This is typically used to
reduce noise in gesture-based analysis (e.g., analysis for a
swipe/scroll).
The interaction record label will be prepended with 'Gesture_'.
e.g:
with action_runner.CreateGestureInteraction('Scroll-1'):
action_runner.ScrollPage()
Args:
label: A label for this particular interaction. This can be any
user-defined string, but must not contain '/'.
repeatable: Whether other interactions may use the same logical name
as this interaction. All interactions with the same logical name must
have the same flags.
Returns:
An instance of action_runner.Interaction
"""
return self.CreateInteraction('Gesture_' + label, repeatable)
def WaitForNetworkQuiescence(self, timeout_in_seconds=10):
""" Wait for network quiesence on the page.
Args:
timeout_in_seconds: maximum amount of time (seconds) to wait for network
        quiescence until raising an exception.
Raises:
py_utils.TimeoutException when the timeout is reached but the page's
network is not quiet.
"""
py_utils.WaitFor(self.tab.HasReachedQuiescence, timeout_in_seconds)
def MeasureMemory(self, deterministic_mode=False):
"""Add a memory measurement to the trace being recorded.
Behaves as a no-op if tracing is not enabled.
TODO(perezju): Also behave as a no-op if tracing is enabled but
memory-infra is not.
Args:
deterministic_mode: A boolean indicating whether to attempt or not to
control the environment (force GCs, clear caches) before making the
measurement in an attempt to obtain more deterministic results.
Returns:
GUID of the generated dump if one was triggered, None otherwise.
"""
if not self.tab.browser.platform.tracing_controller.is_tracing_running:
logging.warning('Tracing is off. No memory dumps are being recorded.')
return None
if deterministic_mode:
self.Wait(_DUMP_WAIT_TIME)
self.ForceGarbageCollection()
self.Wait(_DUMP_WAIT_TIME)
dump_id = self.tab.browser.DumpMemory()
if not dump_id:
raise exceptions.StoryActionError('Unable to obtain memory dump')
return dump_id
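  # Hedged usage sketch (illustrative): a page's interaction hook might drive this
  # runner roughly as follows; the story method and interaction label below are
  # assumptions, not part of this module.
  #
  #   def RunPageInteractions(self, action_runner):
  #     with action_runner.CreateGestureInteraction('ScrollAction'):
  #       action_runner.ScrollPage()
  #     action_runner.MeasureMemory(deterministic_mode=True)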
def Navigate(self, url, script_to_evaluate_on_commit=None,
timeout_in_seconds=60):
"""Navigates to |url|.
If |script_to_evaluate_on_commit| is given, the script source string will be
evaluated when the navigation is committed. This is after the context of
the page exists, but before any script on the page itself has executed.
"""
if urlparse.urlparse(url).scheme == 'file':
url = self._tab.browser.platform.http_server.UrlOf(url[7:])
self._RunAction(NavigateAction(
url=url,
script_to_evaluate_on_commit=script_to_evaluate_on_commit,
timeout_in_seconds=timeout_in_seconds))
def NavigateBack(self):
""" Navigate back to the previous page."""
self.ExecuteJavaScript('window.history.back()')
def WaitForNavigate(self, timeout_in_seconds_seconds=60):
start_time = time.time()
self._tab.WaitForNavigate(timeout_in_seconds_seconds)
time_left_in_seconds = (start_time + timeout_in_seconds_seconds
- time.time())
time_left_in_seconds = max(0, time_left_in_seconds)
self._tab.WaitForDocumentReadyStateToBeInteractiveOrBetter(
time_left_in_seconds)
def ReloadPage(self):
"""Reloads the page."""
self._tab.ExecuteJavaScript('window.location.reload()')
self._tab.WaitForDocumentReadyStateToBeInteractiveOrBetter()
def ExecuteJavaScript(self, *args, **kwargs):
"""Executes a given JavaScript statement. Does not return the result.
Example: runner.ExecuteJavaScript('var foo = {{ value }};', value='hi');
Args:
statement: The statement to execute (provided as a string).
Optional keyword args:
timeout: The number of seconds to wait for the statement to execute.
Additional keyword arguments provide values to be interpolated within
the statement. See telemetry.util.js_template for details.
Raises:
EvaluationException: The statement failed to execute.
"""
return self._tab.ExecuteJavaScript(*args, **kwargs)
def EvaluateJavaScript(self, *args, **kwargs):
"""Returns the result of evaluating a given JavaScript expression.
The evaluation results must be convertible to JSON. If the result
is not needed, use ExecuteJavaScript instead.
    Example: runner.EvaluateJavaScript('document.location.href');
Args:
expression: The expression to execute (provided as a string).
Optional keyword args:
timeout: The number of seconds to wait for the expression to evaluate.
Additional keyword arguments provide values to be interpolated within
the expression. See telemetry.util.js_template for details.
Raises:
      EvaluationException: The expression failed to evaluate
or the evaluation result can not be JSON-ized.
"""
return self._tab.EvaluateJavaScript(*args, **kwargs)
def WaitForJavaScriptCondition(self, *args, **kwargs):
"""Wait for a JavaScript condition to become true.
Example: runner.WaitForJavaScriptCondition('window.foo == 10');
Args:
condition: The JavaScript condition (provided as string).
Optional keyword args:
timeout: The number in seconds to wait for the condition to become
True (default to 60).
Additional keyword arguments provide values to be interpolated within
the expression. See telemetry.util.js_template for details.
"""
return self._tab.WaitForJavaScriptCondition(*args, **kwargs)
def Wait(self, seconds):
"""Wait for the number of seconds specified.
Args:
seconds: The number of seconds to wait.
"""
if not self._skip_waits:
time.sleep(seconds)
def WaitForElement(self, selector=None, text=None, element_function=None,
timeout_in_seconds=60):
"""Wait for an element to appear in the document.
The element may be selected via selector, text, or element_function.
Only one of these arguments must be specified.
Args:
selector: A CSS selector describing the element.
text: The element must contains this exact text.
element_function: A JavaScript function (as string) that is used
to retrieve the element. For example:
'(function() { return foo.element; })()'.
timeout_in_seconds: The timeout in seconds (default to 60).
"""
self._RunAction(WaitForElementAction(
selector=selector, text=text, element_function=element_function,
timeout_in_seconds=timeout_in_seconds))
def TapElement(self, selector=None, text=None, element_function=None):
"""Tap an element.
The element may be selected via selector, text, or element_function.
Only one of these arguments must be specified.
Args:
selector: A CSS selector describing the element.
text: The element must contains this exact text.
element_function: A JavaScript function (as string) that is used
to retrieve the element. For example:
'(function() { return foo.element; })()'.
"""
self._RunAction(TapAction(
selector=selector, text=text, element_function=element_function))
def ClickElement(self, selector=None, text=None, element_function=None):
"""Click an element.
The element may be selected via selector, text, or element_function.
Only one of these arguments must be specified.
Args:
selector: A CSS selector describing the element.
text: The element must contains this exact text.
element_function: A JavaScript function (as string) that is used
to retrieve the element. For example:
'(function() { return foo.element; })()'.
"""
self._RunAction(ClickElementAction(
selector=selector, text=text, element_function=element_function))
def DragPage(self, left_start_ratio, top_start_ratio, left_end_ratio,
top_end_ratio, speed_in_pixels_per_second=800, use_touch=False,
selector=None, text=None, element_function=None):
"""Perform a drag gesture on the page.
You should specify a start and an end point in ratios of page width and
height (see drag.js for full implementation).
Args:
left_start_ratio: The horizontal starting coordinate of the
gesture, as a ratio of the visible bounding rectangle for
document.body.
top_start_ratio: The vertical starting coordinate of the
gesture, as a ratio of the visible bounding rectangle for
document.body.
left_end_ratio: The horizontal ending coordinate of the
gesture, as a ratio of the visible bounding rectangle for
document.body.
top_end_ratio: The vertical ending coordinate of the
gesture, as a ratio of the visible bounding rectangle for
document.body.
speed_in_pixels_per_second: The speed of the gesture (in pixels/s).
use_touch: Whether dragging should be done with touch input.
"""
self._RunAction(DragAction(
left_start_ratio=left_start_ratio, top_start_ratio=top_start_ratio,
left_end_ratio=left_end_ratio, top_end_ratio=top_end_ratio,
speed_in_pixels_per_second=speed_in_pixels_per_second,
use_touch=use_touch, selector=selector, text=text,
element_function=element_function))
def PinchPage(self, left_anchor_ratio=0.5, top_anchor_ratio=0.5,
scale_factor=None, speed_in_pixels_per_second=800):
"""Perform the pinch gesture on the page.
It computes the pinch gesture automatically based on the anchor
    coordinate and the scale factor. The scale factor is the ratio of
    the final span to the initial span of the gesture.
Args:
left_anchor_ratio: The horizontal pinch anchor coordinate of the
gesture, as a ratio of the visible bounding rectangle for
document.body.
top_anchor_ratio: The vertical pinch anchor coordinate of the
gesture, as a ratio of the visible bounding rectangle for
document.body.
scale_factor: The ratio of the final span to the initial span.
The default scale factor is
3.0 / (window.outerWidth/window.innerWidth).
speed_in_pixels_per_second: The speed of the gesture (in pixels/s).
"""
self._RunAction(PinchAction(
left_anchor_ratio=left_anchor_ratio, top_anchor_ratio=top_anchor_ratio,
scale_factor=scale_factor,
speed_in_pixels_per_second=speed_in_pixels_per_second))
def PinchElement(self, selector=None, text=None, element_function=None,
left_anchor_ratio=0.5, top_anchor_ratio=0.5,
scale_factor=None, speed_in_pixels_per_second=800):
"""Perform the pinch gesture on an element.
It computes the pinch gesture automatically based on the anchor
    coordinate and the scale factor. The scale factor is the ratio of
    the final span to the initial span of the gesture.
Args:
selector: A CSS selector describing the element.
      text: The element must contain this exact text.
element_function: A JavaScript function (as string) that is used
to retrieve the element. For example:
'function() { return foo.element; }'.
left_anchor_ratio: The horizontal pinch anchor coordinate of the
gesture, as a ratio of the visible bounding rectangle for
the element.
top_anchor_ratio: The vertical pinch anchor coordinate of the
gesture, as a ratio of the visible bounding rectangle for
the element.
scale_factor: The ratio of the final span to the initial span.
The default scale factor is
3.0 / (window.outerWidth/window.innerWidth).
speed_in_pixels_per_second: The speed of the gesture (in pixels/s).
"""
self._RunAction(PinchAction(
selector=selector, text=text, element_function=element_function,
left_anchor_ratio=left_anchor_ratio, top_anchor_ratio=top_anchor_ratio,
scale_factor=scale_factor,
speed_in_pixels_per_second=speed_in_pixels_per_second))
def ScrollPage(self, left_start_ratio=0.5, top_start_ratio=0.5,
direction='down', distance=None, distance_expr=None,
speed_in_pixels_per_second=800, use_touch=False,
synthetic_gesture_source=GESTURE_SOURCE_DEFAULT):
"""Perform scroll gesture on the page.
You may specify distance or distance_expr, but not both. If
neither is specified, the default scroll distance is variable
depending on direction (see scroll.js for full implementation).
Args:
left_start_ratio: The horizontal starting coordinate of the
gesture, as a ratio of the visible bounding rectangle for
document.body.
top_start_ratio: The vertical starting coordinate of the
gesture, as a ratio of the visible bounding rectangle for
document.body.
direction: The direction of scroll, either 'left', 'right',
'up', 'down', 'upleft', 'upright', 'downleft', or 'downright'
      distance: The distance to scroll (in pixels).
distance_expr: A JavaScript expression (as string) that can be
evaluated to compute scroll distance. Example:
'window.scrollTop' or '(function() { return crazyMath(); })()'.
speed_in_pixels_per_second: The speed of the gesture (in pixels/s).
use_touch: Whether scrolling should be done with touch input.
synthetic_gesture_source: the source input device type for the
synthetic gesture: 'DEFAULT', 'TOUCH' or 'MOUSE'.
"""
assert synthetic_gesture_source in SUPPORTED_GESTURE_SOURCES
self._RunAction(ScrollAction(
left_start_ratio=left_start_ratio, top_start_ratio=top_start_ratio,
direction=direction, distance=distance, distance_expr=distance_expr,
speed_in_pixels_per_second=speed_in_pixels_per_second,
use_touch=use_touch, synthetic_gesture_source=synthetic_gesture_source))
def ScrollPageToElement(self, selector=None, element_function=None,
container_selector=None,
container_element_function=None,
speed_in_pixels_per_second=800):
"""Perform scroll gesture on container until an element is in view.
Both the element and the container can be specified by a CSS selector
xor a JavaScript function, provided as a string, which returns an element.
The element is required so exactly one of selector and element_function
must be provided. The container is optional so at most one of
container_selector and container_element_function can be provided.
The container defaults to document.scrollingElement or document.body if
scrollingElement is not set.
Args:
selector: A CSS selector describing the element.
element_function: A JavaScript function (as string) that is used
to retrieve the element. For example:
'function() { return foo.element; }'.
container_selector: A CSS selector describing the container element.
container_element_function: A JavaScript function (as a string) that is
used to retrieve the container element.
speed_in_pixels_per_second: Speed to scroll.
"""
self._RunAction(ScrollToElementAction(
selector=selector, element_function=element_function,
container_selector=container_selector,
container_element_function=container_element_function,
speed_in_pixels_per_second=speed_in_pixels_per_second))
def RepeatableBrowserDrivenScroll(self, x_scroll_distance_ratio=0.0,
y_scroll_distance_ratio=0.5,
repeat_count=0,
repeat_delay_ms=250,
timeout=60,
prevent_fling=None,
speed=None):
"""Perform a browser driven repeatable scroll gesture.
    The scroll gesture is driven from the browser; this is useful because the
    main thread often isn't responsive but the browser process usually is, so
    the delay between the scroll gestures should be consistent.
Args:
x_scroll_distance_ratio: The horizontal length of the scroll as a fraction
of the screen width.
y_scroll_distance_ratio: The vertical length of the scroll as a fraction
of the screen height.
repeat_count: The number of additional times to repeat the gesture.
repeat_delay_ms: The delay in milliseconds between each scroll gesture.
prevent_fling: Prevents a fling gesture.
speed: Swipe speed in pixels per second.
"""
self._RunAction(RepeatableScrollAction(
x_scroll_distance_ratio=x_scroll_distance_ratio,
y_scroll_distance_ratio=y_scroll_distance_ratio,
repeat_count=repeat_count,
repeat_delay_ms=repeat_delay_ms, timeout=timeout,
prevent_fling=prevent_fling, speed=speed))
def ScrollElement(self, selector=None, text=None, element_function=None,
left_start_ratio=0.5, top_start_ratio=0.5,
direction='down', distance=None, distance_expr=None,
speed_in_pixels_per_second=800, use_touch=False,
synthetic_gesture_source=GESTURE_SOURCE_DEFAULT):
"""Perform scroll gesture on the element.
The element may be selected via selector, text, or element_function.
Only one of these arguments must be specified.
You may specify distance or distance_expr, but not both. If
neither is specified, the default scroll distance is variable
depending on direction (see scroll.js for full implementation).
Args:
selector: A CSS selector describing the element.
      text: The element must contain this exact text.
element_function: A JavaScript function (as string) that is used
to retrieve the element. For example:
'function() { return foo.element; }'.
left_start_ratio: The horizontal starting coordinate of the
gesture, as a ratio of the visible bounding rectangle for
the element.
top_start_ratio: The vertical starting coordinate of the
gesture, as a ratio of the visible bounding rectangle for
the element.
direction: The direction of scroll, either 'left', 'right',
'up', 'down', 'upleft', 'upright', 'downleft', or 'downright'
      distance: The distance to scroll (in pixels).
distance_expr: A JavaScript expression (as string) that can be
evaluated to compute scroll distance. Example:
'window.scrollTop' or '(function() { return crazyMath(); })()'.
speed_in_pixels_per_second: The speed of the gesture (in pixels/s).
use_touch: Whether scrolling should be done with touch input.
synthetic_gesture_source: the source input device type for the
synthetic gesture: 'DEFAULT', 'TOUCH' or 'MOUSE'.
"""
assert synthetic_gesture_source in SUPPORTED_GESTURE_SOURCES
self._RunAction(ScrollAction(
selector=selector, text=text, element_function=element_function,
left_start_ratio=left_start_ratio, top_start_ratio=top_start_ratio,
direction=direction, distance=distance, distance_expr=distance_expr,
speed_in_pixels_per_second=speed_in_pixels_per_second,
use_touch=use_touch, synthetic_gesture_source=synthetic_gesture_source))
def ScrollBouncePage(self, left_start_ratio=0.5, top_start_ratio=0.5,
direction='down', distance=100,
overscroll=10, repeat_count=10,
speed_in_pixels_per_second=400):
"""Perform scroll bounce gesture on the page.
This gesture scrolls the page by the number of pixels specified in
distance, in the given direction, followed by a scroll by
(distance + overscroll) pixels in the opposite direction.
The above gesture is repeated repeat_count times.
Args:
left_start_ratio: The horizontal starting coordinate of the
gesture, as a ratio of the visible bounding rectangle for
document.body.
top_start_ratio: The vertical starting coordinate of the
gesture, as a ratio of the visible bounding rectangle for
document.body.
direction: The direction of scroll, either 'left', 'right',
'up', 'down', 'upleft', 'upright', 'downleft', or 'downright'
      distance: The distance to scroll (in pixels).
overscroll: The number of additional pixels to scroll back, in
          addition to the given distance.
repeat_count: How often we want to repeat the full gesture.
speed_in_pixels_per_second: The speed of the gesture (in pixels/s).
"""
self._RunAction(ScrollBounceAction(
left_start_ratio=left_start_ratio, top_start_ratio=top_start_ratio,
direction=direction, distance=distance,
overscroll=overscroll, repeat_count=repeat_count,
speed_in_pixels_per_second=speed_in_pixels_per_second))
def ScrollBounceElement(
self, selector=None, text=None, element_function=None,
left_start_ratio=0.5, top_start_ratio=0.5,
direction='down', distance=100,
overscroll=10, repeat_count=10,
speed_in_pixels_per_second=400):
"""Perform scroll bounce gesture on the element.
This gesture scrolls on the element by the number of pixels specified in
distance, in the given direction, followed by a scroll by
(distance + overscroll) pixels in the opposite direction.
The above gesture is repeated repeat_count times.
Args:
selector: A CSS selector describing the element.
      text: The element must contain this exact text.
element_function: A JavaScript function (as string) that is used
to retrieve the element. For example:
'function() { return foo.element; }'.
left_start_ratio: The horizontal starting coordinate of the
gesture, as a ratio of the visible bounding rectangle for
document.body.
top_start_ratio: The vertical starting coordinate of the
gesture, as a ratio of the visible bounding rectangle for
document.body.
direction: The direction of scroll, either 'left', 'right',
'up', 'down', 'upleft', 'upright', 'downleft', or 'downright'
      distance: The distance to scroll (in pixels).
overscroll: The number of additional pixels to scroll back, in
addition to the given distance.
repeat_count: How often we want to repeat the full gesture.
speed_in_pixels_per_second: The speed of the gesture (in pixels/s).
"""
self._RunAction(ScrollBounceAction(
selector=selector, text=text, element_function=element_function,
left_start_ratio=left_start_ratio, top_start_ratio=top_start_ratio,
direction=direction, distance=distance,
overscroll=overscroll, repeat_count=repeat_count,
speed_in_pixels_per_second=speed_in_pixels_per_second))
def MouseClick(self, selector=None):
"""Mouse click the given element.
Args:
selector: A CSS selector describing the element.
"""
self._RunAction(MouseClickAction(selector=selector))
def SwipePage(self, left_start_ratio=0.5, top_start_ratio=0.5,
direction='left', distance=100, speed_in_pixels_per_second=800):
"""Perform swipe gesture on the page.
Args:
left_start_ratio: The horizontal starting coordinate of the
gesture, as a ratio of the visible bounding rectangle for
document.body.
top_start_ratio: The vertical starting coordinate of the
gesture, as a ratio of the visible bounding rectangle for
document.body.
direction: The direction of swipe, either 'left', 'right',
'up', or 'down'
      distance: The distance to swipe (in pixels).
speed_in_pixels_per_second: The speed of the gesture (in pixels/s).
"""
self._RunAction(SwipeAction(
left_start_ratio=left_start_ratio, top_start_ratio=top_start_ratio,
direction=direction, distance=distance,
speed_in_pixels_per_second=speed_in_pixels_per_second))
def SwipeElement(self, selector=None, text=None, element_function=None,
left_start_ratio=0.5, top_start_ratio=0.5,
direction='left', distance=100,
speed_in_pixels_per_second=800):
"""Perform swipe gesture on the element.
The element may be selected via selector, text, or element_function.
Only one of these arguments must be specified.
Args:
selector: A CSS selector describing the element.
      text: The element must contain this exact text.
element_function: A JavaScript function (as string) that is used
to retrieve the element. For example:
'function() { return foo.element; }'.
left_start_ratio: The horizontal starting coordinate of the
gesture, as a ratio of the visible bounding rectangle for
the element.
top_start_ratio: The vertical starting coordinate of the
gesture, as a ratio of the visible bounding rectangle for
the element.
direction: The direction of swipe, either 'left', 'right',
'up', or 'down'
      distance: The distance to swipe (in pixels).
speed_in_pixels_per_second: The speed of the gesture (in pixels/s).
"""
self._RunAction(SwipeAction(
selector=selector, text=text, element_function=element_function,
left_start_ratio=left_start_ratio, top_start_ratio=top_start_ratio,
direction=direction, distance=distance,
speed_in_pixels_per_second=speed_in_pixels_per_second))
def PressKey(self, key, repeat_count=1, repeat_delay_ms=100, timeout=60):
"""Perform a key press.
Args:
key: DOM value of the pressed key (e.g. 'PageDown', see
https://developer.mozilla.org/en-US/docs/Web/API/KeyboardEvent/key).
repeat_count: How many times the key should be pressed.
repeat_delay_ms: Delay after each keypress (including the last one) in
milliseconds.
"""
for _ in xrange(repeat_count):
self._RunAction(KeyPressAction(key, timeout=timeout))
self.Wait(repeat_delay_ms / 1000.0)
def EnterText(self, text, character_delay_ms=100, timeout=60):
"""Enter text by performing key presses.
Args:
text: The text to enter.
character_delay_ms: Delay after each keypress (including the last one) in
milliseconds.
"""
for c in text:
self.PressKey(c, repeat_delay_ms=character_delay_ms, timeout=timeout)
def LoadMedia(self, selector=None, event_timeout_in_seconds=0,
event_to_await='canplaythrough'):
"""Invokes load() on media elements and awaits an event.
Args:
selector: A CSS selector describing the element. If none is
          specified, load the first media element on the page. If the
          selector matches more than 1 media element, all of them will
          be loaded.
event_timeout_in_seconds: Maximum waiting time for the event to be fired.
0 means do not wait.
event_to_await: Which event to await. For example: 'canplaythrough' or
'loadedmetadata'.
Raises:
TimeoutException: If the maximum waiting time is exceeded.
"""
self._RunAction(LoadMediaAction(
selector=selector, timeout_in_seconds=event_timeout_in_seconds,
event_to_await=event_to_await))
def PlayMedia(self, selector=None,
playing_event_timeout_in_seconds=0,
ended_event_timeout_in_seconds=0):
"""Invokes the "play" action on media elements (such as video).
Args:
selector: A CSS selector describing the element. If none is
specified, play the first media element on the page. If the
selector matches more than 1 media element, all of them will
be played.
playing_event_timeout_in_seconds: Maximum waiting time for the "playing"
event (dispatched when the media begins to play) to be fired.
0 means do not wait.
ended_event_timeout_in_seconds: Maximum waiting time for the "ended"
event (dispatched when playback completes) to be fired.
0 means do not wait.
Raises:
TimeoutException: If the maximum waiting time is exceeded.
"""
self._RunAction(PlayAction(
selector=selector,
playing_event_timeout_in_seconds=playing_event_timeout_in_seconds,
ended_event_timeout_in_seconds=ended_event_timeout_in_seconds))
def SeekMedia(self, seconds, selector=None, timeout_in_seconds=0,
log_time=True, label=''):
"""Performs a seek action on media elements (such as video).
Args:
seconds: The media time to seek to.
selector: A CSS selector describing the element. If none is
specified, seek the first media element on the page. If the
selector matches more than 1 media element, all of them will
be seeked.
timeout_in_seconds: Maximum waiting time for the "seeked" event
(dispatched when the seeked operation completes) to be
fired. 0 means do not wait.
log_time: Whether to log the seek time for the perf
          measurement. Useful when performing multiple seeks.
label: A suffix string to name the seek perf measurement.
Raises:
TimeoutException: If the maximum waiting time is exceeded.
"""
self._RunAction(SeekAction(
seconds=seconds, selector=selector,
timeout_in_seconds=timeout_in_seconds,
log_time=log_time, label=label))
def LoopMedia(self, loop_count, selector=None, timeout_in_seconds=None):
"""Loops a media playback.
Args:
loop_count: The number of times to loop the playback.
selector: A CSS selector describing the element. If none is
specified, loop the first media element on the page. If the
selector matches more than 1 media element, all of them will
be looped.
timeout_in_seconds: Maximum waiting time for the looped playback to
complete. 0 means do not wait. None (the default) means to
wait loop_count * 60 seconds.
Raises:
TimeoutException: If the maximum waiting time is exceeded.
"""
self._RunAction(LoopAction(
loop_count=loop_count, selector=selector,
timeout_in_seconds=timeout_in_seconds))
def ForceGarbageCollection(self):
"""Forces garbage collection on all relevant systems.
This includes:
- Java heap for browser and child subprocesses (on Android).
- JavaScript on the current renderer.
- System caches (on supported platforms).
"""
if self._tab.browser.supports_java_heap_garbage_collection:
self._tab.browser.ForceJavaHeapGarbageCollection()
self._tab.CollectGarbage()
if self._tab.browser.platform.SupportFlushEntireSystemCache():
self._tab.browser.platform.FlushEntireSystemCache()
def SimulateMemoryPressureNotification(self, pressure_level):
"""Simulate memory pressure notification.
Args:
pressure_level: 'moderate' or 'critical'.
"""
self._tab.browser.SimulateMemoryPressureNotification(pressure_level)
def PauseInteractive(self):
"""Pause the page execution and wait for terminal interaction.
This is typically used for debugging. You can use this to pause
the page execution and inspect the browser state before
continuing.
"""
raw_input("Interacting... Press Enter to continue.")
def RepaintContinuously(self, seconds):
"""Continuously repaints the visible content.
It does this by requesting animation frames until the given number
of seconds have elapsed AND at least three RAFs have been
    fired. Times out after max(60, seconds) if less than three
RAFs were fired."""
self._RunAction(RepaintContinuouslyAction(
seconds=0 if self._skip_waits else seconds))
class Interaction(object):
def __init__(self, action_runner, label, flags):
assert action_runner
assert label
assert isinstance(flags, list)
self._action_runner = action_runner
self._label = label
self._flags = flags
self._started = False
def __enter__(self):
self.Begin()
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_value is None:
self.End()
else:
logging.warning(
'Exception was raised in the with statement block, the end of '
'interaction record is not marked.')
def Begin(self):
assert not self._started
self._started = True
self._action_runner.ExecuteJavaScript(
'console.time({{ marker }});',
marker=timeline_interaction_record.GetJavaScriptMarker(
self._label, self._flags))
def End(self):
assert self._started
self._started = False
self._action_runner.ExecuteJavaScript(
'console.timeEnd({{ marker }});',
marker=timeline_interaction_record.GetJavaScriptMarker(
self._label, self._flags))
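# Illustrative usage sketch (not part of the original module; the runner setup
# below is assumed). An Interaction brackets a block of actions with
# console.time/console.timeEnd markers so the trace can later be split into
# labelled interaction records:
#
#   runner = ...  # the action runner defined above, bound to a tab
#   with Interaction(runner, label='Gesture_PageScroll', flags=[]):
#     runner.ScrollPage(direction='down')
#     runner.Wait(1)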
|
the-stack_0_7237 | import hassapi as hass # pylint: disable=import-error
class cover_tag_scanned(hass.Hass):
""" Opens or closes a cover based on an nfc_tag being scanned """
def initialize(self):
self.listen_event(
self.door_tag_scanned,
"tag_scanned",
tag_id=self.args["tag_id"],
)
def door_tag_scanned(self, event_name, data, kwargs):
"""Open the door if it's closed. Close the door if it's open.
Ignore the event if the door is opening or closing.
If a device list is provided, ignore the scan if it
didn't come from a device in the list.
'data' looks like this:
'data': {'tag_id': 'cae3c8c5-faac-4585-be93-a1199fa98fcd',
'device_id': 'effd5529caba2c3f'}"""
self.log(
"tag_id = " + data["tag_id"] + ". device_id = " + data["device_id"],
level="DEBUG",
)
if "devices" in self.args and data["device_id"] not in self.args["devices"]:
self.log(
"Ignoring scan from unlisted device " + data["device_id"] + ".",
level="INFO",
)
return
if self.get_state(self.args["cover_entity"]) == "open":
self.log(
"Closing garage door due to NFC tag scan by device "
+ data["device_id"]
+ ".",
level="INFO",
)
self.call_service("cover/close_cover", entity_id=self.args["cover_entity"])
elif self.get_state(self.args["cover_entity"]) == "closed":
self.log(
"Opening garage door due to NFC tag scan by device "
+ data["device_id"]
+ ".",
level="INFO",
)
self.call_service("cover/open_cover", entity_id=self.args["cover_entity"])
|
the-stack_0_7238 | import json
import time
class Vehicle:
ip = None
brand = None
model = None
vrn = None
rotates = None
gear = None
direction = None
directionAsText = None
speed = None
action = None
actionAsText = None
_lastUpdateAt = None
def update(self, ip, brand, model, vrn, rotates, gear,
direction, directionAsText, speed, action,
actionAsText, t = time.time):
self.ip = ip
self.brand = brand
self.model = model
self.vrn = vrn
self.rotates = rotates
self.gear = gear
self.direction = direction
self.directionAsText = directionAsText
self.speed = speed
self.action = action
self.actionAsText = actionAsText
self._lastUpdateAt = t()
def isReachable(self, time):
return time <= self._lastUpdateAt + 3
def echo(self, name, alt = "Zisťujem..."):
val = getattr(self, name)
if val == None:
return alt
else:
return val
def fromJson(self, data, requiresAboutMe = False):
data = json.loads(data)
if requiresAboutMe and not data.get("aboutMe"):
return None
        if self is None:
self = Vehicle()
self.update(data.get("ip"),
data.get("brand"),
data.get("model"),
data.get("vrn"),
data.get("rotates"),
data.get("gear"),
data.get("direction"),
data.get("directionAsText"),
data.get("speed"),
data.get("action"),
data.get("actionAsText"))
return self
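# Minimal usage sketch (not part of the original class; the JSON payload below
# is made up for illustration). fromJson updates the instance in place and
# returns it.
if __name__ == "__main__":
    payload = json.dumps({
        "aboutMe": True, "ip": "192.168.1.20", "brand": "Skoda",
        "model": "Octavia", "vrn": "BA-123XY", "rotates": False, "gear": 1,
        "direction": 0, "directionAsText": "forward", "speed": 42,
        "action": 0, "actionAsText": "driving",
    })
    vehicle = Vehicle().fromJson(payload, requiresAboutMe=True)
    print(vehicle.echo("speed"), vehicle.isReachable(time.time()))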
|
the-stack_0_7240 | #############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""catalog package test runner
"""
import re
import unittest
import doctest
from zope.testing import module
import zope.component.testing
import zope.component.factory
import zope.component.interfaces
from zope.testing import renormalizing
import zc.catalog
from zc.catalog import index
from zc.catalog import extentcatalog
from zc.catalog import globber
from zc.catalog import catalogindex
from zc.catalog import stemmer
import zc.catalog.interfaces
import BTrees.Interfaces
import BTrees.LOBTree
import BTrees.OLBTree
import BTrees.LFBTree
class TestAbstractIndex(unittest.TestCase):
def test_family_on_cls(self):
self.assertIsInstance(index.AbstractIndex.family,
index.FamilyProperty)
def test_clear_cruft(self):
i = index.AbstractIndex()
i.__dict__['BTreeAPI'] = None
del i.__dict__['family']
self.assertIn('BTreeAPI', i.__dict__)
getattr(i, 'family')
self.assertNotIn('BTreeAPI', i.__dict__)
def test_family(self):
class Family(object):
class OO(object):
class BTree(object):
pass
IO = OO
i = index.AbstractIndex(family=Family)
self.assertIs(i.family, Family)
def test_empty_values(self):
i = index.AbstractIndex()
res = i.values(doc_id=1)
self.assertEqual((), res)
class TestValueIndex(unittest.TestCase):
def test_empty_values(self):
i = index.ValueIndex()
res = i.values(doc_id=1)
self.assertEqual((), res)
class TestSetIndex(unittest.TestCase):
def test_removed(self):
i = index.SetIndex()
i.index_doc(1, ('foo', 'bar'))
i.index_doc(1, ('foo',))
self.assertEqual(1, i.wordCount.value)
def test_appy_all_of_empty(self):
i = index.SetIndex()
res = i.apply({'all_of': ()})
self.assertEqual(len(res), 0)
class TestNormalizationWrapper(unittest.TestCase):
def test_pass_to_index(self):
i = index.SetIndex()
        class Normalizer(object):
            @classmethod
            def value(cls, v):
                return v
        n = index.NormalizationWrapper(i, Normalizer)
self.assertEqual(i.documentCount(), n.documentCount())
self.assertEqual(i.wordCount(), n.wordCount())
n.clear()
n.index_doc(1, ('foo',))
self.assertEqual(i.wordCount(), n.wordCount())
self.assertEqual(n.containsValue('foo'), i.containsValue('foo'))
class TestExtent(unittest.TestCase):
def test_BTreeAPI(self):
i = extentcatalog.Extent()
self.assertIsNotNone(i.BTreeAPI)
def test_bool(self):
i = extentcatalog.Extent()
self.assertFalse(i)
i.add(1, None)
self.assertTrue(i)
self.assertEqual(1, len(i))
def test_discard_missing(self):
i = extentcatalog.Extent()
i.discard(0)
self.assertEqual(0, len(i))
def test_catalog_update(self):
from zope.interface.interfaces import ComponentLookupError
c = extentcatalog.Catalog(extentcatalog.Extent())
i = index.SetIndex()
i.__parent__ = None
self.assertRaises(ComponentLookupError, c.updateIndex, i)
class TestGlob(unittest.TestCase):
def test_bad_parse(self):
class Lexicon(object):
pass
res = globber.glob('', Lexicon())
self.assertIsNone(res)
class TestCatalogIndex(unittest.TestCase):
def test_datetimevalueindex(self):
i = catalogindex.DateTimeValueIndex(field_name='foo')
self.assertTrue(zc.catalog.interfaces.IValueIndex.providedBy(i))
def test_datetimesetindex(self):
i = catalogindex.DateTimeSetIndex(field_name='foo')
self.assertTrue(zc.catalog.interfaces.ISetIndex.providedBy(i))
@unittest.skipUnless(stemmer.broken, "Only for broken stemmers")
class TestBrokenStemmer(unittest.TestCase):
def test_broken(self):
s = stemmer.Stemmer()
self.assertIs(stemmer.broken, s.stemmer)
self.assertEqual('word', s.stemmer.stem("word"))
def setUp32bit(test):
zope.component.testing.setUp(test)
test.globs["btrees_family"] = BTrees.family32
def modSetUp32bit(test):
setUp32bit(test)
module.setUp(test, 'zc.catalog.doctest_test')
def setUp64bit(test):
zope.component.testing.setUp(test)
test.globs["btrees_family"] = BTrees.family64
def modSetUp64bit(test):
setUp64bit(test)
module.setUp(test, 'zc.catalog.doctest_test')
def tearDown(test):
zope.component.testing.tearDown(test)
def modTearDown(test):
module.tearDown(test)
zope.component.testing.tearDown(test)
def test_suite():
checker = renormalizing.RENormalizing((
(re.compile(r"<class 'BTrees."), "<type 'BTrees."),
(re.compile(r"<module 'BTrees\._"), "<module 'BTrees."),
))
tests = unittest.TestSuite((
# 32 bits
doctest.DocFileSuite(
'extentcatalog.rst', setUp=modSetUp32bit, tearDown=modTearDown),
doctest.DocFileSuite(
'setindex.rst', setUp=setUp32bit, tearDown=tearDown),
doctest.DocFileSuite(
'valueindex.rst', setUp=setUp32bit, tearDown=tearDown),
doctest.DocFileSuite(
'normalizedindex.rst', setUp=setUp32bit, tearDown=tearDown),
doctest.DocFileSuite(
'globber.rst', setUp=setUp32bit, tearDown=tearDown),
doctest.DocFileSuite(
'callablewrapper.rst', setUp=setUp32bit, tearDown=tearDown),
# 64 bits
doctest.DocFileSuite(
'extentcatalog.rst', setUp=modSetUp64bit, tearDown=modTearDown),
doctest.DocFileSuite('setindex.rst', setUp=setUp64bit,
tearDown=tearDown),
doctest.DocFileSuite('valueindex.rst', setUp=setUp64bit,
tearDown=tearDown),
doctest.DocFileSuite('normalizedindex.rst', setUp=setUp64bit,
tearDown=tearDown),
doctest.DocFileSuite('globber.rst', setUp=setUp64bit,
tearDown=tearDown),
doctest.DocFileSuite('callablewrapper.rst', setUp=setUp64bit,
tearDown=tearDown),
# legacy data support
doctest.DocFileSuite(
'legacy.rst',
optionflags=doctest.ELLIPSIS,
checker=checker),
))
if not stemmer.broken: # pragma: no cover
tests.addTest(doctest.DocFileSuite('stemmer.rst'))
tests.addTest(unittest.defaultTestLoader.loadTestsFromName(__name__))
return tests
|
the-stack_0_7241 | # -*- coding: utf-8 -*-
"""Defining functions and classes for arrhythmia detection through ECG signals.
The contents of this module define functions and classes for analyzing,
visualizing, and making predictions based on data from the
MIT-BIH Arrhythmia Database.
Explore this repository at:
https://github.com/chance-alvarado/arrhythmia-detector
Author:
Chance Alvarado
LinkedIn: https://www.linkedin.com/in/chance-alvarado/
GitHub: https://github.com/chance-alvarado/
"""
# Set random seeds for reproducibility
from numpy.random import seed
seed(1)
import tensorflow
tensorflow.random.set_seed(2)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib import colors
from sklearn.metrics import confusion_matrix
from matplotlib.animation import FuncAnimation
from keras.models import load_model
def create_dataframe(path):
"""Alias of Pandas' read_csv without an additional import."""
df = pd.read_csv(path, header=None)
return df
def sample_dataframe(path):
"""Preview 5 rows of DataFrame."""
df_sample = pd.read_csv(path, nrows=5, header=None)
return df_sample
class DataVisualization:
"""Class for data exploration through visualization."""
def plot_setup(self, axs):
"""Set up general plot attributes."""
# Loop through all axis objects
for ax in axs:
# Set facecolor to black
ax.set_facecolor('k')
# Remove spines
ax.spines["top"].set_visible(False)
ax.spines["left"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["bottom"].set_visible(False)
# Add grid
ax.grid(linestyle='-', color='w', alpha=.2)
def save_plot(self, save_location):
"""Save plot based on user's preference."""
# Try to save plot if user speciefies a save location
if save_location:
plt.savefig(save_location, facecolor='k')
plt.close()
# Else show plot
else:
plt.show()
def label_counts(self, df):
"""Create vectors of unique labels and their counts."""
# Find target column
target = df.iloc[:, -1]
# Unique labels
unique_labels = target.unique()
# Count number of unique occurances for each label
unique_count = []
for label in unique_labels:
unique_count.append(target[target == label].count())
return unique_labels, unique_count
def class_bar(self, df, save_location=None):
"""Create bar chart for showing classs balance."""
# Collect necessary data
unique_labels, unique_count = self.label_counts(df)
# Create figure
fig, ax = plt.subplots(1, 1, figsize=(7, 4), facecolor='k')
# General plot setup
self.plot_setup([ax])
# Title
fig.suptitle('Arrhythmia Type Breakdown', c='w', fontsize=18, y=.95)
# Set proper color
ax.tick_params(colors='w')
# Add x label
ax.set_xlabel('Arrhythmia Type', c='w', fontsize=14, alpha=0.8)
# Change scale of y
ax.set_yticks(np.arange(0, sum(unique_count),
sum(unique_count)/10)
)
# Plot with glow
ax.bar(unique_labels, unique_count, width=.9, color='r', alpha=0.75)
ax.bar(unique_labels, unique_count, width=.93, color='r', alpha=0.4)
ax.bar(unique_labels, unique_count, width=.95, color='w', alpha=0.2)
# Save plot
self.save_plot(save_location)
def ecg_scatter(self, df, save_location=None):
"""Create scatter plot of 100 of each type of arrhythmia."""
# Collect necessary data
unique_labels, _ = self.label_counts(df)
target_vect = df.iloc[:, -1]
# Create figure
fig, axs = plt.subplots(nrows=5, ncols=1, figsize=(8, 12),
facecolor='k'
)
# General plot setup
self.plot_setup(axs)
# Add title
fig.suptitle('Averaged ECG Signals', c='w', fontsize=16, y=0.92)
# Iterate through all labels
for col, label in enumerate(unique_labels):
# Plot text box with arrhythmia type
axs[col].text(df.shape[1], .95,
('Arrhythmia Type: %s' % (str(int(label)))),
size=14, ha="right", va="top", c='w',
bbox=dict(boxstyle="round", ec=(1., 0.5, 0.5),
fc='r', alpha=.7
)
)
# Scatter plot for arrhythmia
matching_rows = (target_vect == label)
for i in range(100):
# Dataframe of only relevant rows
temp_df = df.iloc[:, :-1][matching_rows].round(decimals=1)
# Data to plot
data = temp_df.iloc[i, :]
t_span = range(len(data))
# Plot iteration
axs[col].scatter(t_span, data, alpha=0.05, c='r', s=2)
# Save plot
self.save_plot(save_location)
def ecg_line(self, row, viz_type='static', save_location=None):
"""Create a line plot of an individual ecg signal."""
# Get relevant data
signal = row[:-1]
target = row.iloc[-1]
# Create figure
fig, ax = plt.subplots(1, 1, figsize=(7, 3),
facecolor='k')
# Create title
fig.suptitle('ECG Signal',
fontsize=18,
color='white',
)
# General plot setup
self.plot_setup([ax])
# Hide tick labels
ax.set_xticklabels([])
ax.set_yticklabels([])
# Add titles
ax.set_xlabel('Time', c='w', fontsize=14, alpha=0.8)
ax.set_ylabel('Amplitude', c='w', fontsize=14, alpha=0.8,)
# Plot text box with arrhythmia type
plt.text(len(signal), .95,
('Arrhythmia Type: %s' % (str(int(target)))),
size=14, ha="right", va="top", c='w',
bbox=dict(boxstyle="round",
ec=(1., 0.5, 0.5),
fc='r',
alpha=.7
)
)
# Check type
if viz_type == 'static':
# Plot with subtle glow effect
ax.plot(signal, color='r', linewidth=2, alpha=.7)
ax.plot(signal, color='r', linewidth=3, alpha=.4)
ax.plot(signal, color='w', linewidth=5, alpha=.2)
# Save plot
self.save_plot(save_location)
# Check type
elif viz_type == 'dynamic':
# Time vector
time_vect = list(range(len(signal)))
# Create line objects
line, = ax.plot(time_vect, signal, color='r',
linewidth=2, alpha=.7
)
line_g1, = ax.plot(time_vect, signal, color='r',
linewidth=3, alpha=.4
)
line_g2, = ax.plot(time_vect, signal, color='w',
linewidth=5, alpha=.2
)
# Update function
def update(num, time_vect, signal, line):
"""Define function to update plot every frame."""
# Scaling value
scaling_factor = 10
end = num*scaling_factor
if end > 100:
start = end-100
else:
start = 0
for line_obj in [line, line_g1, line_g2]:
line_obj.set_data(time_vect[start:end],
signal[start:end]
)
return [(line,), (line_g1,), (line_g2,)]
# Create animation
anim = FuncAnimation(fig, update, interval=40, frames=40,
fargs=[time_vect, signal, line]
)
# Save animation
anim.save(save_location, writer='imagemagick', fps=20,
savefig_kwargs={'facecolor': 'k', 'transparent': True})
plt.close()
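# Illustrative usage of DataVisualization above (not part of the original
# module; the CSV path is an assumption):
#   df = create_dataframe('mitbih_train.csv')
#   viz = DataVisualization()
#   viz.class_bar(df)
#   viz.ecg_line(df.iloc[0, :], viz_type='static')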
class DataProcessing:
"""Class for processing ecg data before training model."""
def resample(self, num_samples, df):
"""Resample data to have 'num_samples' of each label."""
# New DataFrame
df_resample = pd.DataFrame()
# Define target vector
target = df.iloc[:, -1]
# Resample for each unique value in target
for t in target.unique():
temp_df = df[target == t].sample(num_samples, replace=True)
df_resample = pd.concat([df_resample, temp_df], ignore_index=True)
return df_resample
def shuffle(self, df):
"""Randomly shuffle data."""
df = df.sample(frac=1).reset_index(drop=True)
return df
def add_noise(self, df, noise_level=0.05):
"""Add normal noise with standard deviation 'noise_level'."""
# Get shape
rows, cols = df.shape
# Iterate through rows
for index in range(rows):
# Create new noise
noise = np.random.normal(0, 0.05, cols-1)
noise = np.append(noise, 0.)
# Add noise
df.iloc[index, :] += noise
# Keep all values between 0 and 1
for ind, val in enumerate(df.iloc[index, :-1]):
if val > 1:
df.iloc[index, ind] = 1
elif val < 0:
df.iloc[index, ind] = 0
return df
def feature_target_split(self, df):
"""Split DataFrame intto a feature matrix and target vector."""
feature_mat = df.iloc[:, :-1].to_numpy()
target_vect = df.iloc[:, -1].to_numpy()
return feature_mat, target_vect
def one_hot_encoder(self, vect):
"""One hot encode categorical numerical values given Pandas Series."""
# New target list
target_vect_enc = []
# Number of columns in encoded vector
num_cols = len(np.unique(vect))
# Iterate through each value in vector
for val in vect:
# Create vector to append
bin_vect = np.zeros(num_cols)
bin_vect[int(val)] = 1
# Append
target_vect_enc.append(bin_vect)
return np.array(target_vect_enc)
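    # Example (illustrative, not part of the original module): for labels
    # np.array([0., 2., 1.]) there are three unique classes, so
    # DataProcessing().one_hot_encoder(np.array([0., 2., 1.])) returns
    # [[1., 0., 0.], [0., 0., 1.], [0., 1., 0.]].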
class ModelEvaluation:
"""Class for evaluation of predictive model's metrics."""
def undo_encode(self, vect):
"""Undo one hot encoding used in training and predictions."""
# New target list
unencoded_target_vect = []
# Add array index to list
for val in vect:
unencoded_target_vect.append(np.argmax(val))
return unencoded_target_vect
def import_best_model(self):
"""Import best model saved in directory."""
model = load_model('resources/model/best_model.h5')
return model
def best_parameters(self, model):
"""Print the best parameters for each layer of model."""
# Get configuration json
config = model.get_config()
# Iterate through all layers and print relevant info
for layer in config['layers']:
layer_type = layer['class_name']
if layer_type == 'Dense':
print('Dense Layer Nodes: %d' % (layer['config']['units']))
elif layer_type == 'Dropout':
                print('Dropout Rate: %.2f' % (layer['config']['rate']))
elif layer_type == 'InputLayer':
print('Input Layer Nodes: %d'
% (layer['config']['batch_input_shape'][1])
)
def evaluate_model(self, model, test_X, test_y):
"""Evaluate model on the test data."""
acc = model.evaluate(test_X, test_y, verbose=0)[1]
print('Accuracy on testing data: ', acc)
def plot_confusion_matrix(self, model, test_X, y_true):
"""Plot confusion matrix with custom colormap."""
# List of target labels
labels = [0, 1, 2, 3, 4]
# Make predictions
y_pred = model.predict(test_X)
# Unencode target vector
y_pred = self.undo_encode(y_pred)
# Get number of samples
num_samples = len(y_pred)
# Create confusion matrix
cm = confusion_matrix(y_true, y_pred)
# Normalize confusion matrix and round
cm_norm = np.zeros(shape=cm.shape)
for i in range(cm.shape[0]):
for j in range(cm.shape[1]):
val = round((cm[i][j] / num_samples), ndigits=2)
cm_norm[i, j] = val
# Create figure
fig, ax = plt.subplots(facecolor='k', figsize=(7, 6))
# Create black to red color gradient
# Thanks to SpghttCd on stackoverflow for this code
def NonLinCdict(steps, hexcol_array):
cdict = {'red': (), 'green': (), 'blue': ()}
for s, hexcol in zip(steps, hexcol_array):
rgb = colors.hex2color(hexcol)
cdict['red'] = cdict['red'] + ((s, rgb[0], rgb[0]),)
cdict['green'] = cdict['green'] + ((s, rgb[1], rgb[1]),)
cdict['blue'] = cdict['blue'] + ((s, rgb[2], rgb[2]),)
return cdict
hc = ['#000000', '#5b0000', '#ac0000', '#c80000', '#ff0000']
th = [0, 0.01, 0.03, 0.05, 1]
cdict = NonLinCdict(th, hc)
black_red_cmap = colors.LinearSegmentedColormap('black_red_cmap',
cdict
)
# Plot
sns.heatmap(cm_norm, annot=True, cmap=black_red_cmap,
ax=ax, fmt="g", cbar=False,
annot_kws={"size": 14},
linewidths=1, linecolor='w'
)
# Add suptitle
fig.suptitle('Confusion Matrix', c='w', y=.95, fontsize=18)
# Set axis labels
ax.set_xlabel('Predicted Arrhythmia Type', fontsize=14, c='w')
ax.set_ylabel('Actual Arrhythmia Type', fontsize=14, c='w')
# Set tick parameters
ax.tick_params(axis='both', which='major', labelsize=12, )
ax.set_xticklabels(labels=labels, color='w')
ax.set_yticklabels(labels=labels, color='w', rotation=0)
# Show plot
plt.show()
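# Illustrative end-to-end sketch (not part of the original module; the CSV
# path, the presence of 'resources/model/best_model.h5', and the column layout
# of signal samples followed by a label column are assumptions):
#
#   test_df = create_dataframe('mitbih_test.csv')
#   processing = DataProcessing()
#   X, y = processing.feature_target_split(processing.shuffle(test_df))
#   evaluation = ModelEvaluation()
#   model = evaluation.import_best_model()
#   evaluation.evaluate_model(model, X, processing.one_hot_encoder(y))
#   evaluation.plot_confusion_matrix(model, X, y)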
|
the-stack_0_7242 | import json
from django.conf import settings
from django.http import JsonResponse
from django.shortcuts import get_object_or_404
from django.views.decorators.http import require_POST
from requests import RequestException
from rootnroll import RootnRollClient
from rootnroll.constants import ServerStatus
from games.models import Game
"""
Session structure:
game_id -> {'server_id': '123...',
'terminal_id': '234...',}
"""
def _terminal_response_ok(terminal):
return JsonResponse({
'status': 'ok',
'terminal_id': terminal['id'],
'kaylee_url': terminal['config']['kaylee_url'],
})
def _terminal_response_creating():
return JsonResponse({
'status': 'creating',
})
def _terminal_response_error(info=None):
return JsonResponse({
'status': 'error',
'info': info,
})
def get_rnr_client():
return RootnRollClient(username=settings.ROOTNROLL_USERNAME,
password=settings.ROOTNROLL_PASSWORD,
api_url=settings.RNR_API_URL)
@require_POST
def terminals(request):
"""
The main API endpoint for getting a terminal.
TODO: split into several endpoints?
"""
data = json.loads(request.body.decode())
game_id = data.get('id')
game = get_object_or_404(Game, id=game_id)
rnr_client = get_rnr_client()
terminals_map = request.session.get('terminals_map', {})
game_dict = terminals_map.get(str(game_id), {})
server_id = game_dict.get('server_id')
terminal_id = game_dict.get('terminal_id')
if terminal_id:
# Active terminal exists
terminal = rnr_client.get_terminal(terminal_id)
if terminal:
return _terminal_response_ok(terminal)
if server_id:
# Server exists?
server = rnr_client.get_server(server_id)
if server and server['status'] == ServerStatus.ACTIVE:
# Server is ready, create a terminal
terminal = rnr_client.create_terminal(server)
if terminal:
game_dict['terminal_id'] = terminal['id']
terminals_map[str(game_id)] = game_dict
request.session['terminals_map'] = terminals_map
return _terminal_response_ok(terminal)
elif server and server['status'] != ServerStatus.ERROR:
# Waiting for server to come up
return _terminal_response_creating()
# Server does not exist or invalid
try:
# Compute the current number of servers
servers_count = rnr_client.list_servers().get("count")
if servers_count >= settings.SERVERS_NUMBER_HARD_LIMIT:
return _terminal_response_error("No servers available")
server = rnr_client.create_server(game.rnr_image_id)
    except RequestException:
        return _terminal_response_error()
if server and 'id' in server:
game_dict['server_id'] = server['id']
terminals_map[str(game_id)] = game_dict
request.session['terminals_map'] = terminals_map
return _terminal_response_creating()
else:
# Cannot create the server
return _terminal_response_error()
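# Illustrative request/response flow (not part of the original module; the URL
# path, game id, and field values are made-up placeholders). Clients are
# expected to poll the endpoint until the terminal is ready:
#
#   POST /api/terminals   {"id": 3}
#     -> {"status": "creating"}                       # server still booting
#   POST /api/terminals   {"id": 3}
#     -> {"status": "ok", "terminal_id": "234...",
#         "kaylee_url": "wss://example.org/kaylee"}   # terminal ready
#   POST /api/terminals   {"id": 3}
#     -> {"status": "error", "info": null}            # creation failed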
|
the-stack_0_7243 | import random
import numpy as np
#import matplotlib.pyplot as plt
# parameters
N = 100 # No. of training points
D = 2 # 2-dimension
# area between f & g
area = 0
cnt0 = 0
for irun in range(1000):
# training data
x1, x2 = np.zeros((N, 1)), np.zeros((N, 1))
for iN in range(N):
x1[iN] = random.uniform(-1, 1)
x2[iN] = random.uniform(-1, 1)
xtrain = np.c_[np.ones((N, 1)), x1, x2]
# target function: passes through two points
x11, x21 = random.uniform(-1, 1), random.uniform(-1, 1) # 1st point
x12, x22 = random.uniform(-1, 1), random.uniform(-1, 1) # 2nd point
x0 = np.arange(-1, 1, .1) # for plotting purpose
y0 = (x22 - x21)/(x12 - x11) * (x0 - x11) + x21
# target: expected output
y = np.zeros(N)
for iN in range(N):
f = (x22 - x21)/(x12 - x11) * (x1[iN] - x11) + x21
if f < x2[iN]: y[iN] = 1
elif f > x2[iN]: y[iN] = -1
# # visualize
# plt.plot(x0, y0)
# plt.scatter(x1, x2)
# weight vector
w = np.zeros(D+1) # initially, all points mis-classified
# estimated label through Perceptron
yp = np.zeros(N) # initially all 0
cnt = 0
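    # Perceptron Learning Algorithm: while any point is misclassified, pick the
    # first misclassified point (x_n, y_n) and update w <- w + y_n * x_n, which
    # nudges the decision boundary toward classifying that point correctly.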
while np.all(yp == y) == False:
evlt = list(np.equal(yp, y))
iN = evlt.index(False)
w += y[iN] * xtrain[iN, :] # update the weight factor
yp = np.sign(w.dot(xtrain.T))
cnt += 1
# # visualize
# g0 = -(w[0] + w[1]*x0)/w[2]
# plt.plot(x0, g0)
# for i in range(N):
# plt.text(x1[i], x2[i], str(yp[i]))
# plt.pause(1)
# input('Press enter to continue')
cnt0 += cnt # No. of iterations to converge
# estimate the difference between f & g using Monte Carlo
ntest = 1000
count = 0
for itest in range(ntest):
x1test, x2test = random.uniform(-1, 1), random.uniform(-1, 1)
# target
fx2 = x21 + (x22-x21)/(x12-x11) * (x1test-x11)
if fx2 < x2test: target = 1
elif fx2 > x2test: target = -1
else: target = 0
# estimate
estimate = np.sign(w.dot([1, x1test, x2test]))
if estimate != target: count += 1
area += count / ntest
print(area / 1000)
print(cnt0 / 1000) |
the-stack_0_7244 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import packaging.version
import pkg_resources
import google.auth # type: ignore
import google.api_core # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.aiplatform_v1beta1.types import specialist_pool
from google.cloud.aiplatform_v1beta1.types import specialist_pool_service
from google.longrunning import operations_pb2 # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-aiplatform",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
try:
# google.auth.__version__ was added in 1.26.0
_GOOGLE_AUTH_VERSION = google.auth.__version__
except AttributeError:
try: # try pkg_resources if it is available
_GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version
except pkg_resources.DistributionNotFound: # pragma: NO COVER
_GOOGLE_AUTH_VERSION = None
class SpecialistPoolServiceTransport(abc.ABC):
"""Abstract transport class for SpecialistPoolService."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
DEFAULT_HOST: str = "aiplatform.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# If the credentials is service account credentials, then always try to use self signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(service_account.Credentials, "with_always_use_jwt_access")
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
# TODO(busunkim): This method is in the base transport
# to avoid duplicating code across the transport classes. These functions
# should be deleted once the minimum required versions of google-auth is increased.
# TODO: Remove this function once google-auth >= 1.25.0 is required
@classmethod
def _get_scopes_kwargs(
cls, host: str, scopes: Optional[Sequence[str]]
) -> Dict[str, Optional[Sequence[str]]]:
"""Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
scopes_kwargs = {}
if _GOOGLE_AUTH_VERSION and (
packaging.version.parse(_GOOGLE_AUTH_VERSION)
>= packaging.version.parse("1.25.0")
):
scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
else:
scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
return scopes_kwargs
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.create_specialist_pool: gapic_v1.method.wrap_method(
self.create_specialist_pool,
default_timeout=5.0,
client_info=client_info,
),
self.get_specialist_pool: gapic_v1.method.wrap_method(
self.get_specialist_pool, default_timeout=5.0, client_info=client_info,
),
self.list_specialist_pools: gapic_v1.method.wrap_method(
self.list_specialist_pools,
default_timeout=5.0,
client_info=client_info,
),
self.delete_specialist_pool: gapic_v1.method.wrap_method(
self.delete_specialist_pool,
default_timeout=5.0,
client_info=client_info,
),
self.update_specialist_pool: gapic_v1.method.wrap_method(
self.update_specialist_pool,
default_timeout=5.0,
client_info=client_info,
),
}
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Return the client designed to process long-running operations."""
raise NotImplementedError()
@property
def create_specialist_pool(
self,
) -> Callable[
[specialist_pool_service.CreateSpecialistPoolRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def get_specialist_pool(
self,
) -> Callable[
[specialist_pool_service.GetSpecialistPoolRequest],
Union[
specialist_pool.SpecialistPool, Awaitable[specialist_pool.SpecialistPool]
],
]:
raise NotImplementedError()
@property
def list_specialist_pools(
self,
) -> Callable[
[specialist_pool_service.ListSpecialistPoolsRequest],
Union[
specialist_pool_service.ListSpecialistPoolsResponse,
Awaitable[specialist_pool_service.ListSpecialistPoolsResponse],
],
]:
raise NotImplementedError()
@property
def delete_specialist_pool(
self,
) -> Callable[
[specialist_pool_service.DeleteSpecialistPoolRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def update_specialist_pool(
self,
) -> Callable[
[specialist_pool_service.UpdateSpecialistPoolRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
__all__ = ("SpecialistPoolServiceTransport",)
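# Note (illustrative, not part of the original module): this transport is
# abstract. A concrete subclass (for example, a gRPC-based transport) creates a
# channel in __init__ and overrides each RPC property to return a callable
# bound to that channel; operations_client likewise returns a client wired to
# the same channel for polling long-running operations.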
|
the-stack_0_7245 | #!/bin/python3
import os
import sys
from subprocess import call, run, PIPE
from getFiles import get_files, get_main
from colorama import Fore, Style
answ_linestart = 'Answer: '
def compile_java(task, output_path='.', source_path=''):
cmd = ['javac', '-d', output_path]
cmd.extend(get_files(task, source_path))
call(cmd)
def run_tests(task, out_path='out', src_path='src', test_path='test'):
compile_java(task, out_path, src_path)
for fn in os.listdir(test_path):
if fn.endswith('.theotest'):
with open(os.path.join(test_path, fn), 'r') as f:
test_case = f.read().split('\n')
answer = test_case.pop()
p = run(['java', get_main(task)], cwd=out_path, input='\n'.join(test_case) + '\n', stdout=PIPE,
universal_newlines=True)
if answer.startswith(answ_linestart):
output = str(p.stdout)[:-1]
if output == answer[len(answ_linestart):]:
print(f'{Fore.GREEN}pass {fn}{Style.RESET_ALL}')
else:
print(
f'{Fore.RED}fail {fn}: expected {answer[len(answ_linestart):]}, got {output}{Style.RESET_ALL}')
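# Illustrative .theotest file (not part of the original script; the values are
# made up). Every line except the last is fed to the Java program's stdin; the
# last line holds the expected output prefixed with "Answer: ", and the file
# must not end with a trailing newline, since the final split element is taken
# as the answer:
#
#   3
#   1 2 3
#   Answer: 6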
run_tests(*sys.argv[1:])
|
the-stack_0_7247 | # Copyright 2021 Alexey Tochin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import Any, Callable, Dict, Optional
import tensorflow as tf
from tf_dataclass.get_type import get_output_type, get_input_type_dict
from tf_dataclass.modified_dataclass import is_dataclass
def unpack(value: Any, temple: Optional[type] = None) -> Any:
if temple is None:
temple = type(value)
if is_dataclass(temple):
return value.as_tuple
elif temple == tuple or (hasattr(temple, "__origin__") and temple.__origin__ == tuple):
return tuple(map(lambda sub_value, sub_temple: unpack(sub_value, sub_temple), value, temple.__args__))
else:
return value
def pack(unpacked_value: Any, temple: type) -> Any:
if is_dataclass(temple):
return temple.from_tuple(data_tuple=unpacked_value)
else:
return unpacked_value
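# Illustrative round trip (not part of the original module; MyPair stands for
# some tf_dataclass-decorated dataclass with tensor fields `a` and `b`):
#   pair = MyPair(a=tf.constant(1), b=tf.constant(2))
#   flat = unpack(pair)               # -> (<tf.Tensor 1>, <tf.Tensor 2>)
#   same = pack(flat, temple=MyPair)  # -> MyPair(a=..., b=...)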
def pack_function(func: Callable, input_type_dict: Dict[str, type], output_type: type):
"""
Returns a version of @param func where its input and outputs are replaced by their unpacked versions.
func -> pack . func . unpack
@param func: input1, input2, ... -> output
@return: input1_tuple, input2_tuple, ... -> output_tuple
"""
def dictorized_func(**kwargs):
if kwargs.keys() != input_type_dict.keys():
raise ValueError(
f"The keyword arguments set from type annotation does not coincide with actual arguments.\n"
f"From type annotations: {set(input_type_dict.keys())}\n"
f"Actual arguments: {set(kwargs.keys())}"
)
packed_arg_dict = {
arg_name: pack(unpacked_value=kwargs[arg_name], temple=type_val)
for arg_name, type_val in input_type_dict.items()
}
output = func(**packed_arg_dict)
unpacked_output = unpack(value=output, temple=output_type)
return unpacked_output
return dictorized_func
def unpack_function(packed_func: Callable, input_type_dict: Dict[str, type], output_type: type):
"""
    Returns a version of @param packed_func whose inputs and outputs are replaced by their packed (dataclass) versions.
func -> unpack . func . pack
@param packed_func: input1_tuple, input2_tuple, ... -> output_tuple
@return: input1, input2, ... -> output
"""
def undictorized_func(*args, **kwargs):
if args:
raise ValueError("Only keyword arguments are currently supported.")
if kwargs.keys() != input_type_dict.keys():
raise ValueError(
f"The arguments set from type annotation does not coincide with actual arguments.\n"
f"From type annotations: {set(input_type_dict.keys())}\n"
f"Actual arguments: {set(kwargs.keys())}"
)
input_kwargs = {}
for arg_name, arg_value in kwargs.items():
unpacked_arg = unpack(value=arg_value, temple=input_type_dict[arg_name])
input_kwargs[arg_name] = unpacked_arg
output_dict = packed_func(**input_kwargs)
output = pack(unpacked_value=output_dict, temple=output_type)
return output
return undictorized_func
def function(func: Callable, **kwargs) -> Callable:
"""
Modification of tensorflow.function for dataclass input/output support.
    1. The dataclass decorator must be imported from the tf_dataclass module
    2. A type hint for the @param func return type is mandatory
    3. Only keyword arguments for the returned function are currently supported.
    4. Other arguments are the same as for tensorflow.function
    See https://github.com/alexeytochin/tf-dataclass/blob/main/README.md for further details.
    @param func: the same as for tensorflow.function but requires typehints for the return type.
    @param kwargs: these arguments are passed to tensorflow.function
@return: callable object that accepts dataclass objects as input and/or output.
Only keyword arguments for the decorated function are currently supported
Example 1:
>>> from tf_dataclass import dataclass, function
>>> @dataclass
>>> class Sequential:
>>> feature: tf.Tensor # shape = [batch, length, channels], dtype = tf.float32
>>> length: tf.Tensor # shape = [batch], dtype = tf.int32
>>> input = Sequential(
>>> feature = tf.random.normal(shape=[2, 6, 3]),
>>> length = tf.constant([6, 4], dtype=tf.int32),
>>> )
>>> @function
>>> def convolution(input: Sequential, filters: tf.Tensor, stride: int) -> Sequential:
>>> return Sequential(
>>> feature = tf.nn.conv1d(input.feature, filters, stride),
>>> length = tf.math.floordiv(input.length, stride),
>>> )
>>> output = convolution(
>>> input = input,
>>> filters = tf.random.normal(shape=[1, 3, 7]),
>>> stride = 2,
>>> )
>>> assert isinstance(output, Sequential)
>>> print(output.length) # -> tf.Tensor([3 2], shape=(2,), dtype=int32)
Example 2:
>>> from typing import Tuple
>>> from tf_dataclass import dataclass, function
>>> @dataclass
>>> class MyDataclass:
>>> ...
>>> @function
>>> def my_func(...) -> Tuple[tf.Tensor, MyDataclass]:
>>> ...
>>> return some_tensor, my_dataclass_instance
"""
input_type_dict = get_input_type_dict(func)
output_type = get_output_type(func)
dictorized_func = pack_function(func, input_type_dict, output_type)
tf_func = tf.function(func=dictorized_func, **kwargs)
undictorized_func = unpack_function(tf_func, input_type_dict, output_type)
return undictorized_func
|
the-stack_0_7248 | # pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for `utils.py`."""
from absl.testing import absltest
from dm_c19_modelling.modelling import definitions
from dm_c19_modelling.modelling.models import utils
import numpy as np
class RolloutFeaturesTest(absltest.TestCase):
def test_expected_rollout(self):
feature_names = ["feature1", definitions.SITE_ID_INTEGER, "feature2",
"feature3", definitions.WEEK_DAY_INTEGER]
target_names = ["feature3", "feature1"]
constant_features = ["feature2"]
cadence = 2
features = np.array([
# First date. Day #2.
[
# First site.
[10.1, 25., 30., 40.1, 2],
# Second site.
[10.2, 27., 30., 40.2, 2],
],
# Second date. Day #4.
[
# First site.
[11.1, 25., 30., 41.1, 4],
# Second site.
[11.2, 27., 30., 41.2, 4],
],
])
next_steps_targets = np.array([
# Third date. Day #6.
[
# First site.
[42.1, 12.1],
# Second site.
[42.2, 12.2],
],
# Fourth date. Day #8.
[
# First site.
[43.1, 13.1],
# Second site.
[43.2, 13.2],
],
])
output = utils.rollout_features_with_predictions(
features=features,
next_steps_targets=next_steps_targets,
feature_names=feature_names,
target_names=target_names,
cadence=cadence,
constant_features=constant_features)
expected_additional_features = np.array([
# Third date. Day #6.
[
# First site.
[12.1, 25., 30., 42.1, 6],
# Second site.
[12.2, 27., 30., 42.2, 6],
],
# Fourth date. Day #8.
[
# First site.
[13.1, 25., 30., 43.1, 1],
# Second site.
[13.2, 27., 30., 43.2, 1],
],
])
expected_output = np.concatenate(
[features, expected_additional_features], axis=0)
np.testing.assert_allclose(output, expected_output)
if __name__ == "__main__":
absltest.main()
|
the-stack_0_7249 | import torch
import torch.nn as nn
def dice_loss(input, target):
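    """
    Soft dice loss for binary segmentation.

    input: [B, 1, H, W] tensor of raw predictions (logits); sigmoid is applied here
    target: [B, 1, H, W] tensor of binary targets
    return: scalar tensor equal to 1 minus the smoothed dice coefficient
    """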
input = torch.sigmoid(input)
smooth = 1e-5
iflat = input.view(-1)
tflat = target.view(-1)
intersection = (iflat * tflat).sum()
return 1 - ((2. * intersection + smooth) /
(iflat.sum() + tflat.sum() + smooth))
def focal_loss(input,
target,
reduction='mean',
beta=0.5,
gamma=2.,
eps=1e-7,
**kwargs):
"""
Focal loss, see arXiv:1708.02002
input: [B, 1, H, W] tensor that contains predictions to compare
target: [B, 1, H, W] tensor that contains targets to compare to
reduction: one of mean, sum or none. Used to choose how loss is reduced
over batches
beta: weight in [0; 1] to give to positive targets. The higher it is, the
more true positive and false negative are important. Negative targets
have weight 1-beta
gamma: parameter that reduces the loss contribution from easy examples and
extends the range in which an example receives low loss. It also
gives more weight to misclassified examples
eps: constant used for numerical stability
return: [1] or [B] (if reduction='none') tensor containing loss between
input and target
"""
n = input.size(0)
iflat = torch.sigmoid(input).view(n, -1).clamp(eps, 1 - eps)
tflat = target.view(n, -1)
focal = -(beta * tflat * (1 - iflat).pow(gamma) * iflat.log() + (1 - beta) *
(1 - tflat) * iflat.pow(gamma) * (1 - iflat).log()).mean(-1)
if reduction == 'mean':
return focal.mean()
elif reduction == 'sum':
return focal.sum()
else:
return focal
class FocalDiceLoss(nn.Module):
"""
Weighted linear combination of focal and dice losses
a: weight of binary cross-entropy
b: weight of dice
smooth: value added to both numerator and denominator of dice to avoid
division by zero and smooth gradient around 0
beta: weight in [0; 1] to give to positive targets. The higher it is,
the more true positive and false negative are important. Negative
targets have weight 1-beta
gamma: parameter that reduces the loss contribution from easy examples
and extends the range in which an example receives low loss. It
also gives more weight to misclassified examples
reduction: one of mean, sum or none. Used to choose how loss is reduced
over batches
"""
def __init__(self,
a=0.5,
b=0.5,
smooth=1.,
beta=0.5,
gamma=2.,
reduction='mean'):
super().__init__()
self.a = a
self.b = b
self.smooth = smooth
self.beta = beta
self.gamma = gamma
self.reduction = reduction
def forward(self, input, target):
"""
input: [B, 1, H, W] tensor that contains predictions to compare
target: [B, 1, H, W] tensor that contains targets to compare to
return: [1] or [B] (if self.reduction='none') tensor containing loss
between input and target
"""
focal = focal_loss(
input,
target,
beta=self.beta,
gamma=self.gamma,
reduction=self.reduction)
dice = dice_loss(input, target)
return self.a * focal + self.b * dice
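

# Illustrative usage sketch (not part of the original module). The tensor shapes
# below are arbitrary placeholders for a batch of binary segmentation masks.
if __name__ == "__main__":
    criterion = FocalDiceLoss(a=0.5, b=0.5, beta=0.5, gamma=2.)
    logits = torch.randn(4, 1, 64, 64, requires_grad=True)   # raw model predictions
    targets = torch.randint(0, 2, (4, 1, 64, 64)).float()    # binary ground truth
    loss = criterion(logits, targets)                         # scalar combined loss
    loss.backward()
    print(loss.item())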
|
the-stack_0_7250 | # Princeton University licenses this file to You under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy of the License at:
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
# ************************************** DefaultControlMechanism ************************************************
"""
The DefaultControlMechanism is created for a `System` if no other controller type is specified. The
DefaultControlMechanism creates a `ControlSignal` for each `ControlProjection` it is assigned, and uses
`defaultControlAllocation` as the `value <ControlSignal.value>` for the ControlSignal. By default,
`defaultControlAllocation` = 1, so that ControlProjections from the DefaultControlMechanism have no effect on their
parameters. However, it can be used to uniformly control the parameters that receive ControlProjections from it,
by manually changing the value of `defaultControlAllocation`. See `ControlMechanism <ControlMechanism>` for additional
details of how ControlMechanism are created, executed and their attributes.
COMMENT:
ADD LINK FOR defaultControlAllocation
TEST FOR defaultControlAllocation: |defaultControlAllocation|
ANOTHER TEST FOR defaultControlAllocation: :py:print:`defaultControlAllocation`
AND YET ANOTHER TEST FOR defaultControlAllocation: :py:print:|defaultControlAllocation|
LINK TO DEFAULTS: :doc:`Defaults`
COMMENT
"""
import numpy as np
import typecheck as tc
from psyneulink.components.mechanisms.adaptive.control.controlmechanism import ControlMechanism
from psyneulink.components.mechanisms.processing.objectivemechanism import ObjectiveMechanism
from psyneulink.components.states.inputstate import InputState
from psyneulink.globals.defaults import defaultControlAllocation
from psyneulink.globals.keywords import CONTROL, FUNCTION, FUNCTION_PARAMS, INPUT_STATES, INTERCEPT, MODULATION, NAME, OBJECTIVE_MECHANISM, SLOPE
from psyneulink.globals.preferences.componentpreferenceset import is_pref_set
from psyneulink.globals.preferences.preferenceset import PreferenceLevel
from psyneulink.globals.utilities import ContentAddressableList
from psyneulink.scheduling.time import TimeScale
__all__ = [
'DefaultControlMechanism', 'DefaultControlMechanismError'
]
class DefaultControlMechanismError(Exception):
def __init__(self, error_value):
self.error_value = error_value
class DefaultControlMechanism(ControlMechanism):
"""Subclass of `ControlMechanism <ControlMechanism>` that implements a DefaultControlMechanism.
COMMENT:
Description:
Implements default source of control signals, with one inputState and outputState for each.
Uses defaultControlAllocation as input(s) and pass value(s) unchanged to outputState(s) and
ControlProjection(s)
Every ControlProjection is assigned this Mechanism as its sender by default (i.e., unless a sender is
explicitly specified in its constructor).
An inputState and outputState is created for each ControlProjection assigned:
the inputState is assigned the
:py:constant:`defaultControlAllocation <Defaults.defaultControlAllocation>` value;
when the DefaultControlMechanism executes, it simply assigns the same value to the ControlProjection.
Class attributes:
+ componentType (str): System Default Mechanism
+ paramClassDefaults (dict):
+ FUNCTION: Linear
COMMENT
"""
componentType = "DefaultControlMechanism"
classPreferenceLevel = PreferenceLevel.SUBTYPE
# classPreferenceLevel = PreferenceLevel.TYPE
# Any preferences specified below will override those specified in TypeDefaultPreferences
# Note: only need to specify setting; level will be assigned to Type automatically
# classPreferences = {
# kwPreferenceSetName: 'DefaultControlMechanismCustomClassPreferences',
# kp<pref>: <setting>...}
from psyneulink.components.functions.function import Linear
paramClassDefaults = ControlMechanism.paramClassDefaults.copy()
paramClassDefaults.update({FUNCTION:Linear,
FUNCTION_PARAMS:{SLOPE:1, INTERCEPT:0},
OBJECTIVE_MECHANISM:None,
MODULATION:None,
})
@tc.typecheck
def __init__(self,
# default_variable=None,
# size=None,
system=None,
objective_mechanism:tc.optional(tc.any(ObjectiveMechanism, list))=None,
control_signals:tc.optional(list)=None,
params=None,
name=None,
prefs:is_pref_set=None):
super(DefaultControlMechanism, self).__init__(# default_variable=default_variable,
# size=size,
objective_mechanism=objective_mechanism,
control_signals=control_signals,
params=params,
name=name,
prefs=prefs,
context=self)
def _instantiate_input_states(self, context=None):
"""Instantiate input_value attribute
Instantiate input_states and monitored_output_states attributes (in case they are referenced)
and assign any OutputStates that project to the input_states to monitored_output_states
IMPLEMENTATION NOTE: At present, these are dummy assignments, simply to satisfy the requirements for
subclasses of ControlMechanism; in the future, an _instantiate_objective_mechanism()
method should be implemented that also implements an _instantiate_monitored_output_states
method, and that can be used to add OutputStates/Mechanisms to be monitored.
"""
if not hasattr(self, INPUT_STATES):
self._input_states = None
elif self.input_states:
for input_state in self.input_states:
for projection in input_state.path_afferents:
self.monitored_output_states.append(projection.sender)
def _instantiate_control_signal(self, control_signal, context=None):
"""Instantiate requested ControlSignal, ControlProjection and associated InputState
"""
from psyneulink.components.states.parameterstate import ParameterState
if isinstance(control_signal, dict):
if CONTROL in control_signal:
projection = control_signal[CONTROL][0]
input_name = 'DefaultControlAllocation for ' + projection.receiver.name + '_ControlSignal'
elif NAME in control_signal:
input_name = 'DefaultControlAllocation for ' + control_signal[NAME] + '_ControlSignal'
elif isinstance(control_signal, tuple):
input_name = 'DefaultControlAllocation for ' + control_signal[0] + '_ControlSignal'
elif isinstance(control_signal, ParameterState):
input_name = 'DefaultControlAllocation for ' + control_signal.name + '_ControlSignal'
else:
raise DefaultControlMechanismError("control signal ({}) was not a dict, tuple, or ParameterState".
format(control_signal))
# Instantiate input_states and allocation_policy attribute for control_signal allocations
self._instantiate_default_input_state(input_name, defaultControlAllocation, context=context)
self.allocation_policy = self.input_values
# Call super to instantiate ControlSignal
# Note: any params specified with ControlProjection for the control_signal
# should be in PARAMS entry of dict passed in control_signal arg
control_signal = super()._instantiate_control_signal(control_signal=control_signal, context=context)
def _instantiate_default_input_state(self, input_state_name, input_state_value, context=None):
"""Instantiate inputState for ControlMechanism
NOTE: This parallels ObjectMechanism._instantiate_input_state_for_monitored_state()
It is implemented here to spare having to instantiate a "dummy" (and superfluous) ObjectiveMechanism
for the sole purpose of creating input_states for each value of defaultControlAllocation to assign
to the ControlProjections.
Extend self.instance_defaults.variable by one item to accommodate new inputState
Instantiate the inputState using input_state_name and input_state_value
Update self.input_state and self.input_states
Args:
input_state_name (str):
input_state_value (2D np.array):
context:
Returns:
input_state (InputState):
"""
# First, test for initialization conditions:
# This is for generality (in case, for any subclass in the future, variable is assigned to None on init)
if self.instance_defaults.variable is None:
self.instance_defaults.variable = np.atleast_2d(input_state_value)
# If there is a single item in self.instance_defaults.variable, it could be the one assigned on initialization
# (in order to validate ``function`` and get its return value as a template for self.value);
# in that case, there should be no input_states yet, so pass
# (i.e., don't bother to extend self.instance_defaults.variable): it will be used for the new inputState
elif len(self.instance_defaults.variable) == 1:
if self.input_states:
self.instance_defaults.variable = np.append(self.instance_defaults.variable, np.atleast_2d(input_state_value), 0)
else:
# If there are no input_states, this is the usual initialization condition;
# Pass to create a new inputState that will be assigned to existing the first item of self.instance_defaults.variable
pass
# Other than on initialization (handled above), it is a PROGRAM ERROR if
# the number of input_states is not equal to the number of items in self.instance_defaults.variable
elif len(self.instance_defaults.variable) != len(self.input_states):
raise DefaultControlMechanismError(
"PROGRAM ERROR: The number of input_states ({}) does not match "
"the number of items found for the variable attribute ({}) of {}"
"when creating {}".format(
len(self.input_states),
len(self.instance_defaults.variable),
self.name,
input_state_name,
)
)
# Extend self.instance_defaults.variable to accommodate new inputState
else:
self.instance_defaults.variable = np.append(self.instance_defaults.variable, np.atleast_2d(input_state_value), 0)
variable_item_index = self.instance_defaults.variable.size-1
# Instantiate inputState
from psyneulink.components.states.state import _instantiate_state
from psyneulink.components.states.inputstate import InputState
input_state = _instantiate_state(owner=self,
state_type=InputState,
name=input_state_name,
# state_spec=defaultControlAllocation,
reference_value=np.array(self.instance_defaults.variable[variable_item_index]),
reference_value_name='Default control allocation',
params=None,
context=context)
# Update inputState and input_states
if self.input_states:
self._input_states[input_state.name] = input_state
else:
from psyneulink.components.states.state import State_Base
self._input_states = ContentAddressableList(component_type=State_Base,
list=[input_state],
name=self.name+'.input_states')
# self.input_value = [state.value for state in self.input_states]
return input_state
|
the-stack_0_7251 | # -*- coding: utf8 -*-
import sys
import os
import unittest
import platform
from pygame.tests.test_utils import example_path, AssertRaisesRegexMixin
import pygame
from pygame import mixer
from pygame.compat import unicode_, as_bytes, bytes_
IS_PYPY = "PyPy" == platform.python_implementation()
################################### CONSTANTS ##################################
FREQUENCIES = [11025, 22050, 44100, 48000]
SIZES = [-16, -8, 8, 16]
if pygame.get_sdl_version()[0] >= 2:
SIZES.append(32)
CHANNELS = [1, 2]
BUFFERS = [3024]
CONFIGS = [
{"frequency": f, "size": s, "channels": c}
for f in FREQUENCIES
for s in SIZES
for c in CHANNELS
]
# Using all CONFIGS fails on a Mac; probably older SDL_mixer; we could do:
# if platform.system() == 'Darwin':
# But using all CONFIGS is very slow (> 10 sec for example)
# And probably, we don't need to be so exhaustive, hence:
CONFIG = {"frequency": 22050, "size": -16, "channels": 2} # base config
if pygame.get_sdl_version()[0] >= 2:
CONFIG = {"frequency": 44100, "size": 32, "channels": 2} # base config
class InvalidBool(object):
"""To help test invalid bool values."""
__nonzero__ = None
__bool__ = None
############################## MODULE LEVEL TESTS ##############################
class MixerModuleTest(unittest.TestCase):
def tearDown(self):
mixer.quit()
mixer.pre_init(0, 0, 0, 0)
def test_init__keyword_args(self):
# note: this test used to loop over all CONFIGS, but it's very slow..
mixer.init(**CONFIG)
mixer_conf = mixer.get_init()
self.assertEqual(mixer_conf[0], CONFIG["frequency"])
# Not all "sizes" are supported on all systems, hence "abs".
self.assertEqual(abs(mixer_conf[1]), abs(CONFIG["size"]))
self.assertEqual(mixer_conf[2], CONFIG["channels"])
def test_pre_init__keyword_args(self):
# note: this test used to loop over all CONFIGS, but it's very slow..
mixer.pre_init(**CONFIG)
mixer.init()
mixer_conf = mixer.get_init()
self.assertEqual(mixer_conf[0], CONFIG["frequency"])
# Not all "sizes" are supported on all systems, hence "abs".
self.assertEqual(abs(mixer_conf[1]), abs(CONFIG["size"]))
self.assertEqual(mixer_conf[2], CONFIG["channels"])
def test_pre_init__zero_values(self):
# Ensure that argument values of 0 are replaced with
# default values. No way to check buffer size though.
mixer.pre_init(22050, -8, 1) # Non default values
mixer.pre_init(0, 0, 0) # Should reset to default values
mixer.init()
self.assertEqual(mixer.get_init(), (44100, -16, 2))
def test_init__zero_values(self):
# Ensure that argument values of 0 are replaced with
# preset values. No way to check buffer size though.
mixer.pre_init(44100, 8, 1, allowedchanges=0) # None default values
mixer.init(0, 0, 0)
self.assertEqual(mixer.get_init(), (44100, 8, 1))
@unittest.skip("SDL_mixer bug")
def test_get_init__returns_exact_values_used_for_init(self):
# fix in 1.9 - I think it's a SDL_mixer bug.
# TODO: When this bug is fixed, testing through every combination
# will be too slow so adjust as necessary, at the moment it
# breaks the loop after first failure
for init_conf in CONFIGS:
            frequency = init_conf["frequency"]
            size = init_conf["size"]
            channels = init_conf["channels"]
if (frequency, size) == (22050, 16):
continue
mixer.init(frequency, size, channels)
mixer_conf = mixer.get_init()
            self.assertEqual((frequency, size, channels), mixer_conf)
mixer.quit()
def test_get_init__returns_None_if_mixer_not_initialized(self):
self.assertIsNone(mixer.get_init())
def test_get_num_channels__defaults_eight_after_init(self):
mixer.init()
self.assertEqual(mixer.get_num_channels(), 8)
def test_set_num_channels(self):
mixer.init()
default_num_channels = mixer.get_num_channels()
for i in range(1, default_num_channels + 1):
mixer.set_num_channels(i)
self.assertEqual(mixer.get_num_channels(), i)
def test_quit(self):
""" get_num_channels() Should throw pygame.error if uninitialized
after mixer.quit() """
mixer.init()
mixer.quit()
self.assertRaises(pygame.error, mixer.get_num_channels)
# TODO: FIXME: appveyor fails here sometimes.
@unittest.expectedFailure
def test_sound_args(self):
def get_bytes(snd):
return snd.get_raw()
mixer.init()
sample = as_bytes("\x00\xff") * 24
wave_path = example_path(os.path.join("data", "house_lo.wav"))
uwave_path = unicode_(wave_path)
bwave_path = uwave_path.encode(sys.getfilesystemencoding())
snd = mixer.Sound(file=wave_path)
self.assertTrue(snd.get_length() > 0.5)
snd_bytes = get_bytes(snd)
self.assertTrue(len(snd_bytes) > 1000)
self.assertEqual(get_bytes(mixer.Sound(wave_path)), snd_bytes)
self.assertEqual(get_bytes(mixer.Sound(file=uwave_path)), snd_bytes)
self.assertEqual(get_bytes(mixer.Sound(uwave_path)), snd_bytes)
arg_emsg = "Sound takes either 1 positional or 1 keyword argument"
with self.assertRaises(TypeError) as cm:
mixer.Sound()
self.assertEqual(str(cm.exception), arg_emsg)
with self.assertRaises(TypeError) as cm:
mixer.Sound(wave_path, buffer=sample)
self.assertEqual(str(cm.exception), arg_emsg)
with self.assertRaises(TypeError) as cm:
mixer.Sound(sample, file=wave_path)
self.assertEqual(str(cm.exception), arg_emsg)
with self.assertRaises(TypeError) as cm:
mixer.Sound(buffer=sample, file=wave_path)
self.assertEqual(str(cm.exception), arg_emsg)
with self.assertRaises(TypeError) as cm:
mixer.Sound(foobar=sample)
self.assertEqual(str(cm.exception), "Unrecognized keyword argument 'foobar'")
snd = mixer.Sound(wave_path, **{})
self.assertEqual(get_bytes(snd), snd_bytes)
snd = mixer.Sound(*[], **{"file": wave_path})
with self.assertRaises(TypeError) as cm:
mixer.Sound([])
self.assertEqual(str(cm.exception), "Unrecognized argument (type list)")
with self.assertRaises(TypeError) as cm:
snd = mixer.Sound(buffer=[])
emsg = "Expected object with buffer interface: got a list"
self.assertEqual(str(cm.exception), emsg)
ufake_path = unicode_("12345678")
self.assertRaises(IOError, mixer.Sound, ufake_path)
self.assertRaises(IOError, mixer.Sound, "12345678")
with self.assertRaises(TypeError) as cm:
mixer.Sound(buffer=unicode_("something"))
emsg = "Unicode object not allowed as buffer object"
self.assertEqual(str(cm.exception), emsg)
self.assertEqual(get_bytes(mixer.Sound(buffer=sample)), sample)
if type(sample) != str:
somebytes = get_bytes(mixer.Sound(sample))
# on python 2 we do not allow using string except as file name.
self.assertEqual(somebytes, sample)
self.assertEqual(get_bytes(mixer.Sound(file=bwave_path)), snd_bytes)
self.assertEqual(get_bytes(mixer.Sound(bwave_path)), snd_bytes)
snd = mixer.Sound(wave_path)
with self.assertRaises(TypeError) as cm:
mixer.Sound(wave_path, array=snd)
self.assertEqual(str(cm.exception), arg_emsg)
with self.assertRaises(TypeError) as cm:
mixer.Sound(buffer=sample, array=snd)
self.assertEqual(str(cm.exception), arg_emsg)
snd2 = mixer.Sound(array=snd)
self.assertEqual(snd.get_raw(), snd2.get_raw())
def test_sound_unicode(self):
"""test non-ASCII unicode path"""
mixer.init()
import shutil
ep = unicode_(example_path("data"))
temp_file = os.path.join(ep, u"你好.wav")
org_file = os.path.join(ep, u"house_lo.wav")
shutil.copy(org_file, temp_file)
try:
with open(temp_file, "rb") as f:
pass
except IOError:
raise unittest.SkipTest("the path cannot be opened")
try:
sound = mixer.Sound(temp_file)
del sound
finally:
os.remove(temp_file)
@unittest.skipIf(
os.environ.get("SDL_AUDIODRIVER") == "disk",
"this test fails without real sound card",
)
def test_array_keyword(self):
try:
from numpy import (
array,
arange,
zeros,
int8,
uint8,
int16,
uint16,
int32,
uint32,
)
except ImportError:
self.skipTest("requires numpy")
freq = 22050
format_list = [-8, 8, -16, 16]
channels_list = [1, 2]
a_lists = dict((f, []) for f in format_list)
a32u_mono = arange(0, 256, 1, uint32)
a16u_mono = a32u_mono.astype(uint16)
a8u_mono = a32u_mono.astype(uint8)
au_list_mono = [(1, a) for a in [a8u_mono, a16u_mono, a32u_mono]]
for format in format_list:
if format > 0:
a_lists[format].extend(au_list_mono)
a32s_mono = arange(-128, 128, 1, int32)
a16s_mono = a32s_mono.astype(int16)
a8s_mono = a32s_mono.astype(int8)
as_list_mono = [(1, a) for a in [a8s_mono, a16s_mono, a32s_mono]]
for format in format_list:
if format < 0:
a_lists[format].extend(as_list_mono)
a32u_stereo = zeros([a32u_mono.shape[0], 2], uint32)
a32u_stereo[:, 0] = a32u_mono
a32u_stereo[:, 1] = 255 - a32u_mono
a16u_stereo = a32u_stereo.astype(uint16)
a8u_stereo = a32u_stereo.astype(uint8)
au_list_stereo = [(2, a) for a in [a8u_stereo, a16u_stereo, a32u_stereo]]
for format in format_list:
if format > 0:
a_lists[format].extend(au_list_stereo)
a32s_stereo = zeros([a32s_mono.shape[0], 2], int32)
a32s_stereo[:, 0] = a32s_mono
a32s_stereo[:, 1] = -1 - a32s_mono
a16s_stereo = a32s_stereo.astype(int16)
a8s_stereo = a32s_stereo.astype(int8)
as_list_stereo = [(2, a) for a in [a8s_stereo, a16s_stereo, a32s_stereo]]
for format in format_list:
if format < 0:
a_lists[format].extend(as_list_stereo)
for format in format_list:
for channels in channels_list:
try:
mixer.init(freq, format, channels)
except pygame.error:
# Some formats (e.g. 16) may not be supported.
continue
try:
__, f, c = mixer.get_init()
if f != format or c != channels:
# Some formats (e.g. -8) may not be supported.
continue
for c, a in a_lists[format]:
self._test_array_argument(format, a, c == channels)
finally:
mixer.quit()
def _test_array_argument(self, format, a, test_pass):
from numpy import array, all as all_
try:
snd = mixer.Sound(array=a)
except ValueError:
if not test_pass:
return
self.fail("Raised ValueError: Format %i, dtype %s" % (format, a.dtype))
if not test_pass:
self.fail(
"Did not raise ValueError: Format %i, dtype %s" % (format, a.dtype)
)
a2 = array(snd)
a3 = a.astype(a2.dtype)
lshift = abs(format) - 8 * a.itemsize
if lshift >= 0:
# This is asymmetric with respect to downcasting.
a3 <<= lshift
self.assertTrue(all_(a2 == a3), "Format %i, dtype %s" % (format, a.dtype))
def _test_array_interface_fail(self, a):
self.assertRaises(ValueError, mixer.Sound, array=a)
def test_array_interface(self):
mixer.init(22050, -16, 1, allowedchanges=0)
snd = mixer.Sound(buffer=as_bytes("\x00\x7f") * 20)
d = snd.__array_interface__
self.assertTrue(isinstance(d, dict))
if pygame.get_sdl_byteorder() == pygame.LIL_ENDIAN:
typestr = "<i2"
else:
typestr = ">i2"
self.assertEqual(d["typestr"], typestr)
self.assertEqual(d["shape"], (20,))
self.assertEqual(d["strides"], (2,))
self.assertEqual(d["data"], (snd._samples_address, False))
@unittest.skipIf(not pygame.HAVE_NEWBUF, "newbuf not implemented")
def test_newbuf__one_channel(self):
mixer.init(22050, -16, 1)
self._NEWBUF_export_check()
@unittest.skipIf(not pygame.HAVE_NEWBUF, "newbuf not implemented")
def test_newbuf__twho_channel(self):
mixer.init(22050, -16, 2)
self._NEWBUF_export_check()
def _NEWBUF_export_check(self):
freq, fmt, channels = mixer.get_init()
ndim = 1 if (channels == 1) else 2
itemsize = abs(fmt) // 8
formats = {
8: "B",
-8: "b",
16: "=H",
-16: "=h",
32: "=I",
-32: "=i", # 32 and 64 for future consideration
64: "=Q",
-64: "=q",
}
format = formats[fmt]
from pygame.tests.test_utils import buftools
Exporter = buftools.Exporter
Importer = buftools.Importer
is_lil_endian = pygame.get_sdl_byteorder() == pygame.LIL_ENDIAN
fsys, frev = ("<", ">") if is_lil_endian else (">", "<")
shape = (10, channels)[:ndim]
strides = (channels * itemsize, itemsize)[2 - ndim :]
exp = Exporter(shape, format=frev + "i")
snd = mixer.Sound(array=exp)
buflen = len(exp) * itemsize * channels
imp = Importer(snd, buftools.PyBUF_SIMPLE)
self.assertEqual(imp.ndim, 0)
self.assertTrue(imp.format is None)
self.assertEqual(imp.len, buflen)
self.assertEqual(imp.itemsize, itemsize)
self.assertTrue(imp.shape is None)
self.assertTrue(imp.strides is None)
self.assertTrue(imp.suboffsets is None)
self.assertFalse(imp.readonly)
self.assertEqual(imp.buf, snd._samples_address)
imp = Importer(snd, buftools.PyBUF_WRITABLE)
self.assertEqual(imp.ndim, 0)
self.assertTrue(imp.format is None)
self.assertEqual(imp.len, buflen)
self.assertEqual(imp.itemsize, itemsize)
self.assertTrue(imp.shape is None)
self.assertTrue(imp.strides is None)
self.assertTrue(imp.suboffsets is None)
self.assertFalse(imp.readonly)
self.assertEqual(imp.buf, snd._samples_address)
imp = Importer(snd, buftools.PyBUF_FORMAT)
self.assertEqual(imp.ndim, 0)
self.assertEqual(imp.format, format)
self.assertEqual(imp.len, buflen)
self.assertEqual(imp.itemsize, itemsize)
self.assertTrue(imp.shape is None)
self.assertTrue(imp.strides is None)
self.assertTrue(imp.suboffsets is None)
self.assertFalse(imp.readonly)
self.assertEqual(imp.buf, snd._samples_address)
imp = Importer(snd, buftools.PyBUF_ND)
self.assertEqual(imp.ndim, ndim)
self.assertTrue(imp.format is None)
self.assertEqual(imp.len, buflen)
self.assertEqual(imp.itemsize, itemsize)
self.assertEqual(imp.shape, shape)
self.assertTrue(imp.strides is None)
self.assertTrue(imp.suboffsets is None)
self.assertFalse(imp.readonly)
self.assertEqual(imp.buf, snd._samples_address)
imp = Importer(snd, buftools.PyBUF_STRIDES)
self.assertEqual(imp.ndim, ndim)
self.assertTrue(imp.format is None)
self.assertEqual(imp.len, buflen)
self.assertEqual(imp.itemsize, itemsize)
self.assertEqual(imp.shape, shape)
self.assertEqual(imp.strides, strides)
self.assertTrue(imp.suboffsets is None)
self.assertFalse(imp.readonly)
self.assertEqual(imp.buf, snd._samples_address)
imp = Importer(snd, buftools.PyBUF_FULL_RO)
self.assertEqual(imp.ndim, ndim)
self.assertEqual(imp.format, format)
self.assertEqual(imp.len, buflen)
self.assertEqual(imp.itemsize, 2)
self.assertEqual(imp.shape, shape)
self.assertEqual(imp.strides, strides)
self.assertTrue(imp.suboffsets is None)
self.assertFalse(imp.readonly)
self.assertEqual(imp.buf, snd._samples_address)
imp = Importer(snd, buftools.PyBUF_FULL_RO)
self.assertEqual(imp.ndim, ndim)
self.assertEqual(imp.format, format)
self.assertEqual(imp.len, buflen)
self.assertEqual(imp.itemsize, itemsize)
self.assertEqual(imp.shape, exp.shape)
self.assertEqual(imp.strides, strides)
self.assertTrue(imp.suboffsets is None)
self.assertFalse(imp.readonly)
self.assertEqual(imp.buf, snd._samples_address)
imp = Importer(snd, buftools.PyBUF_C_CONTIGUOUS)
self.assertEqual(imp.ndim, ndim)
self.assertTrue(imp.format is None)
self.assertEqual(imp.strides, strides)
imp = Importer(snd, buftools.PyBUF_ANY_CONTIGUOUS)
self.assertEqual(imp.ndim, ndim)
self.assertTrue(imp.format is None)
self.assertEqual(imp.strides, strides)
if ndim == 1:
imp = Importer(snd, buftools.PyBUF_F_CONTIGUOUS)
self.assertEqual(imp.ndim, 1)
self.assertTrue(imp.format is None)
self.assertEqual(imp.strides, strides)
else:
self.assertRaises(BufferError, Importer, snd, buftools.PyBUF_F_CONTIGUOUS)
def todo_test_fadeout(self):
# __doc__ (as of 2008-08-02) for pygame.mixer.fadeout:
# pygame.mixer.fadeout(time): return None
# fade out the volume on all sounds before stopping
#
# This will fade out the volume on all active channels over the time
# argument in milliseconds. After the sound is muted the playback will
# stop.
#
self.fail()
def todo_test_find_channel(self):
# __doc__ (as of 2008-08-02) for pygame.mixer.find_channel:
# pygame.mixer.find_channel(force=False): return Channel
# find an unused channel
#
# This will find and return an inactive Channel object. If there are
# no inactive Channels this function will return None. If there are no
# inactive channels and the force argument is True, this will find the
# Channel with the longest running Sound and return it.
#
# If the mixer has reserved channels from pygame.mixer.set_reserved()
# then those channels will not be returned here.
#
self.fail()
def todo_test_get_busy(self):
# __doc__ (as of 2008-08-02) for pygame.mixer.get_busy:
# pygame.mixer.get_busy(): return bool
# test if any sound is being mixed
#
# Returns True if the mixer is busy mixing any channels. If the mixer
# is idle then this return False.
#
self.fail()
def todo_test_pause(self):
# __doc__ (as of 2008-08-02) for pygame.mixer.pause:
# pygame.mixer.pause(): return None
# temporarily stop playback of all sound channels
#
# This will temporarily stop all playback on the active mixer
# channels. The playback can later be resumed with
# pygame.mixer.unpause()
#
self.fail()
def todo_test_set_reserved(self):
# __doc__ (as of 2008-08-02) for pygame.mixer.set_reserved:
# pygame.mixer.set_reserved(count): return None
# reserve channels from being automatically used
#
# The mixer can reserve any number of channels that will not be
# automatically selected for playback by Sounds. If sounds are
# currently playing on the reserved channels they will not be stopped.
#
# This allows the application to reserve a specific number of channels
# for important sounds that must not be dropped or have a guaranteed
# channel to play on.
#
self.fail()
def todo_test_stop(self):
# __doc__ (as of 2008-08-02) for pygame.mixer.stop:
# pygame.mixer.stop(): return None
# stop playback of all sound channels
#
# This will stop all playback of all active mixer channels.
self.fail()
def todo_test_unpause(self):
# __doc__ (as of 2008-08-02) for pygame.mixer.unpause:
# pygame.mixer.unpause(): return None
# resume paused playback of sound channels
#
# This will resume all active sound channels after they have been paused.
self.fail()
def test_get_sdl_mixer_version(self):
"""Ensures get_sdl_mixer_version works correctly with no args."""
expected_length = 3
expected_type = tuple
expected_item_type = int
version = pygame.mixer.get_sdl_mixer_version()
self.assertIsInstance(version, expected_type)
self.assertEqual(len(version), expected_length)
for item in version:
self.assertIsInstance(item, expected_item_type)
def test_get_sdl_mixer_version__args(self):
"""Ensures get_sdl_mixer_version works correctly using args."""
expected_length = 3
expected_type = tuple
expected_item_type = int
for value in (True, False):
version = pygame.mixer.get_sdl_mixer_version(value)
self.assertIsInstance(version, expected_type)
self.assertEqual(len(version), expected_length)
for item in version:
self.assertIsInstance(item, expected_item_type)
def test_get_sdl_mixer_version__kwargs(self):
"""Ensures get_sdl_mixer_version works correctly using kwargs."""
expected_length = 3
expected_type = tuple
expected_item_type = int
for value in (True, False):
version = pygame.mixer.get_sdl_mixer_version(linked=value)
self.assertIsInstance(version, expected_type)
self.assertEqual(len(version), expected_length)
for item in version:
self.assertIsInstance(item, expected_item_type)
def test_get_sdl_mixer_version__invalid_args_kwargs(self):
"""Ensures get_sdl_mixer_version handles invalid args and kwargs."""
invalid_bool = InvalidBool()
with self.assertRaises(TypeError):
version = pygame.mixer.get_sdl_mixer_version(invalid_bool)
with self.assertRaises(TypeError):
version = pygame.mixer.get_sdl_mixer_version(linked=invalid_bool)
def test_get_sdl_mixer_version__linked_equals_compiled(self):
"""Ensures get_sdl_mixer_version's linked/compiled versions are equal.
"""
linked_version = pygame.mixer.get_sdl_mixer_version(linked=True)
complied_version = pygame.mixer.get_sdl_mixer_version(linked=False)
self.assertTupleEqual(linked_version, complied_version)
############################## CHANNEL CLASS TESTS #############################
class ChannelTypeTest(AssertRaisesRegexMixin, unittest.TestCase):
@classmethod
def setUpClass(cls):
# Initializing the mixer is slow, so minimize the times it is called.
mixer.init()
@classmethod
def tearDownClass(cls):
mixer.quit()
def setUp(cls):
# This makes sure the mixer is always initialized before each test (in
# case a test calls pygame.mixer.quit()).
if mixer.get_init() is None:
mixer.init()
def test_channel(self):
"""Ensure Channel() creation works."""
channel = mixer.Channel(0)
self.assertIsInstance(channel, mixer.ChannelType)
self.assertEqual(channel.__class__.__name__, "Channel")
def test_channel__without_arg(self):
"""Ensure exception for Channel() creation with no argument."""
with self.assertRaises(TypeError):
mixer.Channel()
def test_channel__invalid_id(self):
"""Ensure exception for Channel() creation with an invalid id."""
with self.assertRaises(IndexError):
mixer.Channel(-1)
def test_channel__before_init(self):
"""Ensure exception for Channel() creation with non-init mixer."""
mixer.quit()
with self.assertRaisesRegex(pygame.error, "mixer not initialized"):
mixer.Channel(0)
def todo_test_fadeout(self):
# __doc__ (as of 2008-08-02) for pygame.mixer.Channel.fadeout:
# Channel.fadeout(time): return None
# stop playback after fading channel out
#
# Stop playback of a channel after fading out the sound over the given
# time argument in milliseconds.
#
self.fail()
def test_get_busy(self):
"""Ensure an idle channel's busy state is correct."""
expected_busy = False
channel = mixer.Channel(0)
busy = channel.get_busy()
self.assertEqual(busy, expected_busy)
def todo_test_get_busy__active(self):
"""Ensure an active channel's busy state is correct."""
self.fail()
def todo_test_get_endevent(self):
# __doc__ (as of 2008-08-02) for pygame.mixer.Channel.get_endevent:
# Channel.get_endevent(): return type
# get the event a channel sends when playback stops
#
# Returns the event type to be sent every time the Channel finishes
# playback of a Sound. If there is no endevent the function returns
# pygame.NOEVENT.
#
self.fail()
def todo_test_get_queue(self):
# __doc__ (as of 2008-08-02) for pygame.mixer.Channel.get_queue:
# Channel.get_queue(): return Sound
# return any Sound that is queued
#
# If a Sound is already queued on this channel it will be returned.
# Once the queued sound begins playback it will no longer be on the
# queue.
#
self.fail()
def todo_test_get_sound(self):
# __doc__ (as of 2008-08-02) for pygame.mixer.Channel.get_sound:
# Channel.get_sound(): return Sound
# get the currently playing Sound
#
# Return the actual Sound object currently playing on this channel. If
# the channel is idle None is returned.
#
self.fail()
def test_get_volume(self):
"""Ensure a channel's volume can be retrieved."""
expected_volume = 1.0 # default
channel = mixer.Channel(0)
volume = channel.get_volume()
self.assertAlmostEqual(volume, expected_volume)
def todo_test_get_volume__while_playing(self):
"""Ensure a channel's volume can be retrieved while playing."""
self.fail()
def todo_test_pause(self):
# __doc__ (as of 2008-08-02) for pygame.mixer.Channel.pause:
# Channel.pause(): return None
# temporarily stop playback of a channel
#
# Temporarily stop the playback of sound on a channel. It can be
# resumed at a later time with Channel.unpause()
#
self.fail()
def todo_test_play(self):
# __doc__ (as of 2008-08-02) for pygame.mixer.Channel.play:
# Channel.play(Sound, loops=0, maxtime=0, fade_ms=0): return None
# play a Sound on a specific Channel
#
# This will begin playback of a Sound on a specific Channel. If the
# Channel is currently playing any other Sound it will be stopped.
#
# The loops argument has the same meaning as in Sound.play(): it is
# the number of times to repeat the sound after the first time. If it
# is 3, the sound will be played 4 times (the first time, then three
# more). If loops is -1 then the playback will repeat indefinitely.
#
# As in Sound.play(), the maxtime argument can be used to stop
# playback of the Sound after a given number of milliseconds.
#
# As in Sound.play(), the fade_ms argument can be used fade in the sound.
self.fail()
def todo_test_queue(self):
# __doc__ (as of 2008-08-02) for pygame.mixer.Channel.queue:
# Channel.queue(Sound): return None
# queue a Sound object to follow the current
#
# When a Sound is queued on a Channel, it will begin playing
# immediately after the current Sound is finished. Each channel can
# only have a single Sound queued at a time. The queued Sound will
# only play if the current playback finished automatically. It is
# cleared on any other call to Channel.stop() or Channel.play().
#
# If there is no sound actively playing on the Channel then the Sound
# will begin playing immediately.
#
self.fail()
def todo_test_set_endevent(self):
# __doc__ (as of 2008-08-02) for pygame.mixer.Channel.set_endevent:
# Channel.set_endevent(): return None
# Channel.set_endevent(type): return None
# have the channel send an event when playback stops
#
# When an endevent is set for a channel, it will send an event to the
# pygame queue every time a sound finishes playing on that channel
# (not just the first time). Use pygame.event.get() to retrieve the
# endevent once it's sent.
#
# Note that if you called Sound.play(n) or Channel.play(sound,n), the
# end event is sent only once: after the sound has been played "n+1"
# times (see the documentation of Sound.play).
#
# If Channel.stop() or Channel.play() is called while the sound was
# still playing, the event will be posted immediately.
#
# The type argument will be the event id sent to the queue. This can
# be any valid event type, but a good choice would be a value between
# pygame.locals.USEREVENT and pygame.locals.NUMEVENTS. If no type
# argument is given then the Channel will stop sending endevents.
#
self.fail()
def todo_test_set_volume(self):
# __doc__ (as of 2008-08-02) for pygame.mixer.Channel.set_volume:
# Channel.set_volume(value): return None
# Channel.set_volume(left, right): return None
# set the volume of a playing channel
#
# Set the volume (loudness) of a playing sound. When a channel starts
# to play its volume value is reset. This only affects the current
# sound. The value argument is between 0.0 and 1.0.
#
# If one argument is passed, it will be the volume of both speakers.
# If two arguments are passed and the mixer is in stereo mode, the
# first argument will be the volume of the left speaker and the second
# will be the volume of the right speaker. (If the second argument is
# None, the first argument will be the volume of both speakers.)
#
# If the channel is playing a Sound on which set_volume() has also
# been called, both calls are taken into account. For example:
#
# sound = pygame.mixer.Sound("s.wav")
# channel = s.play() # Sound plays at full volume by default
# sound.set_volume(0.9) # Now plays at 90% of full volume.
# sound.set_volume(0.6) # Now plays at 60% (previous value replaced).
# channel.set_volume(0.5) # Now plays at 30% (0.6 * 0.5).
self.fail()
def todo_test_stop(self):
# __doc__ (as of 2008-08-02) for pygame.mixer.Channel.stop:
# Channel.stop(): return None
# stop playback on a Channel
#
# Stop sound playback on a channel. After playback is stopped the
# channel becomes available for new Sounds to play on it.
#
self.fail()
def todo_test_unpause(self):
# __doc__ (as of 2008-08-02) for pygame.mixer.Channel.unpause:
# Channel.unpause(): return None
# resume pause playback of a channel
#
# Resume the playback on a paused channel.
self.fail()
############################### SOUND CLASS TESTS ##############################
class SoundTypeTest(AssertRaisesRegexMixin, unittest.TestCase):
@classmethod
def tearDownClass(cls):
mixer.quit()
def setUp(cls):
# This makes sure the mixer is always initialized before each test (in
# case a test calls pygame.mixer.quit()).
if mixer.get_init() is None:
mixer.init()
# See MixerModuleTest's methods test_sound_args(), test_sound_unicode(),
# and test_array_keyword() for additional testing of Sound() creation.
def test_sound(self):
"""Ensure Sound() creation with a filename works."""
filename = example_path(os.path.join("data", "house_lo.wav"))
sound1 = mixer.Sound(filename)
sound2 = mixer.Sound(file=filename)
self.assertIsInstance(sound1, mixer.Sound)
self.assertIsInstance(sound2, mixer.Sound)
def test_sound__from_file_object(self):
"""Ensure Sound() creation with a file object works."""
filename = example_path(os.path.join("data", "house_lo.wav"))
# Using 'with' ensures the file is closed even if test fails.
with open(filename, "rb") as file_obj:
sound = mixer.Sound(file_obj)
self.assertIsInstance(sound, mixer.Sound)
def test_sound__from_sound_object(self):
"""Ensure Sound() creation with a Sound() object works."""
filename = example_path(os.path.join("data", "house_lo.wav"))
sound_obj = mixer.Sound(file=filename)
sound = mixer.Sound(sound_obj)
self.assertIsInstance(sound, mixer.Sound)
def todo_test_sound__from_buffer(self):
"""Ensure Sound() creation with a buffer works."""
self.fail()
def todo_test_sound__from_array(self):
"""Ensure Sound() creation with an array works."""
self.fail()
def test_sound__without_arg(self):
"""Ensure exception raised for Sound() creation with no argument."""
with self.assertRaises(TypeError):
mixer.Sound()
def test_sound__before_init(self):
"""Ensure exception raised for Sound() creation with non-init mixer."""
mixer.quit()
filename = example_path(os.path.join("data", "house_lo.wav"))
with self.assertRaisesRegex(pygame.error, "mixer not initialized"):
mixer.Sound(file=filename)
@unittest.skipIf(IS_PYPY, "pypy skip")
def test_samples_address(self):
"""Test the _samples_address getter."""
try:
from ctypes import pythonapi, c_void_p, py_object
try:
Bytes_FromString = pythonapi.PyBytes_FromString # python 3
except:
Bytes_FromString = pythonapi.PyString_FromString # python 2
Bytes_FromString.restype = c_void_p
Bytes_FromString.argtypes = [py_object]
samples = as_bytes("abcdefgh") # keep byte size a multiple of 4
sample_bytes = Bytes_FromString(samples)
snd = mixer.Sound(buffer=samples)
self.assertNotEqual(snd._samples_address, sample_bytes)
finally:
pygame.mixer.quit()
with self.assertRaisesRegex(pygame.error, "mixer not initialized"):
snd._samples_address
def todo_test_fadeout(self):
# __doc__ (as of 2008-08-02) for pygame.mixer.Sound.fadeout:
# Sound.fadeout(time): return None
# stop sound playback after fading out
#
# This will stop playback of the sound after fading it out over the
# time argument in milliseconds. The Sound will fade and stop on all
# actively playing channels.
#
self.fail()
def test_get_length(self):
"""Tests if get_length returns a correct length."""
try:
for size in SIZES:
pygame.mixer.quit()
pygame.mixer.init(size=size)
filename = example_path(os.path.join("data", "punch.wav"))
sound = mixer.Sound(file=filename)
# The sound data is in the mixer output format. So dividing the
# length of the raw sound data by the mixer settings gives
# the expected length of the sound.
sound_bytes = sound.get_raw()
mix_freq, mix_bits, mix_channels = pygame.mixer.get_init()
mix_bytes = abs(mix_bits) / 8
expected_length = float(len(sound_bytes)) / mix_freq / mix_bytes / mix_channels
self.assertAlmostEqual(expected_length, sound.get_length())
finally:
pygame.mixer.quit()
with self.assertRaisesRegex(pygame.error, "mixer not initialized"):
sound.get_length()
def test_get_num_channels(self):
"""
Tests if Sound.get_num_channels returns the correct number
of channels playing a specific sound.
"""
try:
filename = example_path(os.path.join("data", "house_lo.wav"))
sound = mixer.Sound(file=filename)
self.assertEqual(sound.get_num_channels(), 0)
sound.play()
self.assertEqual(sound.get_num_channels(), 1)
sound.play()
self.assertEqual(sound.get_num_channels(), 2)
sound.stop()
self.assertEqual(sound.get_num_channels(), 0)
finally:
pygame.mixer.quit()
with self.assertRaisesRegex(pygame.error, "mixer not initialized"):
sound.get_num_channels()
def test_get_volume(self):
"""Ensure a sound's volume can be retrieved."""
try:
expected_volume = 1.0 # default
filename = example_path(os.path.join("data", "house_lo.wav"))
sound = mixer.Sound(file=filename)
volume = sound.get_volume()
self.assertAlmostEqual(volume, expected_volume)
finally:
pygame.mixer.quit()
with self.assertRaisesRegex(pygame.error, "mixer not initialized"):
sound.get_volume()
def todo_test_get_volume__while_playing(self):
"""Ensure a sound's volume can be retrieved while playing."""
self.fail()
def todo_test_play(self):
# __doc__ (as of 2008-08-02) for pygame.mixer.Sound.play:
# Sound.play(loops=0, maxtime=0, fade_ms=0): return Channel
# begin sound playback
#
# Begin playback of the Sound (i.e., on the computer's speakers) on an
# available Channel. This will forcibly select a Channel, so playback
# may cut off a currently playing sound if necessary.
#
# The loops argument controls how many times the sample will be
# repeated after being played the first time. A value of 5 means that
# the sound will be played once, then repeated five times, and so is
# played a total of six times. The default value (zero) means the
# Sound is not repeated, and so is only played once. If loops is set
# to -1 the Sound will loop indefinitely (though you can still call
# stop() to stop it).
#
# The maxtime argument can be used to stop playback after a given
# number of milliseconds.
#
# The fade_ms argument will make the sound start playing at 0 volume
# and fade up to full volume over the time given. The sample may end
# before the fade-in is complete.
#
# This returns the Channel object for the channel that was selected.
self.fail()
def test_set_volume(self):
"""Ensure a sound's volume can be set."""
try:
float_delta = 1.0 / 128 # SDL volume range is 0 to 128
filename = example_path(os.path.join("data", "house_lo.wav"))
sound = mixer.Sound(file=filename)
current_volume = sound.get_volume()
# (volume_set_value : expected_volume)
volumes = (
(-1, current_volume), # value < 0 won't change volume
(0, 0.0),
(0.01, 0.01),
(0.1, 0.1),
(0.5, 0.5),
(0.9, 0.9),
(0.99, 0.99),
(1, 1.0),
(1.1, 1.0),
(2.0, 1.0),
)
for volume_set_value, expected_volume in volumes:
sound.set_volume(volume_set_value)
self.assertAlmostEqual(
sound.get_volume(), expected_volume, delta=float_delta
)
finally:
pygame.mixer.quit()
with self.assertRaisesRegex(pygame.error, "mixer not initialized"):
sound.set_volume(1)
def todo_test_set_volume__while_playing(self):
"""Ensure a sound's volume can be set while playing."""
self.fail()
def test_stop(self):
"""Ensure stop can be called while not playing a sound."""
try:
expected_channels = 0
filename = example_path(os.path.join("data", "house_lo.wav"))
sound = mixer.Sound(file=filename)
sound.stop()
self.assertEqual(sound.get_num_channels(), expected_channels)
finally:
pygame.mixer.quit()
with self.assertRaisesRegex(pygame.error, "mixer not initialized"):
sound.stop()
def todo_test_stop__while_playing(self):
"""Ensure stop stops a playing sound."""
self.fail()
def test_get_raw(self):
"""Ensure get_raw returns the correct bytestring."""
try:
samples = as_bytes("abcdefgh") # keep byte size a multiple of 4
snd = mixer.Sound(buffer=samples)
raw = snd.get_raw()
self.assertIsInstance(raw, bytes_)
self.assertEqual(raw, samples)
finally:
pygame.mixer.quit()
with self.assertRaisesRegex(pygame.error, "mixer not initialized"):
snd.get_raw()
##################################### MAIN #####################################
if __name__ == "__main__":
unittest.main()
|
the-stack_0_7252 | # coding=utf-8
__author__ = 'lxn3032'
import os
import requests
import time
import warnings
import threading
import atexit
from airtest.core.api import connect_device, device as current_device
from airtest.core.android.ime import YosemiteIme
from hrpc.client import RpcClient
from hrpc.transport.http import HttpTransport
from poco.pocofw import Poco
from poco.agent import PocoAgent
from poco.sdk.Attributor import Attributor
from poco.sdk.interfaces.screen import ScreenInterface
from poco.utils.hrpc.hierarchy import RemotePocoHierarchy
from poco.utils.airtest.input import AirtestInput
from poco.utils import six
from poco.drivers.android.utils.installation import install, uninstall
__all__ = ['AndroidUiautomationPoco', 'AndroidUiautomationHelper']
this_dir = os.path.dirname(os.path.realpath(__file__))
PocoServicePackage = 'com.netease.open.pocoservice'
PocoServicePackageTest = 'com.netease.open.pocoservice.test'
class AndroidRpcClient(RpcClient):
def __init__(self, endpoint):
self.endpoint = endpoint
super(AndroidRpcClient, self).__init__(HttpTransport)
def initialize_transport(self):
return HttpTransport(self.endpoint, self)
# deprecated
class AttributorWrapper(Attributor):
"""
    Some devices still do not support Accessibility.ACTION_SET_TEXT, so YosemiteIme remains the most compatible option.
    This class hooks set_text and falls back to the IME's text() method instead.
"""
def __init__(self, remote, ime):
self.remote = remote
self.ime = ime
def getAttr(self, node, attrName):
return self.remote.getAttr(node, attrName)
def setAttr(self, node, attrName, attrVal):
if attrName == 'text' and attrVal != '':
            # Clear the field first, then set the new value. Not as good as using the IME directly, but it gets the job done.
current_val = self.remote.getAttr(node, 'text')
if current_val:
self.remote.setAttr(node, 'text', '')
self.ime.text(attrVal)
else:
self.remote.setAttr(node, attrName, attrVal)
class ScreenWrapper(ScreenInterface):
def __init__(self, screen):
super(ScreenWrapper, self).__init__()
self.screen = screen
def getScreen(self, width):
        # The Android PocoService implementation returns only a base64-encoded image, always in jpg format.
b64img = self.screen.getScreen(width)
return b64img, 'jpg'
def getPortSize(self):
return self.screen.getPortSize()
class AndroidPocoAgent(PocoAgent):
def __init__(self, endpoint, ime, use_airtest_input=False):
self.client = AndroidRpcClient(endpoint)
remote_poco = self.client.remote('poco-uiautomation-framework')
dumper = remote_poco.dumper
selector = remote_poco.selector
attributor = remote_poco.attributor
hierarchy = RemotePocoHierarchy(dumper, selector, attributor)
if use_airtest_input:
inputer = AirtestInput()
else:
inputer = remote_poco.inputer
super(AndroidPocoAgent, self).__init__(hierarchy, inputer, ScreenWrapper(remote_poco.screen), None)
def on_bind_driver(self, driver):
super(AndroidPocoAgent, self).on_bind_driver(driver)
if isinstance(self.input, AirtestInput):
self.input.add_preaction_cb(driver)
class KeepRunningInstrumentationThread(threading.Thread):
"""Keep pocoservice running"""
def __init__(self, poco, port_to_ping):
super(KeepRunningInstrumentationThread, self).__init__()
self._stop_event = threading.Event()
self.poco = poco
self.port_to_ping = port_to_ping
self.daemon = True
def stop(self):
self._stop_event.set()
def stopped(self):
return self._stop_event.is_set()
def run(self):
while not self.stopped():
if getattr(self.poco, "_instrument_proc", None) is not None:
stdout, stderr = self.poco._instrument_proc.communicate()
print('[pocoservice.apk] stdout: {}'.format(stdout))
print('[pocoservice.apk] stderr: {}'.format(stderr))
if not self.stopped():
                    self.poco._start_instrument(self.port_to_ping)  # try to restart
time.sleep(1)
class AndroidUiautomationPoco(Poco):
"""
Poco Android implementation for testing **Android native apps**.
Args:
device (:py:obj:`Device`): :py:obj:`airtest.core.device.Device` instance provided by ``airtest``. leave the
parameter default and the default device will be chosen. more details refer to ``airtest doc``
using_proxy (:py:obj:`bool`): whether use adb forward to connect the Android device or not
force_restart (:py:obj:`bool`): whether always restart the poco-service-demo running on Android device or not
options: see :py:class:`poco.pocofw.Poco`
Examples:
The simplest way to initialize AndroidUiautomationPoco instance and no matter your device network status::
from poco.drivers.android.uiautomation import AndroidUiautomationPoco
poco = AndroidUiautomationPoco()
poco('android:id/title').click()
...
"""
def __init__(self, device=None, using_proxy=True, force_restart=False, use_airtest_input=False, **options):
        # This option exists so that the latest pocounit workflow does not take a screenshot on every step.
self.screenshot_each_action = True
if options.get('screenshot_each_action') is False:
self.screenshot_each_action = False
self.device = device or current_device()
if not self.device:
self.device = connect_device("Android:///")
self.adb_client = self.device.adb
if using_proxy:
self.device_ip = self.adb_client.host or "127.0.0.1"
else:
self.device_ip = self.device.get_ip_address()
# save current top activity (@nullable)
current_top_activity_package = self.device.get_top_activity_name()
if current_top_activity_package is not None:
current_top_activity_package = current_top_activity_package.split('/')[0]
# install ime
self.ime = YosemiteIme(self.adb_client)
self.ime.start()
# install
self._instrument_proc = None
self._install_service()
# forward
if using_proxy:
p0, _ = self.adb_client.setup_forward("tcp:10080")
p1, _ = self.adb_client.setup_forward("tcp:10081")
else:
p0 = 10080
p1 = 10081
# start
if self._is_running('com.github.uiautomator'):
warnings.warn('{} should not run together with "uiautomator". "uiautomator" will be killed.'
.format(self.__class__.__name__))
self.adb_client.shell(['am', 'force-stop', 'com.github.uiautomator'])
ready = self._start_instrument(p0, force_restart=force_restart)
if not ready:
            # If startup failed, uninstall and start again; a quirk of instrumentation.
uninstall(self.adb_client, PocoServicePackage)
self._install_service()
ready = self._start_instrument(p0)
if current_top_activity_package is not None:
current_top_activity2 = self.device.get_top_activity_name()
if current_top_activity2 is None or current_top_activity_package not in current_top_activity2:
self.device.start_app(current_top_activity_package, activity=True)
if not ready:
raise RuntimeError("unable to launch AndroidUiautomationPoco")
if ready:
            # After the first successful start, watch the process from a background thread and keep it from exiting.
self._keep_running_thread = KeepRunningInstrumentationThread(self, p0)
self._keep_running_thread.start()
endpoint = "http://{}:{}".format(self.device_ip, p1)
agent = AndroidPocoAgent(endpoint, self.ime, use_airtest_input)
super(AndroidUiautomationPoco, self).__init__(agent, **options)
def _install_service(self):
updated = install(self.adb_client, os.path.join(this_dir, 'lib', 'pocoservice-debug.apk'))
install(self.adb_client, os.path.join(this_dir, 'lib', 'pocoservice-debug-androidTest.apk'), updated)
return updated
def _is_running(self, package_name):
processes = self.adb_client.shell(['ps']).splitlines()
for ps in processes:
ps = ps.strip()
if ps.endswith(package_name):
return True
return False
def _start_instrument(self, port_to_ping, force_restart=False):
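        """Start (or reuse) the PocoService instrumentation on the device.
        Unless ``force_restart`` is set, first check whether a UiAutomation
        connection already exists and reuse it. Otherwise kill any previous
        instrumentation process, start the service's TestActivity, launch the
        instrumentation runner via ``am instrument`` and poll ``port_to_ping``
        until the HTTP endpoint answers. Returns True once the service is
        reachable, False otherwise.
        """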
if not force_restart:
try:
state = requests.get('http://{}:{}/uiautomation/connectionState'.format(self.device_ip, port_to_ping),
timeout=10)
state = state.json()
if state.get('connected'):
# skip starting instrumentation if UiAutomation Service already connected.
return True
except:
pass
if self._instrument_proc is not None:
if self._instrument_proc.poll() is None:
self._instrument_proc.kill()
self._instrument_proc = None
ready = False
self.adb_client.shell(['am', 'force-stop', PocoServicePackage])
        # Start the main TestActivity before launching instrument, otherwise instrumentation may fail.
self.adb_client.shell('am start -n {}/.TestActivity'.format(PocoServicePackage))
instrumentation_cmd = [
'am', 'instrument', '-w', '-e', 'debug', 'false', '-e', 'class',
'{}.InstrumentedTestAsLauncher'.format(PocoServicePackage),
'{}.test/android.support.test.runner.AndroidJUnitRunner'.format(PocoServicePackage)]
self._instrument_proc = self.adb_client.start_shell(instrumentation_cmd)
def cleanup_proc(proc):
def wrapped():
try:
proc.kill()
except:
pass
return wrapped
atexit.register(cleanup_proc(self._instrument_proc))
time.sleep(2)
for i in range(10):
try:
requests.get('http://{}:{}'.format(self.device_ip, port_to_ping), timeout=10)
ready = True
break
except requests.exceptions.Timeout:
break
except requests.exceptions.ConnectionError:
if self._instrument_proc.poll() is not None:
warnings.warn("[pocoservice.apk] instrumentation test server process is no longer alive")
stdout = self._instrument_proc.stdout.read()
stderr = self._instrument_proc.stderr.read()
print('[pocoservice.apk] stdout: {}'.format(stdout))
print('[pocoservice.apk] stderr: {}'.format(stderr))
time.sleep(1)
print("still waiting for uiautomation ready.")
continue
return ready
def on_pre_action(self, action, ui, args):
if self.screenshot_each_action:
            # for the airtest log
from airtest.core.api import snapshot
msg = repr(ui)
if not isinstance(msg, six.text_type):
msg = msg.decode('utf-8')
snapshot(msg=msg)
def stop_running(self):
print('[pocoservice.apk] stopping PocoService')
self._keep_running_thread.stop()
self._keep_running_thread.join(3)
self.adb_client.shell(['am', 'force-stop', PocoServicePackage])
class AndroidUiautomationHelper(object):
_nuis = {}
@classmethod
def get_instance(cls, device):
"""
        This is only a slot to store and get an already initialized poco instance rather than initializing it again. You can
simply pass the ``current device instance`` provided by ``airtest`` to get the AndroidUiautomationPoco instance.
If no such AndroidUiautomationPoco instance, a new instance will be created and stored.
Args:
device (:py:obj:`airtest.core.device.Device`): more details refer to ``airtest doc``
Returns:
poco instance
"""
if cls._nuis.get(device) is None:
cls._nuis[device] = AndroidUiautomationPoco(device)
return cls._nuis[device]
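# Illustrative usage sketch (not part of the original module; the device URI is
# an assumption):
#
#   from airtest.core.api import connect_device
#   device = connect_device("Android:///")
#   poco = AndroidUiautomationHelper.get_instance(device)
#   poco('android:id/title').click()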
|
the-stack_0_7253 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Copyright (c) 2017-2020 The Raven Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the RBF code."""
from test_framework.test_framework import RavenTestFramework
from test_framework.util import satoshi_round, assert_raises_rpc_error, assert_equal, Decimal
from test_framework.script import CScript
from test_framework.mininode import COIN, CTransaction, CTxIn, COutPoint, CTxOut
MAX_REPLACEMENT_LIMIT = 100
def tx_to_hex(tx):
return tx.serialize().hex()
def make_utxo(node, amount, confirmed=True, script_pub_key=CScript([1])):
"""Create a txout with a given amount and scriptPubKey
Mines coins as needed.
confirmed - txouts created will be confirmed in the blockchain;
unconfirmed otherwise.
"""
fee = 1 * COIN
while node.getbalance() < satoshi_round((amount + fee) / COIN):
node.generate(100)
new_addr = node.getnewaddress()
txid = node.sendtoaddress(new_addr, satoshi_round((amount + fee) / COIN))
tx1 = node.getrawtransaction(txid, 1)
txid = int(txid, 16)
i = None
for i, txout in enumerate(tx1['vout']):
if txout['scriptPubKey']['addresses'] == [new_addr]:
break
assert i is not None
tx2 = CTransaction()
tx2.vin = [CTxIn(COutPoint(txid, i))]
tx2.vout = [CTxOut(amount, script_pub_key)]
tx2.rehash()
signed_tx = node.signrawtransaction(tx_to_hex(tx2))
txid = node.sendrawtransaction(signed_tx['hex'], True)
# If requested, ensure txouts are confirmed.
if confirmed:
mempool_size = len(node.getrawmempool())
while mempool_size > 0:
node.generate(1)
new_size = len(node.getrawmempool())
# Error out if we have something stuck in the mempool, as this
# would likely be a bug.
assert (new_size < mempool_size)
mempool_size = new_size
return COutPoint(int(txid, 16), 0)
class ReplaceByFeeTest(RavenTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [["-maxorphantx=1000",
"-whitelist=127.0.0.1",
"-limitancestorcount=50",
"-limitancestorsize=101",
"-limitdescendantcount=200",
"-limitdescendantsize=101"],
["-mempoolreplacement=0"]]
def run_test(self):
# Leave IBD
self.nodes[0].generate(1)
make_utxo(self.nodes[0], 1 * COIN)
# Ensure nodes are synced
self.sync_all()
self.log.info("Running test simple doublespend...")
self.test_simple_doublespend()
self.log.info("Running test doublespend chain...")
self.test_doublespend_chain()
self.log.info("Running test doublespend tree...")
self.test_doublespend_tree()
self.log.info("Running test replacement feeperkb...")
self.test_replacement_fee_per_kb()
self.log.info("Running test spends of conflicting outputs...")
self.test_spends_of_conflicting_outputs()
self.log.info("Running test new unconfirmed inputs...")
self.test_new_unconfirmed_inputs()
self.log.info("Running test too many replacements...")
self.test_too_many_replacements()
self.log.info("Running test opt-in...")
self.test_opt_in()
self.log.info("Running test RPC...")
self.test_rpc()
self.log.info("Running test prioritised transactions...")
self.test_prioritised_transactions()
self.log.info("All Tests Passed")
def test_simple_doublespend(self):
"""Simple doublespend"""
tx0_outpoint = make_utxo(self.nodes[0], int(1.1 * COIN))
# make_utxo may have generated a bunch of blocks, so we need to sync
# before we can spend the coins generated, or else the resulting
# transactions might not be accepted by our peers.
self.sync_all()
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, n_sequence=0)]
tx1a.vout = [CTxOut(1 * COIN, CScript([b'a']))]
tx1a_hex = tx_to_hex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
self.sync_all()
# Should fail because we haven't changed the fee
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, n_sequence=0)]
tx1b.vout = [CTxOut(1 * COIN, CScript([b'b']))]
tx1b_hex = tx_to_hex(tx1b)
# This will raise an exception due to insufficient fee
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, True)
# This will raise an exception due to transaction replacement being disabled
assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[1].sendrawtransaction, tx1b_hex, True)
# Extra 0.1 RVN fee
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, n_sequence=0)]
tx1b.vout = [CTxOut(int(0.9 * COIN), CScript([b'b']))]
tx1b_hex = tx_to_hex(tx1b)
# Replacement still disabled even with "enough fee"
assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[1].sendrawtransaction, tx1b_hex, True)
# Works when enabled
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
mempool = self.nodes[0].getrawmempool()
assert (tx1a_txid not in mempool)
assert (tx1b_txid in mempool)
assert_equal(tx1b_hex, self.nodes[0].getrawtransaction(tx1b_txid))
# Second node is running mempoolreplacement=0, will not replace originally-seen txn
mempool = self.nodes[1].getrawmempool()
assert tx1a_txid in mempool
assert tx1b_txid not in mempool
def test_doublespend_chain(self):
"""Doublespend of a long chain"""
initial_n_value = 5000 * COIN
tx0_outpoint = make_utxo(self.nodes[0], initial_n_value)
prevout = tx0_outpoint
remaining_value = initial_n_value
chain_txids = []
while remaining_value > 1000 * COIN:
remaining_value -= 100 * COIN
tx = CTransaction()
tx.vin = [CTxIn(prevout, n_sequence=0)]
tx.vout = [CTxOut(remaining_value, CScript([1]))]
tx_hex = tx_to_hex(tx)
txid = self.nodes[0].sendrawtransaction(tx_hex, True)
chain_txids.append(txid)
prevout = COutPoint(int(txid, 16), 0)
# Whether the double-spend is allowed is evaluated by including all
# child fees - 40 RVN - so this attempt is rejected.
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, n_sequence=0)]
dbl_tx.vout = [CTxOut(initial_n_value - 30 * COIN, CScript([1]))]
dbl_tx_hex = tx_to_hex(dbl_tx)
# This will raise an exception due to insufficient fee
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, dbl_tx_hex, True)
# Accepted with sufficient fee
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, n_sequence=0)]
dbl_tx.vout = [CTxOut(1 * COIN, CScript([1]))]
dbl_tx_hex = tx_to_hex(dbl_tx)
self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
mempool = self.nodes[0].getrawmempool()
for doublespent_txid in chain_txids:
assert (doublespent_txid not in mempool)
def test_doublespend_tree(self):
"""Doublespend of a big tree of transactions"""
initial_n_value = 50 * COIN
tx0_outpoint = make_utxo(self.nodes[0], initial_n_value)
def branch(prevout, initial_value, max_txs, tree_width=5, fee_val=0.0001 * COIN, _total_txs=None):
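            # Recursively build a tree of transactions spending `prevout`, fanning out
            # `tree_width` outputs per transaction and yielding each created transaction
            # until `max_txs` transactions have been made.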
if _total_txs is None:
_total_txs = [0]
if _total_txs[0] >= max_txs:
return
txout_value = (initial_value - fee_val) // tree_width
if txout_value < fee_val:
return
vout = [CTxOut(txout_value, CScript([i + 1]))
for i in range(tree_width)]
tx_data = CTransaction()
tx_data.vin = [CTxIn(prevout, n_sequence=0)]
tx_data.vout = vout
tx_hex = tx_to_hex(tx_data)
assert (len(tx_data.serialize()) < 100000)
txid = self.nodes[0].sendrawtransaction(tx_hex, True)
yield tx_data
_total_txs[0] += 1
txid = int(txid, 16)
for i, _ in enumerate(tx_data.vout):
for x in branch(COutPoint(txid, i), txout_value,
max_txs,
tree_width=tree_width, fee_val=fee_val,
_total_txs=_total_txs):
yield x
fee = int(0.0001 * COIN)
n = MAX_REPLACEMENT_LIMIT
tree_txs = list(branch(tx0_outpoint, initial_n_value, n, fee_val=fee))
assert_equal(len(tree_txs), n)
# Attempt double-spend, will fail because too little fee paid
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, n_sequence=0)]
dbl_tx.vout = [CTxOut(initial_n_value - fee * n, CScript([1]))]
dbl_tx_hex = tx_to_hex(dbl_tx)
# This will raise an exception due to insufficient fee
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, dbl_tx_hex, True)
# 1 RVN fee is enough
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, n_sequence=0)]
dbl_tx.vout = [CTxOut(initial_n_value - fee * n - 1 * COIN, CScript([1]))]
dbl_tx_hex = tx_to_hex(dbl_tx)
self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
mempool = self.nodes[0].getrawmempool()
for tx in tree_txs:
tx.rehash()
assert (tx.hash not in mempool)
# Try again, but with more total transactions than the "max txs
# double-spent at once" anti-DoS limit.
for n in (MAX_REPLACEMENT_LIMIT + 1, MAX_REPLACEMENT_LIMIT * 2):
fee = int(0.0001 * COIN)
tx0_outpoint = make_utxo(self.nodes[0], initial_n_value)
tree_txs = list(branch(tx0_outpoint, initial_n_value, n, fee_val=fee))
assert_equal(len(tree_txs), n)
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, n_sequence=0)]
dbl_tx.vout = [CTxOut(initial_n_value - 2 * fee * n, CScript([1]))]
dbl_tx_hex = tx_to_hex(dbl_tx)
# This will raise an exception
assert_raises_rpc_error(-26, "too many potential replacements", self.nodes[0].sendrawtransaction, dbl_tx_hex, True)
for tx in tree_txs:
tx.rehash()
self.nodes[0].getrawtransaction(tx.hash)
def test_replacement_fee_per_kb(self):
"""Replacement requires fee-per-KB to be higher"""
tx0_outpoint = make_utxo(self.nodes[0], int(1.1 * COIN))
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, n_sequence=0)]
tx1a.vout = [CTxOut(1 * COIN, CScript([b'a']))]
tx1a_hex = tx_to_hex(tx1a)
self.nodes[0].sendrawtransaction(tx1a_hex, True)
# Higher fee, but the fee per KB is much lower, so the replacement is
# rejected.
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, n_sequence=0)]
tx1b.vout = [CTxOut(int(0.001 * COIN), CScript([b'a' * 999000]))]
tx1b_hex = tx_to_hex(tx1b)
# This will raise an exception due to insufficient fee
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, True)
def test_spends_of_conflicting_outputs(self):
"""Replacements that spend conflicting tx outputs are rejected"""
utxo1 = make_utxo(self.nodes[0], int(1.2 * COIN))
utxo2 = make_utxo(self.nodes[0], 3 * COIN)
tx1a = CTransaction()
tx1a.vin = [CTxIn(utxo1, n_sequence=0)]
tx1a.vout = [CTxOut(int(1.1 * COIN), CScript([b'a']))]
tx1a_hex = tx_to_hex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
tx1a_txid = int(tx1a_txid, 16)
# Direct spend an output of the transaction we're replacing.
tx2 = CTransaction()
tx2.vin = [CTxIn(utxo1, n_sequence=0), CTxIn(utxo2, n_sequence=0)]
tx2.vin.append(CTxIn(COutPoint(tx1a_txid, 0), n_sequence=0))
tx2.vout = tx1a.vout
tx2_hex = tx_to_hex(tx2)
# This will raise an exception
assert_raises_rpc_error(-26, "bad-txns-spends-conflicting-tx", self.nodes[0].sendrawtransaction, tx2_hex, True)
# Spend tx1a's output to test the indirect case.
tx1b = CTransaction()
tx1b.vin = [CTxIn(COutPoint(tx1a_txid, 0), n_sequence=0)]
tx1b.vout = [CTxOut(1 * COIN, CScript([b'a']))]
tx1b_hex = tx_to_hex(tx1b)
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
tx1b_txid = int(tx1b_txid, 16)
tx2 = CTransaction()
tx2.vin = [CTxIn(utxo1, n_sequence=0), CTxIn(utxo2, n_sequence=0),
CTxIn(COutPoint(tx1b_txid, 0))]
tx2.vout = tx1a.vout
tx2_hex = tx_to_hex(tx2)
# This will raise an exception
assert_raises_rpc_error(-26, "bad-txns-spends-conflicting-tx", self.nodes[0].sendrawtransaction, tx2_hex, True)
def test_new_unconfirmed_inputs(self):
"""Replacements that add new unconfirmed inputs are rejected"""
confirmed_utxo = make_utxo(self.nodes[0], int(1.1 * COIN))
unconfirmed_utxo = make_utxo(self.nodes[0], int(0.1 * COIN), False)
tx1 = CTransaction()
tx1.vin = [CTxIn(confirmed_utxo)]
tx1.vout = [CTxOut(1 * COIN, CScript([b'a']))]
tx1_hex = tx_to_hex(tx1)
self.nodes[0].sendrawtransaction(tx1_hex, True)
tx2 = CTransaction()
tx2.vin = [CTxIn(confirmed_utxo), CTxIn(unconfirmed_utxo)]
tx2.vout = tx1.vout
tx2_hex = tx_to_hex(tx2)
# This will raise an exception
assert_raises_rpc_error(-26, "replacement-adds-unconfirmed", self.nodes[0].sendrawtransaction, tx2_hex, True)
def test_too_many_replacements(self):
"""Replacements that evict too many transactions are rejected"""
# Try directly replacing more than MAX_REPLACEMENT_LIMIT
# transactions
# Start by creating a single transaction with many outputs
initial_n_value = 10 * COIN
utxo = make_utxo(self.nodes[0], initial_n_value)
fee = int(0.0001 * COIN)
split_value = int((initial_n_value - fee) / (MAX_REPLACEMENT_LIMIT + 1))
outputs = []
for i in range(MAX_REPLACEMENT_LIMIT + 1):
outputs.append(CTxOut(split_value, CScript([1])))
splitting_tx = CTransaction()
splitting_tx.vin = [CTxIn(utxo, n_sequence=0)]
splitting_tx.vout = outputs
splitting_tx_hex = tx_to_hex(splitting_tx)
txid = self.nodes[0].sendrawtransaction(splitting_tx_hex, True)
txid = int(txid, 16)
# Now spend each of those outputs individually
for i in range(MAX_REPLACEMENT_LIMIT + 1):
tx_i = CTransaction()
tx_i.vin = [CTxIn(COutPoint(txid, i), n_sequence=0)]
tx_i.vout = [CTxOut(split_value - fee, CScript([b'a']))]
tx_i_hex = tx_to_hex(tx_i)
self.nodes[0].sendrawtransaction(tx_i_hex, True)
# Now create doublespend of the whole lot; should fail.
# Need a big enough fee to cover all spending transactions and have
# a higher fee rate
double_spend_value = (split_value - 100 * fee) * (MAX_REPLACEMENT_LIMIT + 1)
inputs = []
for i in range(MAX_REPLACEMENT_LIMIT + 1):
inputs.append(CTxIn(COutPoint(txid, i), n_sequence=0))
double_tx = CTransaction()
double_tx.vin = inputs
double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))]
double_tx_hex = tx_to_hex(double_tx)
# This will raise an exception
assert_raises_rpc_error(-26, "too many potential replacements", self.nodes[0].sendrawtransaction, double_tx_hex, True)
# If we remove an input, it should pass
double_tx = CTransaction()
double_tx.vin = inputs[0:-1]
double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))]
double_tx_hex = tx_to_hex(double_tx)
self.nodes[0].sendrawtransaction(double_tx_hex, True)
def test_opt_in(self):
"""Replacing should only work if orig tx opted in"""
tx0_outpoint = make_utxo(self.nodes[0], int(1.1 * COIN))
# Create a non-opting in transaction
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, n_sequence=0xffffffff)]
tx1a.vout = [CTxOut(1 * COIN, CScript([b'a']))]
tx1a_hex = tx_to_hex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
# Shouldn't be able to double-spend
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, n_sequence=0)]
tx1b.vout = [CTxOut(int(0.9 * COIN), CScript([b'b']))]
tx1b_hex = tx_to_hex(tx1b)
# This will raise an exception
assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[0].sendrawtransaction, tx1b_hex, True)
tx1_outpoint = make_utxo(self.nodes[0], int(1.1 * COIN))
# Create a different non-opting in transaction
tx2a = CTransaction()
tx2a.vin = [CTxIn(tx1_outpoint, n_sequence=0xfffffffe)]
tx2a.vout = [CTxOut(1 * COIN, CScript([b'a']))]
tx2a_hex = tx_to_hex(tx2a)
tx2a_txid = self.nodes[0].sendrawtransaction(tx2a_hex, True)
# Still shouldn't be able to double-spend
tx2b = CTransaction()
tx2b.vin = [CTxIn(tx1_outpoint, n_sequence=0)]
tx2b.vout = [CTxOut(int(0.9 * COIN), CScript([b'b']))]
tx2b_hex = tx_to_hex(tx2b)
# This will raise an exception
assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[0].sendrawtransaction, tx2b_hex, True)
# Now create a new transaction that spends from tx1a and tx2a
# opt-in on one of the inputs
# Transaction should be replaceable on either input
tx1a_txid = int(tx1a_txid, 16)
tx2a_txid = int(tx2a_txid, 16)
tx3a = CTransaction()
tx3a.vin = [CTxIn(COutPoint(tx1a_txid, 0), n_sequence=0xffffffff),
CTxIn(COutPoint(tx2a_txid, 0), n_sequence=0xfffffffd)]
tx3a.vout = [CTxOut(int(0.9 * COIN), CScript([b'c'])), CTxOut(int(0.9 * COIN), CScript([b'd']))]
tx3a_hex = tx_to_hex(tx3a)
self.nodes[0].sendrawtransaction(tx3a_hex, True)
tx3b = CTransaction()
tx3b.vin = [CTxIn(COutPoint(tx1a_txid, 0), n_sequence=0)]
tx3b.vout = [CTxOut(int(0.5 * COIN), CScript([b'e']))]
tx3b_hex = tx_to_hex(tx3b)
tx3c = CTransaction()
tx3c.vin = [CTxIn(COutPoint(tx2a_txid, 0), n_sequence=0)]
tx3c.vout = [CTxOut(int(0.5 * COIN), CScript([b'f']))]
tx3c_hex = tx_to_hex(tx3c)
self.nodes[0].sendrawtransaction(tx3b_hex, True)
# If tx3b was accepted, tx3c won't look like a replacement,
# but make sure it is accepted anyway
self.nodes[0].sendrawtransaction(tx3c_hex, True)
def test_prioritised_transactions(self):
# Ensure that fee deltas used via prioritisetransaction are
# correctly used by replacement logic
# 1. Check that feeperkb uses modified fees
tx0_outpoint = make_utxo(self.nodes[0], int(1.1 * COIN))
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, n_sequence=0)]
tx1a.vout = [CTxOut(1 * COIN, CScript([b'a']))]
tx1a_hex = tx_to_hex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
# Higher fee, but the actual fee per KB is much lower.
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, n_sequence=0)]
tx1b.vout = [CTxOut(int(0.001 * COIN), CScript([b'a' * 740000]))]
tx1b_hex = tx_to_hex(tx1b)
# Verify tx1b cannot replace tx1a.
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, True)
# Use prioritisetransaction to set tx1a's fee to 0.
self.nodes[0].prioritisetransaction(txid=tx1a_txid, fee_delta=int(-0.1 * COIN))
# Now tx1b should be able to replace tx1a
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
assert (tx1b_txid in self.nodes[0].getrawmempool())
# 2. Check that absolute fee checks use modified fee.
tx1_outpoint = make_utxo(self.nodes[0], int(1.1 * COIN))
tx2a = CTransaction()
tx2a.vin = [CTxIn(tx1_outpoint, n_sequence=0)]
tx2a.vout = [CTxOut(1 * COIN, CScript([b'a']))]
tx2a_hex = tx_to_hex(tx2a)
self.nodes[0].sendrawtransaction(tx2a_hex, True)
# Lower fee, but we'll prioritise it
tx2b = CTransaction()
tx2b.vin = [CTxIn(tx1_outpoint, n_sequence=0)]
tx2b.vout = [CTxOut(int(1.01 * COIN), CScript([b'a']))]
tx2b.rehash()
tx2b_hex = tx_to_hex(tx2b)
# Verify tx2b cannot replace tx2a.
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx2b_hex, True)
# Now prioritise tx2b to have a higher modified fee
self.nodes[0].prioritisetransaction(txid=tx2b.hash, fee_delta=int(0.1 * COIN))
# tx2b should now be accepted
tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, True)
assert (tx2b_txid in self.nodes[0].getrawmempool())
def test_rpc(self):
us0 = self.nodes[0].listunspent()[0]
ins = [us0]
outs = {self.nodes[0].getnewaddress(): Decimal(1.0000000)}
rawtx0 = self.nodes[0].createrawtransaction(ins, outs, 0, True)
rawtx1 = self.nodes[0].createrawtransaction(ins, outs, 0, False)
json0 = self.nodes[0].decoderawtransaction(rawtx0)
json1 = self.nodes[0].decoderawtransaction(rawtx1)
assert_equal(json0["vin"][0]["sequence"], 4294967293)
assert_equal(json1["vin"][0]["sequence"], 4294967295)
rawtx2 = self.nodes[0].createrawtransaction([], outs)
f_raw_tx2a = self.nodes[0].fundrawtransaction(rawtx2, {"replaceable": True})
f_raw_tx2b = self.nodes[0].fundrawtransaction(rawtx2, {"replaceable": False})
json0 = self.nodes[0].decoderawtransaction(f_raw_tx2a['hex'])
json1 = self.nodes[0].decoderawtransaction(f_raw_tx2b['hex'])
assert_equal(json0["vin"][0]["sequence"], 4294967293)
assert_equal(json1["vin"][0]["sequence"], 4294967294)
if __name__ == '__main__':
ReplaceByFeeTest().main()
|
the-stack_0_7254 | """Setup script for shreddit.
"""
from setuptools import setup
from codecs import open
from os import path
VERSION = "6.1.0"
DESCRIPTION = " Remove your comment history on Reddit as deleting an account does not do so."
here = path.abspath(path.dirname(__file__))
with open(path.join(here, "README.md"), encoding='utf-8') as filein:
long_description = filein.read()
setup(
name="shreddit",
version=VERSION,
description=DESCRIPTION,
long_description=long_description,
url="https://github.com/niktheblak/Shreddit",
author="David John",
author_email="[email protected]",
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: End Users/Desktop",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python"],
license="FreeBSD License",
packages=["shreddit"],
install_requires=[
"arrow",
"praw>=4",
"PyYAML",
"requests",
"six",
"loremipsum"],
package_data={
"shreddit": ["*.example"]},
entry_points={
"console_scripts": ["shreddit=shreddit.app:main"]})
|
the-stack_0_7256 | import json
import math
import os.path
from src.cleaning.clean_drinks_4 import main as clean_drinks
from pathlib import Path
# clean_json_5.py
def main():
print("Cleaning json from cleaned Drink Data")
if not os.path.isfile(Path("../Savefiles/drinks_C2.txt")):
print("Cleaned Drinks Savefiles not found. Creating one")
clean_drinks()
Drinks = []
before = ("gin", "rum", "vodka", "tequila", "tonic", "coke", "orange juice", "grenadine", "mate", "cola")
after = ("gin", "rum", "vodka", "tequila", "tonic", "coke", "oj", "gren", "mate", "coke")
with open(Path("../Savefiles/drinks_C2.txt"), "r") as f:
items = json.loads(f.read())["items"]
for item in items:
Drink = {}
print(item["drink_name"])
Drink['name'] = item["drink_name"].strip()
Drink['color'] = "black"
full_ammount = 0
for ing in item["ingredients"]:
full_ammount = full_ammount + int(math.ceil(float(ing["ing_ammount"])))
multiplicator = float(200) / float(full_ammount)
Drink["recipe"] = []
for ing in item["ingredients"]:
Drink["recipe"].append({"name": after[before.index(ing["ing_name"])], "amt": int(float(ing["ing_ammount"]) * multiplicator)})
Drinks.append(Drink)
with open(Path("../Savefiles/Drinks.drk"), "w+") as f:
f.write(json.dumps({"Drinks": Drinks}, indent=4, sort_keys=True))
print("Cleaned JSON from cleaned Drink Data")
if __name__ == "__main__":
main()
|
the-stack_0_7259 | import io
from setuptools import setup
NAME = 'plex-lastfm-scrobbler'
VERSION = '4.1.1'
description = 'Scrobble audio tracks played via Plex Media Center'
try:
with io.open('README.rst', encoding="utf-8") as fh:
long_description = fh.read()
except IOError:
long_description = description
setup(
name='plex-scrobble',
version=VERSION,
author='Jesse Ward',
author_email='[email protected]',
description=description,
long_description=long_description,
license='MIT',
url='https://github.com/jesseward/plex-lastfm-scrobbler',
packages=['plex_scrobble'],
entry_points={
'console_scripts': [
'plex-scrobble = plex_scrobble.__main__:main'
]
},
install_requires=[
'click>=6.2',
'pylast>=1.6.0',
'toml>=0.9.1',
'requests>=2.12.0',
],
classifiers=[
'Environment :: Console',
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
],
)
|
the-stack_0_7260 | '''
MIT License
Copyright (c) 2021 Chen Guojun
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
bl_info = {
"name": "Grab Frame",
"author": "GuoJun Chen ([email protected])",
"version": (1, 0, 0),
"blender": (2, 80, 0),
"location": "3D View >Tool> Grab Frame",
"description": '''Use keyboard shortcuts to change the current frame by a specified number of frames.ctrl + left arrow : back frame,ctrl + right arrow : forward frame''',
"category": "Animation"}
from . import grab_frame
def register():
grab_frame.register()
def unregister():
grab_frame.unregister()
if __name__ == "__main__":
register()
|
the-stack_0_7261 | import numpy as np
import matplotlib.pyplot as plt
# set width of bar
barWidth = 0.25
# fig = plt.subplots(figsize =(12, 8))
# set height of bar
PDR=[0.633136094675,0.7,0.846153846154,0.990990990991,0.021822849807445]
Filter=[0.723032069970845,0.71,0.88,0.976909413854352,0.217672413793103]
# Set position of bar on X axis
br1 = np.arange(2,len(PDR)+2)
print(br1)
br2 = [x + barWidth for x in br1]
br3 = [x + barWidth for x in br2]
colors = iter([plt.cm.tab20(i) for i in range(20)])
# next(colors)
# next(colors)
# next(colors)
# plt.grid(color='#95a5a6', linestyle='--', linewidth=1, axis='y', alpha=0.5)
# Make the plot
plt.bar(br1, Filter, width = barWidth,
edgecolor ='black', label ='Control', color=[next(colors)])
next(colors)
plt.bar(br2, PDR, width = barWidth,
edgecolor ='black', label ='Data',color=[next(colors)],hatch = '/')
# plt.bar(br3, RPL, width = barWidth,
# edgecolor ='black', label ='RPL PDR [#]',color=[next(colors)],hatch = '/')
# Adding Xticks
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
plt.xlabel('Number of neighbors', fontweight ='bold', fontsize=15)
plt.ylabel('Reception Ratio [%]', fontweight ='bold', fontsize=15)
# plt.yticks(fontsize=15)
plt.legend(loc='upper center', bbox_to_anchor=(0.5, 1.1),
ncol=3, fancybox=True, shadow=True, fontsize=15)
plt.show()
RMSE=[1.43394533652394,1.44394533652394,1.45860840464733,1.47824637203261,2.45796016765343]
# fig = plt.subplots(figsize =(14, 8))
# plt.grid(color='#95a5a6', linestyle='--', linewidth=1, axis='y', alpha=0.5)
plt.bar(br1, RMSE, width = barWidth,
edgecolor ='black', label ='RMSE [m]', color=[next(colors)])
plt.xlabel('Number of neighbors', fontweight ='bold', fontsize=15)
plt.ylabel('Localization Error [m]', fontweight ='bold', fontsize=15)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
plt.show()
# fig = plt.subplots(figsize =(14, 8))
# plt.grid(color='#95a5a6', linestyle='--', linewidth=1, axis='y', alpha=0.5)
delay=[191.863,200.1,209.961,218.996, 861.0759]
plt.bar(br1, delay, width = barWidth,
edgecolor ='black', label ='E2E delay [ms]', color=[next(colors)])
plt.xlabel('Number of neighbors', fontweight ='bold', fontsize=15)
plt.ylabel('E2E delay [ms]', fontweight ='bold', fontsize=15)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
plt.show()
|
the-stack_0_7263 | #!/usr/bin/env python3
import os
import random # Discuss: random module
import sys
# Constants
# Discuss: set data structure
NSFW = {'bong', 'sodomized', 'kiss', 'head-in', 'telebears'}
# Main Execution
def main():
characters = [] # Discuss: os.popen
for index, line in enumerate(os.popen('cowsay -l')):
if not index: # Discuss: enumerate
continue
for character in line.split(): # Review: str.split
if character not in NSFW: # Review: searching collection
characters.append(character) # Review: list.append
selected = random.choice(characters)
os.system(f'cowsay -f {selected}') # Variant: check exist status
if __name__ == '__main__':
main()
|
the-stack_0_7266 | # Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import numpy as np
from coremltools.converters.mil.mil import types
from coremltools.converters.mil.mil.types import is_compatible_type
from coremltools.converters.mil.mil.types.symbolic import is_symbolic, any_symbolic
from . import SPACES
from .block import curr_block
from .input_type import TupleInputType, DefaultInputs
from .var import Var, InternalVar, ListVar
VALUE = 1
SYMBOL = 2
NONE = 4
ALL = 7
def _is_compatible_symbolic_array(a, b):
"""
A helper function which check if two numpy array with symbolic value.
For instance, a = np.array([is0, is2])
b = np.array([is1, 1])
are considered compatible.
a = np.array([is0, 1])
b = np.array([is1, -1])
are not.
"""
if not a.shape == b.shape:
return False
a = a.flatten()
b = b.flatten()
for t, v in zip(a, b):
if not is_symbolic(t) and not is_symbolic(v):
if t != v:
return False
return True
def precondition(allow=ALL):
"""
A helper decorator for value_inference method.
Decorate value_inference with parameter VALUE/SYMBOL/NONE or ALL.
For VALUE/SYMBOL/NONE use logical or ( | ) for multiple allowance.
Note that:
1. ALL == VALUE | SYMBOL | NONE
2. Chosen flag (some or all VALUE/SYMBOL/NONE) must be satisfied
       by EVERY INPUT for the precondition to be satisfied.
The meaning for each flag is:
VALUE: value that can be materialized during compile time
    SYMBOL: value that cannot be materialized but exists as a symbolic value
NONE: a None value
Usage:
@precondition(allow=VALUE|SYMBOL)
def value_inference(self):
'''some value_inference implementation'''
"""
ALLOW_VALUE = allow & VALUE
ALLOW_SYMBOL = allow & SYMBOL
ALLOW_NONE = allow & NONE
def process(v, has_value, has_symbol, has_none):
"""
v: Var
Return updated has_value, has_symbol, has_none
"""
if any_symbolic(v.sym_val):
return has_value, True, has_none
elif v.val is None:
return has_value, has_symbol, True
return True, has_symbol, has_none
def decorator(func):
def wrapper(self):
HAS_VALUE = False
HAS_SYMBOL = False
HAS_NONE = False
for in_name, in_type in self._input_types.items():
if in_type.optional:
# Optional inputs are not required to invoke value_inference()
continue
if isinstance(in_type, TupleInputType):
for v in self._input_vars[in_name]:
HAS_VALUE, HAS_SYMBOL, HAS_NONE = process(
v, HAS_VALUE, HAS_SYMBOL, HAS_NONE
)
else:
HAS_VALUE, HAS_SYMBOL, HAS_NONE = process(
self._input_vars[in_name], HAS_VALUE, HAS_SYMBOL, HAS_NONE
)
if HAS_VALUE and not ALLOW_VALUE:
msg = "Implementation of value_inference() for op {} doesn't support input with VALUE"
raise NotImplementedError(msg.format(self.op_type))
elif HAS_SYMBOL and not ALLOW_SYMBOL:
msg = "Implementation of value_inference() for op {} doesn't support input with SYMBOL"
raise NotImplementedError(msg.format(self.op_type))
elif HAS_NONE and not ALLOW_NONE:
msg = "Implementation of value_inference() for op {} doesn't support input with NONE"
raise NotImplementedError(msg.format(self.op_type))
else:
return func(self)
return wrapper
return decorator
def is_internal_input(arg_name):
return arg_name[0] == "_"
class mil_list(object):
'''
A wrapper around python list
'''
def __init__(self, ls=None):
self.ls = ls if ls is not None else []
if not isinstance(self.ls, list):
raise TypeError("Type of 'ls' must be list in the 'mil_list' class")
class Operation(object):
"""
Represents Operation in MIL.
# Properties
name (str):
The name of the operation
input_types (InputSpec, class attr):
Read-only named input types from all subclasses. Input types are used
to validate `inputs`.
inputs [_input_vars] (dict of str --> Var):
An Operation (subclass of Operation) only has access to input Var,
which is already validated against `input_spec`.
outputs [_output_vars] (list of Var):
List of output var based on type inference. Read-only
"""
def __init__(self, **kwargs):
self._input_types = self.input_spec.input_types
self.name = kwargs.get("name", None)
self._output_vars = None
self._input_vars = {}
self.blocks = []
self.enclosing_block = curr_block()
# Initialize inputs as object attributes (all None)
for k in self._input_types.keys():
setattr(self, k, None)
self._input_vars[k] = None
self._check_expected_inputs(kwargs)
# Set inputs from kwargs
input_kv = {k: v for k, v in kwargs.items() \
if k in self._input_types and v is not None}
self._validate_and_set_inputs(input_kv)
self._ensure_required_inputs()
def _check_expected_inputs(self, kwargs):
"""
Check that all kwargs inputs are one of the followings:
- system inputs (non-attributes)
- op inputs (self._input_types.keys())
"""
non_attributes = [
"name",
"symbolic_datatype",
"datatype",
"symbolic_value",
"value",
"version",
"before_op",
"no_check_var_visibility", # no_check_var_visibility==True to deviate from SSA
"no_check_var_types", # no_check_var_types==True to force set inputs, even if type does not match with earlier ones
]
for k in kwargs.keys():
if k not in non_attributes and k not in self._input_types:
raise ValueError(
"Unknown input '{}' for op '{}'".format(
k, self.op_type)
)
def set_inputs(self,
no_check_var_types=False,
type_inference=False,
**input_kvs):
"""
Parameters
----------
- input_kvs: Dict[str, Var]
Value cannot be None
- type_inference: bool
True to perform type inference and recreate output Var.
"""
self._validate_and_set_inputs(input_kvs,
no_check_var_types=no_check_var_types)
if type_inference and not no_check_var_types:
self.type_value_inference()
self._ensure_required_inputs()
def get_flattened_inputs(self):
"""
Returns:
list[Var]. Flatten all tuple inputs
"""
flat_inputs = []
for v in self.inputs.values():
if isinstance(v, (list, tuple)):
flat_inputs.extend(v)
else:
flat_inputs.append(v)
return flat_inputs
def type_value_inference(self, overwrite_output=False):
"""
Perform type inference and auto_val computation based on new input Vars
in kwargs. If self._output_vars is None then we generate _output_vars;
otherwise no new Var is created, but type inference result is verified
against existing _output_vars, if overwrite_output is False.
If overwrite_output is True, then the type inference result overwrites the
existing _output_vars
"""
output_types = self.type_inference()
if not isinstance(output_types, tuple):
output_types = (output_types,)
output_vals = self._auto_val(output_types)
try:
output_names = self.output_names()
if not isinstance(output_names, tuple):
output_names = (output_names,)
except NotImplementedError as e:
if len(output_types) > 1:
output_names = tuple(str(i) for i, _ in enumerate(output_types))
else:
output_names = ("",) # output name same as op name.
# Combine (output_names, output_types, output_vals) to create output
# Vars.
if self._output_vars is None:
self._output_vars = []
for i, (n, sym_type, sym_val) in enumerate(
zip(output_names, output_types, output_vals)
):
name = self.name + ":" + n if n != "" else self.name
if types.is_list(sym_type):
new_var = ListVar(
name,
elem_type=sym_type.T[0],
init_length=sym_type.T[1],
dynamic_length=sym_type.T[2],
sym_val=sym_val if (sym_val is not None and isinstance(sym_val.val, list)) else None,
op=self,
op_output_idx=i,
)
else:
new_var = Var(name, sym_type, sym_val, op=self, op_output_idx=i)
self._output_vars.append(new_var)
else:
# Check new inference result against existing self._output_vars.
for i, (n, sym_type, sym_val) in enumerate(
zip(output_names, output_types, output_vals)
):
out_var = self._output_vars[i]
# Check type inference
if overwrite_output:
out_var._sym_type = sym_type
elif not is_compatible_type(sym_type, out_var.sym_type):
msg = "Output Var {} in op {} type changes with new input Vars"
raise ValueError(msg.format(out_var.name, self.name))
# Check value inference
if overwrite_output:
out_var._sym_val = sym_val
if sym_val is not None and out_var.sym_val is not None:
if np.any(sym_val.val != out_var.sym_val):
if overwrite_output:
out_var._sym_val = sym_val
else:
msg = 'value_inference differs for var {} in op {}'
if not _is_compatible_symbolic_array(sym_val.val, out_var.sym_val):
raise ValueError(msg.format(out_var.name, self.name))
def _auto_val(self, output_types):
"""
# Evaluation is two stage:
#
# Stage 1: Check whether the method value_inference() is implemented
#
# Stage 2: Check if there's an value_inference() implementation
# for given input types.
#
# Suppose input are all SYMBOL:
# Case 1: No value_inference() implemented => fail at stage 1
# Case 2: If value_inference() implemented, but requires all VALUE not
# SYMBOL => fail at stage 2
# Case 3: If value_inference() implemented, and has no restriction on
# input types => Success
#
# If either stage fails, outputs[i].val is None.
# Otherwise, output[i].sym_val is not None.
output_types: tuple of builtin types
Returns:
output_vals: tuple of builtin type with value, or tuple of None
"""
do_auto_val = True
if do_auto_val:
# Is self.value_inference implemented for corresponding input?
try:
vals = self.value_inference()
except NotImplementedError as e:
do_auto_val = False
if not do_auto_val:
# No auto_val possible.
return tuple(None for _ in output_types)
if not isinstance(vals, (tuple, list)):
vals = (vals,)
for val in vals:
if val is None:
do_auto_val = False
if not do_auto_val:
# No auto_val possible.
return tuple(None for _ in output_types)
auto_val = []
for t, v in zip(output_types, vals):
builtin_val = t()
if isinstance(v, mil_list):
builtin_val.val = v.ls
else:
builtin_val.val = v
auto_val.append(builtin_val)
return auto_val
def value_inference(self):
"""
Optional Python implementation of the op based on (materialized) values
in `self.input_var`. Return a builtin value (single output) or a tuple of
builtin values (multi-outputs) of the same length as returned by `
type_inference`
"""
msg = "value_inference() is not implemented by op {}"
raise NotImplementedError(msg.format(self.op_type))
def default_inputs(self):
"""
Optional. Returns default values for optional inputs. The
function is guaranteed to have access to all required inputs and
possibly some optional inputs should the user supply them.
They may be used to construct default values, such as
`strides=[1]*num_spatial_dims` in conv, where
`num_spatial_dims` may be inferred from the rank of
required inputs
"""
return DefaultInputs()
def output_names(self):
"""
Optional. If implemented, we set the output var i name as
self.name + "/" + output_names[i]
Returns a string (single output) or tuple of strings
"""
msg = "output_names() is not implemented by op {}"
raise NotImplementedError(msg.format(self.op_type))
def type_inference(self):
"""
Return (builtin_type, builtin_val) pair from type inference.
builtin_val may be None if symbolic_value is not attainable at compile
time.
"""
raise NotImplementedError("This function must be implemented by each op")
def build_nested_blocks(self):
"""
Build nested blocks (for cond and while_loop and other composite
blocks)
"""
pass
def _ensure_required_inputs(self):
"""
Raise value error if required inputs aren't present
"""
for name, input_type in self._input_types.items():
if not input_type.optional and \
self._input_vars[name] is None:
msg_prefix = 'Op \"{}\" (op_type: {}) '.format(self.name,
self.op_type)
raise ValueError(msg_prefix + \
"Required input {} is missing".format(name))
def _validate_and_set_inputs(self, input_kvs,
no_check_var_types=False):
"""
        For each k, v in `input_kvs`, perform the following:
        - Check k exists in `self.input_specs`
        - Check that v satisfies the corresponding `InputType`
- Set input, possibly replacing existing input.
Note that it does not ensure all required inputs are satisfied.
Use _ensure_required_inputs() for that.
Parameters
----------
- input_kvs: Dict[str, Var]
Each key in input_kvs must exist in `self.input_specs`. Its values
must be a Var.
- no_check_var_types: bool
True to check var types against input_specs only, but not
enforcing new input vars to be a subtype of existing input vars
"""
for key in input_kvs.keys():
if key not in self._input_types:
raise RuntimeError(
"Unknown input '{}' for op '{}'".format(key, self.op_type)
)
def check_and_detach(v_new, v_old, op, no_check_var_types):
# Check new var's sym_type is compatible with the
# existing's sym_type.
if (
not is_compatible_type(v_new.sym_type, v_old.sym_type)
and not no_check_var_types
):
msg = "New var type {} not a subtype of " + "existing var type {}"
raise ValueError(msg.format(v_new.sym_type, v_old.sym_type))
v_old.remove_child_op(op, no_check_var_types)
self.input_spec.validate_inputs(self.name, self.op_type, input_kvs)
for name, var in input_kvs.items():
# TODO: remove InternalVar check
#if not isinstance(var, InternalVar):
# Remove this operation itself from existing input
# Var's child_ops
existing_input_var = self._input_vars[name]
if existing_input_var is not None:
if isinstance(existing_input_var, (list, tuple)):
for v_old, v_new in zip(existing_input_var, var):
check_and_detach(v_new, v_old, self, no_check_var_types)
else:
check_and_detach(
var, existing_input_var, self, no_check_var_types
)
# Set var as input_var
if isinstance(var, Var):
var.add_child_op(self)
elif isinstance(var, (tuple, list)):
for v in var:
v.add_child_op(self)
# ignore function inputs
self._input_vars[name] = var
setattr(self, name, var)
@property
def inputs(self):
"""
Returns
-------
- inputs: Dict[str, Union[Var, Tuple[Var]]]
"""
# Filter out InternalVar
return {k: v for k, v in self._input_vars.items() if not
isinstance(v, InternalVar) and v is not None}
@property
def outputs(self):
return self._output_vars
@property
def op_type(self):
return type(self).__name__
def remove_from_block(self):
"""
Remove / detach itself from the enclosing block. See Block.remove_ops
for details.
"""
self.enclosing_block.remove_ops([self])
@staticmethod
def var_to_str(v):
if isinstance(v, (tuple, list)):
return "(" + ", ".join(["%" + s.name for s in v]) + ")"
else:
return "%" + v.name
def indented_str(self, indent=""):
s = indent
if self.outputs is not None:
s += ", ".join([str(o) for o in self.outputs])
s += " = " + self.op_type + "("
if self.op_type == "const":
if self.mode.val == "immediate_value":
if isinstance(self.val.sym_val, (np.generic, np.ndarray)):
val_str = str(self.val.sym_val.tolist())
else:
val_str = (
'"' + self.val.sym_val + '"'
if isinstance(self.val.sym_val, str)
else str(self.val.sym_val)
)
s += "val=" + val_str
else:
s += "val=(file_value)"
else:
s += ", ".join(
[
k + "=" + Operation.var_to_str(self.inputs[k])
for k in self._input_types.keys()
if k in self.inputs and not is_internal_input(k)
]
)
s += ', name="{}")\n'.format(self.name)
for b in self.blocks:
s += b.indented_str(indent=indent + SPACES)
return s
def __repr__(self):
return str(self)
def __str__(self):
return self.indented_str(SPACES)
|
the-stack_0_7267 | import sqlite3
from abc import abstractmethod
from ipaddress import IPv4Address
from ipaddress import IPv6Address as IPv6AddressPython
from typing import (Callable, FrozenSet, Generic, Optional, Set, Sized,
TypeVar, Union)
from .blockchain import Miner, Node, Version
from .db import Cursor, Database, ForeignKey, Model, Table
from .geolocation import Geolocation
from .serialization import DateTime, IntFlag, IPv6Address
N = TypeVar("N", bound=Node)
class HostInfo(Model):
ip: IPv6Address
isp: str
os: str
timestamp: DateTime
def __hash__(self):
return hash(self.ip)
class CrawlState(IntFlag):
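    """Bit flags recording how far the crawler has progressed with a node.
    Later states are OR-combinations of earlier ones, so e.g. CONNECTED
    implies ATTEMPTED_CONNECTION, which in turn implies DISCOVERED.
    """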
UNKNOWN = 0
DISCOVERED = 1
GEOLOCATED = 2
ATTEMPTED_CONNECTION = DISCOVERED | 4
CONNECTION_FAILED = ATTEMPTED_CONNECTION | 8
CONNECTED = ATTEMPTED_CONNECTION | 16
CONNECTION_RESET = CONNECTED | 32
REQUESTED_NEIGHBORS = CONNECTED | 64
GOT_NEIGHBORS = REQUESTED_NEIGHBORS | 128
REQUESTED_VERSION = CONNECTED | 256
GOT_VERSION = REQUESTED_VERSION | 512
class CrawledNode(Model["CrawlDatabase"]):
ip: IPv6Address
port: int
is_miner: Miner
state: CrawlState
source: str
def __hash__(self):
return hash((self.ip, self.port))
def get_events(self) -> Cursor["CrawlEvent"]:
return self.db.events.select(
node=self.rowid, order_by="timestamp", order_direction="DESC"
)
def get_version(self) -> Optional[Version]:
for version_event in self.db.events.select(
node=self.rowid,
order_by="timestamp",
order_direction="DESC",
limit=1,
event="version",
):
return Version(version_event.description, version_event.timestamp)
return None
def get_location(self) -> Optional[Geolocation]:
return self.db.locations.select(
ip=self.ip, order_by="timestamp DESC", limit=1
).fetchone()
def last_crawled(self) -> Optional[DateTime]:
max_edge = Cursor(
self.db.edges,
"SELECT a.* FROM edges a LEFT OUTER JOIN edges b ON a.rowid = b.rowid AND a.timestamp < b.timestamp "
"WHERE b.rowid is NULL AND a.from_node = ? LIMIT 1",
(self.rowid,),
).fetchone()
if max_edge is None:
return None
return max_edge.timestamp
def get_latest_edges(self) -> Set["CrawledNode"]:
return {
edge.to_node.row
for edge in Cursor(
self.db.edges,
"SELECT a.* FROM edges a LEFT OUTER JOIN edges b ON a.rowid = b.rowid AND a.timestamp < b.timestamp "
"WHERE b.rowid is NULL AND a.from_node = ?",
(self.rowid,),
)
}
def out_degree(self) -> int:
cur = self.db.con.cursor()
try:
result = cur.execute(
"SELECT count(a.*) FROM edges a "
"LEFT OUTER JOIN edges b ON a.rowid = b.rowid AND a.timestamp < b.timestamp "
"WHERE b.rowid is NULL AND a.from_node = ?",
(self.rowid,),
)
return result.fetchone()[0]
finally:
cur.close()
class Edge(Model):
from_node: ForeignKey["nodes", CrawledNode] # noqa: F821
to_node: ForeignKey["nodes", CrawledNode] # noqa: F821
timestamp: DateTime
class CrawlEvent(Model):
node: ForeignKey["nodes", CrawledNode] # noqa: F821
timestamp: DateTime
event: str
description: str
class CrawlDatabase(Database):
nodes: Table[CrawledNode]
events: Table[CrawlEvent]
locations: Table[Geolocation]
edges: Table[Edge]
hosts: Table[HostInfo]
def __init__(self, path: str = ":memory:"):
super().__init__(path)
@property
def crawled_nodes(self) -> Cursor[CrawledNode]:
return Cursor(
self.nodes,
f"SELECT DISTINCT n.*, n.rowid FROM {self.nodes.name} n WHERE n.state >= ?",
(CrawlState.CONNECTED,),
)
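# Minimal usage sketch (illustrative only; "crawl.db" is an assumed path, and the
# Table/Cursor behaviour comes from the .db helpers imported above):
#
#   db = CrawlDatabase("crawl.db")
#   for node in db.crawled_nodes:
#       print(node.ip, node.port, node.state)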
class Crawl(Generic[N], Sized):
@abstractmethod
def __contains__(self, node: N) -> bool:
raise NotImplementedError()
@abstractmethod
def __getitem__(self, node: N) -> CrawledNode:
raise NotImplementedError()
@abstractmethod
def get_node(self, node: N) -> CrawledNode:
raise NotImplementedError()
@abstractmethod
def add_event(
self,
node: CrawledNode,
event: str,
description: str,
timestamp: Optional[DateTime] = None,
):
raise NotImplementedError()
@abstractmethod
def set_location(self, ip: IPv6Address, location: Geolocation):
raise NotImplementedError()
@abstractmethod
def get_neighbors(self, node: N) -> FrozenSet[N]:
raise NotImplementedError()
@abstractmethod
def set_neighbors(self, node: N, neighbors: FrozenSet[N]):
raise NotImplementedError()
@abstractmethod
def set_miner(self, node: N, miner: Miner):
raise NotImplementedError()
@abstractmethod
def set_host_info(self, host_info: HostInfo):
raise NotImplementedError()
@abstractmethod
def add_state(self, node: Union[N, CrawledNode], state: CrawlState):
raise NotImplementedError()
@abstractmethod
def update_node(self, node: CrawledNode):
raise NotImplementedError()
def commit(self):
pass
class DatabaseCrawl(Generic[N], Crawl[N]):
def __init__(
self,
constructor: Callable[[Union[str, IPv4Address, IPv6AddressPython], int], N],
db: CrawlDatabase,
):
super().__init__()
self.constructor: Callable[
[Union[str, IPv4Address, IPv6AddressPython], int], N
] = constructor
self.db: CrawlDatabase = db
def __contains__(self, node: N) -> bool:
return self.db.nodes.select(ip=node.ip, port=node.port).fetchone() is not None
def __getitem__(self, node: N) -> CrawledNode:
try:
return next(iter(self.db.nodes.select(ip=node.address, port=node.port)))
except StopIteration:
pass
raise KeyError(node)
def commit(self):
self.db.con.commit()
def get_node(self, node: N) -> CrawledNode:
try:
return self[node]
except KeyError:
# this is a new node
pass
ret = CrawledNode(ip=node.address, port=node.port, source=node.source)
self.db.nodes.append(ret)
return ret
def update_node(self, node: CrawledNode):
with self.db:
self.db.nodes.update(node)
def add_event(
self,
node: CrawledNode,
event: str,
description: str,
timestamp: Optional[DateTime] = None,
):
with self.db:
if timestamp is None:
timestamp = DateTime()
self.db.events.append(
CrawlEvent(
node=node.rowid,
event=event,
description=description,
timestamp=timestamp,
)
)
def get_neighbors(self, node: N) -> FrozenSet[N]:
return frozenset(
{
self.constructor(neighbor.ip, neighbor.port)
for neighbor in self.get_node(node).get_latest_edges()
}
)
def set_neighbors(self, node: N, neighbors: FrozenSet[N]):
with self.db:
crawled_node = self.get_node(node)
timestamp = DateTime()
self.db.edges.extend(
[
Edge(
from_node=crawled_node,
to_node=self.get_node(neighbor),
timestamp=timestamp,
)
for neighbor in neighbors
]
)
self.add_state(node, CrawlState.GOT_NEIGHBORS)
for neighbor in neighbors:
# Make sure we record that we discovered the neighbor
_ = self.get_node(neighbor)
# (simply getting the node for the neighbor will ensure that its state's "discovered" flag is set)
def set_location(self, ip: IPv6Address, location: Geolocation):
with self.db:
self.db.locations.append(location)
def set_miner(self, node: N, miner: Miner):
with self.db:
crawled_node = self.get_node(node)
crawled_node.is_miner = miner
self.db.nodes.update(crawled_node)
def set_host_info(self, host_info: HostInfo):
with self.db:
self.db.hosts.append(host_info)
def add_state(self, node: Union[N, CrawledNode], state: CrawlState):
with self.db:
if isinstance(node, CrawledNode):
crawled_node = node
else:
crawled_node = self.get_node(node)
if crawled_node.state & state != state:
crawled_node.state = crawled_node.state | state
self.db.nodes.update(crawled_node)
def __len__(self) -> int:
return len(self.db.nodes)
|
the-stack_0_7268 | from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import division
import numpy as np
from scipy.constants import mu_0, pi, epsilon_0
from scipy.special import erf
from SimPEG import Utils
def Qfun(R, L, f, alpha=None):
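    """
    Response function of a loop with resistance R and inductance L at frequency f:
    alpha = omega * L / R (the induction number) and
    Q = (alpha**2 + 1j*alpha) / (1 + alpha**2). Returns (alpha, Q).
    """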
if alpha is None:
omega = np.pi*2*f
tau = L/R
alpha = omega*tau
Q = (alpha**2+1j*alpha) / (1+alpha**2)
return alpha, Q
def Mijfun(x,y,z,incl,decl,x1,y1,z1,incl1,decl1, area=1.,area0=1.):
"""
    Compute mutual inductance between two loops.
    This uses a magnetic dipole approximation for both loops.
Parameters
----------
x : array
x location of the Tx loop
y : array
y location of the Tx loop
z : array
z location of the Tx loop
incl:
XXX
decl:
XXX
x1 : array
XXX
y1 : array
XXX
z1 : array
XXX
incl1:
XXX
decl1:
XXX
"""
# Pretty sure below assumes dipole
x = np.array(x, dtype=float)
y = np.array(y, dtype=float)
z = np.array(z, dtype=float)
x1 = np.array(x1, dtype=float)
y1 = np.array(y1, dtype=float)
z1 = np.array(z1, dtype=float)
incl = np.array(incl, dtype=float)
decl = np.array(decl, dtype=float)
incl1 = np.array(incl1, dtype=float)
decl1 = np.array(decl1, dtype=float)
di=np.pi*incl/180.0
dd=np.pi*decl/180.0
cx=np.cos(di)*np.cos(dd)
cy=np.cos(di)*np.sin(dd)
cz=np.sin(di)
ai=np.pi*incl1/180.0
ad=np.pi*decl1/180.0
ax=np.cos(ai)*np.cos(ad)
ay=np.cos(ai)*np.sin(ad)
az=np.sin(ai)
# begin the calculation
a=x-x1
b=y-y1
h=z-z1
rt=np.sqrt(a**2.+b**2.+h**2.)**5.
txy=3.*a*b/rt
txz=3.*a*h/rt
tyz=3.*b*h/rt
txx=(2.*a**2.-b**2.-h**2.)/rt
tyy=(2.*b**2.-a**2.-h**2.)/rt
tzz=-(txx+tyy)
scale = mu_0*np.pi*area*area0/4
# scale = 1.
bx= (txx*cx+txy*cy+txz*cz)
by= (txy*cx+tyy*cy+tyz*cz)
bz= (txz*cx+tyz*cy+tzz*cz)
return scale*(bx*ax+by*ay+bz*az)
def Cfun(L,R,xc,yc,zc,incl,decl,S,ht,f,xyz):
"""
Compute coupling coefficients
.. math::
- \frac{M_{12} M_{23}}{M_{13}L_2}
Parameters
----------
"""
L = np.array(L, dtype=float)
R = np.array(R, dtype=float)
xc = np.array(xc, dtype=float)
yc = np.array(yc, dtype=float)
zc = np.array(zc, dtype=float)
incl = np.array(incl, dtype=float)
decl = np.array(decl, dtype=float)
S = np.array(S, dtype=float)
f = np.array(f, dtype=float)
# This is a bug, hence needs to be fixed later
x = xyz[:,1]
y = xyz[:,0]
z = xyz[:,2]
# simulate anomalies
yt=y-S/2.
yr=y+S/2.
dm=-S/2.
dp= S/2.
# Computes mutual inducances
# Mijfun(x,y,z,incl,decl,x1,y1,z1,incl1,decl1)
M13=Mijfun(0.,dm,0.,90.,0., 0., dp, 0., 90.,0.)
M12=Mijfun(x,yt,z,90.,0.,xc,yc,zc,incl,decl,area=1.,area0=3.)
M23=Mijfun(xc,yc,zc,incl,decl,x,yr,z,90.,0.,area=3.,area0=1.)
C = -M12*M23/(M13*L)
return C, M12, M23, M13*np.ones_like(C)
if __name__ == '__main__':
out = Mijfun(0., 0., 0., 0., 0., 10., 0, 0., 0., 0.)
anal = mu_0*np.pi / (2*10**3)
err = abs(out-anal)
print(err)
showIt = False
import matplotlib.pyplot as plt
f = np.logspace(-3, 3, 61)
alpha, Q = Qfun(1., 0.1, f)
if showIt:
plt.semilogx(alpha, Q.real)
plt.semilogx(alpha, Q.imag)
plt.show()
L = 1.
R = 2000.
xc = 0.
yc = 0.
zc = 2.
incl = 0.
decl = 90.
S = 4.
ht = 0.
f = 10000.
xmin = -10.
xmax = 10.
dx = 0.25
xp = np.linspace(xmin, xmax, 101)
yp = xp.copy()
zp = np.r_[-ht]
[Y, X] = np.meshgrid(yp, xp)
xyz = np.c_[X.flatten(), Y.flatten(), np.ones_like(X.flatten())*ht]
C, M12, M23, M13 = Cfun(L,R,xc,yc,zc,incl,decl,S,ht,f,xyz)
[Xp, Yp] = np.meshgrid(xp, yp)
if showIt:
plt.contourf(X, Y, C.reshape(X.shape), 100)
plt.show()
# xyz = np.c_[xp, np.zeros_like(yp), np.zeros_like(yp)]
# C, M12, M23, M13 = Cfun(L,R,xc,yc,zc,incl,decl,S,ht,f,xyz)
# plt.plot(xp, C, 'k')
# plt.plot(xp, M12, 'b')
# plt.plot(xp, M23, 'g')
# plt.plot(xp, M13, 'r')
# plt.show()
|
the-stack_0_7269 | import inspect
class ProblemSizeCounter:
def __init__ (self, J, F, L, M, P):
self._initNumberOfVariables(J, F, L, M, P)
self._initNumberOfConstraints(J, F, L, M, P)
def _initNumberOfVariables(self, J, F, L, M, P):
self.numberOfVariablesX = P * L * F
self.numberOfVariablesY = P * F * J
self.totalNumberOfVariables = self.numberOfVariablesX + self.numberOfVariablesY
def _initNumberOfConstraints(self, J, F, L, M, P):
self.numberOfDemandConstraints = J * P
self.numberOfMachineCapacityConstraints = L * F
self.numberOfVariablesCompatibilityConstraints = P * F
self.numberOfResourcesConstraints = M * F
self.totalNumberOfConstraints = self.numberOfDemandConstraints \
+ self.numberOfMachineCapacityConstraints \
+ self.numberOfVariablesCompatibilityConstraints \
+ self.numberOfResourcesConstraints
def __str__ (self):
attributesToPrint = [
"numberOfVariablesX",
"numberOfVariablesY",
"totalNumberOfVariables",
"numberOfDemandConstraints",
"numberOfMachineCapacityConstraints",
"numberOfVariablesCompatibilityConstraints",
"numberOfResourcesConstraints",
"totalNumberOfConstraints"
]
string = "ProblemSizeCounter[\n"
for attribute in attributesToPrint:
value = getattr(self, attribute)
string += f"\t{attribute} = {value}\n"
string += "]"
return string
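# Illustrative usage sketch (not part of the original module): instantiate the
# counter for a small, arbitrary instance and print the derived problem sizes.
if __name__ == "__main__":
    demo_counter = ProblemSizeCounter(J=3, F=2, L=4, M=5, P=6)
    print(demo_counter)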
|
the-stack_0_7271 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class JeuxVideoIE(InfoExtractor):
_VALID_URL = r'https?://.*?\.jeuxvideo\.com/.*/(.*?)\.htm'
_TESTS = [{
'url': 'http://www.jeuxvideo.com/reportages-videos-jeux/0004/00046170/tearaway-playstation-vita-gc-2013-tearaway-nous-presente-ses-papiers-d-identite-00115182.htm',
'md5': '046e491afb32a8aaac1f44dd4ddd54ee',
'info_dict': {
'id': '114765',
'ext': 'mp4',
'title': 'Tearaway : GC 2013 : Tearaway nous présente ses papiers d\'identité',
'description': 'Lorsque les développeurs de LittleBigPlanet proposent un nouveau titre, on ne peut que s\'attendre à un résultat original et fort attrayant.',
},
}, {
'url': 'http://www.jeuxvideo.com/videos/chroniques/434220/l-histoire-du-jeu-video-la-saturn.htm',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
title = mobj.group(1)
webpage = self._download_webpage(url, title)
title = self._html_search_meta('name', webpage) or self._og_search_title(webpage)
config_url = self._html_search_regex(
r'data-src(?:set-video)?="(/contenu/medias/video.php.*?)"',
webpage, 'config URL')
config_url = 'http://www.jeuxvideo.com' + config_url
video_id = self._search_regex(
r'id=(\d+)',
config_url, 'video ID')
config = self._download_json(
config_url, title, 'Downloading JSON config')
formats = [{
'url': source['file'],
'format_id': source['label'],
'resolution': source['label'],
} for source in reversed(config['sources'])]
return {
'id': video_id,
'title': title,
'formats': formats,
'description': self._og_search_description(webpage),
'thumbnail': config.get('image'),
}
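# Illustrative usage sketch (not part of the original file): inside a
# youtube-dl style checkout this extractor is exercised through YoutubeDL,
# e.g. (URL taken from the test case above):
#   from youtube_dl import YoutubeDL
#   with YoutubeDL() as ydl:
#       info = ydl.extract_info(
#           'http://www.jeuxvideo.com/videos/chroniques/434220/l-histoire-du-jeu-video-la-saturn.htm',
#           download=False)
#       print(info.get('title'))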
|
the-stack_0_7272 | class ConfigStruct(object):
def __init__(self):
# Location of the star catalog
self.CAL_DIR = '../data/'
self.CAL_NAME = 'gaia_dr2_mag_11.5.npy'
# Location of the MPC data
self.OURS_DIR = '../data/'
self.OURS_NAME = 'mpc_data.txt'
# Location of finder data
self.FINDER_DIR = '../data/'
self.FINDER_NAME = '2019-01-02.txt'
# Location of the query results
self.QUERY_DIR = '../data/'
self.QUERY_NAME = 'query_results.txt'
# Where the RAW imaging coordinates are saved
self.SAVE_DIR = '../data/'
self.SAVE_NAME = 'saved_coordinates.txt'
# Where the imaging coordinates IN TELESCOPE FORMAT are saved
self.FINAL_DIR = '../data/'
self.FINAL_NAME = 'saved_coord_telescope.txt'
# Ask the user for limiting magnitude and FOV size
self.LIM_MAG = 11.5
self.X_SPAN = 0.7317
self.Y_SPAN = 0.7317
config = ConfigStruct() |
the-stack_0_7275 | import secrets
def _get_header(token):
return f'''
rule encoding_geary_{token}:'''
def _get_benchmark(benchmark_out):
return f'''
benchmark:
"{benchmark_out}"'''
def _get_main(fasta_in, classes_in, length_in, geary_out):
return f'''
input:
fasta_in="{fasta_in}",
classes_in="{classes_in}",
length_in="{length_in}"
output:
csv_out={geary_out}
threads:
1000
params:
snakefile="nodes/encodings/geary/Snakefile",
configfile="nodes/encodings/geary/config.yaml"
run:
with WorkflowExecuter(dict(input), dict(output), params.configfile, cores=CORES) as e:
shell(f"""{{e.snakemake}} -s {{params.snakefile}} --configfile {{params.configfile}}""")
'''
def rule(fasta_in, classes_in, length_in, geary_out, benchmark_dir=None):
"""
Computes the Geary correlation encoding.
Category: encodings \n
Node: geary
:param fasta_in: The path to the fasta file.
:param classes_in: The path to the classes file.
:param length_in: The path to the file, containing the allowed parameter space.
:param geary_out: A list of output file paths to store the encoded datasets.
:param benchmark_dir: The path to the directory to store the benchmark results. If None,
benchmark will be not executed (default).
:return: A string object representing a Snakemake rule.
"""
token = secrets.token_hex(4)
rule = _get_header(token)
if benchmark_dir is not None:
benchmark_out = f"{benchmark_dir}encoding_geary_{token}.txt"
rule += _get_benchmark(benchmark_out)
rule += _get_main(fasta_in, classes_in, length_in, geary_out)
return rule
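# Illustrative usage sketch (hypothetical file paths; not part of the original
# module): build the Snakemake rule text for a single dataset and print it.
if __name__ == "__main__":
    demo_rule = rule(
        fasta_in="data/demo/seqs.fasta",
        classes_in="data/demo/classes.txt",
        length_in="data/demo/lengths.yaml",
        geary_out=["data/demo/csv/geary/geary_demo.csv"],
        benchmark_dir=None,
    )
    print(demo_rule)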
|
the-stack_0_7276 | from copy import deepcopy
import setpath
import vtbase
import functions
import heapq
### Classic stream iterator
registered=True
class StreamIntersect(vtbase.VT):
def BestIndex(self, constraints, orderbys):
return (None, 0, None, True, 1000)
def VTiter(self, *parsedArgs,**envars):
largs, dictargs = self.full_parse(parsedArgs)
if len(largs) < 1:
raise functions.OperatorError(__name__.rsplit('.')[-1],"Not defined union tables ")
streams = str(largs[0]).split(",")
if len(streams) < 2:
raise functions.OperatorError(__name__.rsplit('.')[-1],"Union tables must be more than one ")
cursors = []
execs = []
for stream in streams:
cursors.append(envars['db'].cursor())
execs.append(cursors[-1].execute("select * from " + str(stream) + ";"))
comparedcursor = str(cursors[0].getdescriptionsafe())
# for cursor in cursors:
# if str(cursor.getdescriptionsafe()) != comparedcursor:
# raise functions.OperatorError(__name__.rsplit('.')[-1],"Union tables with different schemas ")
if 'cols' in dictargs:
try:
cols = int(dictargs['cols'])
except ValueError:
try:
cols = [y[0] for y in cursors[0].getdescriptionsafe()].index(dictargs['cols'])
except ValueError:
raise functions.OperatorError(__name__.rsplit('.')[-1],"Column name does not exists ")
else:
cols=0
if cols >= len(cursors[0].getdescriptionsafe()):
raise functions.OperatorError(__name__.rsplit('.')[-1],"Column position does not exists ")
        # Tag each row with (join-key, stream-index + row) so that the merged
        # output can be grouped per input stream; only the first five streams
        # are handled explicitly.
        for x in range(0, len(streams)):
            if x == 0:
                execs[0] = ((v[cols], (0,) + v) for v in execs[0])
            elif x == 1:
                execs[1] = ((v[cols], (1,) + v) for v in execs[1])
            elif x == 2:
                execs[2] = ((v[cols], (2,) + v) for v in execs[2])
            elif x == 3:
                execs[3] = ((v[cols], (3,) + v) for v in execs[3])
            elif x == 4:
                execs[4] = ((v[cols], (4,) + v) for v in execs[4])
try:
yield list(cursors[0].getdescriptionsafe())
except StopIteration:
try:
raise
finally:
try:
for cur in cursors:
cur.close()
except:
pass
currentgroup = None
lists = [[]] * len(streams)
for k, v in heapq.merge(*execs):
if currentgroup is None or currentgroup != k:
for t in set(lists[0]).intersection(*lists[1:]):
yield t
lists = [[]] * len(streams)
lists[v[0]] = lists[v[0]] + [tuple(v[1:])]
currentgroup = k
for t in set(lists[0]).intersection(*lists[1:]):
yield t
def Source():
return vtbase.VTGenerator(StreamIntersect)
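# Illustrative usage sketch (not part of the original module; table and column
# names are hypothetical and the exact call syntax follows madIS conventions):
# the virtual table merges its key-sorted input streams and emits only the
# rows that appear in every stream, e.g. roughly
#   select * from streamintersect('table1,table2,table3', 'cols:userid');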
if not ('.' in __name__):
"""
This is needed to be able to test the function, put it at the end of every
new function you create
"""
import sys
import setpath
from functions import *
testfunction()
if __name__ == "__main__":
reload(sys)
sys.setdefaultencoding('utf-8')
import doctest
doctest.testmod()
|
the-stack_0_7279 | from sympy import (
Rational, Symbol, N, I, Abs, sqrt, exp, Float, sin,
cos, symbols)
from sympy.matrices import eye, Matrix
from sympy.matrices.matrices import MatrixEigen
from sympy.matrices.common import _MinimalMatrix, _CastableMatrix
from sympy.core.singleton import S
from sympy.testing.pytest import raises, XFAIL
from sympy.matrices.matrices import NonSquareMatrixError, MatrixError
from sympy.simplify.simplify import simplify
from sympy.matrices.immutable import ImmutableMatrix
from sympy.testing.pytest import slow
class EigenOnlyMatrix(_MinimalMatrix, _CastableMatrix, MatrixEigen):
pass
def test_eigen():
R = Rational
M = Matrix.eye(3)
assert M.eigenvals(multiple=False) == {S.One: 3}
assert M.eigenvals(multiple=True) == [1, 1, 1]
assert M.eigenvects() == (
[(1, 3, [Matrix([1, 0, 0]),
Matrix([0, 1, 0]),
Matrix([0, 0, 1])])])
assert M.left_eigenvects() == (
[(1, 3, [Matrix([[1, 0, 0]]),
Matrix([[0, 1, 0]]),
Matrix([[0, 0, 1]])])])
M = Matrix([[0, 1, 1],
[1, 0, 0],
[1, 1, 1]])
assert M.eigenvals() == {2*S.One: 1, -S.One: 1, S.Zero: 1}
assert M.eigenvects() == (
[
(-1, 1, [Matrix([-1, 1, 0])]),
( 0, 1, [Matrix([0, -1, 1])]),
( 2, 1, [Matrix([R(2, 3), R(1, 3), 1])])
])
assert M.left_eigenvects() == (
[
(-1, 1, [Matrix([[-2, 1, 1]])]),
(0, 1, [Matrix([[-1, -1, 1]])]),
(2, 1, [Matrix([[1, 1, 1]])])
])
a = Symbol('a')
M = Matrix([[a, 0],
[0, 1]])
assert M.eigenvals() == {a: 1, S.One: 1}
M = Matrix([[1, -1],
[1, 3]])
assert M.eigenvects() == ([(2, 2, [Matrix(2, 1, [-1, 1])])])
assert M.left_eigenvects() == ([(2, 2, [Matrix([[1, 1]])])])
M = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
a = R(15, 2)
b = 3*33**R(1, 2)
c = R(13, 2)
d = (R(33, 8) + 3*b/8)
e = (R(33, 8) - 3*b/8)
def NS(e, n):
return str(N(e, n))
r = [
(a - b/2, 1, [Matrix([(12 + 24/(c - b/2))/((c - b/2)*e) + 3/(c - b/2),
(6 + 12/(c - b/2))/e, 1])]),
( 0, 1, [Matrix([1, -2, 1])]),
(a + b/2, 1, [Matrix([(12 + 24/(c + b/2))/((c + b/2)*d) + 3/(c + b/2),
(6 + 12/(c + b/2))/d, 1])]),
]
r1 = [(NS(r[i][0], 2), NS(r[i][1], 2),
[NS(j, 2) for j in r[i][2][0]]) for i in range(len(r))]
r = M.eigenvects()
r2 = [(NS(r[i][0], 2), NS(r[i][1], 2),
[NS(j, 2) for j in r[i][2][0]]) for i in range(len(r))]
assert sorted(r1) == sorted(r2)
eps = Symbol('eps', real=True)
M = Matrix([[abs(eps), I*eps ],
[-I*eps, abs(eps) ]])
assert M.eigenvects() == (
[
( 0, 1, [Matrix([[-I*eps/abs(eps)], [1]])]),
( 2*abs(eps), 1, [ Matrix([[I*eps/abs(eps)], [1]]) ] ),
])
assert M.left_eigenvects() == (
[
(0, 1, [Matrix([[I*eps/Abs(eps), 1]])]),
(2*Abs(eps), 1, [Matrix([[-I*eps/Abs(eps), 1]])])
])
M = Matrix(3, 3, [1, 2, 0, 0, 3, 0, 2, -4, 2])
M._eigenvects = M.eigenvects(simplify=False)
assert max(i.q for i in M._eigenvects[0][2][0]) > 1
M._eigenvects = M.eigenvects(simplify=True)
assert max(i.q for i in M._eigenvects[0][2][0]) == 1
M = Matrix([[Rational(1, 4), 1], [1, 1]])
assert M.eigenvects(simplify=True) == [
(Rational(5, 8) - sqrt(73)/8, 1, [Matrix([[-sqrt(73)/8 - Rational(3, 8)], [1]])]),
(Rational(5, 8) + sqrt(73)/8, 1, [Matrix([[Rational(-3, 8) + sqrt(73)/8], [1]])])]
assert M.eigenvects(simplify=False) == [
(Rational(5, 8) - sqrt(73)/8, 1, [Matrix([[-1/(-Rational(3, 8) + sqrt(73)/8)], [1]])]),
(Rational(5, 8) + sqrt(73)/8, 1, [Matrix([[8/(3 + sqrt(73))], [1]])])]
# issue 10719
assert Matrix([]).eigenvals() == {}
assert Matrix([]).eigenvects() == []
# issue 15119
raises(NonSquareMatrixError, lambda : Matrix([[1, 2], [0, 4], [0, 0]]).eigenvals())
raises(NonSquareMatrixError, lambda : Matrix([[1, 0], [3, 4], [5, 6]]).eigenvals())
raises(NonSquareMatrixError, lambda : Matrix([[1, 2, 3], [0, 5, 6]]).eigenvals())
raises(NonSquareMatrixError, lambda : Matrix([[1, 0, 0], [4, 5, 0]]).eigenvals())
raises(NonSquareMatrixError, lambda : Matrix([[1, 2, 3], [0, 5, 6]]).eigenvals(error_when_incomplete = False))
raises(NonSquareMatrixError, lambda : Matrix([[1, 0, 0], [4, 5, 0]]).eigenvals(error_when_incomplete = False))
# issue 15125
from sympy.core.function import count_ops
q = Symbol("q", positive = True)
m = Matrix([[-2, exp(-q), 1], [exp(q), -2, 1], [1, 1, -2]])
assert count_ops(m.eigenvals(simplify=False)) > count_ops(m.eigenvals(simplify=True))
assert count_ops(m.eigenvals(simplify=lambda x: x)) > count_ops(m.eigenvals(simplify=True))
assert isinstance(m.eigenvals(simplify=True, multiple=False), dict)
assert isinstance(m.eigenvals(simplify=True, multiple=True), list)
assert isinstance(m.eigenvals(simplify=lambda x: x, multiple=False), dict)
assert isinstance(m.eigenvals(simplify=lambda x: x, multiple=True), list)
def test_float_eigenvals():
m = Matrix([[1, .6, .6], [.6, .9, .9], [.9, .6, .6]])
evals = [
Rational(5, 4) - sqrt(385)/20,
sqrt(385)/20 + Rational(5, 4),
S.Zero]
n_evals = m.eigenvals(rational=True, multiple=True)
n_evals = sorted(n_evals)
s_evals = [x.evalf() for x in evals]
s_evals = sorted(s_evals)
for x, y in zip(n_evals, s_evals):
assert abs(x-y) < 10**-9
@XFAIL
def test_eigen_vects():
m = Matrix(2, 2, [1, 0, 0, I])
raises(NotImplementedError, lambda: m.is_diagonalizable(True))
# !!! bug because of eigenvects() or roots(x**2 + (-1 - I)*x + I, x)
# see issue 5292
assert not m.is_diagonalizable(True)
raises(MatrixError, lambda: m.diagonalize(True))
(P, D) = m.diagonalize(True)
def test_issue_8240():
# Eigenvalues of large triangular matrices
x, y = symbols('x y')
n = 200
diagonal_variables = [Symbol('x%s' % i) for i in range(n)]
M = [[0 for i in range(n)] for j in range(n)]
for i in range(n):
M[i][i] = diagonal_variables[i]
M = Matrix(M)
eigenvals = M.eigenvals()
assert len(eigenvals) == n
for i in range(n):
assert eigenvals[diagonal_variables[i]] == 1
eigenvals = M.eigenvals(multiple=True)
assert set(eigenvals) == set(diagonal_variables)
# with multiplicity
M = Matrix([[x, 0, 0], [1, y, 0], [2, 3, x]])
eigenvals = M.eigenvals()
assert eigenvals == {x: 2, y: 1}
eigenvals = M.eigenvals(multiple=True)
assert len(eigenvals) == 3
assert eigenvals.count(x) == 2
assert eigenvals.count(y) == 1
# EigenOnlyMatrix tests
def test_eigenvals():
M = EigenOnlyMatrix([[0, 1, 1],
[1, 0, 0],
[1, 1, 1]])
assert M.eigenvals() == {2*S.One: 1, -S.One: 1, S.Zero: 1}
# if we cannot factor the char poly, we raise an error
m = Matrix([
[3, 0, 0, 0, -3],
[0, -3, -3, 0, 3],
[0, 3, 0, 3, 0],
[0, 0, 3, 0, 3],
[3, 0, 0, 3, 0]])
raises(MatrixError, lambda: m.eigenvals())
def test_eigenvects():
M = EigenOnlyMatrix([[0, 1, 1],
[1, 0, 0],
[1, 1, 1]])
vecs = M.eigenvects()
for val, mult, vec_list in vecs:
assert len(vec_list) == 1
assert M*vec_list[0] == val*vec_list[0]
def test_left_eigenvects():
M = EigenOnlyMatrix([[0, 1, 1],
[1, 0, 0],
[1, 1, 1]])
vecs = M.left_eigenvects()
for val, mult, vec_list in vecs:
assert len(vec_list) == 1
assert vec_list[0]*M == val*vec_list[0]
@slow
def test_bidiagonalize():
M = Matrix([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
assert M.bidiagonalize() == M
assert M.bidiagonalize(upper=False) == M
assert M.bidiagonalize() == M
assert M.bidiagonal_decomposition() == (M, M, M)
assert M.bidiagonal_decomposition(upper=False) == (M, M, M)
assert M.bidiagonalize() == M
import random
#Real Tests
for real_test in range(2):
test_values = []
row = 2
col = 2
for _ in range(row * col):
value = random.randint(-1000000000, 1000000000)
test_values = test_values + [value]
# L -> Lower Bidiagonalization
# M -> Mutable Matrix
# N -> Immutable Matrix
# 0 -> Bidiagonalized form
# 1,2,3 -> Bidiagonal_decomposition matrices
# 4 -> Product of 1 2 3
M = Matrix(row, col, test_values)
N = ImmutableMatrix(M)
N1, N2, N3 = N.bidiagonal_decomposition()
M1, M2, M3 = M.bidiagonal_decomposition()
M0 = M.bidiagonalize()
N0 = N.bidiagonalize()
N4 = N1 * N2 * N3
M4 = M1 * M2 * M3
N2.simplify()
N4.simplify()
N0.simplify()
M0.simplify()
M2.simplify()
M4.simplify()
LM0 = M.bidiagonalize(upper=False)
LM1, LM2, LM3 = M.bidiagonal_decomposition(upper=False)
LN0 = N.bidiagonalize(upper=False)
LN1, LN2, LN3 = N.bidiagonal_decomposition(upper=False)
LN4 = LN1 * LN2 * LN3
LM4 = LM1 * LM2 * LM3
LN2.simplify()
LN4.simplify()
LN0.simplify()
LM0.simplify()
LM2.simplify()
LM4.simplify()
assert M == M4
assert M2 == M0
assert N == N4
assert N2 == N0
assert M == LM4
assert LM2 == LM0
assert N == LN4
assert LN2 == LN0
#Complex Tests
for complex_test in range(2):
test_values = []
size = 2
for _ in range(size * size):
real = random.randint(-1000000000, 1000000000)
comp = random.randint(-1000000000, 1000000000)
value = real + comp * I
test_values = test_values + [value]
M = Matrix(size, size, test_values)
N = ImmutableMatrix(M)
# L -> Lower Bidiagonalization
# M -> Mutable Matrix
# N -> Immutable Matrix
# 0 -> Bidiagonalized form
# 1,2,3 -> Bidiagonal_decomposition matrices
# 4 -> Product of 1 2 3
N1, N2, N3 = N.bidiagonal_decomposition()
M1, M2, M3 = M.bidiagonal_decomposition()
M0 = M.bidiagonalize()
N0 = N.bidiagonalize()
N4 = N1 * N2 * N3
M4 = M1 * M2 * M3
N2.simplify()
N4.simplify()
N0.simplify()
M0.simplify()
M2.simplify()
M4.simplify()
LM0 = M.bidiagonalize(upper=False)
LM1, LM2, LM3 = M.bidiagonal_decomposition(upper=False)
LN0 = N.bidiagonalize(upper=False)
LN1, LN2, LN3 = N.bidiagonal_decomposition(upper=False)
LN4 = LN1 * LN2 * LN3
LM4 = LM1 * LM2 * LM3
LN2.simplify()
LN4.simplify()
LN0.simplify()
LM0.simplify()
LM2.simplify()
LM4.simplify()
assert M == M4
assert M2 == M0
assert N == N4
assert N2 == N0
assert M == LM4
assert LM2 == LM0
assert N == LN4
assert LN2 == LN0
M = Matrix(18, 8, range(1, 145))
M = M.applyfunc(lambda i: Float(i))
assert M.bidiagonal_decomposition()[1] == M.bidiagonalize()
assert M.bidiagonal_decomposition(upper=False)[1] == M.bidiagonalize(upper=False)
a, b, c = M.bidiagonal_decomposition()
diff = a * b * c - M
assert abs(max(diff)) < 10**-12
def test_diagonalize():
m = EigenOnlyMatrix(2, 2, [0, -1, 1, 0])
raises(MatrixError, lambda: m.diagonalize(reals_only=True))
P, D = m.diagonalize()
assert D.is_diagonal()
assert D == Matrix([
[-I, 0],
[ 0, I]])
# make sure we use floats out if floats are passed in
m = EigenOnlyMatrix(2, 2, [0, .5, .5, 0])
P, D = m.diagonalize()
assert all(isinstance(e, Float) for e in D.values())
assert all(isinstance(e, Float) for e in P.values())
_, D2 = m.diagonalize(reals_only=True)
assert D == D2
def test_is_diagonalizable():
a, b, c = symbols('a b c')
m = EigenOnlyMatrix(2, 2, [a, c, c, b])
assert m.is_symmetric()
assert m.is_diagonalizable()
assert not EigenOnlyMatrix(2, 2, [1, 1, 0, 1]).is_diagonalizable()
m = EigenOnlyMatrix(2, 2, [0, -1, 1, 0])
assert m.is_diagonalizable()
assert not m.is_diagonalizable(reals_only=True)
def test_jordan_form():
m = Matrix(3, 2, [-3, 1, -3, 20, 3, 10])
raises(NonSquareMatrixError, lambda: m.jordan_form())
# the next two tests test the cases where the old
# algorithm failed due to the fact that the block structure can
# *NOT* be determined from algebraic and geometric multiplicity alone
# This can be seen most easily when one lets compute the J.c.f. of a matrix that
# is in J.c.f already.
m = EigenOnlyMatrix(4, 4, [2, 1, 0, 0,
0, 2, 1, 0,
0, 0, 2, 0,
0, 0, 0, 2
])
P, J = m.jordan_form()
assert m == J
m = EigenOnlyMatrix(4, 4, [2, 1, 0, 0,
0, 2, 0, 0,
0, 0, 2, 1,
0, 0, 0, 2
])
P, J = m.jordan_form()
assert m == J
A = Matrix([[ 2, 4, 1, 0],
[-4, 2, 0, 1],
[ 0, 0, 2, 4],
[ 0, 0, -4, 2]])
P, J = A.jordan_form()
assert simplify(P*J*P.inv()) == A
assert EigenOnlyMatrix(1, 1, [1]).jordan_form() == (
Matrix([1]), Matrix([1]))
assert EigenOnlyMatrix(1, 1, [1]).jordan_form(
calc_transform=False) == Matrix([1])
# make sure if we cannot factor the characteristic polynomial, we raise an error
m = Matrix([[3, 0, 0, 0, -3], [0, -3, -3, 0, 3], [0, 3, 0, 3, 0], [0, 0, 3, 0, 3], [3, 0, 0, 3, 0]])
raises(MatrixError, lambda: m.jordan_form())
# make sure that if the input has floats, the output does too
m = Matrix([
[ 0.6875, 0.125 + 0.1875*sqrt(3)],
[0.125 + 0.1875*sqrt(3), 0.3125]])
P, J = m.jordan_form()
assert all(isinstance(x, Float) or x == 0 for x in P)
assert all(isinstance(x, Float) or x == 0 for x in J)
def test_singular_values():
x = Symbol('x', real=True)
A = EigenOnlyMatrix([[0, 1*I], [2, 0]])
# if singular values can be sorted, they should be in decreasing order
assert A.singular_values() == [2, 1]
A = eye(3)
A[1, 1] = x
A[2, 2] = 5
vals = A.singular_values()
# since Abs(x) cannot be sorted, test set equality
assert set(vals) == {5, 1, Abs(x)}
A = EigenOnlyMatrix([[sin(x), cos(x)], [-cos(x), sin(x)]])
vals = [sv.trigsimp() for sv in A.singular_values()]
assert vals == [S.One, S.One]
A = EigenOnlyMatrix([
[2, 4],
[1, 3],
[0, 0],
[0, 0]
])
assert A.singular_values() == \
[sqrt(sqrt(221) + 15), sqrt(15 - sqrt(221))]
assert A.T.singular_values() == \
[sqrt(sqrt(221) + 15), sqrt(15 - sqrt(221)), 0, 0]
def test___eq__():
assert (EigenOnlyMatrix(
[[0, 1, 1],
[1, 0, 0],
[1, 1, 1]]) == {}) is False
def test_definite():
# Examples from Gilbert Strang, "Introduction to Linear Algebra"
# Positive definite matrices
m = Matrix([[2, -1, 0], [-1, 2, -1], [0, -1, 2]])
assert m.is_positive_definite == True
assert m.is_positive_semidefinite == True
assert m.is_negative_definite == False
assert m.is_negative_semidefinite == False
assert m.is_indefinite == False
m = Matrix([[5, 4], [4, 5]])
assert m.is_positive_definite == True
assert m.is_positive_semidefinite == True
assert m.is_negative_definite == False
assert m.is_negative_semidefinite == False
assert m.is_indefinite == False
# Positive semidefinite matrices
m = Matrix([[2, -1, -1], [-1, 2, -1], [-1, -1, 2]])
assert m.is_positive_definite == False
assert m.is_positive_semidefinite == True
assert m.is_negative_definite == False
assert m.is_negative_semidefinite == False
assert m.is_indefinite == False
m = Matrix([[1, 2], [2, 4]])
assert m.is_positive_definite == False
assert m.is_positive_semidefinite == True
assert m.is_negative_definite == False
assert m.is_negative_semidefinite == False
assert m.is_indefinite == False
# Examples from Mathematica documentation
# Non-hermitian positive definite matrices
m = Matrix([[2, 3], [4, 8]])
assert m.is_positive_definite == True
assert m.is_positive_semidefinite == True
assert m.is_negative_definite == False
assert m.is_negative_semidefinite == False
assert m.is_indefinite == False
m = Matrix([[1, 2*I], [-I, 4]])
assert m.is_positive_definite == True
assert m.is_positive_semidefinite == True
assert m.is_negative_definite == False
assert m.is_negative_semidefinite == False
assert m.is_indefinite == False
# Symbolic matrices examples
a = Symbol('a', positive=True)
b = Symbol('b', negative=True)
m = Matrix([[a, 0, 0], [0, a, 0], [0, 0, a]])
assert m.is_positive_definite == True
assert m.is_positive_semidefinite == True
assert m.is_negative_definite == False
assert m.is_negative_semidefinite == False
assert m.is_indefinite == False
m = Matrix([[b, 0, 0], [0, b, 0], [0, 0, b]])
assert m.is_positive_definite == False
assert m.is_positive_semidefinite == False
assert m.is_negative_definite == True
assert m.is_negative_semidefinite == True
assert m.is_indefinite == False
m = Matrix([[a, 0], [0, b]])
assert m.is_positive_definite == False
assert m.is_positive_semidefinite == False
assert m.is_negative_definite == False
assert m.is_negative_semidefinite == False
assert m.is_indefinite == True
m = Matrix([
[0.0228202735623867, 0.00518748979085398,
-0.0743036351048907, -0.00709135324903921],
[0.00518748979085398, 0.0349045359786350,
0.0830317991056637, 0.00233147902806909],
[-0.0743036351048907, 0.0830317991056637,
1.15859676366277, 0.340359081555988],
[-0.00709135324903921, 0.00233147902806909,
0.340359081555988, 0.928147644848199]
])
assert m.is_positive_definite == True
assert m.is_positive_semidefinite == True
assert m.is_indefinite == False
|
the-stack_0_7280 | #!/usr/bin/env python
# Copyright Contributors to the Open Shading Language project.
# SPDX-License-Identifier: BSD-3-Clause
# https://github.com/AcademySoftwareFoundation/OpenShadingLanguage
from __future__ import print_function, absolute_import
import os
import glob
import sys
import platform
import subprocess
import difflib
import filecmp
import shutil
from itertools import chain
from optparse import OptionParser
def make_relpath (path, start=os.curdir):
"Wrapper around os.path.relpath which always uses '/' as the separator."
p = os.path.relpath (path, start)
return p if sys.platform != "win32" else p.replace ('\\', '/')
#
# Get standard testsuite test arguments: srcdir exepath
#
srcdir = "."
tmpdir = "."
OSL_BUILD_DIR = os.environ.get("OSL_BUILD_DIR", "..")
OSL_SOURCE_DIR = os.environ.get("OSL_SOURCE_DIR", "../../..")
OSL_TESTSUITE_DIR = os.path.join(OSL_SOURCE_DIR, "testsuite")
OpenImageIO_ROOT = os.environ.get("OpenImageIO_ROOT", None)
OSL_TESTSUITE_ROOT = make_relpath(os.getenv('OSL_TESTSUITE_ROOT',
'../../../testsuite'))
os.environ['OSLHOME'] = os.path.join(OSL_SOURCE_DIR, "src")
OSL_REGRESSION_TEST = os.environ.get("OSL_REGRESSION_TEST", None)
# Options for the command line
parser = OptionParser()
parser.add_option("-p", "--path", help="add to executable path",
action="store", type="string", dest="path", default="")
parser.add_option("--devenv-config", help="use a MS Visual Studio configuration",
action="store", type="string", dest="devenv_config", default="")
parser.add_option("--solution-path", help="MS Visual Studio solution path",
action="store", type="string", dest="solution_path", default="")
(options, args) = parser.parse_args()
if args and len(args) > 0 :
srcdir = args[0]
srcdir = os.path.abspath (srcdir) + "/"
os.chdir (srcdir)
if args and len(args) > 1 :
OSL_BUILD_DIR = args[1]
OSL_BUILD_DIR = os.path.normpath (OSL_BUILD_DIR)
tmpdir = "."
tmpdir = os.path.abspath (tmpdir)
if platform.system() == 'Windows' :
redirect = " >> out.txt 2>&1 "
else :
redirect = " >> out.txt 2>>out.txt "
refdir = "ref/"
mytest = os.path.split(os.path.abspath(os.getcwd()))[-1]
if str(mytest).endswith('.opt') or str(mytest).endswith('.optix') :
mytest = mytest.split('.')[0]
test_source_dir = os.getenv('OSL_TESTSUITE_SRC',
os.path.join(OSL_TESTSUITE_ROOT, mytest))
#test_source_dir = os.path.join(OSL_TESTSUITE_DIR,
# os.path.basename(os.path.abspath(srcdir)))
command = ""
outputs = [ "out.txt" ] # default
failureok = 0
failthresh = 0.004
hardfail = 0.01
failpercent = 0.02
cleanup_on_success = False
if int(os.getenv('TESTSUITE_CLEANUP_ON_SUCCESS', '0')) :
cleanup_on_success = True;
oslcargs = "-Wall"
image_extensions = [ ".tif", ".tx", ".exr", ".jpg", ".png", ".rla",
".dpx", ".iff", ".psd" ]
compile_osl_files = True
splitsymbol = ';'
#print ("srcdir = " + srcdir)
#print ("tmpdir = " + tmpdir)
#print ("path = " + path)
#print ("refdir = " + refdir)
print ("test source dir = ", test_source_dir)
if platform.system() == 'Windows' :
if not os.path.exists("./ref") :
test_source_ref_dir = os.path.join (test_source_dir, "ref")
if os.path.exists(test_source_ref_dir) :
shutil.copytree (test_source_ref_dir, "./ref")
if os.path.exists (os.path.join (test_source_dir, "src")) and not os.path.exists("./src") :
shutil.copytree (os.path.join (test_source_dir, "src"), "./src")
if not os.path.exists(os.path.abspath("data")) :
shutil.copytree (test_source_dir, os.path.abspath("data"))
else :
if not os.path.exists("./ref") :
test_source_ref_dir = os.path.join (test_source_dir, "ref")
if os.path.exists(test_source_ref_dir) :
os.symlink (test_source_ref_dir, "./ref")
if os.path.exists (os.path.join (test_source_dir, "src")) and not os.path.exists("./src") :
os.symlink (os.path.join (test_source_dir, "src"), "./src")
if not os.path.exists("./data") :
os.symlink (test_source_dir, "./data")
pythonbin = 'python'
if os.getenv("PYTHON_VERSION") :
pythonbin += os.getenv("PYTHON_VERSION")
#print ("pythonbin = ", pythonbin)
###########################################################################
# Handy functions...
# Compare two text files. Returns 0 if they are equal otherwise returns
# a non-zero value and writes the differences to "diff_file".
# Based on the command-line interface to difflib example from the Python
# documentation
def text_diff (fromfile, tofile, diff_file=None):
import time
try:
fromdate = time.ctime (os.stat (fromfile).st_mtime)
todate = time.ctime (os.stat (tofile).st_mtime)
fromlines = open (fromfile, 'r').readlines()
tolines = open (tofile, 'r').readlines()
except:
print ("Unexpected error:", sys.exc_info()[0])
return -1
diff = difflib.unified_diff(fromlines, tolines, fromfile, tofile,
fromdate, todate)
# Diff is a generator, but since we need a way to tell if it is
# empty we just store all the text in advance
diff_lines = [l for l in diff]
if not diff_lines:
return 0
if diff_file:
try:
open (diff_file, 'w').writelines (diff_lines)
print ("Diff " + fromfile + " vs " + tofile + " was:\n-------")
# print (diff)
print ("".join(diff_lines))
except:
print ("Unexpected error:", sys.exc_info()[0])
return 1
def run_app (app, silent=False, concat=True) :
command = app
if not silent :
command += redirect
if concat:
command += " ;\n"
return command
def osl_app (app):
apath = os.path.join(OSL_BUILD_DIR, "bin")
if (platform.system () == 'Windows'):
# when we use Visual Studio, built applications are stored
# in the app/$(OutDir)/ directory, e.g., Release or Debug.
apath = os.path.join(apath, options.devenv_config)
return os.path.join(apath, app) + " "
def oiio_app (app):
if OpenImageIO_ROOT :
return os.path.join (OpenImageIO_ROOT, "bin", app) + " "
else :
return app + " "
# Construct a command that will compile the shader file, appending output to
# the file "out.txt".
def oslc (args) :
return (osl_app("oslc") + oslcargs + " " + args + redirect + " ;\n")
# Construct a command that will run oslinfo, appending output to
# the file "out.txt".
def oslinfo (args) :
return (osl_app("oslinfo") + args + redirect + " ;\n")
# Construct a command that runs oiiotool, appending console output
# to the file "out.txt".
def oiiotool (args, silent=False) :
oiiotool_cmd = (oiio_app("oiiotool") + args)
if not silent :
oiiotool_cmd += redirect
oiiotool_cmd += " ;\n"
return oiiotool_cmd;
# Construct a command that runs maketx, appending console output
# to the file "out.txt".
def maketx (args) :
return (oiio_app("maketx") + args + redirect + " ;\n")
# Construct a command that will compare two images, appending output to
# the file "out.txt". We allow a small number of pixels to have up to
# 1 LSB (8 bit) error, it's very hard to make different platforms and
# compilers always match to every last floating point bit.
def oiiodiff (fileA, fileB, extraargs="", silent=True, concat=True) :
command = (oiio_app("idiff") + "-a"
+ " -fail " + str(failthresh)
+ " -failpercent " + str(failpercent)
+ " -hardfail " + str(hardfail)
+ " -warn " + str(2*failthresh)
+ " -warnpercent " + str(failpercent)
+ " " + extraargs + " " + make_relpath(fileA,tmpdir)
+ " " + make_relpath(fileB,tmpdir))
if not silent :
command += redirect
if concat:
command += " ;\n"
return command
# Construct a command that run testshade with the specified arguments,
# appending output to the file "out.txt".
def testshade (args) :
if os.environ.__contains__('OSL_TESTSHADE_NAME') :
testshadename = os.environ['OSL_TESTSHADE_NAME'] + " "
else :
testshadename = osl_app("testshade")
return (testshadename + args + redirect + " ;\n")
# Construct a command that run testrender with the specified arguments,
# appending output to the file "out.txt".
def testrender (args) :
os.environ["optix_log_level"] = "0"
return (osl_app("testrender") + " " + args + redirect + " ;\n")
# Construct a command that run testoptix with the specified arguments,
# appending output to the file "out.txt".
def testoptix (args) :
# Disable OptiX logging to prevent messages from the library from
# appearing in the program output.
os.environ["optix_log_level"] = "0"
return (osl_app("testoptix") + " " + args + redirect + " ;\n")
# Run 'command'. For each file in 'outputs', compare it to the copy
# in 'ref/'. If all outputs match their reference copies, return 0
# to pass. If any outputs do not match their references return 1 to
# fail.
def runtest (command, outputs, failureok=0, failthresh=0, failpercent=0, regression=None) :
# print ("working dir = " + tmpdir)
os.chdir (srcdir)
open ("out.txt", "w").close() # truncate out.txt
if options.path != "" :
sys.path = [options.path] + sys.path
test_environ = None
if (platform.system () == 'Windows') and (options.solution_path != "") and \
(os.path.isdir (options.solution_path)):
test_environ = os.environ
libOIIO_path = options.solution_path + "\\libOpenImageIO\\"
if options.devenv_config != "":
libOIIO_path = libOIIO_path + '\\' + options.devenv_config
test_environ["PATH"] = libOIIO_path + ';' + test_environ["PATH"]
if regression == "BATCHED" :
if test_environ == None :
test_environ = os.environ
test_environ["TESTSHADE_BATCHED"] = "1"
print ("command = ", command)
for sub_command in command.split(splitsymbol):
sub_command = sub_command.lstrip().rstrip()
#print ("running = ", sub_command)
cmdret = subprocess.call (sub_command, shell=True, env=test_environ)
if cmdret != 0 and failureok == 0 :
print ("#### Error: this command failed: ", sub_command)
print ("FAIL")
print ("Output was:\n--------")
print (open ("out.txt", 'r').read())
print ("--------")
return (1)
err = 0
if regression == "BASELINE" :
if not os.path.exists("./baseline") :
os.mkdir("./baseline")
for out in outputs :
shutil.move(out, "./baseline/"+out)
else :
for out in outputs :
extension = os.path.splitext(out)[1]
ok = 0
# We will first compare out to ref/out, and if that fails, we
# will compare it to everything else with the same extension in
# the ref directory. That allows us to have multiple matching
# variants for different platforms, etc.
if regression != None:
testfiles = ["baseline/"+out]
else :
testfiles = ["ref/"+out] + glob.glob (os.path.join ("ref", "*"+extension))
for testfile in (testfiles) :
# print ("comparing " + out + " to " + testfile)
if extension == ".tif" or extension == ".exr" :
# images -- use idiff
cmpcommand = oiiodiff (out, testfile, concat=False, silent=True)
# print ("cmpcommand = ", cmpcommand)
cmpresult = os.system (cmpcommand)
elif extension == ".txt" :
cmpresult = text_diff (out, testfile, out + ".diff")
else :
# anything else
cmpresult = 0 if filecmp.cmp (out, testfile) else 1
if cmpresult == 0 :
ok = 1
break # we're done
if ok :
# if extension == ".tif" or extension == ".exr" or extension == ".jpg" or extension == ".png":
# # If we got a match for an image, save the idiff results
# os.system (oiiodiff (out, testfile, silent=False))
print ("PASS: ", out, " matches ", testfile)
else :
err = 1
print ("NO MATCH for ", out)
print ("FAIL ", out)
if extension == ".txt" :
# If we failed to get a match for a text file, print the
# file and the diff, for easy debugging.
print ("-----" + out + "----->")
print (open(out,'r').read() + "<----------")
print ("Diff was:\n-------")
print (open (out+".diff", 'r').read())
if extension == ".tif" or extension == ".exr" or extension == ".jpg" or extension == ".png":
# If we failed to get a match for an image, send the idiff
# results to the console
testfile = None
if regression != None:
testfile = os.path.join ("baseline/", out)
else :
testfile = os.path.join (refdir, out)
os.system (oiiodiff (out, testfile, silent=False))
return (err)
##########################################################################
#
# Read the individual run.py file for this test, which will define
# command and outputs.
#
with open(os.path.join(test_source_dir,"run.py")) as f:
code = compile(f.read(), "run.py", 'exec')
exec (code)
# if os.path.exists("run.py") :
# execfile ("run.py")
# Allow a little more slop for slight pixel differences when in DEBUG mode.
if "DEBUG" in os.environ and os.environ["DEBUG"] :
failthresh *= 2.0
hardfail *= 2.0
failpercent *= 2.0
# Force out.txt to be in the outputs
##if "out.txt" not in outputs :
## outputs.append ("out.txt")
# Force any local shaders to compile automatically, prepending the
# compilation onto whatever else the individual run.py file requested.
for filetype in [ "*.osl", "*.h", "*.oslgroup", "*.xml" ] :
for testfile in glob.glob (os.path.join (test_source_dir, filetype)) :
shutil.copyfile (testfile, os.path.basename(testfile))
if compile_osl_files :
compiles = ""
oslfiles = glob.glob ("*.osl")
oslfiles.sort() ## sort the shaders to compile so that they always compile in the same order
for testfile in oslfiles :
compiles += oslc (testfile)
command = compiles + command
# If either out.exr or out.tif is in the reference directory but somehow
# is not in the outputs list, put it there anyway!
if (os.path.exists("ref/out.exr") and ("out.exr" not in outputs)) :
outputs.append ("out.exr")
if (os.path.exists("ref/out.tif") and ("out.tif" not in outputs)) :
outputs.append ("out.tif")
# Run the test and check the outputs
if OSL_REGRESSION_TEST != None :
# need to produce baseline images
ret = runtest (command, outputs, failureok=failureok,
failthresh=failthresh, failpercent=failpercent, regression="BASELINE")
if ret == 0 :
# run again comparing against baseline, not ref
ret = runtest (command, outputs, failureok=failureok,
failthresh=failthresh, failpercent=failpercent, regression=OSL_REGRESSION_TEST)
else :
ret = runtest (command, outputs, failureok=failureok,
failthresh=failthresh, failpercent=failpercent)
if ret == 0 and cleanup_on_success :
for ext in image_extensions + [ ".txt", ".diff" ] :
files = glob.iglob (srcdir + '/*' + ext)
baselineFiles = glob.iglob (srcdir + '/baseline/*' + ext)
for f in chain(files,baselineFiles) :
os.remove(f)
#print('REMOVED ', f)
sys.exit (ret)
|
the-stack_0_7282 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 3 14:11:25 2017
@author: juan
"""
#This program builds a cubic spline interpolant by solving for the second
#derivatives at the grid nodes (the boundary rows of the linear system fix the
#second derivatives at the endpoints)
import numpy as np
def deltaGrid(grid):
deltas = ()
for i in range(1, len(grid)):
deltas += (grid[i] - grid[i - 1], )
return deltas
def firstLinearRelation(functionValues, deltas):
    # use a float array here; an integer array would truncate the values assigned below
    b = np.zeros(len(deltas) - 1)
for j in range(len(b)):
b[j] = 6 * (((functionValues[j + 2] - functionValues[j + 1])/(deltas[j + 1]))
- ((functionValues[j + 1] - functionValues[j])/(deltas[j])))
return b
def firstVector(functionValues, deltas):
a = np.array([0])
c = np.array([1])
b = firstLinearRelation(functionValues, deltas)
return np.concatenate((a, b, c))
def secondLinearRelation(rowLength, j, deltas):
a = np.array([deltas[j + 1], 2 * (deltas[j] + deltas[j + 1]), deltas[j]])
b = np.array([0])
c = a
for i in range(j):
c = np.concatenate((b, c))
for i in range(rowLength - (j + 2)):
c = np.concatenate((c, b))
return c
def matrix(deltas):
firstRow = np.array([1])
lastRow = np.array([1])
zero = np.array([0])
for i in range(len(deltas)):
firstRow = np.concatenate((firstRow, zero))
for i in range(len(deltas)):
lastRow = np.concatenate((zero, lastRow))
matrix = firstRow
for i in range(len(deltas) - 1):
matrix = np.vstack((matrix, secondLinearRelation(len(deltas), i, deltas)))
matrix = np.vstack((matrix, lastRow))
return matrix
def Derivatives(grid, functionValues):
deltas = deltaGrid(grid)
y = firstVector(functionValues, deltas)
matriz = matrix(deltas)
sigmas = np.linalg.solve(matriz, y)
return sigmas
def cubicPolynomialCoeficients(lowerLimit, upperLimit,
lowerLimitValue, upperLimitValue,
lowerDerivative, upperDerivative):
A = np.matrix([[1, lowerLimit, lowerLimit ** 2, lowerLimit ** 3],
[1, upperLimit, upperLimit ** 2, upperLimit ** 3],
[0, 0, 2, 6 * (lowerLimit ** 1)],
[0, 0, 2, 6 * (upperLimit ** 1)]])
y = np.array([lowerLimitValue, upperLimitValue, lowerDerivative, upperDerivative])
x = np.linalg.solve(A, y)
return x
def closestIntervalIndex(x, grid):
for i in range(len(grid) - 1):
if grid[i] <= x and x <= grid[i + 1]:
return i
if x < grid[0]:
return 0
return len(grid)-2
def polynomialFromCoeficients(x, coeficients):
value = 0.0
for i in range(len(coeficients)):
value += coeficients[i] * (x ** i)
return value
def naturalCubicSpline(x, sample, functionValues,derivatives):
closest = closestIntervalIndex(x, sample)
x_j = sample[closest]
x_j_1 = sample[closest + 1]
f_x_j = functionValues[closest]
f_x_j_1 = functionValues[closest + 1]
f_2_x_j = derivatives[closest]
f_2_x_j_1 = derivatives[closest + 1]
coeficients = cubicPolynomialCoeficients(x_j, x_j_1,
f_x_j, f_x_j_1,
f_2_x_j, f_2_x_j_1
)
spline = polynomialFromCoeficients(x, coeficients)
return spline
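# Illustrative usage sketch (not part of the original module): interpolate
# sin(x) on a coarse grid and evaluate the spline at an off-grid point.
if __name__ == "__main__":
    demo_grid = np.linspace(0.0, np.pi, 6)
    demo_values = np.sin(demo_grid)
    demo_sigmas = Derivatives(demo_grid, demo_values)
    print(naturalCubicSpline(1.0, demo_grid, demo_values, demo_sigmas))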
|
the-stack_0_7284 | # Copyright 2021 Dakewe Biotech Corporation. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import io
import os
import sys
from shutil import rmtree
from setuptools import Command
from setuptools import find_packages
from setuptools import setup
# Configure library params.
NAME = "dcgan_pytorch"
DESCRIPTION = "Unsupervised Representation Learning with Deep Convolutional Generative Adversarial Networks."
URL = "https://github.com/Lornatang/DCGAN-PyTorch"
EMAIL = "[email protected]"
AUTHOR = "Liu Goodfellow"
REQUIRES_PYTHON = ">=3.8.0"
VERSION = "1.0.0"
# Libraries that must be installed.
REQUIRED = [
"torch"
]
# The following libraries directory need to be installed if you need to run all scripts.
EXTRAS = {
}
# Find the current running location.
here = os.path.abspath(os.path.dirname(__file__))
# About README file description.
try:
with io.open(os.path.join(here, "README.md"), encoding="utf-8") as f:
long_description = "\n" + f.read()
except FileNotFoundError:
long_description = DESCRIPTION
# Set Current Library Version.
about = {}
if not VERSION:
project_slug = NAME.lower().replace("-", "_").replace(" ", "_")
with open(os.path.join(here, project_slug, "__version__.py")) as f:
exec(f.read(), about)
else:
about["__version__"] = VERSION
class UploadCommand(Command):
description = "Build and publish the package."
user_options = []
@staticmethod
def status(s):
print("\033[1m{0}\033[0m".format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status("Removing previous builds…")
rmtree(os.path.join(here, "dist"))
except OSError:
pass
self.status("Building Source and Wheel (universal) distribution…")
os.system("{0} setup.py sdist bdist_wheel --universal".format(sys.executable))
self.status("Uploading the package to PyPI via Twine…")
os.system("twine upload dist/*")
self.status("Pushing git tags…")
os.system("git tag v{0}".format(about["__version__"]))
os.system("git push --tags")
sys.exit()
setup(name=NAME,
version=about["__version__"],
description=DESCRIPTION,
long_description=long_description,
long_description_content_type="text/markdown",
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
packages=find_packages(exclude=["tests", "*.tests", "*.tests.*", "tests.*"]),
install_requires=REQUIRED,
extras_require=EXTRAS,
include_package_data=True,
license="Apache",
classifiers=[
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3 :: Only"
],
cmdclass={
"upload": UploadCommand,
},
)
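# Illustrative usage note (not part of the original file): with the cmdclass
# mapping above, a release can be cut with
#   python setup.py upload
# which rebuilds the sdist/wheel, uploads them via twine, and pushes a git tag
# for the current version.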
|
the-stack_0_7285 | # qubit number=4
# total number=33
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += CNOT(0,3) # number=10
prog += X(3) # number=11
prog += H(3) # number=13
prog += CZ(0,3) # number=14
prog += H(1) # number=18
prog += CZ(3,1) # number=19
prog += Z(3) # number=25
prog += H(1) # number=20
prog += RX(-3.141592653589793,3) # number=26
prog += H(3) # number=15
prog += H(1) # number=2
prog += H(2) # number=3
prog += H(2) # number=17
prog += H(3) # number=4
prog += H(0) # number=5
prog += H(1) # number=6
prog += H(2) # number=7
prog += H(3) # number=8
prog += H(0) # number=9
prog += H(0) # number=27
prog += CZ(1,0) # number=28
prog += H(0) # number=29
prog += H(0) # number=30
prog += CZ(1,0) # number=31
prog += H(0) # number=32
prog += X(1) # number=23
prog += X(1) # number=24
# circuit end
return prog
def summrise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('4q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil1941.csv","w")
print(summrise_results(bitstrings),file=writefile)
writefile.close()
|