ext | sha | content
---|---|---|
py | b40b6098f140ce151b7451c40a7c3dcdfd684b62 | from torch import nn
class GeneratorNet(nn.Module):
def __init__(self, n_features, n_out):
super(GeneratorNet, self).__init__()
self.n_features = n_features
self.n_out = n_out
self.hidden0 = nn.Sequential(
nn.Linear(self.n_features, 256),
nn.LeakyReLU(0.2)
)
self.hidden1 = nn.Sequential(
nn.Linear(256, 512),
nn.LeakyReLU(0.2)
)
self.hidden2 = nn.Sequential(
nn.Linear(512, 1024),
nn.LeakyReLU(0.2)
)
self.out = nn.Sequential(
nn.Linear(1024, self.n_out),
nn.Tanh()
)
def forward(self, _input):
_input = self.hidden0(_input)
_input = self.hidden1(_input)
_input = self.hidden2(_input)
_input = self.out(_input)
return _input
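# Usage sketch (added illustration, not part of the original file): feed the
# generator a batch of Gaussian noise. The latent size (100) and output size
# (784, a flattened 28x28 image) are assumptions for demonstration only.
if __name__ == "__main__":
    import torch
    generator = GeneratorNet(n_features=100, n_out=784)
    noise = torch.randn(16, 100)        # 16 latent vectors
    fake = generator(noise)             # -> shape (16, 784), values in [-1, 1] from Tanh
    print(fake.shape)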
|
py | b40b60ba5edc1e5991deeb8e5230341581c8f2b0 | #!/usr/bin/env python3
# Copyright (c) ASAPP Inc.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Wav2letter decoders.
"""
import gc
import itertools as it
import os.path as osp
import warnings
from collections import deque, namedtuple
import numpy as np
import torch
import torch.nn.functional as F
from examples.speech_recognition.data.replabels import unpack_replabels
from fairseq import tasks
from fairseq.utils import apply_to_sample
from einops import rearrange
try:
from wav2letter.common import create_word_dict, load_words
from wav2letter.criterion import CpuViterbiPath, get_data_ptr_as_bytes
from wav2letter.decoder import (
CriterionType,
DecoderOptions,
KenLM,
LM,
LMState,
SmearingMode,
Trie,
LexiconDecoder,
        LexiconFreeDecoder,
)
except Exception:
warnings.warn(
"wav2letter python bindings are required to use this functionality. Please install from https://github.com/facebookresearch/wav2letter/wiki/Python-bindings"
)
LM = object
LMState = object
class W2lDecoder(object):
def __init__(self, args, tgt_dict):
self.tgt_dict = tgt_dict
self.vocab_size = len(tgt_dict)
self.nbest = args.nbest
self.temperature = getattr(args, 'eval_temperature', 1.0)
self.eval_upsample = getattr(args, 'eval_upsample', 1.0)
# criterion-specific init
if args.criterion == "ctc":
self.criterion_type = CriterionType.CTC
self.blank = (
tgt_dict.index("<ctc_blank>")
if "<ctc_blank>" in tgt_dict.indices
else tgt_dict.bos()
)
self.asg_transitions = None
elif args.criterion == "asg_loss":
self.criterion_type = CriterionType.ASG
self.blank = -1
self.asg_transitions = args.asg_transitions
self.max_replabel = args.max_replabel
assert len(self.asg_transitions) == self.vocab_size ** 2
else:
raise RuntimeError(f"unknown criterion: {args.criterion}")
def generate(self, models, sample, **unused):
"""Generate a batch of inferences."""
# model.forward normally channels prev_output_tokens into the decoder
# separately, but SequenceGenerator directly calls model.encoder
encoder_input = {
k: v for k, v in sample["net_input"].items() if k != "prev_output_tokens"
}
emissions = self.get_emissions(models, encoder_input)
return self.decode(emissions)
def get_emissions(self, models, encoder_input):
"""Run encoder and normalize emissions"""
# encoder_out = models[0].encoder(**encoder_input)
encoder_out = models[0](**encoder_input)
if self.temperature != 1.0:
encoder_out["encoder_out"].div_(self.temperature)
if self.criterion_type == CriterionType.CTC:
emissions = models[0].get_normalized_probs(encoder_out, log_probs=True)
elif self.criterion_type == CriterionType.ASG:
emissions = encoder_out["encoder_out"]
emissions = emissions.transpose(0, 1).float().cpu().contiguous()
emissions[encoder_out['padding_mask'].cpu()] = 0.
emissions[encoder_out['padding_mask'].cpu()][:, self.blank] = 10.
if self.eval_upsample != 1.:
emissions = rearrange(emissions, 'b t c -> b c t')
emissions = F.interpolate(emissions, scale_factor=self.eval_upsample)
emissions = rearrange(emissions, 'b c t -> b t c').contiguous()
return emissions
def get_tokens(self, idxs):
"""Normalize tokens by handling CTC blank, ASG replabels, etc."""
idxs = (g[0] for g in it.groupby(idxs))
if self.criterion_type == CriterionType.CTC:
idxs = filter(lambda x: x != self.blank, idxs)
elif self.criterion_type == CriterionType.ASG:
idxs = filter(lambda x: x >= 0, idxs)
idxs = unpack_replabels(list(idxs), self.tgt_dict, self.max_replabel)
return torch.LongTensor(list(idxs))
class W2lViterbiDecoder(W2lDecoder):
def __init__(self, args, tgt_dict):
super().__init__(args, tgt_dict)
def decode(self, emissions):
B, T, N = emissions.size()
hypos = []
if self.asg_transitions is None:
transitions = torch.FloatTensor(N, N).zero_()
else:
transitions = torch.FloatTensor(self.asg_transitions).view(N, N)
viterbi_path = torch.IntTensor(B, T)
workspace = torch.ByteTensor(CpuViterbiPath.get_workspace_size(B, T, N))
CpuViterbiPath.compute(
B,
T,
N,
get_data_ptr_as_bytes(emissions),
get_data_ptr_as_bytes(transitions),
get_data_ptr_as_bytes(viterbi_path),
get_data_ptr_as_bytes(workspace),
)
return [
[{"tokens": self.get_tokens(viterbi_path[b].tolist()), "score": 0}]
for b in range(B)
]
class W2lKenLMDecoder(W2lDecoder):
def __init__(self, args, tgt_dict):
super().__init__(args, tgt_dict)
self.silence = (
tgt_dict.index("<ctc_blank>")
if "<ctc_blank>" in tgt_dict.indices
else tgt_dict.bos()
)
self.lexicon = load_words(args.lexicon)
self.word_dict = create_word_dict(self.lexicon)
self.unk_word = self.word_dict.get_index("<unk>")
self.lm = KenLM(args.kenlm_model, self.word_dict)
self.trie = Trie(self.vocab_size, self.silence)
start_state = self.lm.start(False)
for i, (word, spellings) in enumerate(self.lexicon.items()):
word_idx = self.word_dict.get_index(word)
_, score = self.lm.score(start_state, word_idx)
for spelling in spellings:
spelling_idxs = [tgt_dict.index(token) for token in spelling]
assert (
tgt_dict.unk() not in spelling_idxs
), f"{spelling} {spelling_idxs}"
self.trie.insert(spelling_idxs, word_idx, score)
self.trie.smear(SmearingMode.MAX)
self.decoder_opts = DecoderOptions(
args.beam,
int(getattr(args, "beam_size_token", len(tgt_dict))),
args.beam_threshold,
args.lm_weight,
args.word_score,
args.unk_weight,
args.sil_weight,
0,
False,
self.criterion_type,
)
if self.asg_transitions is None:
N = 768
# self.asg_transitions = torch.FloatTensor(N, N).zero_()
self.asg_transitions = []
self.decoder = LexiconDecoder(
self.decoder_opts,
self.trie,
self.lm,
self.silence,
self.blank,
self.unk_word,
self.asg_transitions,
False,
)
def decode(self, emissions):
B, T, N = emissions.size()
hypos = []
for b in range(B):
emissions_ptr = emissions.data_ptr() + 4 * b * emissions.stride(0)
results = self.decoder.decode(emissions_ptr, T, N)
nbest_results = results[: self.nbest]
hypos.append(
[
{
"tokens": self.get_tokens(result.tokens),
"score": result.score,
"words": [
self.word_dict.get_entry(x) for x in result.words if x >= 0
],
}
for result in nbest_results
]
)
return hypos
FairseqLMState = namedtuple("FairseqLMState", ["prefix", "incremental_state", "probs"])
class FairseqLM(LM):
def __init__(self, dictionary, model):
LM.__init__(self)
self.dictionary = dictionary
self.model = model
self.unk = self.dictionary.unk()
self.save_incremental = False # this currently does not work properly
self.max_cache = 20_000
model.cuda()
model.eval()
model.make_generation_fast_()
self.states = {}
self.stateq = deque()
def start(self, start_with_nothing):
state = LMState()
prefix = torch.LongTensor([[self.dictionary.eos()]])
incremental_state = {} if self.save_incremental else None
with torch.no_grad():
res = self.model(prefix.cuda(), incremental_state=incremental_state)
probs = self.model.get_normalized_probs(res, log_probs=True, sample=None)
if incremental_state is not None:
incremental_state = apply_to_sample(lambda x: x.cpu(), incremental_state)
self.states[state] = FairseqLMState(
prefix.numpy(), incremental_state, probs[0, -1].cpu().numpy()
)
self.stateq.append(state)
return state
def score(self, state: LMState, token_index: int, no_cache: bool = False):
"""
Evaluate language model based on the current lm state and new word
Parameters:
-----------
state: current lm state
token_index: index of the word
(can be lexicon index then you should store inside LM the
mapping between indices of lexicon and lm, or lm index of a word)
Returns:
--------
(LMState, float): pair of (new state, score for the current word)
"""
curr_state = self.states[state]
def trim_cache(targ_size):
while len(self.stateq) > targ_size:
rem_k = self.stateq.popleft()
rem_st = self.states[rem_k]
rem_st = FairseqLMState(rem_st.prefix, None, None)
self.states[rem_k] = rem_st
if curr_state.probs is None:
new_incremental_state = (
curr_state.incremental_state.copy()
if curr_state.incremental_state is not None
else None
)
with torch.no_grad():
if new_incremental_state is not None:
new_incremental_state = apply_to_sample(
lambda x: x.cuda(), new_incremental_state
)
elif self.save_incremental:
new_incremental_state = {}
res = self.model(
torch.from_numpy(curr_state.prefix).cuda(),
incremental_state=new_incremental_state,
)
probs = self.model.get_normalized_probs(
res, log_probs=True, sample=None
)
if new_incremental_state is not None:
new_incremental_state = apply_to_sample(
lambda x: x.cpu(), new_incremental_state
)
curr_state = FairseqLMState(
curr_state.prefix, new_incremental_state, probs[0, -1].cpu().numpy()
)
if not no_cache:
self.states[state] = curr_state
self.stateq.append(state)
score = curr_state.probs[token_index].item()
trim_cache(self.max_cache)
outstate = state.child(token_index)
if outstate not in self.states and not no_cache:
prefix = np.concatenate(
[curr_state.prefix, torch.LongTensor([[token_index]])], -1
)
incr_state = curr_state.incremental_state
self.states[outstate] = FairseqLMState(prefix, incr_state, None)
if token_index == self.unk:
score = float("-inf")
return outstate, score
def finish(self, state: LMState):
"""
Evaluate eos for language model based on the current lm state
Returns:
--------
(LMState, float): pair of (new state, score for the current word)
"""
return self.score(state, self.dictionary.eos())
def empty_cache(self):
self.states = {}
self.stateq = deque()
gc.collect()
class W2lFairseqLMDecoder(W2lDecoder):
def __init__(self, args, tgt_dict):
super().__init__(args, tgt_dict)
self.silence = tgt_dict.bos()
self.unit_lm = getattr(args, "unit_lm", False)
self.lexicon = load_words(args.lexicon) if args.lexicon else None
self.idx_to_wrd = {}
checkpoint = torch.load(args.kenlm_model, map_location="cpu")
lm_args = checkpoint["args"]
lm_args.data = osp.dirname(args.kenlm_model)
print(lm_args)
task = tasks.setup_task(lm_args)
model = task.build_model(lm_args)
model.load_state_dict(checkpoint["model"], strict=False)
self.trie = Trie(self.vocab_size, self.silence)
self.word_dict = task.dictionary
self.unk_word = self.word_dict.unk()
self.lm = FairseqLM(self.word_dict, model)
self.decoder_opts = DecoderOptions(
args.beam,
int(getattr(args, "beam_size_token", len(tgt_dict))),
args.beam_threshold,
args.lm_weight,
args.word_score,
args.unk_weight,
args.sil_weight,
0,
False,
self.criterion_type,
)
if self.lexicon:
start_state = self.lm.start(False)
for i, (word, spellings) in enumerate(self.lexicon.items()):
if self.unit_lm:
word_idx = i
self.idx_to_wrd[i] = word
score = 0
else:
word_idx = self.word_dict.index(word)
_, score = self.lm.score(start_state, word_idx, no_cache=True)
for spelling in spellings:
spelling_idxs = [tgt_dict.index(token) for token in spelling]
assert (
tgt_dict.unk() not in spelling_idxs
), f"{spelling} {spelling_idxs}"
self.trie.insert(spelling_idxs, word_idx, score)
self.trie.smear(SmearingMode.MAX)
self.decoder = LexiconDecoder(
self.decoder_opts,
self.trie,
self.lm,
self.silence,
self.blank,
self.unk_word,
[],
self.unit_lm,
)
else:
self.decoder = LexiconFreeDecoder(
self.decoder_opts, self.lm, self.silence, self.blank, []
)
def decode(self, emissions):
B, T, N = emissions.size()
hypos = []
def idx_to_word(idx):
if self.unit_lm:
return self.idx_to_wrd[idx]
else:
return self.word_dict[idx]
def make_hypo(result):
hypo = {"tokens": self.get_tokens(result.tokens), "score": result.score}
if self.lexicon:
hypo["words"] = [idx_to_word(x) for x in result.words if x >= 0]
return hypo
for b in range(B):
emissions_ptr = emissions.data_ptr() + 4 * b * emissions.stride(0)
results = self.decoder.decode(emissions_ptr, T, N)
nbest_results = results[: self.nbest]
hypos.append([make_hypo(result) for result in nbest_results])
self.lm.empty_cache()
return hypos
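# Illustrative sketch (added; the argument values are hypothetical and the
# wav2letter python bindings must be installed): constructing a Viterbi decoder
# and decoding a batch produced by a fairseq task.
#
#   from argparse import Namespace
#   args = Namespace(nbest=1, criterion="ctc")
#   decoder = W2lViterbiDecoder(args, tgt_dict)      # tgt_dict: fairseq Dictionary
#   hypos = decoder.generate(models, sample)         # [[{"tokens": ..., "score": 0}], ...]
#   text = tgt_dict.string(hypos[0][0]["tokens"])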
|
py | b40b60deaeb9c4a8834c1f180946c371e27883e7 | from django.apps import AppConfig
# from django.conf import settings
class DataSnifferConfig(AppConfig):
name = 'data_sniffer'
verbose_name = "Django Data Sniffer"
|
py | b40b6207c1c9f2e3391605522f16d0ccc18f5762 | """
Django-Select2 Widgets.
These components are responsible for rendering
the necessary HTML data markups. Since this whole
package is about rendering choices using the Select2 JavaScript
library, these components are meant to be used
with choice fields.
Widgets are generally of two types:
1. **Light** --
They are not meant to be used when there
are too many options, say, in thousands.
This is because all those options would
have to be pre-rendered onto the page
and JavaScript would be used to search
        through them. That said, they are also among
        the easiest to use. They are a
        drop-in replacement for Django's default
        select widgets.
2(a). **Heavy** --
        They are suited for scenarios where the number of options
        is large and complex queries (possibly against different
        sources) are needed to get the options.
This dynamic fetching of options undoubtedly requires
Ajax communication with the server. Django-Select2 includes
a helper JS file which is included automatically,
        so you need not worry about writing any Ajax-related JS code.
        On the server side, however, you do need to create a view
        specifically to respond to the queries.
2(b). **Model** --
        Model widgets are further specialized versions of the Heavy widgets.
These do not require views to serve Ajax requests.
When they are instantiated, they register themselves
with one central view which handles Ajax requests for them.
Heavy and Model widgets carry the words 'Heavy' and 'Model' in their names,
respectively. Light widgets are named normally, i.e. there is no 'Light'
in their names.
.. inheritance-diagram:: django_select2.forms
:parts: 1
"""
import operator
import uuid
from functools import reduce
from itertools import chain
from pickle import PicklingError # nosec
import django
from django import forms
from django.contrib.admin.widgets import SELECT2_TRANSLATIONS
from django.core import signing
from django.db.models import Q
from django.forms.models import ModelChoiceIterator
from django.urls import reverse
from django.utils.translation import get_language
from .cache import cache
from .conf import settings
if django.VERSION < (4, 0):
from django.contrib.admin.utils import lookup_needs_distinct as lookup_spawns_duplicates
else:
from django.contrib.admin.utils import lookup_spawns_duplicates
class Select2Mixin:
"""
The base mixin of all Select2 widgets.
This mixin is responsible for rendering the necessary
data attributes for select2 as well as adding the static
form media.
"""
empty_label = ""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.i18n_name = SELECT2_TRANSLATIONS.get(get_language())
def build_attrs(self, base_attrs, extra_attrs=None):
"""Add select2 data attributes."""
default_attrs = {
"lang": self.i18n_name,
"data-minimum-input-length": 0,
"data-theme": settings.SELECT2_THEME,
}
if self.is_required:
default_attrs["data-allow-clear"] = "false"
else:
default_attrs["data-allow-clear"] = "true"
default_attrs["data-placeholder"] = self.empty_label or ""
default_attrs.update(base_attrs)
attrs = super().build_attrs(default_attrs, extra_attrs=extra_attrs)
if "class" in attrs:
attrs["class"] += " django-select2"
else:
attrs["class"] = "django-select2"
return attrs
def optgroups(self, name, value, attrs=None):
"""Add empty option for clearable selects."""
if not self.is_required and not self.allow_multiple_selected:
self.choices = list(chain([("", "")], self.choices))
return super().optgroups(name, value, attrs=attrs)
@property
def media(self):
"""
Construct Media as a dynamic property.
.. Note:: For more information visit
https://docs.djangoproject.com/en/stable/topics/forms/media/#media-as-a-dynamic-property
"""
select2_js = [settings.SELECT2_JS] if settings.SELECT2_JS else []
select2_css = settings.SELECT2_CSS if settings.SELECT2_CSS else []
if isinstance(select2_css, str):
select2_css = [select2_css]
i18n_file = []
if self.i18n_name in settings.SELECT2_I18N_AVAILABLE_LANGUAGES:
i18n_file = [f"{settings.SELECT2_I18N_PATH}/{self.i18n_name}.js"]
return forms.Media(
js=select2_js + i18n_file + ["django_select2/django_select2.js"],
css={"screen": select2_css + ["django_select2/django_select2.css"]},
)
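# Illustrative sketch (added; the static paths are hypothetical): the media
# property above builds its JS/CSS lists from these settings, so they can be
# overridden in a project's settings.py, e.g.
#
#   SELECT2_JS = "vendor/select2/select2.min.js"
#   SELECT2_CSS = ["vendor/select2/select2.min.css"]
#   SELECT2_I18N_PATH = "vendor/select2/i18n"
#   SELECT2_THEME = "classic"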
class Select2TagMixin:
"""Mixin to add select2 tag functionality."""
def build_attrs(self, base_attrs, extra_attrs=None):
"""Add select2's tag attributes."""
default_attrs = {
"data-minimum-input-length": 1,
"data-tags": "true",
"data-token-separators": '[",", " "]',
}
default_attrs.update(base_attrs)
return super().build_attrs(default_attrs, extra_attrs=extra_attrs)
class Select2Widget(Select2Mixin, forms.Select):
"""
Select2 drop in widget.
Example usage::
class MyModelForm(forms.ModelForm):
class Meta:
model = MyModel
fields = ('my_field', )
widgets = {
'my_field': Select2Widget
}
or::
class MyForm(forms.Form):
my_choice = forms.ChoiceField(widget=Select2Widget)
"""
class Select2MultipleWidget(Select2Mixin, forms.SelectMultiple):
"""
Select2 drop in widget for multiple select.
Works just like :class:`.Select2Widget` but for multi select.
"""
class Select2TagWidget(Select2TagMixin, Select2Mixin, forms.SelectMultiple):
"""
    Select2 drop-in widget for tagging.
Example for :class:`.django.contrib.postgres.fields.ArrayField`::
class MyWidget(Select2TagWidget):
def value_from_datadict(self, data, files, name):
values = super().value_from_datadict(data, files, name)
return ",".join(values)
def optgroups(self, name, value, attrs=None):
values = value[0].split(',') if value[0] else []
selected = set(values)
subgroup = [self.create_option(name, v, v, selected, i) for i, v in enumerate(values)]
return [(None, subgroup, 0)]
"""
class HeavySelect2Mixin:
"""Mixin that adds select2's AJAX options and registers itself on Django's cache."""
dependent_fields = {}
def __init__(self, attrs=None, choices=(), **kwargs):
"""
Return HeavySelect2Mixin.
Args:
data_view (str): URL pattern name
data_url (str): URL
dependent_fields (dict): Dictionary of dependent parent fields.
                The value of the dependent field will be passed to :func:`.filter_queryset`.
It can be used to further restrict the search results. For example, a city
widget could be dependent on a country.
Key is a name of a field in a form.
Value is a name of a field in a model (used in `queryset`).
"""
super().__init__(attrs, choices)
self.uuid = str(uuid.uuid4())
self.field_id = signing.dumps(self.uuid)
self.data_view = kwargs.pop("data_view", None)
self.data_url = kwargs.pop("data_url", None)
dependent_fields = kwargs.pop("dependent_fields", None)
if dependent_fields is not None:
self.dependent_fields = dict(dependent_fields)
if not (self.data_view or self.data_url):
            raise ValueError('You must either specify "data_view" or "data_url".')
self.userGetValTextFuncName = kwargs.pop("userGetValTextFuncName", "null")
def get_url(self):
"""Return URL from instance or by reversing :attr:`.data_view`."""
if self.data_url:
return self.data_url
return reverse(self.data_view)
def build_attrs(self, base_attrs, extra_attrs=None):
"""Set select2's AJAX attributes."""
default_attrs = {
"data-ajax--url": self.get_url(),
"data-ajax--cache": "true",
"data-ajax--type": "GET",
"data-minimum-input-length": 2,
}
if self.dependent_fields:
default_attrs["data-select2-dependent-fields"] = " ".join(
self.dependent_fields
)
default_attrs.update(base_attrs)
attrs = super().build_attrs(default_attrs, extra_attrs=extra_attrs)
attrs["data-field_id"] = self.field_id
attrs["class"] += " django-select2-heavy"
return attrs
def render(self, *args, **kwargs):
"""Render widget and register it in Django's cache."""
output = super().render(*args, **kwargs)
self.set_to_cache()
return output
def _get_cache_key(self):
return "%s%s" % (settings.SELECT2_CACHE_PREFIX, self.uuid)
def set_to_cache(self):
"""
Add widget object to Django's cache.
You may need to overwrite this method, to pickle all information
that is required to serve your JSON response view.
"""
try:
cache.set(self._get_cache_key(), {"widget": self, "url": self.get_url()})
except (PicklingError, AttributeError):
msg = 'You need to overwrite "set_to_cache" or ensure that %s is serialisable.'
raise NotImplementedError(msg % self.__class__.__name__)
class HeavySelect2Widget(HeavySelect2Mixin, Select2Widget):
"""
Select2 widget with AJAX support that registers itself to Django's Cache.
Usage example::
class MyWidget(HeavySelect2Widget):
data_view = 'my_view_name'
or::
class MyForm(forms.Form):
my_field = forms.ChoiceField(
widget=HeavySelect2Widget(
data_url='/url/to/json/response'
)
)
"""
class HeavySelect2MultipleWidget(HeavySelect2Mixin, Select2MultipleWidget):
"""Select2 multi select widget similar to :class:`.HeavySelect2Widget`."""
class HeavySelect2TagWidget(HeavySelect2Mixin, Select2TagWidget):
"""Select2 tag widget."""
# Auto Heavy widgets
class ModelSelect2Mixin:
"""Widget mixin that provides attributes and methods for :class:`.AutoResponseView`."""
model = None
queryset = None
search_fields = []
"""
Model lookups that are used to filter the QuerySet.
Example::
search_fields = [
'title__icontains',
]
"""
max_results = 25
"""Maximal results returned by :class:`.AutoResponseView`."""
@property
def empty_label(self):
if isinstance(self.choices, ModelChoiceIterator):
return self.choices.field.empty_label
return ""
def __init__(self, *args, **kwargs):
"""
Overwrite class parameters if passed as keyword arguments.
Args:
model (django.db.models.Model): Model to select choices from.
queryset (django.db.models.query.QuerySet): QuerySet to select choices from.
search_fields (list): List of model lookup strings.
max_results (int): Max. JsonResponse view page size.
"""
self.model = kwargs.pop("model", self.model)
self.queryset = kwargs.pop("queryset", self.queryset)
self.search_fields = kwargs.pop("search_fields", self.search_fields)
self.max_results = kwargs.pop("max_results", self.max_results)
defaults = {"data_view": "django_select2:auto-json"}
defaults.update(kwargs)
super().__init__(*args, **defaults)
def set_to_cache(self):
"""
Add widget's attributes to Django's cache.
Split the QuerySet, to not pickle the result set.
"""
queryset = self.get_queryset()
cache.set(
self._get_cache_key(),
{
"queryset": [queryset.none(), queryset.query],
"cls": self.__class__,
"search_fields": tuple(self.search_fields),
"max_results": int(self.max_results),
"url": str(self.get_url()),
"dependent_fields": dict(self.dependent_fields),
},
)
def filter_queryset(self, request, term, queryset=None, **dependent_fields):
"""
Return QuerySet filtered by search_fields matching the passed term.
Args:
request (django.http.request.HttpRequest): The request is being passed from
the JSON view and can be used to dynamically alter the response queryset.
term (str): Search term
queryset (django.db.models.query.QuerySet): QuerySet to select choices from.
**dependent_fields: Dependent fields and their values. If you want to inherit
from ModelSelect2Mixin and later call to this method, be sure to pop
everything from keyword arguments that is not a dependent field.
Returns:
QuerySet: Filtered QuerySet
"""
if queryset is None:
queryset = self.get_queryset()
search_fields = self.get_search_fields()
select = Q()
use_distinct = False
if search_fields and term:
for bit in term.split():
or_queries = [Q(**{orm_lookup: bit}) for orm_lookup in search_fields]
select &= reduce(operator.or_, or_queries)
or_queries = [Q(**{orm_lookup: term}) for orm_lookup in search_fields]
select |= reduce(operator.or_, or_queries)
use_distinct |= any(
lookup_spawns_duplicates(queryset.model._meta, search_spec)
for search_spec in search_fields
)
if dependent_fields:
select &= Q(**dependent_fields)
use_distinct |= any(
lookup_spawns_duplicates(queryset.model._meta, search_spec)
for search_spec in dependent_fields.keys()
)
if use_distinct:
return queryset.filter(select).distinct()
return queryset.filter(select)
def get_queryset(self):
"""
Return QuerySet based on :attr:`.queryset` or :attr:`.model`.
Returns:
QuerySet: QuerySet of available choices.
"""
if self.queryset is not None:
queryset = self.queryset
elif hasattr(self.choices, "queryset"):
queryset = self.choices.queryset
elif self.model is not None:
queryset = self.model._default_manager.all()
else:
raise NotImplementedError(
"%(cls)s is missing a QuerySet. Define "
"%(cls)s.model, %(cls)s.queryset, or override "
"%(cls)s.get_queryset()." % {"cls": self.__class__.__name__}
)
return queryset
def get_search_fields(self):
"""Return list of lookup names."""
if self.search_fields:
return self.search_fields
raise NotImplementedError(
'%s, must implement "search_fields".' % self.__class__.__name__
)
def optgroups(self, name, value, attrs=None):
"""Return only selected options and set QuerySet from `ModelChoicesIterator`."""
default = (None, [], 0)
groups = [default]
has_selected = False
selected_choices = {str(v) for v in value}
if not self.is_required and not self.allow_multiple_selected:
default[1].append(self.create_option(name, "", "", False, 0))
if not isinstance(self.choices, ModelChoiceIterator):
return super().optgroups(name, value, attrs=attrs)
selected_choices = {
c for c in selected_choices if c not in self.choices.field.empty_values
}
field_name = self.choices.field.to_field_name or "pk"
query = Q(**{"%s__in" % field_name: selected_choices})
for obj in self.choices.queryset.filter(query):
option_value = self.choices.choice(obj)[0]
option_label = self.label_from_instance(obj)
selected = str(option_value) in value and (
has_selected is False or self.allow_multiple_selected
)
if selected is True and has_selected is False:
has_selected = True
index = len(default[1])
subgroup = default[1]
subgroup.append(
self.create_option(
name, option_value, option_label, selected_choices, index
)
)
return groups
def label_from_instance(self, obj):
"""
Return option label representation from instance.
Can be overridden to change the representation of each choice.
Example usage::
class MyWidget(ModelSelect2Widget):
                def label_from_instance(self, obj):
return str(obj.title).upper()
Args:
obj (django.db.models.Model): Instance of Django Model.
Returns:
str: Option label.
"""
return str(obj)
class ModelSelect2Widget(ModelSelect2Mixin, HeavySelect2Widget):
"""
Select2 drop in model select widget.
Example usage::
class MyWidget(ModelSelect2Widget):
search_fields = [
'title__icontains',
]
class MyModelForm(forms.ModelForm):
class Meta:
model = MyModel
fields = ('my_field', )
widgets = {
'my_field': MyWidget,
}
or::
class MyForm(forms.Form):
my_choice = forms.ChoiceField(
widget=ModelSelect2Widget(
model=MyOtherModel,
search_fields=['title__icontains']
)
)
    .. tip:: The ModelSelect2(Multiple)Widget will try
        to get the QuerySet from the field's choices.
        Therefore you don't need to define a QuerySet
        if you just drop in the widget for a ForeignKey field.
"""
class ModelSelect2MultipleWidget(ModelSelect2Mixin, HeavySelect2MultipleWidget):
"""
Select2 drop in model multiple select widget.
Works just like :class:`.ModelSelect2Widget` but for multi select.
"""
class ModelSelect2TagWidget(ModelSelect2Mixin, HeavySelect2TagWidget):
"""
Select2 model widget with tag support.
    This is not a simple drop-in widget.
    It requires you to implement your own :func:`.value_from_datadict`
    that adds missing tags to your QuerySet.
Example::
class MyModelSelect2TagWidget(ModelSelect2TagWidget):
queryset = MyModel.objects.all()
def value_from_datadict(self, data, files, name):
                '''Create objects for given non-primary-key values. Return list of all primary keys.'''
values = set(super().value_from_datadict(data, files, name))
# This may only work for MyModel, if MyModel has title field.
# You need to implement this method yourself, to ensure proper object creation.
pks = self.queryset.filter(**{'pk__in': list(values)}).values_list('pk', flat=True)
pks = set(map(str, pks))
cleaned_values = list(values)
for val in values - pks:
cleaned_values.append(self.queryset.create(title=val).pk)
return cleaned_values
"""
|
py | b40b622b8f1da503b06510eb68e53c23ddc7d06f | # pylint: disable=g-bad-file-header
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Embedding functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.contrib.layers.python.ops import sparse_feature_cross_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
__all__ = ["safe_embedding_lookup_sparse", "hashed_embedding_lookup",
"hashed_embedding_lookup_sparse"]
def safe_embedding_lookup_sparse(embedding_weights,
sparse_ids,
sparse_weights=None,
combiner="mean",
default_id=None,
name=None,
partition_strategy="div"):
"""Lookup embedding results, accounting for invalid IDs and empty features.
The partitioned embedding in `embedding_weights` must all be the same shape
except for the first dimension. The first dimension is allowed to vary as the
vocabulary size is not necessarily a multiple of `P`.
Invalid IDs (< 0) are pruned from input IDs and weights, as well as any IDs
with non-positive weight. For an entry with no features, the embedding vector
for `default_id` is returned, or the 0-vector if `default_id` is not supplied.
The ids and weights may be multi-dimensional. Embeddings are always aggregated
along the last dimension.
Args:
embedding_weights: A list of `P` float tensors or values representing
partitioned embedding tensors. The total unpartitioned shape should be
`[e_0, e_1, ..., e_m]`, where `e_0` represents the vocab size and
`e_1, ..., e_m` are the embedding dimensions.
sparse_ids: `SparseTensor` of shape `[d_0, d_1, ..., d_n]` containing the
ids. `d_0` is typically batch size.
sparse_weights: `SparseTensor` of same shape as `sparse_ids`, containing
float weights corresponding to `sparse_ids`, or `None` if all weights
      are assumed to be 1.0.
combiner: A string specifying how to combine embedding results for each
entry. Currently "mean", "sqrtn" and "sum" are supported, with "mean"
the default.
default_id: The id to use for an entry with no features.
name: A name for this operation (optional).
partition_strategy: A string specifying the partitioning strategy.
Currently `"div"` and `"mod"` are supported. Default is `"div"`.
Returns:
Dense tensor of shape `[d_0, d_1, ..., d_{n-1}, e_1, ..., e_m]`.
Raises:
ValueError: if `embedding_weights` is empty.
"""
if embedding_weights is None or len(embedding_weights) < 1:
raise ValueError("Missing embedding_weights %s." % embedding_weights)
dtype = sparse_weights.dtype if sparse_weights is not None else None
embedding_weights = [
ops.convert_to_tensor(w, dtype=dtype) for w in embedding_weights
]
contrib_tensor_util.assert_same_float_dtype(embedding_weights +
[sparse_weights])
with ops.op_scope(embedding_weights + [sparse_ids, sparse_weights], name,
"embedding_lookup") as scope:
# Reshape higher-rank sparse ids and weights to linear segment ids.
original_shape = sparse_ids.shape
original_rank_dim = sparse_ids.shape.get_shape()[0]
original_rank = (
array_ops.size(original_shape)
if original_rank_dim.value is None
else original_rank_dim.value)
sparse_ids = sparse_ops.sparse_reshape(sparse_ids, [
math_ops.reduce_prod(
array_ops.slice(original_shape, [0], [original_rank - 1])),
array_ops.gather(original_shape, original_rank - 1)])
if sparse_weights is not None:
sparse_weights = ops.SparseTensor(sparse_ids.indices,
sparse_weights.values, sparse_ids.shape)
# Prune invalid ids and weights.
sparse_ids, sparse_weights = _prune_invalid_ids(sparse_ids, sparse_weights)
# Fill in dummy values for empty features, if necessary.
sparse_ids, is_row_empty = sparse_ops.sparse_fill_empty_rows(sparse_ids,
default_id or
0)
if sparse_weights is not None:
sparse_weights, _ = sparse_ops.sparse_fill_empty_rows(sparse_weights, 1.0)
result = embedding_ops.embedding_lookup_sparse(
embedding_weights,
sparse_ids,
sparse_weights,
combiner=combiner,
partition_strategy=partition_strategy,
name=None if default_id is None else scope)
if default_id is None:
# Broadcast is_row_empty to the same shape as embedding_lookup_result,
# for use in Select.
is_row_empty = array_ops.tile(
array_ops.reshape(is_row_empty, [-1, 1]),
array_ops.pack([1, array_ops.shape(result)[1]]))
result = math_ops.select(is_row_empty,
array_ops.zeros_like(result),
result,
name=scope)
# Reshape back from linear ids back into higher-dimensional dense result.
final_result = array_ops.reshape(result, array_ops.concat(0, [
array_ops.slice(
math_ops.cast(original_shape, dtypes.int32),
[0], [original_rank - 1]),
array_ops.slice(array_ops.shape(result), [1], [-1])]))
final_result.set_shape(tensor_shape.unknown_shape(
(original_rank_dim - 1).value).concatenate(result.get_shape()[1:]))
return final_result
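# Illustrative sketch (added; assumes the legacy graph-mode TF API this file
# targets, with `tf` imported as tensorflow): embedding lookup for a ragged id
# batch that contains an invalid id and an empty row.
#
#   weights = [tf.Variable(tf.random_normal([5, 3]))]          # vocab 5, dim 3
#   ids = tf.SparseTensor(indices=[[0, 0], [0, 1], [1, 0]],
#                         values=[2, -1, 4], dense_shape=[3, 2])
#   emb = safe_embedding_lookup_sparse(weights, ids, combiner="mean")
#   # row 0 uses only id 2 (the -1 is pruned); row 2 has no ids -> zero vector.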
def _prune_invalid_ids(sparse_ids, sparse_weights):
"""Prune invalid IDs (< 0) from the input ids and weights."""
is_id_valid = math_ops.greater_equal(sparse_ids.values, 0)
if sparse_weights is not None:
is_id_valid = math_ops.logical_and(
is_id_valid, math_ops.greater(sparse_weights.values, 0))
sparse_ids = sparse_ops.sparse_retain(sparse_ids, is_id_valid)
if sparse_weights is not None:
sparse_weights = sparse_ops.sparse_retain(sparse_weights, is_id_valid)
return sparse_ids, sparse_weights
def hashed_embedding_lookup(params, values, dimension, name=None):
"""Looks up embeddings using parameter hashing for each value in `values`.
The i-th embedding component of a value v in `values` is found by retrieving
the weight whose index is a fingerprint of the pair (v,i).
The concept is explored as "feature hashing" for model compression in this
paper: http://arxiv.org/pdf/1504.04788.pdf
Feature hashing has the pleasant effect of allowing us to compute an embedding
without needing a pre-determined vocabulary, relieving some amount of process
complexity. It also allows for us to maintain embeddings for possibly
trillions of features with a fixed amount of memory.
Note that this is superior to out-of-vocabulary shared "hash buckets" in that
the embedding is extremely likely to be unique for each token as opposed to
being shared across probably-colliding tokens. The price is that we must
compute a hash once for each scalar in the token's embedding as opposed to
once per token.
If `params` is a list, it represents a partition of the embedding parameters.
Each tensor in the list should have the same length, except for the first ones
which may have an additional element. For instance 10 parameters can be
partitioned in 4 tensors with length `[3, 3, 2, 2]`.
Args:
params: A `Tensor` or `list` of `Tensors`.
Each tensor must be of rank 1 with fully-defined shape.
values: `Tensor` of values to be embedded.
dimension: Embedding dimension
name: An optional name for this op.
Returns:
A tensor with shape [d0, ..., dn, dimension]
with shape(values) = [d0, ..., dn]
Raises:
ValueError: if dimension is not positive or the partition size is invalid.
"""
if not isinstance(params, list):
params = [params]
with ops.op_scope(params + [dimension, values], name,
"hashed_embedding_lookup"):
if dimension <= 0:
raise ValueError("Dimension should be >0 not %d" % dimension)
num_partitions = len(params)
partition_sizes = []
for p in range(num_partitions):
shape = params[p].get_shape()
shape.assert_has_rank(1)
shape.assert_is_fully_defined()
partition_sizes.append(shape[0].value)
num_params = sum(partition_sizes) # Total number of parameters.
# Assert the size of each partition.
for p in range(num_partitions):
expected_size = (num_params - p - 1) // num_partitions + 1
if partition_sizes[p] != expected_size:
raise ValueError("Tensor %d in params has size %d, expected %d." %
(p, partition_sizes[p], expected_size))
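    # (Added worked example) With 10 parameters split across 4 partitions, the
    # expected sizes from (num_params - p - 1) // num_partitions + 1 are
    # [3, 3, 2, 2] for p = 0..3, matching the docstring above.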
# Flatten the values
values_shape = array_ops.shape(values)
values = array_ops.reshape(values, [-1, 1])
# With two values v1 and v2 and 3 dimensions, we will cross
# [[0, 1, 2], [0, 1, 2]] with [[v1], [v2]].
tensors_to_cross = [array_ops.tile(array_ops.expand_dims(
math_ops.range(0, dimension), 0), array_ops.shape(values)), values]
ids = sparse_feature_cross_op.sparse_feature_cross(
tensors_to_cross, hashed_output=True, num_buckets=num_params)
ids = sparse_ops.sparse_tensor_to_dense(ids)
# No need to validate the indices since we have checked the params
# dimensions and we know the largest id.
result = embedding_ops.embedding_lookup(
params, ids, partition_strategy="div", validate_indices=False)
return array_ops.reshape(result, array_ops.concat(
0, [values_shape, [dimension]]))
def hashed_embedding_lookup_sparse(params,
sparse_values,
dimension,
combiner="mean",
default_value=None,
name=None):
"""Looks up embeddings of a sparse feature using parameter hashing.
See `tf.contrib.layers.hashed_embedding_lookup` for embedding with hashing.
Args:
params: A `Tensor` or `list` of `Tensors`.
Each tensor must be of rank 1 with fully-defined shape.
sparse_values: A 2-D `SparseTensor` containing the values to be embedded.
Some rows may be empty.
dimension: Embedding dimension
combiner: A string specifying how to combine embedding results for each
entry. Currently "mean", "sqrtn" and "sum" are supported, with "mean"
the default.
default_value: The value to use for an entry with no features.
name: An optional name for this op.
Returns:
Dense tensor with shape [N, dimension] with N the number of rows in
sparse_values.
Raises:
TypeError: If sparse_values is not a SparseTensor.
ValueError: If combiner is not one of {"mean", "sqrtn", "sum"}.
"""
if not isinstance(params, list):
params = [params]
if not isinstance(sparse_values, ops.SparseTensor):
raise TypeError("sparse_values must be SparseTensor")
with ops.op_scope(params + [sparse_values], name,
"hashed_sparse_embedding_lookup") as scope:
# Fill in the empty rows.
if default_value is None:
# Random default values to reduce the risk of collision.
if sparse_values.dtype == dtypes.string:
default_value = "6ZxWzWOHxZ"
else:
default_value = 1288896567
sparse_values, _ = sparse_ops.sparse_fill_empty_rows(
sparse_values, default_value)
segment_ids = sparse_values.indices[:, 0]
if segment_ids.dtype != dtypes.int32:
segment_ids = math_ops.cast(segment_ids, dtypes.int32)
values = sparse_values.values
values, idx = array_ops.unique(values)
embeddings = hashed_embedding_lookup(params, values, dimension)
if combiner == "sum":
embeddings = math_ops.sparse_segment_sum(embeddings, idx, segment_ids,
name=scope)
elif combiner == "mean":
embeddings = math_ops.sparse_segment_mean(embeddings, idx, segment_ids,
name=scope)
elif combiner == "sqrtn":
embeddings = math_ops.sparse_segment_sqrt_n(embeddings, idx, segment_ids,
name=scope)
else:
raise ValueError("Combiner must be one of 'mean', 'sqrtn' or 'sum'.")
return embeddings
|
py | b40b62fb67655254b37db6d052f04d1768b7d99a | import os
import bagel
def main():
bagel.utils.mkdirs(OUTPUT)
file_list = bagel.utils.file_list(INPUT)
for file in file_list:
kpi = bagel.utils.load_kpi(file)
print(f'KPI: {kpi.name}')
kpi.complete_timestamp()
train_kpi, valid_kpi, test_kpi = kpi.split((0.49, 0.21, 0.3))
train_kpi, mean, std = train_kpi.standardize()
valid_kpi, _, _ = valid_kpi.standardize(mean=mean, std=std)
test_kpi, _, _ = test_kpi.standardize(mean=mean, std=std)
model = bagel.Bagel()
model.fit(kpi=train_kpi.use_labels(0.), validation_kpi=valid_kpi, epochs=EPOCHS, verbose=1)
anomaly_scores = model.predict(test_kpi)
results = bagel.testing.get_test_results(labels=test_kpi.labels,
scores=anomaly_scores,
missing=test_kpi.missing)
stats = bagel.testing.get_kpi_stats(kpi, test_kpi)
print('Metrics')
print(f'precision: {results.get("precision"):.3f} - '
f'recall: {results.get("recall"):.3f} - '
f'f1score: {results.get("f1score"):.3f}\n')
with open(f'{os.path.join(OUTPUT, kpi.name)}.txt', 'w') as output:
output.write(f'kpi_name={kpi.name}\n\n'
'[result]\n'
f'threshold={results.get("threshold")}\n'
f'precision={results.get("precision"):.3f}\n'
f'recall={results.get("recall"):.3f}\n'
f'f1_score={results.get("f1score"):.3f}\n\n'
'[overall]\n'
f'num_points={stats[0].num_points}\n'
f'num_missing_points={stats[0].num_missing}\n'
f'missing_rate={stats[0].missing_rate:.6f}\n'
f'num_anomaly_points={stats[0].num_anomaly}\n'
f'anomaly_rate={stats[0].anomaly_rate:.6f}\n\n'
'[test]\n'
f'num_points={stats[1].num_points}\n'
f'num_missing_points={stats[1].num_missing}\n'
f'missing_rate={stats[1].missing_rate:.6f}\n'
f'num_anomaly_points={stats[1].num_anomaly}\n'
f'anomaly_rate={stats[1].anomaly_rate:.6f}\n')
if __name__ == '__main__':
EPOCHS = 50
INPUT = 'data'
OUTPUT = os.path.join('out', 'bagel')
main()
|
py | b40b632c1f1dc6900c7b5d4ea8ced0eae97a6e71 | #!/usr/bin/env python3
import os
import re
from setuptools import setup, find_packages
base_path = os.path.dirname(__file__)
requirements = []
with open(os.path.join(base_path, "libsocks/__init__.py")) as f:
VERSION = re.compile(r'.*__version__ = "(.*?)"', re.S).match(f.read()).group(1)
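# (Added note) The regex above expects a line such as `__version__ = "0.1.0"`
# in libsocks/__init__.py; the extracted value is passed to setup() below.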
setup(
name="libsocks",
version=VERSION,
description="A socks5/socks/http proxy client module",
long_description="https://github.com/ccssrryy/libsocks/blob/master/README.md",
long_description_content_type="text/markdown",
url="https://github.com/ccssrryy/libsocks",
license="MIT",
author="ccssrryy",
author_email="[email protected]",
keywords=["socks", "socks5", "socks4", "asyncio", "proxy"],
include_package_data=True,
packages=find_packages(include=[
"libsocks", "libsocks.*"
]),
install_requires=requirements,
python_requires=">=3.5",
classifiers=(
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
),
)
|
py | b40b63aea2d2ea014b082ad11f972199a94e58d8 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-argument
"""Generic nn operators"""
from __future__ import absolute_import as _abs
import tvm
from .. import cpp
def _default_schedule(outs, auto_inline):
"""Default schedule for llvm."""
target = tvm.target.current_target(allow_none=False)
outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs
if target.target_name not in ("llvm", "c"):
raise RuntimeError("schedule not registered for '%s'" % target)
s = tvm.create_schedule([x.op for x in outs])
if auto_inline:
x = outs[0]
tvm.schedule.AutoInlineInjective(s)
s[x].fuse(s[x].op.axis)
return s
@tvm.target.generic_func
def schedule_conv2d_nchw(outs):
"""Schedule for conv2d_nchw
Parameters
----------
outs: Array of Tensor
The computation graph description of conv2d_nchw
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.generic_func
def schedule_conv2d_nhwc_pack(outs):
"""Schedule for conv2d_nhwc_pack
Parameters
----------
outs: Array of Tensor
The computation graph description of conv2d_nhwc_pack
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.generic_func
def schedule_conv2d_nhwc(outs):
"""Schedule for conv2d_nhwc
Parameters
----------
outs: Array of Tensor
        The computation graph description of conv2d_nhwc
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.generic_func
def schedule_conv2d_NCHWc(outs):
"""Schedule for conv2d_NCHW[x]c
Parameters
----------
outs : Array of Tensor
The computation graph description of conv2d_NCHWc
in the format of an array of tensors.
Returns
-------
sch : Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.generic_func
def schedule_conv2d_winograd_weight_transform(outs):
"""Schedule for weight transformation of winograd
Parameters
----------
outs: Array of Tensor
The computation graph description of this operator
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
# Typically this is computed in nnvm PreCompute pass
# so we make a schedule here for cpu llvm
s = tvm.create_schedule([x.op for x in outs])
output = outs[0]
_, G = s[output].op.input_tensors
s[G].compute_inline()
eps, nu, co, ci = s[output].op.axis
r_kh, r_kw = s[output].op.reduce_axis
s[output].reorder(co, ci, r_kh, r_kw, eps, nu)
for axis in [r_kh, r_kw, eps, nu]:
s[output].unroll(axis)
s[output].parallel(co)
return s
@tvm.target.generic_func
def schedule_conv2d_winograd_without_weight_transform(outs):
"""Schedule for winograd without weight transformation
Parameters
----------
outs: Array of Tensor
The computation graph description of this operator
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.generic_func
def schedule_conv2d_winograd_nnpack_weight_transform(outs):
"""Schedule for weight transformation of winograd
Parameters
----------
outs: Array of Tensor
The computation graph description of this operator
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
# Typically this is computed in nnvm PreCompute pass
s = tvm.create_schedule([x.op for x in outs])
return s
@tvm.target.generic_func
def schedule_conv2d_winograd_nnpack_without_weight_transform(outs):
"""Schedule for winograd without weight transformation
Parameters
----------
outs: Array of Tensor
The computation graph description of this operator
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.generic_func
def schedule_conv2d_transpose_nchw(outs):
"""Schedule for conv2d_transpose_nchw
Parameters
----------
outs: Array of Tensor
The computation graph description of conv2d_transpose_nchw
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.generic_func
def schedule_depthwise_conv2d_nchw(outs):
"""Schedule for depthwise_conv2d_nchw
Parameters
----------
outs: Array of Tensor
The computation graph description of depthwise_conv2d_nchw
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.generic_func
def schedule_depthwise_conv2d_nhwc(outs):
"""Schedule for depthwise_conv2d_nhwc
Parameters
----------
outs: Array of Tensor
The computation graph description of depthwise_conv2d_nhwc
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.generic_func
def schedule_depthwise_conv2d_NCHWc(outs):
"""Schedule for depthwise_conv2d_NCHWc
Parameters
----------
outs: Array of Tensor
        The computation graph description of depthwise_conv2d_NCHWc
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.generic_func
def schedule_group_conv2d_nchw(outs):
"""Schedule for group_conv2d_nchw
Parameters
----------
outs: Array of Tensor
The computation graph description of group_conv2d_nchw
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.generic_func
def schedule_deformable_conv2d_nchw(outs):
"""Schedule for deformable_conv2d_nchw
Parameters
----------
outs: Array of Tensor
The computation graph description of deformable_conv2d_nchw
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.generic_func
def schedule_bitserial_conv2d_nchw(outs):
"""Schedule for bitserial_conv2d_nchw
Parameters
----------
outs: Array of Tensor
The computation graph description of bitserial_conv2d_nchw
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.generic_func
def schedule_bitserial_conv2d_nhwc(outs):
"""Schedule for bitserial_conv2d_nhwc
Parameters
----------
outs: Array of Tensor
        The computation graph description of bitserial_conv2d_nhwc
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.generic_func
def schedule_bitserial_dense(outs):
"""Schedule for bitserial_dense
Parameters
----------
outs: Array of Tensor
The computation graph description of bitserial_dense
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.override_native_generic_func("schedule_reduce")
def schedule_reduce(outs):
"""Schedule for reduction
Parameters
----------
outs: Array of Tensor
The computation graph description of reduce
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, True)
@tvm.target.override_native_generic_func("schedule_softmax")
def schedule_softmax(outs):
"""Schedule for softmax
Parameters
----------
outs: Array of Tensor
The computation graph description of softmax
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.override_native_generic_func("schedule_dense")
def schedule_dense(outs):
"""Schedule for dense
Parameters
----------
outs: Array of Tensor
The computation graph description of dense
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.override_native_generic_func("schedule_pool")
def schedule_pool(outs, layout):
"""Schedule for pool
Parameters
----------
outs: Array of Tensor
The computation graph description of pool
in the format of an array of tensors.
layout: str
Data layout.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.override_native_generic_func("schedule_adaptive_pool")
def schedule_adaptive_pool(outs):
"""Schedule for adaptive pool
Parameters
----------
outs: Array of Tensor
The computation graph description of adaptive pool
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.override_native_generic_func("schedule_binarize_pack")
def schedule_binarize_pack(outs):
"""Schedule for binarize_pack
Parameters
----------
outs: Array of Tensor
The computation graph description of binarize_pack
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.override_native_generic_func("schedule_binary_dense")
def schedule_binary_dense(outs):
"""Schedule for binary_dense
Parameters
----------
outs: Array of Tensor
The computation graph description of binary_dense
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
@tvm.target.generic_func
def schedule_lrn(outs):
"""Schedule for lrn
Parameters
----------
outs: Array of Tensor
The computation graph description of lrn
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
target = tvm.target.current_target(allow_none=False)
cpp_target = cpp.TEST_create_target(target.target_name)
return cpp.generic.default_schedule(cpp_target, outs, False)
@tvm.target.generic_func
def schedule_l2_normalize(outs):
"""Schedule for l2 normalize
Parameters
----------
outs: Array of Tensor
The computation graph description of l2 normalize
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
target = tvm.target.current_target(allow_none=False)
cpp_target = cpp.TEST_create_target(target.target_name)
return cpp.generic.default_schedule(cpp_target, outs, False)
@tvm.target.generic_func
def schedule_batch_matmul(outs):
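    """Schedule for batch_matmul
    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of batch_matmul
        in the format of an array of tensors.
    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """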
target = tvm.target.current_target(allow_none=False)
cpp_target = cpp.TEST_create_target(target.target_name)
return cpp.generic.default_schedule(cpp_target, outs, False)
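# Illustrative sketch (added; decorator usage follows the generic_func API used
# above -- the target keys and function body are assumptions): a backend can
# override one of these generic schedules for its own target, e.g.
#
#   @schedule_dense.register(["cuda", "gpu"])
#   def schedule_dense_cuda(outs):
#       s = tvm.create_schedule([x.op for x in outs])
#       # ...bind axes to CUDA threads here...
#       return s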
|
py | b40b6478e6f8ebf49b82da52a327490f749f5e27 | class Dates:
def __init__(self, date):
self._date = date
def get_date(self):
return self._date
@staticmethod
def to_dash_date(date):
return date.replace("/", "-")
def main():
date = Dates("2018-10-10") # <1>
print('date.get_date():', date.get_date()) # <2>
date_from_birthday = "2018/12/12"
date_with_dash = Dates.to_dash_date(date_from_birthday) # <3>
print('date_with_dash:', date_with_dash)
if __name__ == "__main__":
main()
|
py | b40b6489e961592a146818960a8580191027bcc0 | #!/usr/bin/env python
# Copyright 2016 Rackspace
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS
# IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import os
import sys
import setuptools
# Utility function to read the README file
def readfile(filename):
with open(filename) as f:
return f.read()
# Utility function to read requirements.txt files
def readreq(filename):
result = []
with open(filename) as f:
for line in f:
line = line.strip()
# Process requirement file references
if line.startswith('-r '):
subfilename = line.split(None, 1)[-1].split('#', 1)[0].strip()
if subfilename:
result += readreq(subfilename)
continue
# Strip out "-e" prefixes
if line.startswith('-e '):
line = line.split(None, 1)[-1]
if '/' in line:
line = line.rsplit('/', 1)[-1]
# Detect URLs in the line
idx = line.find('#egg=')
if idx >= 0:
line = line[idx + 5:]
# Strip off any comments
line = line.split('#', 1)[0].strip()
# Save the requirement
if line:
result.append(line.split('#', 1)[0].strip())
return result
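# Illustrative sketch (added; the file contents are hypothetical): given a
# requirements.txt containing
#
#   -r base.txt
#   -e git+https://example.com/repo.git#egg=mypkg
#   requests>=2.0  # pinned elsewhere
#
# readreq("requirements.txt") returns the entries parsed from base.txt followed
# by ["mypkg", "requests>=2.0"].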
# Invoke setup
setuptools.setup(
name='managed-security-test',
version='0.1.0',
author='Kevin L. Mitchell',
author_email='[email protected]',
url='https://github.com/klmitch/managed-security-test',
description='File Indexer',
long_description=readfile('README.rst'),
license='Apache License (2.0)',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
py_modules=['file_indexer'],
install_requires=readreq('requirements.txt'),
tests_require=readreq('test-requirements.txt'),
entry_points={
'console_scripts': [
'file_indexer = file_indexer:main.console',
],
},
)
|
py | b40b65a436d37944488f52a0a6baa815d1627d3c | #!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
"""
An example to show authentication using credentials defined by azure.identity library asynchronously.
EnvironmentCredential is capable of authenticating as a service principal using a client secret or a certificate, or as
a user with a username and password. Configuration is attempted in this order, using these environment variables:
Service principal with secret:
- **AZURE_TENANT_ID**: ID of the service principal's tenant. Also called its 'directory' ID.
- **AZURE_CLIENT_ID**: the service principal's client ID
- **AZURE_CLIENT_SECRET**: one of the service principal's client secrets
Service principal with certificate:
- **AZURE_TENANT_ID**: ID of the service principal's tenant. Also called its 'directory' ID.
- **AZURE_CLIENT_ID**: the service principal's client ID
- **AZURE_CLIENT_CERTIFICATE_PATH**: path to a PEM-encoded certificate file including the private key. The
certificate must not be password-protected.
User with username and password:
- **AZURE_CLIENT_ID**: the application's client ID
- **AZURE_USERNAME**: a username (usually an email address)
- **AZURE_PASSWORD**: that user's password
- **AZURE_TENANT_ID**: (optional) ID of the service principal's tenant. Also called its 'directory' ID.
If not provided, defaults to the 'organizations' tenant, which supports only Azure Active Directory work or
school accounts.
Please refer to azure.identity library for detailed information.
"""
import os
import asyncio
from azure.servicebus import Message
from azure.servicebus.aio import ServiceBusClient
from azure.identity.aio import EnvironmentCredential
FULLY_QUALIFIED_NAMESPACE = os.environ['SERVICE_BUS_NAMESPACE']
QUEUE_NAME = os.environ["SERVICE_BUS_QUEUE_NAME"]
credential = EnvironmentCredential()
# Note: One has other options to specify the credential. For instance, DefaultAzureCredential.
# Default Azure Credentials attempt a chained set of authentication methods, per documentation here: https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/identity/azure-identity
# For example user to be logged in can be specified by the environment variable AZURE_USERNAME, consumed via the ManagedIdentityCredential
# Alternately, one can specify the AZURE_TENANT_ID, AZURE_CLIENT_ID, and AZURE_CLIENT_SECRET to use the EnvironmentCredentialClass.
# The docs above specify all mechanisms which the defaultCredential internally support.
# credential = DefaultAzureCredential()
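# Added illustration (assumed shell setup, not part of the original sample): when using a
# service principal with a secret, the variables described in the module docstring are
# typically exported before running this script, e.g.
#   export AZURE_TENANT_ID="<tenant-id>"
#   export AZURE_CLIENT_ID="<client-id>"
#   export AZURE_CLIENT_SECRET="<client-secret>"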
async def run():
servicebus_client = ServiceBusClient(FULLY_QUALIFIED_NAMESPACE, credential)
async with servicebus_client:
sender = servicebus_client.get_queue_sender(queue_name=QUEUE_NAME)
async with sender:
await sender.send(Message("DATA" * 64))
loop = asyncio.get_event_loop()
loop.run_until_complete(run())
print("Send message is done.")
|
py | b40b65b90751455bc29d21464e21fdc8982aa25c | import tkinter as tk
from tkinter import *
from PIL import ImageTk, Image
import time
import pyautogui
from config.config import *
import lib.ipc_manager as ipc_manager
import winsound
import multiprocessing
def loop_overlay():
time.sleep( 2 )
root = tk.Tk()
root.wm_title("Tk")
window = tk.Toplevel(root)
window.wm_title("Visualisation")
window.configure(bg="#000000")
##window.overrideredirect(1) #Remove border
##window.attributes('-topmost', -1) # Keep on top
window.geometry('%dx%d+%d+%d' % (500, 700, 0, 0))
##window.attributes("-transparentcolor", "#FFFFFF") # Remove background
timestamp = time.time()
text = tk.Text(window, height=1, width=20, relief="flat")
text.configure(bg="#000000", fg="#FFFFFF", padx=20, pady=20, font=("Arial Black", 40, "bold"))
text.insert(INSERT, "vowel_ah")
text.place(x=150, y=0)
commandText = tk.Text(window, relief="flat", width=20, height=1)
commandText.configure(bg="#000000", fg="#FFFFFF", font=("Arial Black", 120, "bold"))
commandText.tag_add("here", "1.0", "1.1")
commandText.tag_configure("times", offset=70, font=('Arial Black', 40, "bold"))
commandText.insert(INSERT,"Q","","*10","times")
commandText.grid(row=0)
commandText.place(x=80, y=80)
holdText = tk.Text(window, relief="flat", width=20, height=1)
holdText.configure(bg="#000000", fg="#FFFFFF", font=("Arial Black", 35, "bold"))
holdText.insert(INSERT," CTRL ","CTRL"," SHIFT ","SHIFT", " ALT ", "ALT")
holdText.tag_configure("CTRL", background="#FF0000", foreground="#000000")
holdText.tag_configure("SHIFT", background="#0000FF", foreground="#000000")
holdText.tag_configure("ALT", background="#FFF000", foreground="#000000")
holdText.place(x=0, y=430)
# Draw the controller nipple
canvas_size = 85
canvas = Canvas(window, width=canvas_size, height=canvas_size, bg='#000000', highlightthickness=0)
canvas.place(x=380, y=420 - canvas_size)
canvas.create_oval(3, 3, canvas_size - 3, canvas_size - 3, outline="#333", fill="#444", width=2)
width = 40
height = 40
x = canvas_size / 2 - width / 2
y = canvas_size / 2 - height / 2
points = [[x, y + 0.5 * height], [x + 0.15 * width, y + 0.15 * height], [x + 0.5 *width, y], [x + 0.85 * width, y + 0.15 * height],
[x + width, y + 0.5 * height], [x + 0.85 * width, y + 0.85 * height], [x + 0.5 * width, y + height], [x + 0.15 * width, y + 0.85 * height], [x, y + 0.5 * height]]
canvas.create_polygon(points, fill="#111")
basePosX = canvas_size / 2
basePosY = canvas_size / 2
canvasOffsetX = basePosX
canvasOffsetY = basePosY
nipple_outer = canvas.create_oval(canvasOffsetX - 25, canvasOffsetY - 25, canvasOffsetX + 25, canvasOffsetY + 25, outline="#555", fill="#999", width=2)
nipple_middle = canvas.create_oval(canvasOffsetX - 18, canvasOffsetY - 18, canvasOffsetX + 18, canvasOffsetY + 18, outline="#555", fill="#999", width=2)
nipple_inner = canvas.create_oval(canvasOffsetX - 13, canvasOffsetY - 13, canvasOffsetX + 13, canvasOffsetY + 13, outline="#555", fill="#999", width=2)
nipple_dot = canvas.create_oval(canvasOffsetX - 1, canvasOffsetY - 1, canvasOffsetX + 1, canvasOffsetY + 1, outline="#000", fill="#000", width=1)
img = ImageTk.PhotoImage(Image.open("media/sound.png"))
panel = Label(window, image = img, borderwidth = 0)
current_overlay_status = ""
while True:
time.sleep( 0.032 )
sound = ipc_manager.getSoundName()
times = ipc_manager.getActionAmount()
command = ipc_manager.getActionName()
text.delete('1.0', END)
text.insert(INSERT, sound)
commandText.delete('1.0', END)
canvas.delete( nipple_outer )
canvas.delete( nipple_middle )
canvas.delete( nipple_inner )
canvas.delete( nipple_dot )
canvasOffsetX = basePosX
canvasOffsetY = basePosY
if ( ipc_manager.getButtonState('left') == True ):
canvasOffsetX = canvasOffsetX - canvas_size / 5
elif ( ipc_manager.getButtonState('right') == True ):
canvasOffsetX = canvasOffsetX + canvas_size / 5
if ( ipc_manager.getButtonState('up') == True ):
canvasOffsetY = canvasOffsetY - canvas_size / 5
elif ( ipc_manager.getButtonState('down') == True ):
canvasOffsetY = canvasOffsetY + canvas_size / 5
# Draw the controller nipple
nipple_outer = canvas.create_oval(canvasOffsetX - 25, canvasOffsetY - 25, canvasOffsetX + 25, canvasOffsetY + 25, outline="#555", fill="#999", width=2)
nipple_middle = canvas.create_oval(canvasOffsetX - 18, canvasOffsetY - 18, canvasOffsetX + 18, canvasOffsetY + 18, outline="#555", fill="#999", width=2)
nipple_inner = canvas.create_oval(canvasOffsetX - 13, canvasOffsetY - 13, canvasOffsetX + 13, canvasOffsetY + 13, outline="#555", fill="#999", width=2)
nipple_dot = canvas.create_oval(canvasOffsetX - 2, canvasOffsetY - 2, canvasOffsetX + 2, canvasOffsetY + 2, outline="#000", fill="#000", width=2)
if( len(command) > 3 ):
commandText.configure(font=("Arial Black", 50, "bold"))
commandText.tag_configure("times", offset=10, font=('Arial Black', 40, "bold"))
commandText.place(x=120, y=230)
if( len(command) > 8 ):
commandText.place(x=40, y=230)
else:
commandText.configure(font=("Arial Black", 120, "bold"))
commandText.tag_configure("times", offset=70, font=('Arial Black', 40, "bold"))
commandText.place(x=160, y=150)
if (ipc_manager.getButtonState('ctrl') == True):
holdText.tag_configure("CTRL", background="#FF0000", foreground="#000000")
else:
holdText.tag_configure("CTRL", background="#000000", foreground="#000000")
if (ipc_manager.getButtonState('shift') == True):
holdText.tag_configure("SHIFT", background="#0000FF", foreground="#000000")
else:
holdText.tag_configure("SHIFT", background="#000000", foreground="#000000")
if (ipc_manager.getButtonState('alt') == True):
holdText.tag_configure("ALT", background="#FFF000", foreground="#000000")
else:
holdText.tag_configure("ALT", background="#000000", foreground="#000000")
if (times != "1"):
commandText.insert(INSERT,command,"", "*" + str(times),"times")
else:
commandText.insert(INSERT,command)
panel.place(x=80, y=20)
window.update_idletasks()
window.update()
loop_overlay()
#root = tk.Tk()
#l=Text(root)
#l.tag_configure("s", offset=5)
#l.insert(INSERT,"X","","2","s")
#l.grid(row=0)
#root.mainloop()
|
py | b40b661358651e721041ed4e09621aab434a1258 | from __future__ import absolute_import
from sentry.api.serializers import Serializer, register
from sentry.models import SentryApp
@register(SentryApp)
class SentryAppSerializer(Serializer):
def serialize(self, obj, attrs, user):
return {
'name': obj.name,
'slug': obj.slug,
'scopes': obj.get_scopes(),
'status': obj.get_status_display(),
'uuid': obj.uuid,
'webhookUrl': obj.webhook_url,
'redirectUrl': obj.redirect_url,
'clientId': obj.application.client_id,
'clientSecret': obj.application.client_secret,
'overview': obj.overview,
}
|
py | b40b67c0f389b76f2f6bc83fe785efa150660621 | import json
from urllib.parse import quote
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.exceptions import PermissionDenied
from django.db.models import Prefetch, Q
from django.http import HttpResponse
from django.shortcuts import get_object_or_404, redirect
from django.urls import reverse
from django.utils import timezone
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.utils.translation import gettext as _
from django.views.generic.base import ContextMixin, TemplateResponseMixin, View
from wagtail.actions.publish_page_revision import PublishPageRevisionAction
from wagtail.admin import messages
from wagtail.admin.action_menu import PageActionMenu
from wagtail.admin.mail import send_notification
from wagtail.admin.side_panels import PageSidePanels
from wagtail.admin.views.generic import HookResponseMixin
from wagtail.admin.views.pages.utils import get_valid_next_url_from_request
from wagtail.exceptions import PageClassNotFoundError
from wagtail.models import (
COMMENTS_RELATION_NAME,
Comment,
CommentReply,
Page,
PageSubscription,
UserPagePermissionsProxy,
WorkflowState,
)
class EditView(TemplateResponseMixin, ContextMixin, HookResponseMixin, View):
def get_template_names(self):
if self.page.alias_of_id:
return ["wagtailadmin/pages/edit_alias.html"]
else:
return ["wagtailadmin/pages/edit.html"]
def add_legacy_moderation_warning(self):
# Check for revisions still undergoing moderation and warn - this is for the old moderation system
if self.latest_revision and self.latest_revision.submitted_for_moderation:
buttons = []
if self.page.live:
buttons.append(self.get_compare_with_live_message_button())
messages.warning(
self.request,
_("This page is currently awaiting moderation"),
buttons=buttons,
)
def add_save_confirmation_message(self):
if self.is_reverting:
message = _("Page '{0}' has been replaced with version from {1}.").format(
self.page.get_admin_display_title(),
self.previous_revision.created_at.strftime("%d %b %Y %H:%M"),
)
else:
message = _("Page '{0}' has been updated.").format(
self.page.get_admin_display_title()
)
messages.success(self.request, message)
def get_commenting_changes(self):
"""
Finds comments that have been changed during this request.
        Returns a dict with the following keys, each mapping to a list:
        - new_comments, deleted_comments, resolved_comments, edited_comments
          (lists of comment instances)
        - new_replies, deleted_replies, edited_replies
          (lists of (comment instance, replies) tuples)
"""
# Get changes
comments_formset = self.form.formsets["comments"]
new_comments = comments_formset.new_objects
deleted_comments = comments_formset.deleted_objects
# Assume any changed comments that are resolved were only just resolved
resolved_comments = []
edited_comments = []
for changed_comment, changed_fields in comments_formset.changed_objects:
if changed_comment.resolved_at and "resolved" in changed_fields:
resolved_comments.append(changed_comment)
if "text" in changed_fields:
edited_comments.append(changed_comment)
new_replies = []
deleted_replies = []
edited_replies = []
for comment_form in comments_formset.forms:
# New
replies = getattr(comment_form.formsets["replies"], "new_objects", [])
if replies:
new_replies.append((comment_form.instance, replies))
# Deleted
replies = getattr(comment_form.formsets["replies"], "deleted_objects", [])
if replies:
deleted_replies.append((comment_form.instance, replies))
# Edited
replies = getattr(comment_form.formsets["replies"], "changed_objects", [])
replies = [
reply for reply, changed_fields in replies if "text" in changed_fields
]
if replies:
edited_replies.append((comment_form.instance, replies))
return {
"new_comments": new_comments,
"deleted_comments": deleted_comments,
"resolved_comments": resolved_comments,
"edited_comments": edited_comments,
"new_replies": new_replies,
"deleted_replies": deleted_replies,
"edited_replies": edited_replies,
}
def send_commenting_notifications(self, changes):
"""
Sends notifications about any changes to comments to anyone who is subscribed.
"""
relevant_comment_ids = []
relevant_comment_ids.extend(
comment.pk for comment in changes["resolved_comments"]
)
relevant_comment_ids.extend(
comment.pk for comment, replies in changes["new_replies"]
)
# Skip if no changes were made
# Note: We don't email about edited comments so ignore those here
if (
not changes["new_comments"]
and not changes["deleted_comments"]
and not changes["resolved_comments"]
and not changes["new_replies"]
):
return
# Get global page comment subscribers
subscribers = PageSubscription.objects.filter(
page=self.page, comment_notifications=True
).select_related("user")
global_recipient_users = [
subscriber.user
for subscriber in subscribers
if subscriber.user != self.request.user
]
# Get subscribers to individual threads
replies = CommentReply.objects.filter(comment_id__in=relevant_comment_ids)
comments = Comment.objects.filter(id__in=relevant_comment_ids)
thread_users = (
get_user_model()
.objects.exclude(pk=self.request.user.pk)
.exclude(pk__in=subscribers.values_list("user_id", flat=True))
.filter(
Q(comment_replies__comment_id__in=relevant_comment_ids)
| Q(**{("%s__pk__in" % COMMENTS_RELATION_NAME): relevant_comment_ids})
)
.prefetch_related(
Prefetch("comment_replies", queryset=replies),
Prefetch(COMMENTS_RELATION_NAME, queryset=comments),
)
)
# Skip if no recipients
if not (global_recipient_users or thread_users):
return
thread_users = [
(
user,
set(
list(user.comment_replies.values_list("comment_id", flat=True))
+ list(
getattr(user, COMMENTS_RELATION_NAME).values_list(
"pk", flat=True
)
)
),
)
for user in thread_users
]
mailed_users = set()
for current_user, current_threads in thread_users:
# We are trying to avoid calling send_notification for each user for performance reasons
# so group the users receiving the same thread notifications together here
if current_user in mailed_users:
continue
users = [current_user]
mailed_users.add(current_user)
for user, threads in thread_users:
if user not in mailed_users and threads == current_threads:
users.append(user)
mailed_users.add(user)
send_notification(
users,
"updated_comments",
{
"page": self.page,
"editor": self.request.user,
"new_comments": [
comment
for comment in changes["new_comments"]
if comment.pk in threads
],
"resolved_comments": [
comment
for comment in changes["resolved_comments"]
if comment.pk in threads
],
"deleted_comments": [],
"replied_comments": [
{
"comment": comment,
"replies": replies,
}
for comment, replies in changes["new_replies"]
if comment.pk in threads
],
},
)
return send_notification(
global_recipient_users,
"updated_comments",
{
"page": self.page,
"editor": self.request.user,
"new_comments": changes["new_comments"],
"resolved_comments": changes["resolved_comments"],
"deleted_comments": changes["deleted_comments"],
"replied_comments": [
{
"comment": comment,
"replies": replies,
}
for comment, replies in changes["new_replies"]
],
},
)
def log_commenting_changes(self, changes, revision):
"""
Generates log entries for any changes made to comments or replies.
"""
for comment in changes["new_comments"]:
comment.log_create(page_revision=revision, user=self.request.user)
for comment in changes["edited_comments"]:
comment.log_edit(page_revision=revision, user=self.request.user)
for comment in changes["resolved_comments"]:
comment.log_resolve(page_revision=revision, user=self.request.user)
for comment in changes["deleted_comments"]:
comment.log_delete(page_revision=revision, user=self.request.user)
for comment, replies in changes["new_replies"]:
for reply in replies:
reply.log_create(page_revision=revision, user=self.request.user)
for comment, replies in changes["edited_replies"]:
for reply in replies:
reply.log_edit(page_revision=revision, user=self.request.user)
for comment, replies in changes["deleted_replies"]:
for reply in replies:
reply.log_delete(page_revision=revision, user=self.request.user)
def get_edit_message_button(self):
return messages.button(
reverse("wagtailadmin_pages:edit", args=(self.page.id,)), _("Edit")
)
def get_view_draft_message_button(self):
return messages.button(
reverse("wagtailadmin_pages:view_draft", args=(self.page.id,)),
_("View draft"),
new_window=False,
)
def get_view_live_message_button(self):
return messages.button(self.page.url, _("View live"), new_window=False)
def get_compare_with_live_message_button(self):
return messages.button(
reverse(
"wagtailadmin_pages:revisions_compare",
args=(self.page.id, "live", self.latest_revision.id),
),
_("Compare with live version"),
)
def get_page_for_status(self):
if self.page.live and self.page.has_unpublished_changes:
# Page status needs to present the version of the page containing the correct live URL
return self.real_page_record.specific
else:
return self.page
def dispatch(self, request, page_id):
self.real_page_record = get_object_or_404(Page, id=page_id)
self.latest_revision = self.real_page_record.get_latest_revision()
self.page_content_type = self.real_page_record.cached_content_type
self.page_class = self.real_page_record.specific_class
if self.page_class is None:
raise PageClassNotFoundError(
f"The page '{self.real_page_record}' cannot be edited because the "
f"model class used to create it ({self.page_content_type.app_label}."
f"{self.page_content_type.model}) can no longer be found in the codebase. "
"This usually happens as a result of switching between git "
"branches without running migrations to trigger the removal of "
"unused ContentTypes. To edit the page, you will need to switch "
"back to a branch where the model class is still present."
)
self.page = self.real_page_record.get_latest_revision_as_page()
self.parent = self.page.get_parent()
self.page_perms = self.page.permissions_for_user(self.request.user)
if not self.page_perms.can_edit():
raise PermissionDenied
self.next_url = get_valid_next_url_from_request(self.request)
response = self.run_hook("before_edit_page", self.request, self.page)
if response:
return response
try:
self.subscription = PageSubscription.objects.get(
page=self.page, user=self.request.user
)
except PageSubscription.DoesNotExist:
self.subscription = PageSubscription(
page=self.page, user=self.request.user, comment_notifications=False
)
self.edit_handler = self.page_class.get_edit_handler()
self.form_class = self.edit_handler.get_form_class()
if getattr(settings, "WAGTAIL_WORKFLOW_ENABLED", True):
# Retrieve current workflow state if set, default to last workflow state
self.workflow_state = (
self.page.current_workflow_state
or self.page.workflow_states.order_by("created_at").last()
)
else:
self.workflow_state = None
if self.workflow_state:
self.workflow_tasks = self.workflow_state.all_tasks_with_status()
else:
self.workflow_tasks = []
self.errors_debug = None
return super().dispatch(request)
def get(self, request):
if self.page_perms.user_has_lock():
if self.page.locked_at:
lock_message = format_html(
_("<b>Page '{}' was locked</b> by <b>you</b> on <b>{}</b>."),
self.page.get_admin_display_title(),
self.page.locked_at.strftime("%d %b %Y %H:%M"),
)
else:
lock_message = format_html(
_("<b>Page '{}' is locked</b> by <b>you</b>."),
self.page.get_admin_display_title(),
)
lock_message += format_html(
'<span class="buttons"><button type="button" class="button button-small button-secondary" data-action-lock-unlock data-url="{}">{}</button></span>',
reverse("wagtailadmin_pages:unlock", args=(self.page.id,)),
_("Unlock"),
)
messages.warning(self.request, lock_message, extra_tags="lock")
elif self.page.locked and self.page_perms.page_locked():
# the page can also be locked at a permissions level if in a workflow, on a task the user is not a reviewer for
# this should be indicated separately
if self.page.locked_by and self.page.locked_at:
lock_message = format_html(
_("<b>Page '{}' was locked</b> by <b>{}</b> on <b>{}</b>."),
self.page.get_admin_display_title(),
str(self.page.locked_by),
self.page.locked_at.strftime("%d %b %Y %H:%M"),
)
else:
# Page was probably locked with an old version of Wagtail, or a script
lock_message = format_html(
_("<b>Page '{}' is locked</b>."),
self.page.get_admin_display_title(),
)
if self.page_perms.can_unlock():
lock_message += format_html(
'<span class="buttons"><button type="button" class="button button-small button-secondary" data-action-lock-unlock data-url="{}">{}</button></span>',
reverse("wagtailadmin_pages:unlock", args=(self.page.id,)),
_("Unlock"),
)
messages.error(self.request, lock_message, extra_tags="lock")
if self.page.current_workflow_state:
workflow = self.workflow_state.workflow
task = self.workflow_state.current_task_state.task
if (
self.workflow_state.status != WorkflowState.STATUS_NEEDS_CHANGES
and task.specific.page_locked_for_user(self.page, self.request.user)
):
# Check for revisions still undergoing moderation and warn
if len(self.workflow_tasks) == 1:
# If only one task in workflow, show simple message
workflow_info = _("This page is currently awaiting moderation.")
else:
workflow_info = format_html(
_(
"This page is awaiting <b>'{}'</b> in the <b>'{}'</b> workflow."
),
task.name,
workflow.name,
)
messages.error(
self.request,
mark_safe(
workflow_info
+ " "
+ _("Only reviewers for this task can edit the page.")
),
extra_tags="lock",
)
self.form = self.form_class(
instance=self.page,
subscription=self.subscription,
parent_page=self.parent,
for_user=self.request.user,
)
self.has_unsaved_changes = False
self.add_legacy_moderation_warning()
self.page_for_status = self.get_page_for_status()
return self.render_to_response(self.get_context_data())
def add_cancel_workflow_confirmation_message(self):
message = _("Workflow on page '{0}' has been cancelled.").format(
self.page.get_admin_display_title()
)
messages.success(
self.request,
message,
buttons=[
self.get_view_draft_message_button(),
self.get_edit_message_button(),
],
)
def post(self, request):
# Don't allow POST requests if the page is an alias
if self.page.alias_of_id:
# Return 405 "Method Not Allowed" response
return HttpResponse(status=405)
self.form = self.form_class(
self.request.POST,
self.request.FILES,
instance=self.page,
subscription=self.subscription,
parent_page=self.parent,
for_user=self.request.user,
)
self.is_cancelling_workflow = (
bool(self.request.POST.get("action-cancel-workflow"))
and self.workflow_state
and self.workflow_state.user_can_cancel(self.request.user)
)
if self.form.is_valid() and not self.page_perms.page_locked():
return self.form_valid(self.form)
else:
return self.form_invalid(self.form)
def workflow_action_is_valid(self):
self.workflow_action = self.request.POST["workflow-action-name"]
available_actions = self.page.current_workflow_task.get_actions(
self.page, self.request.user
)
available_action_names = [
name for name, verbose_name, modal in available_actions
]
return self.workflow_action in available_action_names
def form_valid(self, form):
self.is_reverting = bool(self.request.POST.get("revision"))
# If a revision ID was passed in the form, get that revision so its
# date can be referenced in notification messages
if self.is_reverting:
self.previous_revision = get_object_or_404(
self.page.revisions, id=self.request.POST.get("revision")
)
self.has_content_changes = self.form.has_changed()
if self.request.POST.get("action-publish") and self.page_perms.can_publish():
return self.publish_action()
elif (
self.request.POST.get("action-submit")
and self.page_perms.can_submit_for_moderation()
):
return self.submit_action()
elif (
self.request.POST.get("action-restart-workflow")
and self.page_perms.can_submit_for_moderation()
and self.workflow_state
and self.workflow_state.user_can_cancel(self.request.user)
):
return self.restart_workflow_action()
elif (
self.request.POST.get("action-workflow-action")
and self.workflow_action_is_valid()
):
return self.perform_workflow_action()
elif self.is_cancelling_workflow:
return self.cancel_workflow_action()
else:
return self.save_action()
def save_action(self):
self.page = self.form.save(commit=False)
self.subscription.save()
# Save revision
revision = self.page.save_revision(
user=self.request.user,
log_action=True, # Always log the new revision on edit
previous_revision=(self.previous_revision if self.is_reverting else None),
)
self.add_save_confirmation_message()
if self.has_content_changes and "comments" in self.form.formsets:
changes = self.get_commenting_changes()
self.log_commenting_changes(changes, revision)
self.send_commenting_notifications(changes)
response = self.run_hook("after_edit_page", self.request, self.page)
if response:
return response
# Just saving - remain on edit page for further edits
return self.redirect_and_remain()
def publish_action(self):
self.page = self.form.save(commit=False)
self.subscription.save()
# Save revision
revision = self.page.save_revision(
user=self.request.user,
log_action=True, # Always log the new revision on edit
previous_revision=(self.previous_revision if self.is_reverting else None),
)
# store submitted go_live_at for messaging below
go_live_at = self.page.go_live_at
response = self.run_hook("before_publish_page", self.request, self.page)
if response:
return response
action = PublishPageRevisionAction(
revision,
user=self.request.user,
changed=self.has_content_changes,
previous_revision=(self.previous_revision if self.is_reverting else None),
)
action.execute(skip_permission_checks=True)
if self.has_content_changes and "comments" in self.form.formsets:
changes = self.get_commenting_changes()
self.log_commenting_changes(changes, revision)
self.send_commenting_notifications(changes)
# Need to reload the page because the URL may have changed, and we
# need the up-to-date URL for the "View Live" button.
self.page = self.page.specific_class.objects.get(pk=self.page.pk)
response = self.run_hook("after_publish_page", self.request, self.page)
if response:
return response
# Notifications
if go_live_at and go_live_at > timezone.now():
# Page has been scheduled for publishing in the future
if self.is_reverting:
message = _(
"Version from {0} of page '{1}' has been scheduled for publishing."
).format(
self.previous_revision.created_at.strftime("%d %b %Y %H:%M"),
self.page.get_admin_display_title(),
)
else:
if self.page.live:
message = _(
"Page '{0}' is live and this version has been scheduled for publishing."
).format(self.page.get_admin_display_title())
else:
message = _("Page '{0}' has been scheduled for publishing.").format(
self.page.get_admin_display_title()
)
messages.success(
self.request, message, buttons=[self.get_edit_message_button()]
)
else:
# Page is being published now
if self.is_reverting:
message = _(
"Version from {0} of page '{1}' has been published."
).format(
self.previous_revision.created_at.strftime("%d %b %Y %H:%M"),
self.page.get_admin_display_title(),
)
else:
message = _("Page '{0}' has been published.").format(
self.page.get_admin_display_title()
)
buttons = []
if self.page.url is not None:
buttons.append(self.get_view_live_message_button())
buttons.append(self.get_edit_message_button())
messages.success(self.request, message, buttons=buttons)
response = self.run_hook("after_edit_page", self.request, self.page)
if response:
return response
# we're done here - redirect back to the explorer
return self.redirect_away()
def submit_action(self):
self.page = self.form.save(commit=False)
self.subscription.save()
# Save revision
revision = self.page.save_revision(
user=self.request.user,
log_action=True, # Always log the new revision on edit
previous_revision=(self.previous_revision if self.is_reverting else None),
)
if self.has_content_changes and "comments" in self.form.formsets:
changes = self.get_commenting_changes()
self.log_commenting_changes(changes, revision)
self.send_commenting_notifications(changes)
if (
self.workflow_state
and self.workflow_state.status == WorkflowState.STATUS_NEEDS_CHANGES
):
# If the workflow was in the needs changes state, resume the existing workflow on submission
self.workflow_state.resume(self.request.user)
else:
# Otherwise start a new workflow
workflow = self.page.get_workflow()
workflow.start(self.page, self.request.user)
message = _("Page '{0}' has been submitted for moderation.").format(
self.page.get_admin_display_title()
)
messages.success(
self.request,
message,
buttons=[
self.get_view_draft_message_button(),
self.get_edit_message_button(),
],
)
response = self.run_hook("after_edit_page", self.request, self.page)
if response:
return response
# we're done here - redirect back to the explorer
return self.redirect_away()
def restart_workflow_action(self):
self.page = self.form.save(commit=False)
self.subscription.save()
# save revision
revision = self.page.save_revision(
user=self.request.user,
log_action=True, # Always log the new revision on edit
previous_revision=(self.previous_revision if self.is_reverting else None),
)
if self.has_content_changes and "comments" in self.form.formsets:
changes = self.get_commenting_changes()
self.log_commenting_changes(changes, revision)
self.send_commenting_notifications(changes)
# cancel workflow
self.workflow_state.cancel(user=self.request.user)
# start new workflow
workflow = self.page.get_workflow()
workflow.start(self.page, self.request.user)
message = _("Workflow on page '{0}' has been restarted.").format(
self.page.get_admin_display_title()
)
messages.success(
self.request,
message,
buttons=[
self.get_view_draft_message_button(),
self.get_edit_message_button(),
],
)
response = self.run_hook("after_edit_page", self.request, self.page)
if response:
return response
# we're done here - redirect back to the explorer
return self.redirect_away()
def perform_workflow_action(self):
self.page = self.form.save(commit=False)
self.subscription.save()
if self.has_content_changes:
# Save revision
revision = self.page.save_revision(
user=self.request.user,
log_action=True, # Always log the new revision on edit
previous_revision=(
self.previous_revision if self.is_reverting else None
),
)
if "comments" in self.form.formsets:
changes = self.get_commenting_changes()
self.log_commenting_changes(changes, revision)
self.send_commenting_notifications(changes)
extra_workflow_data_json = self.request.POST.get(
"workflow-action-extra-data", "{}"
)
extra_workflow_data = json.loads(extra_workflow_data_json)
self.page.current_workflow_task.on_action(
self.page.current_workflow_task_state,
self.request.user,
self.workflow_action,
**extra_workflow_data,
)
self.add_save_confirmation_message()
response = self.run_hook("after_edit_page", self.request, self.page)
if response:
return response
# we're done here - redirect back to the explorer
return self.redirect_away()
def cancel_workflow_action(self):
self.workflow_state.cancel(user=self.request.user)
self.page = self.form.save(commit=False)
self.subscription.save()
# Save revision
revision = self.page.save_revision(
user=self.request.user,
log_action=True, # Always log the new revision on edit
previous_revision=(self.previous_revision if self.is_reverting else None),
)
if self.has_content_changes and "comments" in self.form.formsets:
changes = self.get_commenting_changes()
self.log_commenting_changes(changes, revision)
self.send_commenting_notifications(changes)
# Notifications
self.add_cancel_workflow_confirmation_message()
response = self.run_hook("after_edit_page", self.request, self.page)
if response:
return response
# Just saving - remain on edit page for further edits
return self.redirect_and_remain()
def redirect_away(self):
if self.next_url:
# redirect back to 'next' url if present
return redirect(self.next_url)
else:
# redirect back to the explorer
return redirect("wagtailadmin_explore", self.page.get_parent().id)
def redirect_and_remain(self):
target_url = reverse("wagtailadmin_pages:edit", args=[self.page.id])
if self.next_url:
# Ensure the 'next' url is passed through again if present
target_url += "?next=%s" % quote(self.next_url)
return redirect(target_url)
def form_invalid(self, form):
# even if the page is locked due to not having permissions, the original submitter can still cancel the workflow
if self.is_cancelling_workflow:
self.workflow_state.cancel(user=self.request.user)
self.add_cancel_workflow_confirmation_message()
if self.page_perms.page_locked():
messages.error(
self.request, _("The page could not be saved as it is locked")
)
else:
messages.validation_error(
self.request,
_("The page could not be saved due to validation errors"),
self.form,
)
self.errors_debug = repr(self.form.errors) + repr(
[
(name, formset.errors)
for (name, formset) in self.form.formsets.items()
if formset.errors
]
)
self.has_unsaved_changes = True
self.add_legacy_moderation_warning()
self.page_for_status = self.get_page_for_status()
return self.render_to_response(self.get_context_data())
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
bound_panel = self.edit_handler.get_bound_panel(
instance=self.page, request=self.request, form=self.form
)
action_menu = PageActionMenu(self.request, view="edit", page=self.page)
side_panels = PageSidePanels(
self.request,
self.page_for_status,
comments_enabled=self.form.show_comments_toggle,
)
context.update(
{
"page": self.page,
"page_for_status": self.page_for_status,
"content_type": self.page_content_type,
"edit_handler": bound_panel,
"errors_debug": self.errors_debug,
"action_menu": action_menu,
"side_panels": side_panels,
"preview_modes": self.page.preview_modes,
"form": self.form,
"next": self.next_url,
"has_unsaved_changes": self.has_unsaved_changes,
"page_locked": self.page_perms.page_locked(),
"workflow_state": self.workflow_state
if self.workflow_state and self.workflow_state.is_active
else None,
"current_task_state": self.page.current_workflow_task_state,
"publishing_will_cancel_workflow": self.workflow_tasks
and getattr(settings, "WAGTAIL_WORKFLOW_CANCEL_ON_PUBLISH", True),
"locale": None,
"translations": [],
"media": bound_panel.media
+ self.form.media
+ action_menu.media
+ side_panels.media,
}
)
if getattr(settings, "WAGTAIL_I18N_ENABLED", False):
user_perms = UserPagePermissionsProxy(self.request.user)
context.update(
{
"locale": self.page.locale,
"translations": [
{
"locale": translation.locale,
"url": reverse(
"wagtailadmin_pages:edit", args=[translation.id]
),
}
for translation in self.page.get_translations()
.only("id", "locale", "depth")
.select_related("locale")
if user_perms.for_page(translation).can_edit()
],
}
)
return context
|
py | b40b67c6be9984a162c1a28c7f3bb00353f614cb | """ tasks for chapter 00 The Function """
from random import randint
def minInWeek():
"""
task 0.5.1
return the number of minutes in a week
"""
return 60 * 24 * 7
def rem(number=2304811, divisor=47):
"""
task 0.5.2
"""
return number - (
(number // divisor) * divisor
)
def testIfSumIsDivisable(numA=673, numB=909, div=3):
"""
task 0.5.3
return true if the sum of two numbers is divisible by gin divisor
"""
return (numA + numB) % div == 0
def perdict(x=-9, y=None, guess=10):
"""
task 0.5.4
pass x and y into equation and guess the value
"""
testY = float(float(1) / float(2))
if y is None:
y = testY
    if (2**(y + testY) if x + 10 < 0 else 2**(y - testY)) == guess:
return True
else:
return False
def compFirstFivePosInt(iset={1, 2, 3, 4, 5}):
"""
task 0.5.5
a comprehension over the given set whose value is the set consisting
of the squares of the first five positive integers
"""
    return {x**2 for x in iset}
def compFirstFivePowOf2(iset={0, 1, 2, 3, 4}):
"""
task 0.5.6
a comprehension over the given set whose value is the set consisting
of the first five powers of two, starting with 2**0
"""
return {2**x for x in iset}
def nineElementSetComp(setA={1, 2, 3}, setB={2, 3, 4}):
"""
task 0.5.7
"""
tmp = {x * y for x in setA for y in setB}
result = False
if len(tmp) == 9:
result = True
return str(result) + ' : ' + str(tmp)
def overlapThreeElemSets(setA={1, 2, 3}, setB={1, 2, 3}):
"""
task 0.5.8
use two disjoint three element sets so that the value
becomes a five element set
"""
tmp = {x * y for x in setA for y in setB}
result = False
if len(tmp) == 5:
result = True
return str(result) + ' : ' + str(tmp)
def compOverValueTwoSets(setA={1, 2, 3, 4}, setB={3, 4, 5, 6}):
"""
task 0.5.9
comprehension whose value is the intersection of setA and setB
without using the '&' operator
"""
return {x for x in (setA | setB) if x in setA and x in setB}
def expressValAvgList(i=[20, 10, 15, 75]):
"""
task 0.5.10
expression whose value is the avg. of the elements in the list i
"""
return sum(i) / len(i)
def doubleListComp(listA=['A', 'B', 'C'], listB=[1, 2, 3]):
"""
task 0.5.11
write a double list comprehension over the two given lists
"""
return [x for x in [[y] + [z] for y in listA for z in listB]]
def evalSumLists(lists=[[.25, .75, .1], [-1, 0], [4, 4, 4, 4]]):
"""
task 0.5.12
sum lists of lists
"""
return sum([sum(x) for x in lists])
def checkLftToRht(l=[0, 1, 2, 3], spliceA=2, spliceB=9):
"""
task 0.5.13
show what happens when the len. of the left hand side list
does not match the len. of the right hand side
"""
return l[spliceA:spliceB]
def compValListOfTuples(s={-4, -2, 1, 2, 5, 0}):
"""
task 0.5.14 && 0.5.15 && 0.5.16
a triple comprehension whose value is a list of all three element tuples
that are elements of the set 's' whose sum is zero
"""
resA = [(i, j, k) for i in s for j in s for k in s if i + j + k == 0]
resB = [(i, j, k) for i in s for j in s for k in s if i + j + k == 0 and len({i, j, k}) > 1]
    print(resA)
    print(resB)
    print(resB[0])
def exListLenDiff(l=[1, 1]):
"""
task 0.5.17
example of a list 'l' whose len(l) and len(list(set(l))) are different
"""
if len(l) == len(list(set(l))):
return True
else:
return False
def compRangeValOddNum(r=100):
"""
task 0.5.18
comprehension over a range whose value is the set
of odd numbers from 1 to 99
"""
return {x for x in range(r) if x % 2 > 0}
# return range(1, 100, 2)
def assignFirstFive(l=['A', 'B', 'C', 'D', 'E']):
"""
task 0.5.19
use the list 'l' in and express that uses range and zip, but not using a comprehension
"""
return list(zip(range(len(l)), l))
def compListSumTen(lA=[10, 25, 40], lB=[1, 15, 20]):
"""
task 0.5.20
comprehension whose value is a three element list in which the first
element is the sum of 10 and 1, second is the sum of 25 and 15,
and the third is the sum of 40 and 20
use zip, but not list
"""
return [sum(x) for x in zip(lA, lB)]
def compListDict(i=0):
"""
task 0.5.21 && 0.5.22
a comprehension that eval. to the list whose 'ith' element is the value
corresponding to key 'k' in the 'ith' dict.
"""
data = [
{'james': 'Sean', 'director': 'Terence'},
{'James': 'Roger', 'director': 'Lewis'},
{'James': 'Pierce', 'director': 'Roger'},
{'Bilbo': 'Baggins', 'director': 'Richard'}
]
return [[i for i,j in [d.keys() for d in data]] + [j for i,j in [d.keys() for d in data]]]
#return [data[i][x] for x in data[i].keys()]
def compRangeDict():
"""
task 0.5.23
comprehension whose value is sa dict. keys should be integers from 0 - 99
and the val. equal to the square of a key
"""
return {i:i**2 for i in range(0, 100)}
def compEvalDict(data={'red', 'white', 'blue'}):
"""
task 0.5.24
eval. a dict. that represents the identity function
"""
    return {v: v for v in data}
def compDictMaps(base=10):
"""
task 0.5.25
dict. comprehension that maps each integer in the reange between the provided base
to the list of three digits that represents each integer
"""
return {i:v for i,v in enumerate([[x, y, z] for x in range(base) for y in range(base) for z in range(base)])}
def compDictValDistinct(
id2salary={0:1000.0, 3:990, 1:1200.50},
names=['Larry', 'Curly', '', 'Moe']
):
"""
task 0.5.26
comprehension whose value is a dict. mapping of employee names to salaries that are distinct
"""
return {name:id2salary[i] for i, name in enumerate(names) if i in id2salary.keys()}
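# Added illustration (not part of the original exercise): with the default arguments,
# compDictValDistinct() evaluates to
#   {'Larry': 1000.0, 'Curly': 1200.5, 'Moe': 990}
# (the empty name at index 2 is skipped because 2 is not a key of id2salary).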
def twice(z):
"""
task 0.5.27
implement one line procedures def twice(z): return 2*z
"""
return 2*z
def nextInts(l=[1, 5, 7]):
"""
task 0.5.28
implement one line procedure taking list of integers 'l'
output should be list of integers whose 'i'th element is one more then the 'i'th element of 'l'
e.g. input[1, 5, 7] output[2, 6, 8]
def nextInts(l=[1, 5, 7]): return [i + 1 for i in l]
"""
return [i + 1 for i in l]
def cubes(l=[1, 2, 3]):
"""
task 0.5.29
implement one line procedure taking list of integers 'l'
output should be a list of numbers whose 'i'th element is the cube of the 'i'th element of 'l'
e.g. input[1, 2, 3] output[1, 8, 27]
def cubes(l=[1, 2, 3]): return [i ** 3 for i in l]
"""
return [i ** 3 for i in l]
def dict2list(
d={'a':'A', 'b':'B', 'c':'C'},
keylist=['b', 'c', 'a']
):
"""
task 0.5.30 && task 0.6.3
implement one line procedure taking a dictionary and a keylist
output should be a list of the dict keylist values
e.g. input dict={'a':'A', 'b':'B', 'c':'C'} keylist=['b', 'c', 'a'] output=['B', 'C', 'A']
def dict2list(d={'a':'A', 'b':'B', 'c':'C'}, keylist=['b', 'c', 'a']): return [d[key] for key in keylist]
"""
return [d[key] for key in keylist]
def list2dict(
l=['A', 'B', 'C'],
keylist=['a', 'b', 'c']
):
"""
task 0.5.31 && task 0.6.3
implement one line procedure taking a list of letters and a keylist
output should be a dict. that maps them together
e.g. input l=['A', 'B', 'C'] keylist=['a', 'b', 'c'] output={'a':'A', 'b':'B', 'c':'C'}
def list2dict(l=['A', 'B', 'C'], keylist=['a', 'b', 'c']): return {k:l[i] for i,k in enumerate(keylist)}
"""
return {k:l[i] for i,k in enumerate(keylist)}
def all_3_digit_numbers(
base=2,
digits={0, 1}
):
"""
    task 0.5.32 ### not really sure what it's asking for here....
    implement one line procedure taking a base and a set of digits
    output should be the set of all three digit numbers where the base is base
    e.g. input base=2 digits={0, 1} output={0, 1, 2, 3, 4, 5, 6, 7}
    def all_3_digit_numbers(base=2, digits={0, 1}): return {x * base**2 + y * base + z for x in digits for y in digits for z in digits}
    """
    return {x * base**2 + y * base + z for x in digits for y in digits for z in digits}
def mathExample():
"""
task 0..6.1
import the math module and check out it's help module
"""
import math
help(math)
def movieReview():
"""
task 0.6.2
take a movie title as a string for input
return a random movie review as a string
"""
REVIEWS = [
'See It!',
'A gem!',
'Ideological claptrap!',
'Crapola',
'Beautiful'
]
return REVIEWS[
randint(
0,
(len(REVIEWS) - 1)
)
]
def makeInverseIndex(strList=[
'dont put off what can be done today',
'today is not tomorrow'
]):
"""
task 0.6.6
input is a given list of words making up a document
output should be dictionary that maps each word to the set consisting
of the document numbers of documents in which that word appears, an inverse index
"""
docs = {i:set(doc.split()) for i, doc in enumerate(strList)}
words = set()
for i in docs: words = words | docs[i]
response = {}
for word in words:
response[word] = {}
for key in docs:
response[word][key] = True if word in docs[key] else False
return response
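# Added illustration (not part of the original exercise): with the default strList,
# makeInverseIndex() returns a dict keyed by word, where each value maps a document
# number to a boolean, e.g.
#   {'today': {0: True, 1: True}, 'is': {0: False, 1: True}, ...}
# Note this differs slightly from the docstring (which asks for a set of document
# numbers per word); orSearch/andSearch below rely on this boolean-dict shape.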
def orSearch(
inverseIndex=makeInverseIndex(),
query=['is', 'today']
):
"""
task 0.6.7
return the set of document numbers specifying all documents that contain any of the words in query
"""
return {word:inverseIndex[word] for word in inverseIndex.keys() if word in query}
def andSearch(
inverseIndex=makeInverseIndex(),
query=set(['is', 'today'])
):
"""
task 0.6.8
return the set of document numbers specifying all documents that contain all of the words in query
"""
result = None
tmpA = set()
for word in query:
        print(word)
if word not in inverseIndex:
break
tmpB = set()
for word_doc_id in inverseIndex[word]:
if inverseIndex[word][word_doc_id]:
tmpB.add(word_doc_id)
        print(tmpB)
if len(tmpA) == 0:
tmpA = set(tmpB)
        # keep only documents that also contain this word; intersecting avoids
        # mutating tmpA while iterating over it
        tmpA = tmpA & tmpB
        print('tmpA after word: ' + str(word) + ' : ' + str(tmpA))
    print('tmpA final: ' + str(tmpA))
if len(tmpA) > 0:
result = tmpA
    print(result)
|
py | b40b6870cd2b7e930a6331832ceb5c023e4ab20b | '''
Student: Marcelo Araújo dos Santos
Registration number: 16/0035481
'''
from dijkstar import Graph, find_path
import tkinter as tk
import math
canvas_height=310
canvas_width=310
raio = 5
rowInterac = 3
cidades = {}
rodovias = {}
graph = Graph()
master = tk.Tk()
master.title('mapa')
master.geometry('800x700')
w = tk.Canvas(master, width=canvas_width, height=canvas_height)
class Edge:
def __init__(self, nome, x, y):
self.nome = nome
self.x = x
self.y = y
'''
weights (pesos):
sp = situacaoPista (road condition, 0 to 10): 10 means the road is impassable, 0 means it is perfect
pp = precoPedagios (toll price)
tv = tempoDeViagem (travel time = distance / max speed)
p = perigo (danger, 0 to 10): 0 = no danger
the weights themselves range from 0 to 5
'''
def create_circle(x, y, r, color, canvasName): #center coordinates, radius
x0 = x - r
y0 = y - r
x1 = x + r
y1 = y + r
return canvasName.create_oval(x0, y0, x1, y1, fill=color)
def pintarMapa(cidades, rodovias, master, w, result=None):
for r in rodovias:
#print(r)
c1, c2 = r.split(',')
w.create_line(cidades[c1].x, cidades[c1].y, cidades[c2].x, cidades[c2].y )
for c in cidades:
create_circle(cidades[c].x, cidades[c].y, raio, "blue", w)
w.create_text(cidades[c].x,cidades[c].y-10,fill="darkblue",
text=cidades[c].nome)
#w.Label(master, text=cidades[c].nome, width=cidades[c].x, height=cidades[c].y+5)
if result != None:
length = len(result)
tam = 0
for i in range(length-1):
c1, c2 = result[i], result[i+1]
#==
'''print(c1.nome)
str = "{},{}".format(c1.nome, c2.nome)
print(rodovias[str])'''
'''p = x.replace('\n', '').split(', ')
ei = cidades[p[0]]
ef = cidades[p[1]]
dist = math.sqrt( (ef.x-ei.x)**2 + (ef.y-ei.y)**2 )
tempoDeViagem = dist / int(p[5])
infoEdge = {
'situacaoPista':int(p[2]),
'precoPedagios':float(p[3]),
'perigo':int(p[4]),
'tempoDeViagem':tempoDeViagem
}
rodovias["{},{}".format(p[0], p[1])] = infoEdge'''
w.create_line(cidades[c1].x, cidades[c1].y, cidades[c2].x, cidades[c2].y, fill="red")
return 0
def execButton(graph, cost_func, inicio, fim, textRes):
if(inicio.get() == "" or fim.get() == ""):
textRes.config(text="preencha os dois campos de cidade inicio e fim")
else:
try:
path = find_path(graph, inicio.get(), fim.get(), cost_func=cost_func)
#print("veja: ",path)
caminho = path.nodes
texto = "de '{}' ate '{}' o menor caminho eh {} com custo de {}km".format(caminho[0], caminho[-1], caminho, int(path.total_cost*100)/100)
print(texto)
textRes.config(text=texto)
pintarMapa(cidades, rodovias, master, w, path.nodes)
except:
textRes.config(text="nao foi possivel achar caminho de '{}' ate '{}'".format(inicio.get(), fim.get()))
def main():
w.grid(row=0,column=0)
texti = tk.Label(master, text='inicio')
texti.grid(row=1, column=0)#, columnspan=1, rowspan=1) # 0, 0
textf = tk.Label(master, text='fim')
textf.grid(row=2, column=0)#, columnspan=1, rowspan=2) # 0, 1
textRes = tk.Label(master, text='----')
textRes.grid(row=rowInterac, column=1)
inicio = tk.Entry(master)
inicio.grid(row=1, column=1)#, columnspan=2, rowspan=1) # 1, 0
fim = tk.Entry(master)
fim.grid(row=2, column=1)#, columnspan=2, rowspan=2) # 1, 1
button = tk.Button(master, text='calcular caminho', width=25,
command=lambda: execButton(graph, cost_func, inicio, fim, textRes))
button.grid(row=rowInterac,column=0)
fc = open("cidades.txt","r")
fr = open("rodovias.txt","r")
    # read the cities (nodes) from file and store them
fl =fc.readlines()
for x in fl:
c = x.replace('\n', '').split(', ')
cidades[c[0]] = Edge(c[0], int(c[1]), int(c[2]))
fc.close
    # read the highways (edges) from file and store them in the graph
fl =fr.readlines()
for x in fl:
p = x.replace('\n', '').split(', ')
ei = cidades[p[0]]
ef = cidades[p[1]]
dist = math.sqrt( (ef.x-ei.x)**2 + (ef.y-ei.y)**2 )
tempoDeViagem = dist / int(p[5])
infoEdge = {
'situacaoPista':int(p[2]),
'precoPedagios':float(p[3]),
'perigo':int(p[4]),
'tempoDeViagem':tempoDeViagem,
'distancia':dist
}
rodovias["{},{}".format(p[0], p[1])] = infoEdge
graph.add_edge(p[0], p[1], infoEdge)
graph.add_edge(p[1], p[0], infoEdge)
fr.close()
pesos = {'sp': 3, 'pp': 3, 'tv': 3, 'p': 3}
cost_func = lambda u, v, e, prev_e: e['distancia']#e['situacaoPista'] + e['precoPedagios']+ e['perigo'] + e['tempoDeViagem']
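    # Added illustration (hypothetical alternative, not used by default): the 'pesos'
    # weights above could be combined into a multi-criteria cost instead of pure distance:
    # cost_func = lambda u, v, e, prev_e: (pesos['sp'] * e['situacaoPista']
    #                                      + pesos['pp'] * e['precoPedagios']
    #                                      + pesos['p'] * e['perigo']
    #                                      + pesos['tv'] * e['tempoDeViagem'])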
#path = find_path(graph, 'a', 'z', cost_func=cost_func)
#print(path.nodes)
pintarMapa(cidades, rodovias, master, w)
master.mainloop()
if __name__== "__main__":
main() |
py | b40b690388ad5b87eef68650aec66117c4ea3dde | ''' Inception utilities
This file contains methods for calculating IS and FID, using either
the original numpy code or an accelerated fully-pytorch version that
uses a fast newton-schulz approximation for the matrix sqrt. There are also
methods for acquiring a desired number of samples from the Generator,
and parallelizing the inbuilt PyTorch inception network.
NOTE that Inception Scores and FIDs calculated using these methods will
*not* be directly comparable to values calculated using the original TF
IS/FID code. You *must* use the TF model if you wish to report and compare
numbers. This code tends to produce IS values that are 5-10% lower than
those obtained through TF.
'''
import numpy as np
from scipy import linalg # For numpy FID
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Parameter as P
from torchvision.models.inception import inception_v3
# Module that wraps the inception network to enable use with dataparallel and
# returning pool features and logits.
class WrapInception(nn.Module):
def __init__(self, net):
super(WrapInception,self).__init__()
self.net = net
self.mean = P(torch.tensor([0.485, 0.456, 0.406]).view(1, -1, 1, 1),
requires_grad=False)
self.std = P(torch.tensor([0.229, 0.224, 0.225]).view(1, -1, 1, 1),
requires_grad=False)
def forward(self, x):
# Normalize x
x = (x + 1.) / 2.0
x = (x - self.mean) / self.std
# Upsample if necessary
if x.shape[2] != 299 or x.shape[3] != 299:
x = F.interpolate(x, size=(299, 299), mode='bilinear', align_corners=True)
# 299 x 299 x 3
x = self.net.Conv2d_1a_3x3(x)
# 149 x 149 x 32
x = self.net.Conv2d_2a_3x3(x)
# 147 x 147 x 32
x = self.net.Conv2d_2b_3x3(x)
# 147 x 147 x 64
x = F.max_pool2d(x, kernel_size=3, stride=2)
# 73 x 73 x 64
x = self.net.Conv2d_3b_1x1(x)
# 73 x 73 x 80
x = self.net.Conv2d_4a_3x3(x)
# 71 x 71 x 192
x = F.max_pool2d(x, kernel_size=3, stride=2)
# 35 x 35 x 192
x = self.net.Mixed_5b(x)
# 35 x 35 x 256
x = self.net.Mixed_5c(x)
# 35 x 35 x 288
x = self.net.Mixed_5d(x)
# 35 x 35 x 288
x = self.net.Mixed_6a(x)
# 17 x 17 x 768
x = self.net.Mixed_6b(x)
# 17 x 17 x 768
x = self.net.Mixed_6c(x)
# 17 x 17 x 768
x = self.net.Mixed_6d(x)
# 17 x 17 x 768
x = self.net.Mixed_6e(x)
# 17 x 17 x 768
# 17 x 17 x 768
x = self.net.Mixed_7a(x)
# 8 x 8 x 1280
x = self.net.Mixed_7b(x)
# 8 x 8 x 2048
x = self.net.Mixed_7c(x)
# 8 x 8 x 2048
pool = torch.mean(x.view(x.size(0), x.size(1), -1), 2)
# 1 x 1 x 2048
logits = self.net.fc(F.dropout(pool, training=False).view(pool.size(0), -1))
# 1000 (num_classes)
return pool, logits
# A pytorch implementation of cov, from Modar M. Alfadly
# https://discuss.pytorch.org/t/covariance-and-gradient-support/16217/2
def torch_cov(m, rowvar=False):
'''Estimate a covariance matrix given data.
Covariance indicates the level to which two variables vary together.
If we examine N-dimensional samples, `X = [x_1, x_2, ... x_N]^T`,
then the covariance matrix element `C_{ij}` is the covariance of
`x_i` and `x_j`. The element `C_{ii}` is the variance of `x_i`.
Args:
m: A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables.
rowvar: If `rowvar` is True, then each row represents a
variable, with observations in the columns. Otherwise, the
relationship is transposed: each column represents a variable,
while the rows contain observations.
Returns:
The covariance matrix of the variables.
'''
if m.dim() > 2:
raise ValueError('m has more than 2 dimensions')
if m.dim() < 2:
m = m.view(1, -1)
if not rowvar and m.size(0) != 1:
m = m.t()
# m = m.type(torch.double) # uncomment this line if desired
fact = 1.0 / (m.size(1) - 1)
m -= torch.mean(m, dim=1, keepdim=True)
mt = m.t() # if complex: mt = m.t().conj()
return fact * m.matmul(mt).squeeze()
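# Added usage sketch (illustration, not part of the original file): for observations X
# of shape (n_samples, n_features), torch_cov(X) should roughly match
# np.cov(X.numpy(), rowvar=False); note that torch_cov centers its input in place.
#   X = torch.randn(1000, 5)
#   C = torch_cov(X)                          # (5, 5) covariance matrix
#   C_np = np.cov(X.numpy(), rowvar=False)    # approximately equal to C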
# Pytorch implementation of matrix sqrt, from Tsung-Yu Lin, and Subhransu Maji
# https://github.com/msubhransu/matrix-sqrt
def sqrt_newton_schulz(A, numIters, dtype=None):
with torch.no_grad():
if dtype is None:
dtype = A.type()
batchSize = A.shape[0]
dim = A.shape[1]
normA = A.mul(A).sum(dim=1).sum(dim=1).sqrt()
Y = A.div(normA.view(batchSize, 1, 1).expand_as(A));
I = torch.eye(dim,dim).view(1, dim, dim).repeat(batchSize,1,1).type(dtype)
Z = torch.eye(dim,dim).view(1, dim, dim).repeat(batchSize,1,1).type(dtype)
for i in range(numIters):
T = 0.5*(3.0*I - Z.bmm(Y))
Y = Y.bmm(T)
Z = T.bmm(Z)
sA = Y*torch.sqrt(normA).view(batchSize, 1, 1).expand_as(A)
return sA
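# Added verification sketch (illustration, not part of the original file): for a batch of
# well-conditioned symmetric positive-definite matrices A, the result should satisfy
# sA.bmm(sA) ~= A after enough iterations, e.g.
#   B = torch.randn(4, 16, 16)
#   A = B.bmm(B.transpose(1, 2)) + 16 * torch.eye(16)   # batch of SPD matrices
#   sA = sqrt_newton_schulz(A, 50)
#   err = (sA.bmm(sA) - A).abs().max()                  # expected to be small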
# FID calculator from TTUR--consider replacing this with GPU-accelerated cov
# calculations using torch?
def numpy_calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
"""Numpy implementation of the Frechet Distance.
Taken from https://github.com/bioinf-jku/TTUR
The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
and X_2 ~ N(mu_2, C_2) is
d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
Stable version by Dougal J. Sutherland.
Params:
-- mu1 : Numpy array containing the activations of a layer of the
inception net (like returned by the function 'get_predictions')
for generated samples.
-- mu2 : The sample mean over activations, precalculated on an
representive data set.
-- sigma1: The covariance matrix over activations for generated samples.
-- sigma2: The covariance matrix over activations, precalculated on an
representive data set.
Returns:
-- : The Frechet Distance.
"""
mu1 = np.atleast_1d(mu1)
mu2 = np.atleast_1d(mu2)
sigma1 = np.atleast_2d(sigma1)
sigma2 = np.atleast_2d(sigma2)
assert mu1.shape == mu2.shape, \
'Training and test mean vectors have different lengths'
assert sigma1.shape == sigma2.shape, \
'Training and test covariances have different dimensions'
diff = mu1 - mu2
# Product might be almost singular
covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
if not np.isfinite(covmean).all():
msg = ('fid calculation produces singular product; '
'adding %s to diagonal of cov estimates') % eps
print(msg)
offset = np.eye(sigma1.shape[0]) * eps
covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
# Numerical error might give slight imaginary component
if np.iscomplexobj(covmean):
    print('fid: covmean has an imaginary component; checking whether it is negligible')
if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
m = np.max(np.abs(covmean.imag))
raise ValueError('Imaginary component {}'.format(m))
covmean = covmean.real
tr_covmean = np.trace(covmean)
out = diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * tr_covmean
return out
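# Added sanity-check sketch (illustration, not part of the original file): the FID of a
# Gaussian with itself is 0 and grows as the means/covariances diverge, e.g.
#   mu = np.zeros(8); sigma = np.eye(8)
#   numpy_calculate_frechet_distance(mu, sigma, mu, sigma)        # ~0
#   numpy_calculate_frechet_distance(mu, sigma, mu + 1.0, sigma)  # ~8.0 (= ||diff||^2)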
def torch_calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
"""Pytorch implementation of the Frechet Distance.
Taken from https://github.com/bioinf-jku/TTUR
The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
and X_2 ~ N(mu_2, C_2) is
d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
Stable version by Dougal J. Sutherland.
Params:
  -- mu1   : Torch tensor containing the activations of a layer of the
             inception net (like returned by the function 'get_predictions')
             for generated samples.
  -- mu2   : The sample mean over activations, precalculated on a
             representative data set.
  -- sigma1: The covariance matrix over activations for generated samples.
  -- sigma2: The covariance matrix over activations, precalculated on a
             representative data set.
Returns:
-- : The Frechet Distance.
"""
assert mu1.shape == mu2.shape, \
'Training and test mean vectors have different lengths'
assert sigma1.shape == sigma2.shape, \
'Training and test covariances have different dimensions'
diff = mu1 - mu2
# Run 50 itrs of newton-schulz to get the matrix sqrt of sigma1 dot sigma2
covmean = sqrt_newton_schulz(sigma1.mm(sigma2).unsqueeze(0), 50).squeeze()
out = (diff.dot(diff) + torch.trace(sigma1) + torch.trace(sigma2)
- 2 * torch.trace(covmean))
return out
# Calculate Inception Score mean + std given softmax'd logits and number of splits
def calculate_inception_score(pred, num_splits=10):
scores = []
for index in range(num_splits):
pred_chunk = pred[index * (pred.shape[0] // num_splits): (index + 1) * (pred.shape[0] // num_splits), :]
kl_inception = pred_chunk * (np.log(pred_chunk) - np.log(np.expand_dims(np.mean(pred_chunk, 0), 0)))
kl_inception = np.mean(np.sum(kl_inception, 1))
scores.append(np.exp(kl_inception))
return np.mean(scores), np.std(scores)
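# Illustrative sketch: `pred` is expected to be an [N, num_classes] array of
# softmax probabilities, e.g. the (already softmaxed) `logits` tensor returned by
# accumulate_inception_activations() below, moved to numpy.
#   IS_mean, IS_std = calculate_inception_score(logits.cpu().numpy(), num_splits=10)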
# Loop and run the sampler and the net until it accumulates num_inception_images
# activations. Return the pool, the logits, and the labels (if one wants
# Inception Accuracy the labels of the generated class will be needed)
def accumulate_inception_activations(sample, net, num_inception_images=50000):
pool, logits, labels = [], [], []
while (torch.cat(logits, 0).shape[0] if len(logits) else 0) < num_inception_images:
with torch.no_grad():
images, labels_val = sample()
pool_val, logits_val = net(images.float())
pool += [pool_val]
logits += [F.softmax(logits_val, 1)]
labels += [labels_val]
return torch.cat(pool, 0), torch.cat(logits, 0), torch.cat(labels, 0)
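# Illustrative sketch (hypothetical generator closure): `sample` is a
# zero-argument callable returning a batch of (images, labels), and `net` is the
# wrapped Inception model from load_inception_net() below.
#   def sample():
#       z, y = sample_latents(batch_size)   # hypothetical latent/label sampler
#       return G(z, y), y
#   pool, logits, labels = accumulate_inception_activations(sample, net, 10000)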
# Load and wrap the Inception model
def load_inception_net(parallel=False):
inception_model = inception_v3(pretrained=True, transform_input=False)
inception_model = WrapInception(inception_model.eval()).cuda()
if parallel:
print('Parallelizing Inception module...')
inception_model = nn.DataParallel(inception_model)
return inception_model
# This produces a function which takes in an iterator which returns a set number of samples
# and iterates until it accumulates config['num_inception_images'] images.
# The iterator can return samples with a different batch size than used in
# training, using the setting confg['inception_batchsize']
def prepare_inception_metrics(dataset, parallel, no_fid=False):
# Load metrics; this is intentionally not in a try-except loop so that
# the script will crash here if it cannot find the Inception moments.
  # By default, remove the "_hdf5" suffix from the dataset name.
  # (str.strip removes a *set* of characters and can eat legitimate trailing
  # letters, so cut the suffix explicitly instead.)
  if dataset.endswith('_hdf5'):
    dataset = dataset[:-len('_hdf5')]
data_mu = np.load(dataset+'_inception_moments.npz')['mu']
data_sigma = np.load(dataset+'_inception_moments.npz')['sigma']
# Load network
net = load_inception_net(parallel)
def get_inception_metrics(sample, num_inception_images, num_splits=10,
prints=True, use_torch=True):
if prints:
print('Gathering activations...')
pool, logits, labels = accumulate_inception_activations(sample, net, num_inception_images)
if prints:
print('Calculating Inception Score...')
IS_mean, IS_std = calculate_inception_score(logits.cpu().numpy(), num_splits)
if no_fid:
FID = 9999.0
else:
if prints:
print('Calculating means and covariances...')
if use_torch:
mu, sigma = torch.mean(pool, 0), torch_cov(pool, rowvar=False)
else:
mu, sigma = np.mean(pool.cpu().numpy(), axis=0), np.cov(pool.cpu().numpy(), rowvar=False)
if prints:
print('Covariances calculated, getting FID...')
      #if use_torch:
      # FID = torch_calculate_frechet_distance(mu, sigma, torch.tensor(data_mu).float().cuda(), torch.tensor(data_sigma).float().cuda())
      # FID = float(FID.cpu().numpy())
      #else:
      # The stats are torch tensors only when use_torch is True; the numpy branch
      # above already produces numpy arrays, so only convert in the torch case.
      if use_torch:
        FID = numpy_calculate_frechet_distance(mu.cpu().numpy(), sigma.cpu().numpy(), data_mu, data_sigma)
      else:
        FID = numpy_calculate_frechet_distance(mu, sigma, data_mu, data_sigma)
# Delete mu, sigma, pool, logits, and labels, just in case
del mu, sigma, pool, logits, labels
return IS_mean, IS_std, FID
return get_inception_metrics |
py | b40b69ee8401f4ca5eeb43ad10cd68695acd56fa | # ROS IMPORTS
from std_msgs.msg import Float32MultiArray, Float32
from sensor_msgs.msg import Image
# EAGERx IMPORTS
from eagerx_ode.engine import OdeEngine
from eagerx.core.entities import (
Object,
EngineNode,
SpaceConverter,
EngineState,
)
from eagerx.core.specs import ObjectSpec
from eagerx.core.graph_engine import EngineGraph
import eagerx.core.register as register
class Pendulum(Object):
entity_id = "Eagerx_Ode_Pendulum"
@staticmethod
@register.sensors(
pendulum_output=Float32MultiArray, action_applied=Float32MultiArray, image=Image, theta=Float32, dtheta=Float32
)
@register.actuators(pendulum_input=Float32MultiArray)
@register.engine_states(model_state=Float32MultiArray, model_parameters=Float32MultiArray)
@register.config(render_shape=[480, 480], Dfun="tests.pendulum.pendulum_ode/pendulum_dfun")
def agnostic(spec: ObjectSpec, rate):
"""Agnostic definition of the pendulum"""
# Register standard converters, space_converters, and processors
import eagerx.converters # noqa # pylint: disable=unused-import
# Set observation properties: (space_converters, rate, etc...)
spec.sensors.pendulum_output.rate = rate
spec.sensors.pendulum_output.space_converter = SpaceConverter.make(
"Space_Float32MultiArray", low=[-3.14, -9], high=[3.14, 9], dtype="float32"
)
spec.sensors.action_applied.rate = rate
spec.sensors.action_applied.space_converter = SpaceConverter.make(
"Space_Float32MultiArray", low=[-3], high=[3], dtype="float32"
)
spec.sensors.image.rate = 15
spec.sensors.image.space_converter = SpaceConverter.make(
"Space_Image",
low=0,
high=1,
shape=spec.config.render_shape,
dtype="float32",
)
spec.sensors.theta.rate = rate
spec.sensors.theta.space_converter = SpaceConverter.make("Space_Float32", low=-9999.0, high=9999.0, dtype="float32")
spec.sensors.dtheta.rate = rate
spec.sensors.dtheta.space_converter = SpaceConverter.make("Space_Float32", low=-9999.0, high=9999.0, dtype="float32")
# Set actuator properties: (space_converters, rate, etc...)
spec.actuators.pendulum_input.rate = rate
spec.actuators.pendulum_input.window = 1
spec.actuators.pendulum_input.space_converter = SpaceConverter.make(
"Space_Float32MultiArray", low=[-3], high=[3], dtype="float32"
)
# Set model_state properties: (space_converters)
spec.states.model_state.space_converter = SpaceConverter.make(
"Space_Float32MultiArray",
low=[-3.14159265359, -9],
high=[3.14159265359, 9],
dtype="float32",
)
        # Set model_parameters properties: (space_converters) # [J, m, l, b0, K, R]
fixed = [0.000189238, 0.0563641, 0.0437891, 0.000142205, 0.0502769, 9.83536]
diff = [0, 0, 0, 0.05, 0.05] # Percentual delta with respect to fixed value
        low = [val - d * val for val, d in zip(fixed, diff)]
        high = [val + d * val for val, d in zip(fixed, diff)]
spec.states.model_parameters.space_converter = SpaceConverter.make(
"Space_Float32MultiArray", low=low, high=high, dtype="float32"
)
@staticmethod
@register.spec(entity_id, Object)
def spec(
spec: ObjectSpec,
name: str,
sensors=None,
states=None,
rate=30,
Dfun="tests.pendulum.pendulum_ode/pendulum_dfun",
):
"""Object spec of pendulum"""
# Modify default agnostic params
# Only allow changes to the agnostic params (rates, windows, (space)converters, etc...
spec.config.name = name
spec.config.sensors = sensors if sensors else ["pendulum_output", "action_applied", "image", "theta", "dtheta"]
spec.config.actuators = ["pendulum_input"]
spec.config.states = states if states else ["model_state"]
# Add registered agnostic params
spec.config.render_shape = [480, 480]
spec.config.Dfun = Dfun
# Add engine implementation
Pendulum.agnostic(spec, rate)
@staticmethod
@register.engine(entity_id, OdeEngine) # This decorator pre-initializes engine implementation with default object_params
def ode_engine(spec: ObjectSpec, graph: EngineGraph):
"""Engine-specific implementation (OdeEngine) of the object."""
# Import any object specific entities for this engine
# Set object arguments (nothing to set here in this case)
spec.OdeEngine.ode = "tests.pendulum.pendulum_ode/pendulum_ode"
# Set default params of pendulum ode [J, m, l, b0, K, R].
spec.OdeEngine.ode_params = [
0.000189238,
0.0563641,
0.0437891,
0.000142205,
0.0502769,
9.83536,
]
spec.OdeEngine.Dfun = spec.config.Dfun
# Create engine states (no agnostic states defined in this case)
spec.OdeEngine.states.model_state = EngineState.make("OdeEngineState")
spec.OdeEngine.states.model_parameters = EngineState.make("OdeParameters", list(range(5)))
# Create sensor engine nodes
obs = EngineNode.make(
"OdeOutput",
"pendulum_output",
rate=spec.sensors.pendulum_output.rate,
process=2,
)
image = EngineNode.make(
"OdeRender",
"image",
shape=spec.config.render_shape,
render_fn="tests.pendulum.pendulum_render/pendulum_render_fn",
rate=spec.sensors.image.rate,
process=0,
)
theta = EngineNode.make("OdeFloatOutput", "theta", rate=spec.sensors.theta.rate, process=2, idx=0)
dtheta = EngineNode.make("OdeFloatOutput", "dtheta", rate=spec.sensors.dtheta.rate, process=2, idx=1)
# Create actuator engine nodes
action = EngineNode.make(
"OdeInput",
"pendulum_actuator",
rate=spec.actuators.pendulum_input.rate,
process=2,
default_action=[0],
)
# Connect all engine nodes
graph.add([obs, image, action, theta, dtheta])
graph.connect(source=obs.outputs.observation, sensor="pendulum_output")
graph.connect(actuator="pendulum_input", target=action.inputs.action)
graph.connect(
source=action.outputs.action_applied,
target=image.inputs.action_applied,
skip=True,
)
graph.connect(source=obs.outputs.observation, target=image.inputs.observation)
graph.connect(source=image.outputs.image, sensor="image")
# Add action applied
applied = EngineNode.make("ActionApplied", "applied", rate=spec.sensors.action_applied.rate, process=2)
graph.add(applied)
graph.connect(
source=action.outputs.action_applied,
target=applied.inputs.action_applied,
skip=True,
)
graph.connect(source=applied.outputs.action_applied, sensor="action_applied")
graph.connect(source=theta.outputs.observation, sensor="theta")
graph.connect(source=dtheta.outputs.observation, sensor="dtheta")
|
py | b40b6b034301c00cb328246ad38260bf1c7caf7b | import torch
import torch.nn as nn
import math
from utils import sum_except_batch, mean_except_batch
# Code adapted from : https://github.com/didriknielsen/survae_flows/
class Distribution(nn.Module):
"""Distribution base class."""
def log_prob(self, x, context=None):
"""Calculate log probability under the distribution.
Args:
x: Tensor, shape (batch_size, ...)
Returns:
log_prob: Tensor, shape (batch_size,)
"""
raise NotImplementedError()
def sample(self, num_samples, context=None):
"""Generates samples from the distribution.
Args:
num_samples: int, number of samples to generate.
Returns:
samples: Tensor, shape (num_samples, ...)
"""
raise NotImplementedError()
def sample_with_log_prob(self, num_samples, context=None, n_points=None):
"""Generates samples from the distribution together with their log probability.
Args:
num_samples: int, number of samples to generate.
Returns:
samples: Tensor, shape (num_samples, ...)
log_prob: Tensor, shape (num_samples,)
"""
samples = self.sample(num_samples, context=context, n_points=n_points)
log_prob = self.log_prob(samples, context=context)
return samples, log_prob
def forward(self, *args, mode, **kwargs):
'''
To allow Distribution objects to be wrapped by DataParallelDistribution,
which parallelizes .forward() of replicas on subsets of data.
DataParallelDistribution.log_prob() calls DataParallel.forward().
DataParallel.forward() calls Distribution.forward() for different
data subsets on each device and returns the combined outputs.
'''
if mode == 'log_prob':
return self.log_prob(*args, **kwargs)
else:
raise RuntimeError("Mode {} not supported.".format(mode))
class ConditionalDistribution(Distribution):
"""ConditionalDistribution base class"""
def log_prob(self, x, context):
"""Calculate log probability under the distribution.
Args:
x: Tensor, shape (batch_size, ...).
context: Tensor, shape (batch_size, ...).
Returns:
log_prob: Tensor, shape (batch_size,)
"""
raise NotImplementedError()
def sample(self, context):
"""Generates samples from the distribution.
Args:
context: Tensor, shape (batch_size, ...).
Returns:
samples: Tensor, shape (batch_size, ...).
"""
raise NotImplementedError()
def sample_with_log_prob(self, context):
"""Generates samples from the distribution together with their log probability.
Args:
context: Tensor, shape (batch_size, ...).
        Returns:
samples: Tensor, shape (batch_size, ...).
log_prob: Tensor, shape (batch_size,)
"""
raise NotImplementedError()
class ConditionalMeanStdNormal(ConditionalDistribution):
"""A multivariate Normal with conditional mean and learned std."""
def __init__(self, net, scale_shape):
super(ConditionalMeanStdNormal, self).__init__()
self.net = net
self.log_scale = nn.Parameter(torch.zeros(scale_shape))
def cond_dist(self, context):
mean = self.net(context)
return torch.distributions.Normal(loc=mean, scale=self.log_scale.exp())
def log_prob(self, x, context):
dist = self.cond_dist(context)
return sum_except_batch(dist.log_prob(x), num_dims=2)
def sample(self, context):
dist = self.cond_dist(context)
return dist.rsample()
def sample_with_log_prob(self, context):
dist = self.cond_dist(context)
z = dist.rsample()
log_prob = dist.log_prob(z)
log_prob = sum_except_batch(log_prob, num_dims=2)
return z, log_prob
def mean(self, context):
return self.cond_dist(context).mean
class ConditionalNormal(ConditionalDistribution):
"""A multivariate Normal with conditional mean and log_std."""
def __init__(self, net, split_dim=-1, clamp=False):
super().__init__()
self.net = net
self.clamp = clamp
def cond_dist(self, context):
params = torch.utils.checkpoint.checkpoint(
self.net, context, preserve_rng_state=False)
#params = self.net(context)
mean, log_std = torch.chunk(params, chunks=2, dim=-1)
scale = log_std.exp()
if self.clamp:
scale = scale.clamp_max(self.clamp)
return torch.distributions.Normal(loc=mean, scale=scale)
def log_prob(self, x, context):
dist = self.cond_dist(context)
return sum_except_batch(dist.log_prob(x), num_dims=2)
def sample(self, context):
dist = self.cond_dist(context)
return dist.rsample()
def sample_with_log_prob(self, context):
dist = self.cond_dist(context)
z = dist.rsample()
log_prob = dist.log_prob(z)
log_prob = sum_except_batch(log_prob, num_dims=2)
return z, log_prob
def mean(self, context):
return self.cond_dist(context).mean
def mean_stddev(self, context):
dist = self.cond_dist(context)
return dist.mean, dist.stddev
class StandardUniform(Distribution):
"""A multivariate Uniform with boundaries (0,1)."""
def __init__(self, shape):
super().__init__()
self.shape = torch.Size(shape)
self.register_buffer('zero', torch.zeros(1))
self.register_buffer('one', torch.ones(1))
def log_prob(self, x, context=None):
lb = mean_except_batch(
x.ge(self.zero).type(self.zero.dtype), num_dims=2)
ub = mean_except_batch(x.le(self.one).type(self.one.dtype), num_dims=2)
return torch.log(lb*ub)
def sample(self, num_samples, context=None, n_points=None):
sample_shape = list(self.shape)
sample_shape[-2] = n_points
return torch.rand((num_samples,) + sample_shape, device=self.zero.device, dtype=self.zero.dtype)
class StandardNormal(Distribution):
"""A multivariate Normal with zero mean and unit covariance."""
def __init__(self, shape):
super(StandardNormal, self).__init__()
self.shape = torch.Size(shape)
self.register_buffer('buffer', torch.zeros(1))
def log_prob(self, x, context=None):
log_base = - 0.5 * math.log(2 * math.pi)
log_inner = - 0.5 * x**2
return sum_except_batch(log_base+log_inner, num_dims=2)
def sample(self, num_samples, context=None, n_points=None):
sample_shape = list(self.shape)
sample_shape[-2] = n_points
return torch.randn(num_samples, *sample_shape, device=self.buffer.device, dtype=self.buffer.dtype)
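# Illustrative usage sketch (hypothetical shapes): the distributions here work on
# point-cloud style tensors of shape (batch, n_points, dim), with the number of
# points chosen at sampling time.
#   base = StandardNormal(shape=(100, 3))
#   z = base.sample(num_samples=8, n_points=100)   # -> (8, 100, 3)
#   lp = base.log_prob(z)                          # reduced via sum_except_batch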
class Normal(Distribution):
"""Normal distribution of given loc and scale"""
def __init__(self, loc, scale, shape):
super().__init__()
self.std_normal = StandardNormal(shape)
self.shape = torch.Size(shape)
self.register_buffer('loc', loc)
self.register_buffer('scale', scale)
def log_prob(self, x, context=None):
x = (x-self.loc)/self.scale
return self.std_normal.log_prob(x, context=None)
def sample(self, num_samples, context=None, n_points=None):
sample_shape = list(self.shape)
sample_shape[-2] = n_points
return (self.std_normal.sample(num_samples=num_samples, n_points=n_points, context=None) * self.scale) + self.loc
|
py | b40b6d03f14fa3d4d1a53e936d8d943b857e145c | # code to be evaluated for max field output value
# https://gist.github.com/crmccreary/1074551
"""
Module containing tools for Abaqus cae.
"""
import os
import odbAccess
from abaqusConstants import *
import string
# definition of a method to search for a keyword position
def get_block_position(model, block_prefix):
"""
Find a string and return the block number.
Method to find a given string on the keywords file of a model and return an integer for the position of the first
occurrence.
Parameters
----------
model : class
Abaqus model to search for keyword
block_prefix : string
String to look for
Attributes
----------
Notes
-----
References
----------
"""
pos = 0
for block in model.keywordBlock.sieBlocks:
if string.lower(block[0:len(block_prefix)]) == string.lower(block_prefix):
return pos
pos = pos + 1
return -1
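# Illustrative usage sketch (hypothetical model name and keyword): the returned
# index can be used with Abaqus keyword editing, e.g. to insert a line after the
# first matching block.
#   model = mdb.models['Model-1']
#   model.keywordBlock.synchVersions(storeNodesAndElements=False)
#   pos = get_block_position(model, '*End Part')
#   model.keywordBlock.insert(pos, '\n*Imperfection, file=buckle, step=1')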
def open_odb(odb_path):
"""
A more sophisticated open odb function.
Parameters
----------
odb_path : string
Path and filename of the database (without the '.odb' extension)
Attributes
----------
Notes
-----
References
----------
"""
base, ext = os.path.splitext(odb_path)
odb_path = base + '.odb'
if odbAccess.isUpgradeRequiredForOdb(upgradeRequiredOdbPath=odb_path):
print('odb %s needs upgrading' % (odb_path,))
        path, file_name = os.path.split(odb_path)
        file_name = os.path.splitext(file_name)[0] + "_upgraded.odb"
        new_odb_path = os.path.join(path, file_name)
odbAccess.upgradeOdb(existingOdbPath=odb_path, upgradedOdbPath=new_odb_path)
odb_path = new_odb_path
odb = odbAccess.openOdb(path=odb_path, readOnly=True)
return odb
def field_max(odb, result):
"""
Look for the max value in a field output.
Scan a field output on an abaqus result database and return the maximum value
Parameters
----------
odb : class
Abaqus model containing the field results
result : class
Field output result to search for max
Attributes
----------
Notes
-----
References
----------
"""
result_field, result_invariant = result
_max = -1.0e20
for step in odb.steps.values():
print('Processing Step:', step.name)
for frame in step.frames:
if frame.frameValue > 0.0:
all_fields = frame.fieldOutputs
if all_fields.has_key(result_field):
stress_set = all_fields[result_field]
for stressValue in stress_set.values:
if result_invariant:
if hasattr(stressValue, result_invariant.lower()):
val = getattr(stressValue, result_invariant.lower())
else:
raise ValueError('Field value does not have invariant %s' % (result_invariant,))
else:
val = stressValue.data
if val > _max:
_max = val
else:
raise ValueError('Field output does not have field %s' % (result_field,))
return _max
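# Illustrative usage sketch: `result` is a (field, invariant) pair, e.g. the
# maximum von Mises stress over all frames of all steps.
#   odb = open_odb('job-1')
#   smax = field_max(odb, ('S', 'MISES'))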
# Look for the max value in a history output
# TO BE FIXED. THE REFERENCE POINTS (rp1key, ho1key etc.) ARE NOT GENERIC.
# Fetch maximum load, displacement and LPF for a riks analysis.
# The method assumes that the odb is located in the current directory.
def history_max(odb_name, step_name):
"""
Look for the max value in a history output.
Scan a history output on a step on an abaqus result database and return the maximum value.
Currently, it returns LPF, load and disp history outputs. To be generalised.
Parameters
----------
odb_name : class
Abaqus model containing the history results
step_name : string
Name of the step
Attributes
----------
Notes
-----
References
----------
"""
my_odb = odbAccess.openOdb(path=odb_name + '.odb')
riks_step = my_odb.steps[step_name]
rp1key = riks_step.historyRegions.keys()[1]
ho1key = riks_step.historyRegions[rp1key].historyOutputs.keys()[0]
rp2key = riks_step.historyRegions.keys()[2]
ho2key = riks_step.historyRegions[rp2key].historyOutputs.keys()[0]
asskey = riks_step.historyRegions.keys()[0]
hoasse = riks_step.historyRegions[asskey].historyOutputs.keys()[-1]
load_hist = riks_step.historyRegions[rp1key].historyOutputs[ho1key].data
disp_hist = riks_step.historyRegions[rp2key].historyOutputs[ho2key].data
lpf_hist = riks_step.historyRegions[asskey].historyOutputs[hoasse].data
maxpos = load_hist.index(max(load_hist, key=lambda x: x[1]))
load = load_hist[maxpos][1]
disp = -disp_hist[maxpos][1]
lpf = lpf_hist[maxpos][1]
odbAccess.closeOdb(my_odb)
return lpf, load, disp
def fetch_eigenv(odb_name, step_name, n_eigen):
"""
Get eigenvalues.
Return the eigenvalues of a perturbation buckling analysis from an abaqus database.
Parameters
----------
odb_name : class
Abaqus model containing the eigenvalues
step_name : string
Name of the step
n_eigen : int
Number of eigenvalues to return
Attributes
----------
Notes
-----
References
----------
"""
bckl_odb = odbAccess.openOdb(path=odb_name + '.odb')
bckl_step = bckl_odb.steps[step_name]
# Gather the eigenvalues
eigenvalues = ()
eigen_string = ""
for J_eigenvalues in range(1, n_eigen + 1):
current_eigen = float(bckl_step.frames[J_eigenvalues].description.split()[-1])
eigenvalues = eigenvalues + (current_eigen,)
eigen_string = eigen_string + "%.3E " % current_eigen
# Close the odb
odbAccess.closeOdb(bckl_odb)
# Return variables
return eigenvalues, eigen_string
|
py | b40b6d2b3a743685de0496c0a4037e94a41daaae | # (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: null
callback_type: stdout
requirements:
- set as main display callback
short_description: Don't display stuff to screen
version_added: "2.5"
description:
        - This callback prevents outputting events to the screen
'''
from ansible.plugins.callback import CallbackBase
class CallbackModule(CallbackBase):
'''
    This callback won't print messages to stdout when new callback events are received.
'''
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'null'
|
py | b40b6d4662b477bfacf67940713dff82c6fbd42c | #!/usr/bin/python
import argparse
import os
from .eclipse import write_metadata_files
from .git import clone
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("remote_url", help="url of the remote git repository (https or ssh)")
parser.add_argument("destination", help="directory to clone repository to")
parser.add_argument("--gitargs", metavar="args", type=str, help="extra arguments for git")
    parser.add_argument("--force", action="store_true",
                        help="write metadata files even if they already exist in the repo")
return parser.parse_args()
def main():
args = parse_args()
repo = clone(args.remote_url, args.destination)
write_metadata_files(args.destination, os.path.split(args.destination)[1], args.force)
if __name__ == "__main__":
main() |
py | b40b6e1a4372216934444384bffba196984ac25e | """Script to test all notebooks under examples/ folder."""
import os
from pathlib import Path
from subprocess import PIPE, Popen # noqa S404
if __name__ == "__main__":
dir_path = os.path.dirname(os.path.realpath(__file__))
example_notebook_paths = [
str(path)
for path in list(Path(dir_path).rglob("*.ipynb"))
if ".ipynb_checkpoints" not in str(path)
]
print("\n>>> Notebook Examples Tests")
errors = []
for path in example_notebook_paths:
print(f" >>> Running {path}")
p = Popen( # noqa S607, S603
[
"jupyter",
"nbconvert",
"--to",
"notebook",
"--inplace",
"--no-prompt",
"--execute",
"--log-level='ERROR'",
path,
],
stdout=PIPE,
stderr=PIPE,
)
_, error = p.communicate()
if p.returncode != 0:
errors.append({"notebook": path, "error": error})
print(f" >>> Error in execution!\n")
else:
print(f" >>> Successful execution\n")
if errors:
print(">>> Errors in the following notebooks:")
for run in errors:
print("\n >>>", run["notebook"])
print(run["error"].decode("utf-8"))
|
py | b40b6e5eaf2158b70d0368e49cf631dbe04d50be | # -*- coding: utf-8 -*-
from __future__ import print_function
import abc
import copy
import importlib
import os
import shutil
import sys
import tarfile
import tempfile
import warnings
import zipfile
import requests
from .entity import _ModelDBEntity
from .._internal_utils import (
_artifact_utils,
_histogram_utils,
_utils,
)
from .._protos.public.common import CommonService_pb2 as _CommonCommonService
from ..external import six
# location in DeploymentService model container
_CUSTOM_MODULES_DIR = os.environ.get('VERTA_CUSTOM_MODULES_DIR', "/app/custom_modules/")
# for caching files
_CACHE_DIR = os.path.join(
os.path.expanduser("~"),
".verta",
"cache",
)
@six.add_metaclass(abc.ABCMeta)
class _DeployableEntity(_ModelDBEntity):
@property
def _histogram_endpoint(self):
return "{}://{}/api/v1/monitoring/data/references/{}".format(
self._conn.scheme,
self._conn.socket,
self.id,
)
@abc.abstractmethod
def _get_artifact(self, key):
raise NotImplementedError
@abc.abstractmethod
def download_artifact(self, key, download_to_path):
"""Downloads the artifact with name `key` to path `download_to_path`.
Parameters
----------
key : str
Name of the artifact.
download_to_path : str
Path to download to.
Returns
-------
downloaded_to_path : str
Absolute path where artifact was downloaded to. Matches `download_to_path`.
"""
raise NotImplementedError
@abc.abstractmethod
def download_model(self, download_to_path):
"""Downloads the model logged with :meth:`log_model` to path `download_to_path`.
Parameters
----------
download_to_path : str
Path to download to.
Returns
-------
downloaded_to_path : str
Absolute path where artifact was downloaded to. Matches `download_to_path`.
"""
raise NotImplementedError
def _cache_file(self, filename, contents):
"""
Caches `contents` to `filename` within ``_CACHE_DIR``.
If `contents` represents a ZIP file, then it will be unzipped, and the path to the target
directory will be returned.
Parameters
----------
filename : str
Filename within ``_CACHE_DIR`` to write to.
contents : bytes
Contents to be cached.
Returns
-------
str
Full path to cached contents.
"""
# write contents to temporary file
with tempfile.NamedTemporaryFile(delete=False) as tempf:
tempf.write(contents)
tempf.flush() # flush object buffer
os.fsync(tempf.fileno()) # flush OS buffer
name, extension = os.path.splitext(filename)
if extension == '.zip':
temp_path = tempfile.mkdtemp()
with zipfile.ZipFile(tempf.name, 'r') as zipf:
zipf.extractall(temp_path)
os.remove(tempf.name)
elif extension == '.tgz':
temp_path = tempfile.mkdtemp()
with tarfile.open(tempf.name, 'r:gz') as tarf:
tarf.extractall(temp_path)
os.remove(tempf.name)
elif extension == '.tar':
temp_path = tempfile.mkdtemp()
with tarfile.open(tempf.name, 'r') as tarf:
tarf.extractall(temp_path)
os.remove(tempf.name)
elif extension == '.gz' and os.path.splitext(name)[1] == '.tar':
name = os.path.splitext(name)[0]
temp_path = tempfile.mkdtemp()
with tarfile.open(tempf.name, 'r:gz') as tarf:
tarf.extractall(temp_path)
os.remove(tempf.name)
else:
name = filename
temp_path = tempf.name
path = os.path.join(_CACHE_DIR, name)
# create intermediate dirs
try:
os.makedirs(os.path.dirname(path))
except OSError: # already exists
pass
# move written contents to cache location
shutil.move(temp_path, path)
return path
def _get_cached_file(self, filename):
name, extension = os.path.splitext(filename)
if extension == '.zip':
pass
elif extension == '.tgz':
pass
elif extension == '.tar':
pass
elif extension == '.gz' and os.path.splitext(name)[1] == '.tar':
name = os.path.splitext(name)[0]
else:
name = filename
path = os.path.join(_CACHE_DIR, name)
return path if os.path.exists(path) else None
def fetch_artifacts(self, keys):
"""
Downloads artifacts that are associated with a class model.
Parameters
----------
keys : list of str
Keys of artifacts to download.
Returns
-------
dict of str to str
Map of artifacts' keys to their cache filepaths—for use as the ``artifacts`` parameter
to a Verta class model.
Examples
--------
.. code-block:: python
run.log_artifact("weights", open("weights.npz", 'rb'))
# upload complete (weights)
run.log_artifact("text_embeddings", open("embedding.csv", 'rb'))
# upload complete (text_embeddings)
artifact_keys = ["weights", "text_embeddings"]
artifacts = run.fetch_artifacts(artifact_keys)
artifacts
# {'weights': '/Users/convoliution/.verta/cache/artifacts/50a9726b3666d99aea8af006cf224a7637d0c0b5febb3b0051192ce1e8615f47/weights.npz',
# 'text_embeddings': '/Users/convoliution/.verta/cache/artifacts/2d2d1d809e9bce229f0a766126ae75df14cadd1e8f182561ceae5ad5457a3c38/embedding.csv'}
ModelClass(artifacts=artifacts).predict(["Good book.", "Bad book!"])
# [0.955998517288053, 0.09809996313422353]
run.log_model(ModelClass, artifacts=artifact_keys)
# upload complete (custom_modules.zip)
# upload complete (model.pkl)
# upload complete (model_api.json)
"""
if not (isinstance(keys, list)
and all(isinstance(key, six.string_types) for key in keys)):
raise TypeError("`keys` must be list of str, not {}".format(type(keys)))
# validate that `keys` are actually logged
self._refresh_cache()
existing_artifact_keys = {artifact.key for artifact in self._msg.artifacts}
unlogged_artifact_keys = set(keys) - existing_artifact_keys
if unlogged_artifact_keys:
raise ValueError("`keys` contains keys that have not been logged: {}".format(sorted(unlogged_artifact_keys)))
# get artifact checksums
paths = {artifact.key: artifact.path
for artifact in self._msg.artifacts}
artifacts = dict()
for key in keys:
filename = os.path.join("artifacts", paths[key])
            # check cache, otherwise write to cache
            # "try-get-then-create" can lead to multiple threads trying to write to the cache
            # simultaneously, but artifacts being cached at a particular location should be
            # identical, so multiple writes would be idempotent.
path = self._get_cached_file(filename)
if path is None:
contents = self._get_artifact(key)
if isinstance(contents, tuple):
# ExperimentRun._get_artifact() returns two values (contents, path_only)
# whereas ModelVersion._get_artifact() returns one (contents), so until
# their implementations are unified, this check is to handle the difference.
contents, _ = contents # TODO: raise error if path_only
path = self._cache_file(filename, contents)
artifacts.update({key: path})
return artifacts
def _custom_modules_as_artifact(self, paths=None):
if isinstance(paths, six.string_types):
paths = [paths]
# If we include a path that is actually a module, then we _must_ add its parent to the
# adjusted sys.path in the end so that we can re-import with the same name.
forced_local_sys_paths = []
if paths is not None:
new_paths = []
for p in paths:
abspath = os.path.abspath(os.path.expanduser(p))
if os.path.exists(abspath):
new_paths.append(abspath)
else:
try:
mod = importlib.import_module(p)
new_paths.extend(mod.__path__)
forced_local_sys_paths.extend(map(os.path.dirname, mod.__path__))
except ImportError:
raise ValueError("custom module {} does not correspond to an existing folder or module".format(p))
paths = new_paths
forced_local_sys_paths = sorted(list(set(forced_local_sys_paths)))
# collect local sys paths
local_sys_paths = copy.copy(sys.path)
## replace empty first element with cwd
## https://docs.python.org/3/library/sys.html#sys.path
if local_sys_paths[0] == "":
local_sys_paths[0] = os.getcwd()
## convert to absolute paths
local_sys_paths = list(map(os.path.abspath, local_sys_paths))
## remove paths that don't exist
local_sys_paths = list(filter(os.path.exists, local_sys_paths))
## remove .ipython
local_sys_paths = list(filter(lambda path: not path.endswith(".ipython"), local_sys_paths))
## remove virtual (and real) environments
local_sys_paths = list(filter(lambda path: not _utils.is_in_venv(path), local_sys_paths))
# get paths to files within
if paths is None:
# Python files within filtered sys.path dirs
paths = local_sys_paths
extensions = ['py', 'pyc', 'pyo']
else:
# all user-specified files
paths = paths
extensions = None
local_filepaths = _utils.find_filepaths(
paths, extensions=extensions,
include_hidden=True,
include_venv=False, # ignore virtual environments nested within
)
## remove .git
local_filepaths = set(filter(lambda path: not path.endswith(".git") and ".git/" not in path,
local_filepaths))
# obtain deepest common directory
# This directory on the local system will be mirrored in `_CUSTOM_MODULES_DIR` in
# deployment.
curr_dir = os.path.join(os.getcwd(), "")
paths_plus = list(local_filepaths) + [curr_dir]
common_prefix = os.path.commonprefix(paths_plus)
common_dir = os.path.dirname(common_prefix)
# replace `common_dir` with `_CUSTOM_MODULES_DIR` for deployment sys.path
depl_sys_paths = list(map(lambda path: os.path.relpath(path, common_dir), local_sys_paths + forced_local_sys_paths))
depl_sys_paths = list(map(lambda path: os.path.join(_CUSTOM_MODULES_DIR, path), depl_sys_paths))
bytestream = six.BytesIO()
with zipfile.ZipFile(bytestream, 'w') as zipf:
for filepath in local_filepaths:
arcname = os.path.relpath(filepath, common_dir) # filepath relative to archive root
try:
zipf.write(filepath, arcname)
except:
# maybe file has corrupt metadata; try reading then writing contents
with open(filepath, 'rb') as f:
zipf.writestr(
_artifact_utils.global_read_zipinfo(arcname),
f.read(),
)
# add verta config file for sys.path and chdir
working_dir = os.path.join(_CUSTOM_MODULES_DIR, os.path.relpath(curr_dir, common_dir))
zipf.writestr(
_artifact_utils.global_read_zipinfo("_verta_config.py"),
six.ensure_binary('\n'.join([
"import os, sys",
"",
"",
"sys.path = sys.path[:1] + {} + sys.path[1:]".format(depl_sys_paths),
"",
"try:",
" os.makedirs(\"{}\")".format(working_dir),
"except OSError: # already exists",
" pass",
"os.chdir(\"{}\")".format(working_dir),
]))
)
# add __init__.py
init_filename = "__init__.py"
if init_filename not in zipf.namelist():
zipf.writestr(
_artifact_utils.global_read_zipinfo(init_filename),
b"",
)
bytestream.seek(0)
return bytestream
def log_training_data(self, train_features, train_targets, overwrite=False):
"""
Associate training data with this model reference.
.. versionchanged:: 0.14.4
Instead of uploading the data itself as a CSV artifact ``'train_data'``, this method now
generates a histogram for internal use by our deployment data monitoring system.
.. deprecated:: 0.17.7
This method is no longer supported. Please see our documentation
for information about our platform's data monitoring features.
Parameters
----------
train_features : pd.DataFrame
pandas DataFrame representing features of the training data.
train_targets : pd.DataFrame or pd.Series
pandas DataFrame representing targets of the training data.
overwrite : bool, default False
Whether to allow overwriting existing training data.
"""
warnings.warn(
"This method is no longer supported. Please see our documentation"
" for information about our platform's data monitoring features",
category=FutureWarning,
)
if train_features.__class__.__name__ != "DataFrame":
raise TypeError("`train_features` must be a pandas DataFrame, not {}".format(type(train_features)))
if train_targets.__class__.__name__ == "Series":
train_targets = train_targets.to_frame()
elif train_targets.__class__.__name__ != "DataFrame":
raise TypeError("`train_targets` must be a pandas DataFrame or Series, not {}".format(type(train_targets)))
# check for overlapping column names
common_column_names = set(train_features.columns) & set(train_targets.columns)
if common_column_names:
raise ValueError("`train_features` and `train_targets` combined have overlapping column names;"
" please ensure column names are unique")
train_df = train_features.join(train_targets)
histograms = _histogram_utils.calculate_histograms(train_df)
response = _utils.make_request("PUT", self._histogram_endpoint, self._conn, json=histograms)
_utils.raise_for_http_error(response)
def _get_histogram(self):
"""
Returns histogram JSON.
Note that in Python 2, the JSON library returns strings as ``unicode``.
Returns
-------
dict
"""
response = _utils.make_request("GET", self._histogram_endpoint, self._conn)
try:
_utils.raise_for_http_error(response)
except requests.HTTPError as e:
if e.response.status_code == 404:
e.args = ("log_training_data() may not yet have been called; error message: \n\n{}".format(e.args[0]),) + e.args[1:]
raise e
return response.json()
|
py | b40b6eba333072f3e090d860d165e30512156ae5 | from pysal.model.spvcm import upper_level as upper
from pysal.model.spvcm import utils
from pysal.model.spvcm.tests.utils import Model_Mixin
from pysal.model.spvcm.abstracts import Trace
import unittest as ut
import pandas as pd
import os
FULL_PATH = os.path.dirname(os.path.abspath(__file__))
class Test_Upper_SMA(ut.TestCase, Model_Mixin):
def setUp(self):
super(Test_Upper_SMA, self).build_self()
self.cls = upper.Upper_SMA
del self.inputs["W"]
self.inputs['n_samples'] = 0
instance = self.cls(**self.inputs)
self.answer_trace = Trace.from_csv(FULL_PATH + '/data/upper_sma.csv')
|
py | b40b6fed2d4068002dc842000b5e9bcbcf0c4b9a | # coding: utf-8
"""
ContentManagementApi.py
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class ContentManagementApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def delete_contentmanagement_document(self, document_id, **kwargs):
"""
Delete a document.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_contentmanagement_document(document_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str document_id: Document ID (required)
:param bool override: Override any lock on the document
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['document_id', 'override']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_contentmanagement_document" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'document_id' is set
if ('document_id' not in params) or (params['document_id'] is None):
raise ValueError("Missing the required parameter `document_id` when calling `delete_contentmanagement_document`")
resource_path = '/api/v2/contentmanagement/documents/{documentId}'.replace('{format}', 'json')
path_params = {}
if 'document_id' in params:
path_params['documentId'] = params['document_id']
query_params = {}
if 'override' in params:
query_params['override'] = params['override']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def delete_contentmanagement_share(self, share_id, **kwargs):
"""
Deletes an existing share.
This revokes sharing rights specified in the share record
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_contentmanagement_share(share_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str share_id: Share ID (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['share_id']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_contentmanagement_share" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'share_id' is set
if ('share_id' not in params) or (params['share_id'] is None):
raise ValueError("Missing the required parameter `share_id` when calling `delete_contentmanagement_share`")
resource_path = '/api/v2/contentmanagement/shares/{shareId}'.replace('{format}', 'json')
path_params = {}
if 'share_id' in params:
path_params['shareId'] = params['share_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def delete_contentmanagement_status_status_id(self, status_id, **kwargs):
"""
Cancel the command for this status
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_contentmanagement_status_status_id(status_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str status_id: Status ID (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['status_id']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_contentmanagement_status_status_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'status_id' is set
if ('status_id' not in params) or (params['status_id'] is None):
raise ValueError("Missing the required parameter `status_id` when calling `delete_contentmanagement_status_status_id`")
resource_path = '/api/v2/contentmanagement/status/{statusId}'.replace('{format}', 'json')
path_params = {}
if 'status_id' in params:
path_params['statusId'] = params['status_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def delete_contentmanagement_workspace(self, workspace_id, **kwargs):
"""
Delete a workspace
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_contentmanagement_workspace(workspace_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str workspace_id: Workspace ID (required)
:param str move_children_to_workspace_id: New location for objects in deleted workspace.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['workspace_id', 'move_children_to_workspace_id']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_contentmanagement_workspace" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'workspace_id' is set
if ('workspace_id' not in params) or (params['workspace_id'] is None):
raise ValueError("Missing the required parameter `workspace_id` when calling `delete_contentmanagement_workspace`")
resource_path = '/api/v2/contentmanagement/workspaces/{workspaceId}'.replace('{format}', 'json')
path_params = {}
if 'workspace_id' in params:
path_params['workspaceId'] = params['workspace_id']
query_params = {}
if 'move_children_to_workspace_id' in params:
query_params['moveChildrenToWorkspaceId'] = params['move_children_to_workspace_id']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def delete_contentmanagement_workspace_member(self, workspace_id, member_id, **kwargs):
"""
Delete a member from a workspace
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_contentmanagement_workspace_member(workspace_id, member_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str workspace_id: Workspace ID (required)
:param str member_id: Member ID (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['workspace_id', 'member_id']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_contentmanagement_workspace_member" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'workspace_id' is set
if ('workspace_id' not in params) or (params['workspace_id'] is None):
raise ValueError("Missing the required parameter `workspace_id` when calling `delete_contentmanagement_workspace_member`")
# verify the required parameter 'member_id' is set
if ('member_id' not in params) or (params['member_id'] is None):
raise ValueError("Missing the required parameter `member_id` when calling `delete_contentmanagement_workspace_member`")
resource_path = '/api/v2/contentmanagement/workspaces/{workspaceId}/members/{memberId}'.replace('{format}', 'json')
path_params = {}
if 'workspace_id' in params:
path_params['workspaceId'] = params['workspace_id']
if 'member_id' in params:
path_params['memberId'] = params['member_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def delete_contentmanagement_workspace_tagvalue(self, workspace_id, tag_id, **kwargs):
"""
Delete workspace tag
Delete a tag from a workspace. Will remove this tag from all documents.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_contentmanagement_workspace_tagvalue(workspace_id, tag_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str workspace_id: Workspace ID (required)
:param str tag_id: Tag ID (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['workspace_id', 'tag_id']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_contentmanagement_workspace_tagvalue" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'workspace_id' is set
if ('workspace_id' not in params) or (params['workspace_id'] is None):
raise ValueError("Missing the required parameter `workspace_id` when calling `delete_contentmanagement_workspace_tagvalue`")
# verify the required parameter 'tag_id' is set
if ('tag_id' not in params) or (params['tag_id'] is None):
raise ValueError("Missing the required parameter `tag_id` when calling `delete_contentmanagement_workspace_tagvalue`")
resource_path = '/api/v2/contentmanagement/workspaces/{workspaceId}/tagvalues/{tagId}'.replace('{format}', 'json')
path_params = {}
if 'workspace_id' in params:
path_params['workspaceId'] = params['workspace_id']
if 'tag_id' in params:
path_params['tagId'] = params['tag_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_contentmanagement_document(self, document_id, **kwargs):
"""
Get a document.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_contentmanagement_document(document_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str document_id: Document ID (required)
:param list[str] expand: Which fields, if any, to expand.
:return: Document
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['document_id', 'expand']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_contentmanagement_document" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'document_id' is set
if ('document_id' not in params) or (params['document_id'] is None):
raise ValueError("Missing the required parameter `document_id` when calling `get_contentmanagement_document`")
resource_path = '/api/v2/contentmanagement/documents/{documentId}'.replace('{format}', 'json')
path_params = {}
if 'document_id' in params:
path_params['documentId'] = params['document_id']
query_params = {}
if 'expand' in params:
query_params['expand'] = params['expand']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Document',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_contentmanagement_document_audits(self, document_id, **kwargs):
"""
Get a list of audits for a document.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_contentmanagement_document_audits(document_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str document_id: Document ID (required)
:param int page_size: Page size
:param int page_number: Page number
:param str transaction_filter: Transaction filter
:param str level: level
:param str sort_by: Sort by
:param str sort_order: Sort order
:return: DocumentAuditEntityListing
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['document_id', 'page_size', 'page_number', 'transaction_filter', 'level', 'sort_by', 'sort_order']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_contentmanagement_document_audits" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'document_id' is set
if ('document_id' not in params) or (params['document_id'] is None):
raise ValueError("Missing the required parameter `document_id` when calling `get_contentmanagement_document_audits`")
resource_path = '/api/v2/contentmanagement/documents/{documentId}/audits'.replace('{format}', 'json')
path_params = {}
if 'document_id' in params:
path_params['documentId'] = params['document_id']
query_params = {}
if 'page_size' in params:
query_params['pageSize'] = params['page_size']
if 'page_number' in params:
query_params['pageNumber'] = params['page_number']
if 'transaction_filter' in params:
query_params['transactionFilter'] = params['transaction_filter']
if 'level' in params:
query_params['level'] = params['level']
if 'sort_by' in params:
query_params['sortBy'] = params['sort_by']
if 'sort_order' in params:
query_params['sortOrder'] = params['sort_order']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DocumentAuditEntityListing',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_contentmanagement_document_content(self, document_id, **kwargs):
"""
Download a document.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_contentmanagement_document_content(document_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str document_id: Document ID (required)
:param str disposition: Request how the content will be downloaded: as a file attachment or inline. Default is attachment.
:param str content_type: The requested format for the specified document. If supported, the document will be returned in that format. Example: contentType=audio/wav
:return: DownloadResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['document_id', 'disposition', 'content_type']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_contentmanagement_document_content" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'document_id' is set
if ('document_id' not in params) or (params['document_id'] is None):
raise ValueError("Missing the required parameter `document_id` when calling `get_contentmanagement_document_content`")
resource_path = '/api/v2/contentmanagement/documents/{documentId}/content'.replace('{format}', 'json')
path_params = {}
if 'document_id' in params:
path_params['documentId'] = params['document_id']
query_params = {}
if 'disposition' in params:
query_params['disposition'] = params['disposition']
if 'content_type' in params:
query_params['contentType'] = params['content_type']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DownloadResponse',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
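# Illustrative usage sketch (hypothetical document ID): request download information for a
# document, asking for inline disposition and, where supported, a specific content type.
#
#     download = api.get_contentmanagement_document_content(
#         'my-document-id', disposition='inline', content_type='audio/wav')
#
# The DownloadResponse is expected to carry the URL(s) from which the content itself can be
# fetched; the exact attribute names depend on the DownloadResponse model.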
def get_contentmanagement_documents(self, workspace_id, **kwargs):
"""
Get a list of documents.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_contentmanagement_documents(workspace_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str workspace_id: Workspace ID (required)
:param str name: Name
:param list[str] expand: Which fields, if any, to expand.
:param int page_size: Page size
:param int page_number: Page number
:param str sort_by: name or dateCreated
:param str sort_order: ascending or descending
:return: DocumentEntityListing
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['workspace_id', 'name', 'expand', 'page_size', 'page_number', 'sort_by', 'sort_order']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_contentmanagement_documents" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'workspace_id' is set
if ('workspace_id' not in params) or (params['workspace_id'] is None):
raise ValueError("Missing the required parameter `workspace_id` when calling `get_contentmanagement_documents`")
resource_path = '/api/v2/contentmanagement/documents'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'workspace_id' in params:
query_params['workspaceId'] = params['workspace_id']
if 'name' in params:
query_params['name'] = params['name']
if 'expand' in params:
query_params['expand'] = params['expand']
if 'page_size' in params:
query_params['pageSize'] = params['page_size']
if 'page_number' in params:
query_params['pageNumber'] = params['page_number']
if 'sort_by' in params:
query_params['sortBy'] = params['sort_by']
if 'sort_order' in params:
query_params['sortOrder'] = params['sort_order']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DocumentEntityListing',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
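# Illustrative usage sketch (hypothetical workspace ID): page through the documents of a
# workspace, newest first, using the pageSize/pageNumber listing convention.
#
#     listing = api.get_contentmanagement_documents(
#         'my-workspace-id', page_size=25, page_number=1,
#         sort_by='dateCreated', sort_order='descending')
#
# listing is a DocumentEntityListing; its entities attribute is assumed to hold the
# Document objects for the requested page.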
def get_contentmanagement_query(self, query_phrase, **kwargs):
"""
Query content
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_contentmanagement_query(query_phrase, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str query_phrase: Phrase tokens are ANDed together over all searchable fields (required)
:param int page_size: Page size
:param int page_number: Page number
:param str sort_by: name or dateCreated
:param str sort_order: ascending or descending
:param list[str] expand: Which fields, if any, to expand.
:return: QueryResults
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['query_phrase', 'page_size', 'page_number', 'sort_by', 'sort_order', 'expand']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_contentmanagement_query" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'query_phrase' is set
if ('query_phrase' not in params) or (params['query_phrase'] is None):
raise ValueError("Missing the required parameter `query_phrase` when calling `get_contentmanagement_query`")
resource_path = '/api/v2/contentmanagement/query'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'page_size' in params:
query_params['pageSize'] = params['page_size']
if 'page_number' in params:
query_params['pageNumber'] = params['page_number']
if 'sort_by' in params:
query_params['sortBy'] = params['sort_by']
if 'sort_order' in params:
query_params['sortOrder'] = params['sort_order']
if 'query_phrase' in params:
query_params['queryPhrase'] = params['query_phrase']
if 'expand' in params:
query_params['expand'] = params['expand']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='QueryResults',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
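# Illustrative usage sketch: search content by phrase. Phrase tokens are ANDed together
# over all searchable fields, so 'invoice 2016' only matches items containing both tokens.
#
#     results = api.get_contentmanagement_query(
#         'invoice 2016', page_size=10, page_number=1,
#         sort_by='name', sort_order='ascending')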
def get_contentmanagement_securityprofile(self, security_profile_id, **kwargs):
"""
Get a Security Profile
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_contentmanagement_securityprofile(security_profile_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str security_profile_id: Security Profile Id (required)
:return: SecurityProfile
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['security_profile_id']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_contentmanagement_securityprofile" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'security_profile_id' is set
if ('security_profile_id' not in params) or (params['security_profile_id'] is None):
raise ValueError("Missing the required parameter `security_profile_id` when calling `get_contentmanagement_securityprofile`")
resource_path = '/api/v2/contentmanagement/securityprofiles/{securityProfileId}'.replace('{format}', 'json')
path_params = {}
if 'security_profile_id' in params:
path_params['securityProfileId'] = params['security_profile_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SecurityProfile',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_contentmanagement_securityprofiles(self, **kwargs):
"""
Get a list of Security Profiles
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_contentmanagement_securityprofiles(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: SecurityProfileEntityListing
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_contentmanagement_securityprofiles" % key
)
params[key] = val
del params['kwargs']
resource_path = '/api/v2/contentmanagement/securityprofiles'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SecurityProfileEntityListing',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_contentmanagement_share(self, share_id, **kwargs):
"""
Retrieve details about an existing share.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_contentmanagement_share(share_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str share_id: Share ID (required)
:param list[str] expand: Which fields, if any, to expand.
:return: Share
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['share_id', 'expand']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_contentmanagement_share" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'share_id' is set
if ('share_id' not in params) or (params['share_id'] is None):
raise ValueError("Missing the required parameter `share_id` when calling `get_contentmanagement_share`")
resource_path = '/api/v2/contentmanagement/shares/{shareId}'.replace('{format}', 'json')
path_params = {}
if 'share_id' in params:
path_params['shareId'] = params['share_id']
query_params = {}
if 'expand' in params:
query_params['expand'] = params['expand']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Share',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_contentmanagement_shared_shared_id(self, shared_id, **kwargs):
"""
Get shared documents. Securely download a shared document.
This method requires the download sharing URI obtained from the get document response (downloadSharingUri). Documents may be shared between users in the same workspace, or with any user by creating a content management share.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_contentmanagement_shared_shared_id(shared_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str shared_id: Shared ID (required)
:param bool redirect: Turn on or off redirect
:param str disposition: Request how the share content will be downloaded: attached as a file or inline. Default is attachment.
:param str content_type: The requested format for the specified document. If supported, the document will be returned in that format. Example: contentType=audio/wav
:param str expand: Expand some document fields
:return: SharedResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['shared_id', 'redirect', 'disposition', 'content_type', 'expand']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_contentmanagement_shared_shared_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'shared_id' is set
if ('shared_id' not in params) or (params['shared_id'] is None):
raise ValueError("Missing the required parameter `shared_id` when calling `get_contentmanagement_shared_shared_id`")
resource_path = '/api/v2/contentmanagement/shared/{sharedId}'.replace('{format}', 'json')
path_params = {}
if 'shared_id' in params:
path_params['sharedId'] = params['shared_id']
query_params = {}
if 'redirect' in params:
query_params['redirect'] = params['redirect']
if 'disposition' in params:
query_params['disposition'] = params['disposition']
if 'content_type' in params:
query_params['contentType'] = params['content_type']
if 'expand' in params:
query_params['expand'] = params['expand']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SharedResponse',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
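# Illustrative usage sketch (hypothetical shared ID): fetch download information for a
# shared document without following the redirect, e.g. to hand the resulting URL to
# another component.
#
#     shared = api.get_contentmanagement_shared_shared_id(
#         'my-shared-id', redirect=False, disposition='attachment')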
def get_contentmanagement_shares(self, **kwargs):
"""
Get a list of shares. You must specify at least one filter (e.g. entityId).
Failing to specify a filter will return 400.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_contentmanagement_shares(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str entity_id: Filters the shares returned to only the entity specified by the value of this parameter.
:param list[str] expand: Which fields, if any, to expand.
:param int page_size: Page size
:param int page_number: Page number
:return: ShareEntityListing
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['entity_id', 'expand', 'page_size', 'page_number']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_contentmanagement_shares" % key
)
params[key] = val
del params['kwargs']
resource_path = '/api/v2/contentmanagement/shares'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'entity_id' in params:
query_params['entityId'] = params['entity_id']
if 'expand' in params:
query_params['expand'] = params['expand']
if 'page_size' in params:
query_params['pageSize'] = params['page_size']
if 'page_number' in params:
query_params['pageNumber'] = params['page_number']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ShareEntityListing',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
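# Illustrative usage sketch (hypothetical document ID): this endpoint requires at least one
# filter, so pass the entity whose shares should be listed; omitting the filter returns 400.
#
#     shares = api.get_contentmanagement_shares(
#         entity_id='my-document-id', expand=['member'], page_size=25)
#
# 'member' is shown only as a plausible expand value; consult the API documentation for the
# expand values that are actually supported.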
def get_contentmanagement_status(self, **kwargs):
"""
Get a list of statuses for pending operations
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_contentmanagement_status(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int page_size: Page size
:param int page_number: Page number
:return: CommandStatusEntityListing
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['page_size', 'page_number']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_contentmanagement_status" % key
)
params[key] = val
del params['kwargs']
resource_path = '/api/v2/contentmanagement/status'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'page_size' in params:
query_params['pageSize'] = params['page_size']
if 'page_number' in params:
query_params['pageNumber'] = params['page_number']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CommandStatusEntityListing',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_contentmanagement_status_status_id(self, status_id, **kwargs):
"""
Get a status.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_contentmanagement_status_status_id(status_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str status_id: Status ID (required)
:return: CommandStatus
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['status_id']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_contentmanagement_status_status_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'status_id' is set
if ('status_id' not in params) or (params['status_id'] is None):
raise ValueError("Missing the required parameter `status_id` when calling `get_contentmanagement_status_status_id`")
resource_path = '/api/v2/contentmanagement/status/{statusId}'.replace('{format}', 'json')
path_params = {}
if 'status_id' in params:
path_params['statusId'] = params['status_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CommandStatus',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
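# Illustrative usage sketch (hypothetical status ID): long-running content operations report
# progress through command statuses, so a caller can poll a single status until it completes.
# The attribute and status-value names below are assumptions, not a documented contract.
#
#     import time
#     status = api.get_contentmanagement_status_status_id('my-status-id')
#     while getattr(status, 'status', None) == 'PROCESSING':
#         time.sleep(2)
#         status = api.get_contentmanagement_status_status_id('my-status-id')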
def get_contentmanagement_usage(self, **kwargs):
"""
Get usage details.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_contentmanagement_usage(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: Usage
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_contentmanagement_usage" % key
)
params[key] = val
del params['kwargs']
resource_path = '/api/v2/contentmanagement/usage'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Usage',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_contentmanagement_workspace(self, workspace_id, **kwargs):
"""
Get a workspace.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_contentmanagement_workspace(workspace_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str workspace_id: Workspace ID (required)
:param list[str] expand: Which fields, if any, to expand.
:return: Workspace
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['workspace_id', 'expand']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_contentmanagement_workspace" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'workspace_id' is set
if ('workspace_id' not in params) or (params['workspace_id'] is None):
raise ValueError("Missing the required parameter `workspace_id` when calling `get_contentmanagement_workspace`")
resource_path = '/api/v2/contentmanagement/workspaces/{workspaceId}'.replace('{format}', 'json')
path_params = {}
if 'workspace_id' in params:
path_params['workspaceId'] = params['workspace_id']
query_params = {}
if 'expand' in params:
query_params['expand'] = params['expand']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Workspace',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_contentmanagement_workspace_documents(self, workspace_id, **kwargs):
"""
Get a list of documents.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_contentmanagement_workspace_documents(workspace_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str workspace_id: Workspace ID (required)
:param list[str] expand: Which fields, if any, to expand.
:param int page_size: Page size
:param int page_number: Page number
:param str sort_by: name or dateCreated
:param str sort_order: ascending or descending
:return: DocumentEntityListing
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['workspace_id', 'expand', 'page_size', 'page_number', 'sort_by', 'sort_order']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_contentmanagement_workspace_documents" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'workspace_id' is set
if ('workspace_id' not in params) or (params['workspace_id'] is None):
raise ValueError("Missing the required parameter `workspace_id` when calling `get_contentmanagement_workspace_documents`")
resource_path = '/api/v2/contentmanagement/workspaces/{workspaceId}/documents'.replace('{format}', 'json')
path_params = {}
if 'workspace_id' in params:
path_params['workspaceId'] = params['workspace_id']
query_params = {}
if 'expand' in params:
query_params['expand'] = params['expand']
if 'page_size' in params:
query_params['pageSize'] = params['page_size']
if 'page_number' in params:
query_params['pageNumber'] = params['page_number']
if 'sort_by' in params:
query_params['sortBy'] = params['sort_by']
if 'sort_order' in params:
query_params['sortOrder'] = params['sort_order']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DocumentEntityListing',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_contentmanagement_workspace_member(self, workspace_id, member_id, **kwargs):
"""
Get a workspace member
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_contentmanagement_workspace_member(workspace_id, member_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str workspace_id: Workspace ID (required)
:param str member_id: Member ID (required)
:param list[str] expand: Which fields, if any, to expand.
:return: WorkspaceMember
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['workspace_id', 'member_id', 'expand']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_contentmanagement_workspace_member" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'workspace_id' is set
if ('workspace_id' not in params) or (params['workspace_id'] is None):
raise ValueError("Missing the required parameter `workspace_id` when calling `get_contentmanagement_workspace_member`")
# verify the required parameter 'member_id' is set
if ('member_id' not in params) or (params['member_id'] is None):
raise ValueError("Missing the required parameter `member_id` when calling `get_contentmanagement_workspace_member`")
resource_path = '/api/v2/contentmanagement/workspaces/{workspaceId}/members/{memberId}'.replace('{format}', 'json')
path_params = {}
if 'workspace_id' in params:
path_params['workspaceId'] = params['workspace_id']
if 'member_id' in params:
path_params['memberId'] = params['member_id']
query_params = {}
if 'expand' in params:
query_params['expand'] = params['expand']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='WorkspaceMember',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_contentmanagement_workspace_members(self, workspace_id, **kwargs):
"""
Get a list of workspace members
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_contentmanagement_workspace_members(workspace_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str workspace_id: Workspace ID (required)
:param int page_size: Page size
:param int page_number: Page number
:param list[str] expand: Which fields, if any, to expand.
:return: WorkspaceMemberEntityListing
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['workspace_id', 'page_size', 'page_number', 'expand']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_contentmanagement_workspace_members" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'workspace_id' is set
if ('workspace_id' not in params) or (params['workspace_id'] is None):
raise ValueError("Missing the required parameter `workspace_id` when calling `get_contentmanagement_workspace_members`")
resource_path = '/api/v2/contentmanagement/workspaces/{workspaceId}/members'.replace('{format}', 'json')
path_params = {}
if 'workspace_id' in params:
path_params['workspaceId'] = params['workspace_id']
query_params = {}
if 'page_size' in params:
query_params['pageSize'] = params['page_size']
if 'page_number' in params:
query_params['pageNumber'] = params['page_number']
if 'expand' in params:
query_params['expand'] = params['expand']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='WorkspaceMemberEntityListing',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_contentmanagement_workspace_tagvalue(self, workspace_id, tag_id, **kwargs):
"""
Get a workspace tag
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_contentmanagement_workspace_tagvalue(workspace_id, tag_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str workspace_id: Workspace ID (required)
:param str tag_id: Tag ID (required)
:param list[str] expand: Which fields, if any, to expand.
:return: TagValue
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['workspace_id', 'tag_id', 'expand']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_contentmanagement_workspace_tagvalue" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'workspace_id' is set
if ('workspace_id' not in params) or (params['workspace_id'] is None):
raise ValueError("Missing the required parameter `workspace_id` when calling `get_contentmanagement_workspace_tagvalue`")
# verify the required parameter 'tag_id' is set
if ('tag_id' not in params) or (params['tag_id'] is None):
raise ValueError("Missing the required parameter `tag_id` when calling `get_contentmanagement_workspace_tagvalue`")
resource_path = '/api/v2/contentmanagement/workspaces/{workspaceId}/tagvalues/{tagId}'.replace('{format}', 'json')
path_params = {}
if 'workspace_id' in params:
path_params['workspaceId'] = params['workspace_id']
if 'tag_id' in params:
path_params['tagId'] = params['tag_id']
query_params = {}
if 'expand' in params:
query_params['expand'] = params['expand']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TagValue',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_contentmanagement_workspace_tagvalues(self, workspace_id, **kwargs):
"""
Get a list of workspace tags
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_contentmanagement_workspace_tagvalues(workspace_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str workspace_id: Workspace ID (required)
:param str value: Filter the list of tags returned
:param int page_size: Page size
:param int page_number: Page number
:param list[str] expand: Which fields, if any, to expand.
:return: TagValueEntityListing
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['workspace_id', 'value', 'page_size', 'page_number', 'expand']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_contentmanagement_workspace_tagvalues" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'workspace_id' is set
if ('workspace_id' not in params) or (params['workspace_id'] is None):
raise ValueError("Missing the required parameter `workspace_id` when calling `get_contentmanagement_workspace_tagvalues`")
resource_path = '/api/v2/contentmanagement/workspaces/{workspaceId}/tagvalues'.replace('{format}', 'json')
path_params = {}
if 'workspace_id' in params:
path_params['workspaceId'] = params['workspace_id']
query_params = {}
if 'value' in params:
query_params['value'] = params['value']
if 'page_size' in params:
query_params['pageSize'] = params['page_size']
if 'page_number' in params:
query_params['pageNumber'] = params['page_number']
if 'expand' in params:
query_params['expand'] = params['expand']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TagValueEntityListing',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_contentmanagement_workspaces(self, **kwargs):
"""
Get a list of workspaces.
Specifying 'content' access will return all workspaces the user has document access to, while 'admin' access will return all group workspaces the user has administrative rights to.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_contentmanagement_workspaces(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int page_size: Page size
:param int page_number: Page number
:param list[str] access: Requested access level.
:param list[str] expand: Which fields, if any, to expand.
:return: WorkspaceEntityListing
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['page_size', 'page_number', 'access', 'expand']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_contentmanagement_workspaces" % key
)
params[key] = val
del params['kwargs']
resource_path = '/api/v2/contentmanagement/workspaces'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'page_size' in params:
query_params['pageSize'] = params['page_size']
if 'page_number' in params:
query_params['pageNumber'] = params['page_number']
if 'access' in params:
query_params['access'] = params['access']
if 'expand' in params:
query_params['expand'] = params['expand']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='WorkspaceEntityListing',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
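# Illustrative usage sketch: list only the workspaces the caller can administer, using the
# 'admin' access level described in the docstring above.
#
#     admin_workspaces = api.get_contentmanagement_workspaces(access=['admin'], page_size=50)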
def post_contentmanagement_auditquery(self, body, **kwargs):
"""
Query audits
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.post_contentmanagement_auditquery(body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param ContentQueryRequest body: Allows for a filtered query returning facet information (required)
:return: QueryResults
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method post_contentmanagement_auditquery" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `post_contentmanagement_auditquery`")
resource_path = '/api/v2/contentmanagement/auditquery'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='QueryResults',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def post_contentmanagement_document(self, document_id, body, **kwargs):
"""
Update a document.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.post_contentmanagement_document(document_id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str document_id: Document ID (required)
:param DocumentUpdate body: Document (required)
:param str expand: Expand some document fields
:param bool override: Override any lock on the document
:return: Document
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['document_id', 'body', 'expand', 'override']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method post_contentmanagement_document" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'document_id' is set
if ('document_id' not in params) or (params['document_id'] is None):
raise ValueError("Missing the required parameter `document_id` when calling `post_contentmanagement_document`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `post_contentmanagement_document`")
resource_path = '/api/v2/contentmanagement/documents/{documentId}'.replace('{format}', 'json')
path_params = {}
if 'document_id' in params:
path_params['documentId'] = params['document_id']
query_params = {}
if 'expand' in params:
query_params['expand'] = params['expand']
if 'override' in params:
query_params['override'] = params['override']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Document',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def post_contentmanagement_document_content(self, document_id, body, **kwargs):
"""
Replace the contents of a document.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.post_contentmanagement_document_content(document_id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str document_id: Document ID (required)
:param ReplaceRequest body: Replace Request (required)
:param bool override: Override any lock on the document
:return: ReplaceResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['document_id', 'body', 'override']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method post_contentmanagement_document_content" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'document_id' is set
if ('document_id' not in params) or (params['document_id'] is None):
raise ValueError("Missing the required parameter `document_id` when calling `post_contentmanagement_document_content`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `post_contentmanagement_document_content`")
resource_path = '/api/v2/contentmanagement/documents/{documentId}/content'.replace('{format}', 'json')
path_params = {}
if 'document_id' in params:
path_params['documentId'] = params['document_id']
query_params = {}
if 'override' in params:
query_params['override'] = params['override']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ReplaceResponse',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def post_contentmanagement_documents(self, body, **kwargs):
"""
Add a document.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.post_contentmanagement_documents(body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param DocumentUpload body: Document (required)
:param str copy_source: Copy a document within a workspace or to a new workspace. Provide a document ID as the copy source.
:param str move_source: Move a document to a new workspace. Provide a document ID as the move source.
:param bool override: Override any lock on the source document
:return: Document
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'copy_source', 'move_source', 'override']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method post_contentmanagement_documents" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `post_contentmanagement_documents`")
resource_path = '/api/v2/contentmanagement/documents'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'copy_source' in params:
query_params['copySource'] = params['copy_source']
if 'move_source' in params:
query_params['moveSource'] = params['move_source']
if 'override' in params:
query_params['override'] = params['override']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Document',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
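# Illustrative usage sketch (hypothetical IDs; the model class name and attributes are
# assumptions): copy an existing document into another workspace by passing the source
# document ID via copy_source and a DocumentUpload body describing the destination.
#
#     body = DocumentUpload()
#     body.name = 'Copied document'
#     body.workspace = {'id': 'destination-workspace-id'}
#     copied = api.post_contentmanagement_documents(body, copy_source='source-document-id')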
def post_contentmanagement_query(self, body, **kwargs):
"""
Query content
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.post_contentmanagement_query(body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param QueryRequest body: Allows for a filtered query returning facet information (required)
:param str expand: Expand some document fields
:return: QueryResults
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'expand']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method post_contentmanagement_query" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `post_contentmanagement_query`")
resource_path = '/api/v2/contentmanagement/query'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'expand' in params:
query_params['expand'] = params['expand']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='QueryResults',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def post_contentmanagement_shares(self, body, **kwargs):
"""
Create a new share, or update an existing share if the entity has already been shared
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.post_contentmanagement_shares(body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param CreateShareRequest body: CreateShareRequest - entity id and type and a single member or list of members are required (required)
:return: CreateShareResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method post_contentmanagement_shares" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `post_contentmanagement_shares`")
resource_path = '/api/v2/contentmanagement/shares'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CreateShareResponse',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def post_contentmanagement_workspace_tagvalues(self, workspace_id, body, **kwargs):
"""
Create a workspace tag
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.post_contentmanagement_workspace_tagvalues(workspace_id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str workspace_id: Workspace ID (required)
:param TagValue body: tag (required)
:return: TagValue
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['workspace_id', 'body']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method post_contentmanagement_workspace_tagvalues" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'workspace_id' is set
if ('workspace_id' not in params) or (params['workspace_id'] is None):
raise ValueError("Missing the required parameter `workspace_id` when calling `post_contentmanagement_workspace_tagvalues`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `post_contentmanagement_workspace_tagvalues`")
resource_path = '/api/v2/contentmanagement/workspaces/{workspaceId}/tagvalues'.replace('{format}', 'json')
path_params = {}
if 'workspace_id' in params:
path_params['workspaceId'] = params['workspace_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TagValue',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def post_contentmanagement_workspace_tagvalues_query(self, workspace_id, body, **kwargs):
"""
Perform a prefix query on tags in the workspace
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.post_contentmanagement_workspace_tagvalues_query(workspace_id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str workspace_id: Workspace ID (required)
:param TagQueryRequest body: query (required)
:param list[str] expand: Which fields, if any, to expand.
:return: TagValueEntityListing
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['workspace_id', 'body', 'expand']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method post_contentmanagement_workspace_tagvalues_query" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'workspace_id' is set
if ('workspace_id' not in params) or (params['workspace_id'] is None):
raise ValueError("Missing the required parameter `workspace_id` when calling `post_contentmanagement_workspace_tagvalues_query`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `post_contentmanagement_workspace_tagvalues_query`")
resource_path = '/api/v2/contentmanagement/workspaces/{workspaceId}/tagvalues/query'.replace('{format}', 'json')
path_params = {}
if 'workspace_id' in params:
path_params['workspaceId'] = params['workspace_id']
query_params = {}
if 'expand' in params:
query_params['expand'] = params['expand']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TagValueEntityListing',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def post_contentmanagement_workspaces(self, body, **kwargs):
"""
Create a group workspace
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.post_contentmanagement_workspaces(body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param WorkspaceCreate body: Workspace (required)
:return: Workspace
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method post_contentmanagement_workspaces" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `post_contentmanagement_workspaces`")
resource_path = '/api/v2/contentmanagement/workspaces'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Workspace',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def put_contentmanagement_workspace(self, workspace_id, body, **kwargs):
"""
Update a workspace
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.put_contentmanagement_workspace(workspace_id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str workspace_id: Workspace ID (required)
:param Workspace body: Workspace (required)
:return: Workspace
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['workspace_id', 'body']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method put_contentmanagement_workspace" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'workspace_id' is set
if ('workspace_id' not in params) or (params['workspace_id'] is None):
raise ValueError("Missing the required parameter `workspace_id` when calling `put_contentmanagement_workspace`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `put_contentmanagement_workspace`")
resource_path = '/api/v2/contentmanagement/workspaces/{workspaceId}'.replace('{format}', 'json')
path_params = {}
if 'workspace_id' in params:
path_params['workspaceId'] = params['workspace_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Workspace',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def put_contentmanagement_workspace_member(self, workspace_id, member_id, body, **kwargs):
"""
Add a member to a workspace
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.put_contentmanagement_workspace_member(workspace_id, member_id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str workspace_id: Workspace ID (required)
:param str member_id: Member ID (required)
:param WorkspaceMember body: Workspace Member (required)
:return: WorkspaceMember
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['workspace_id', 'member_id', 'body']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method put_contentmanagement_workspace_member" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'workspace_id' is set
if ('workspace_id' not in params) or (params['workspace_id'] is None):
raise ValueError("Missing the required parameter `workspace_id` when calling `put_contentmanagement_workspace_member`")
# verify the required parameter 'member_id' is set
if ('member_id' not in params) or (params['member_id'] is None):
raise ValueError("Missing the required parameter `member_id` when calling `put_contentmanagement_workspace_member`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `put_contentmanagement_workspace_member`")
resource_path = '/api/v2/contentmanagement/workspaces/{workspaceId}/members/{memberId}'.replace('{format}', 'json')
path_params = {}
if 'workspace_id' in params:
path_params['workspaceId'] = params['workspace_id']
if 'member_id' in params:
path_params['memberId'] = params['member_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='WorkspaceMember',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def put_contentmanagement_workspace_tagvalue(self, workspace_id, tag_id, body, **kwargs):
"""
Update a workspace tag. Will update all documents with the new tag value.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.put_contentmanagement_workspace_tagvalue(workspace_id, tag_id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str workspace_id: Workspace ID (required)
:param str tag_id: Tag ID (required)
:param TagValue body: Workspace (required)
:return: TagValue
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['workspace_id', 'tag_id', 'body']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method put_contentmanagement_workspace_tagvalue" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'workspace_id' is set
if ('workspace_id' not in params) or (params['workspace_id'] is None):
raise ValueError("Missing the required parameter `workspace_id` when calling `put_contentmanagement_workspace_tagvalue`")
# verify the required parameter 'tag_id' is set
if ('tag_id' not in params) or (params['tag_id'] is None):
raise ValueError("Missing the required parameter `tag_id` when calling `put_contentmanagement_workspace_tagvalue`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `put_contentmanagement_workspace_tagvalue`")
resource_path = '/api/v2/contentmanagement/workspaces/{workspaceId}/tagvalues/{tagId}'.replace('{format}', 'json')
path_params = {}
if 'workspace_id' in params:
path_params['workspaceId'] = params['workspace_id']
if 'tag_id' in params:
path_params['tagId'] = params['tag_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TagValue',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
|
py | b40b701ccd3cfaa12d82f4f776f0cea4cb9538a7 | """Contains code related to the RecSA module."""
# standard
import logging
import time
import datetime
# local
from modules.constants import RUN_SLEEP, BOTTOM, NOT_PARTICIPANT
import modules.constants as constants
from resolve.enums import MessageType
# globals
logger = logging.getLogger(__name__)
class RecSAModule:
"""RecSA module"""
def __init__(self, id, resolver, n, init_config=None):
"""Initializes the module."""
self.resolver = resolver
self.id = id
self.number_of_nodes = n
self.msgs_sent = 0
# Algorithm variables:
self.config = {} # Dictionary where key is an id and value is a config (list)
self.fd = {self.id: self.resolver.fd_get_trusted()} # Dictionary where key is an id and value is a list of trusted ids
self.fd_part = {self.id: []} # Dictionary where key is an id and value is a list of trusted participants
self.echo_part = {}
self.echo_prp = {}
self.echo_all = {}
self.prp = {} # Dictionary where key is an id and value is tuple (phase, set). set='BOTTOM' indicates 'no proposal'
self.alll = {} # Dictionary where key is an id and value is a Boolean
self.all_seen = set() # Set of id k for which p_i received the alll[k] indication
for k in range(self.number_of_nodes):
self.config[k] = constants.NOT_PARTICIPANT
self.prp[k] = constants.DFLT_NTF
self.alll[k] = False
self.echo_part[k] = self.get_fd_part_j(k)
self.echo_prp[k] = constants.DFLT_NTF
self.echo_all[k] = False
# GETTERS for safe access to local dictionary variables:
def get_config_j(self, j):
return self.config[j] if j in self.config.keys() else []
def get_fd_j(self, j):
if j == self.id:
return self.resolver.fd_get_trusted()
else:
return self.fd[j] if j in self.fd.keys() else []
def get_fd_part_j(self, j):
if j == self.id:
fd_part_i = []
for pj in self.get_fd_j(self.id):
if pj in self.config.keys():
if self.get_config_j(pj) != constants.NOT_PARTICIPANT:
fd_part_i.append(pj)
return fd_part_i
else:
return self.fd_part[j] if j in self.fd_part.keys() else []
def get_echo_part_j(self, j):
if j == self.id:
return self.get_fd_part_j(self.id)
else:
return self.echo_part[j] if j in self.echo_part.keys() else []
def get_echo_prp_j(self, j):
if j == self.id:
return self.get_prp_j(self.id)
else:
return self.echo_prp[j] if j in self.echo_prp.keys() else constants.DFLT_NTF
def get_echo_all_j(self, j):
if j == self.id:
return self.get_all_j(self.id)
else:
return self.echo_all[j] if j in self.echo_all.keys() else False
# Assumption: a non-reported all[] value should be treated as False.
def get_prp_j(self, j):
return self.prp[j] if j in self.prp.keys() else constants.DFLT_NTF
def get_all_j(self, j):
return self.alll[j] if j in self.alll.keys() else False
# Assumption: a non-reported all[] value should be treated as False.
# INTERFACE FUNCTIONS:
def get_config(self):
"""Returns the current quorum configuration, i.e. config[i]
May return # if p_i is not a participant or BOTTOM during the process
of a configuration reset
"""
if self.allow_reco():
return self.chs_config()
else:
return self.get_config_j(self.id)
def get_config_app(self):
"""Returns the current quorum configuration (config[i]) to application
Returns config[i] when no reconfiguration occurs or no agreement on
proposal. Once participants agree on proposal, returns proposal set U
current configuration.
"""
if self.degree(self.id) in [0, 1, 2]:
return self.get_config_j(self.id)
else:
return list(set(self.get_config_j(self.id)) | set(self.get_prp_j(self.id)[1]))
def allow_reco(self):
fd_of_trusted = []
part_of_trusted = [set(self.get_fd_part_j(self.id))]
no_reset = True
all_dflt_ntf = True
for j in self.get_fd_j(self.id):
if j != self.id:
fd_of_trusted.append(set(self.get_fd_j(j)))
part_of_j = set(self.get_fd_part_j(j)).union(set(self.get_echo_part_j(j)))
if part_of_j not in part_of_trusted:
                    part_of_trusted.append(part_of_j)
if self.get_config_j(j) == constants.BOTTOM:
no_reset = False
if (self.get_prp_j(j) != constants.DFLT_NTF) or \
(not self.get_all_j(j)):
all_dflt_ntf = False
trusted_by_trusted = False if fd_of_trusted == [] \
else self.id in set.intersection(*fd_of_trusted)
part_stabilized = len(part_of_trusted) == 1
all_part_echo = True
for k in self.get_fd_part_j(self.id):
if not self.echo_fun(k):
all_part_echo = False
return (not self.config_conflict()) and self.all_seen_fun() and \
all_part_echo and trusted_by_trusted and part_stabilized and \
no_reset and all_dflt_ntf
def estab(self, s):
"""Interface for RecMA to request configuration update
Used to replace the configuration by the RecMA module. Proposed set
must be non-empty and not the same as current conf.
"""
logger.info("Running estab(set) with set:", s)
if self.allow_reco() and (set(s) not in [set(), set(self.get_config_j(self.id))]):
logger.info("estab() allowed!")
self.prp[self.id] = (1, s)
self.alll[self.id] = False
self.all_seen = set()
def participate(self):
"""Interface for Joining mechanism to request a join for p_i."""
if self.allow_reco():
self.config[self.id] = self.chs_config()
# MACROS:
def chs_config(self):
"""Returns config whenever there is a single such non-# value.
Returns BOTTOM if no config exists.
"""
conf = set()
for j in self.get_fd_j(self.id):
if self.get_config_j(j) != constants.NOT_PARTICIPANT:
conf |= set(self.get_config_j(j))
if conf == set():
return constants.BOTTOM
else:
return list(conf)
def my_alll(self, k):
"""Returns either the value stored in all[k] or if k == i,
whether there exists p_l one phase "ahead" of p_i
"""
all_k = self.get_all_j(k)
exists_pl_ahead = False
ahead = (self.get_prp_j(self.id)[0] + 1) % 3
for l in self.all_seen:
if self.get_prp_j(l)[0] == ahead:
exists_pl_ahead = True
return all_k or ((k == self.id) and exists_pl_ahead)
def degree(self, k):
"""Calculates the degree of p_k's most recently received notification
Calculated as twice the notification phase plus one whenever all
participants are using the same notification (0 otherwise), where each
notification is a configuration replacement proposal.
"""
one_if_my_all_k = 1 if self.my_alll(k) else 0
return (2 * self.get_prp_j(k)[0]) + one_if_my_all_k
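    # Illustrative note: degree() maps a notification to a value in 0..5 as
    # 2*phase + (1 if my_alll(k) else 0); e.g. a phase-1 notification that is
    # "all seen" has degree 2*1 + 1 = 3. corr_deg() below then accepts only
    # pairs whose degrees are equal or adjacent modulo 6.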
def corr_deg(self, k, k_prime):
"""Tests whether p_k and p_k' have degrees that differ by <= 1
Used when considering operations in mod 6
"""
ok_deg_tups = [{0, 5}, {5, 5}]
for x in range(0, 5):
ok_deg_tups.append({x, x+1})
ok_deg_tups.append({x, x})
return {self.degree(k), self.degree(k_prime)} in ok_deg_tups
def echo_no_all(self, k):
"""Tests whether p_i was acked by all participants for the values it has sent.
Considers just the fields that are related to its own participant set
and notification.
"""
same_fd_part = set(self.get_fd_part_j(self.id)) == set(self.get_echo_part_j(k))
(phase_i, set_i) = self.get_prp_j(self.id)
(phase_k, set_k) = self.get_echo_prp_j(k)
same_prp = (phase_i == phase_k) and (set(set_i) == set(set_k))
return same_fd_part and same_prp
def echo_fun(self, k):
"""Tests whether p_k was acked by all participants for the values it has sent.
Considers the fields that are related to its own participant set
and notification as well as all[].
"""
same_all = self.my_alll(self.id) == self.get_echo_all_j(k)
ok_deg = ((self.degree(k) - self.degree(self.id)) % 6) in {0, 1}
return self.echo_no_all(k) and same_all and ok_deg
def config_set(self, val):
"""Wrapper to modify config of this processor.
Acts as a wrapper function for accessing pi’s local copies of the field config.
This macro also makes sure that there are no (local) active notifications.
"""
for k in range(self.number_of_nodes):
self.config[k] = val
self.prp[k] = constants.DFLT_NTF
logger.info(f"Set config to {self.config}")
def increment(self, prp):
"""Performs the transition between phases of the delicate reconfiguration."""
(prp_phase, prp_set) = prp
if prp_phase == 1:
return ((2, prp_set), False)
elif prp_phase == 2:
return (constants.DFLT_NTF, False)
else:
return (self.get_prp_j(self.id), self.get_all_j(self.id))
def all_seen_fun(self):
"""Tests whether all active participants have noticed that all other participants
have finished the current phase.
"""
return self.get_all_j(self.id) and \
(set(self.get_fd_part_j(self.id)) <= (self.all_seen | {self.id}))
def mod_max(self):
"""Returns maximum phase value of two processors considering mod 3 operations.
Assumes that no two processors in FD[i].part have two notifications that p_i
stores for which the degree differs by more than one.
"""
phs = set()
for k in self.get_fd_part_j(self.id):
phs.add(self.get_prp_j(k)[0])
if (1 in phs) and (2 not in phs) and (self.get_prp_j(self.id)[0] != max(phs)):
self.all_seen = set()
return max(phs)
else:
return self.get_prp_j(self.id)[0]
def max_ntf(self):
"""Selects notification with maximal lexicographical value.
Returns BOTTOM in the absence of notification that is not phase 0 notification.
"""
deg_diffs = set()
for k in self.get_fd_part_j(self.id):
deg_diff = (self.degree(k) - self.degree(self.id)) % 6
deg_diffs.add(deg_diff)
if not (deg_diffs <= {0, 1}):
return self.get_prp_j(self.id)
else:
max_lex_set = constants.BOTTOM
for k in self.get_fd_part_j(self.id):
max_lex_set = self.max_lex(max_lex_set, self.get_prp_j(k)[1])
return (self.mod_max(), max_lex_set)
def run(self, testing=False):
"""The main loop of the Reconfiguration Stability Assurance module"""
# block until system is ready
while not testing and not self.resolver.system_running():
time.sleep(0.1)
while True:
# Update some local variables
# self.fd[self.id] = self.get_fd_j(self.id)
# Algorithm 3.1 in the technical report
# line 22:
trusted = self.get_fd_part_j(self.id)
for k in range(self.number_of_nodes):
if (k not in trusted) and \
((self.get_config_j(k) != constants.NOT_PARTICIPANT) or (self.get_prp_j(k) != constants.DFLT_NTF)):
self.config[k] = constants.NOT_PARTICIPANT
self.prp[k] = constants.DFLT_NTF
self.resolver.fd_reset_monitor(k)
# line 23:
self.prp[self.id] = self.max_ntf()
# line 25:
all_no_all = True
for k in self.get_fd_part_j(self.id):
if not self.echo_no_all(k):
all_no_all = False
self.alll[self.id] = all_no_all
# line 26:
for k in self.get_fd_part_j(self.id):
if self.get_all_j(k):
self.all_seen.add(k)
# line 24:
if self.stale_info_type_1() or \
self.stale_info_type_2() or \
self.stale_info_type_3() or \
self.stale_info_type_4() or \
self.no_participants_and_stable_fd_monitors():
self.config_set(constants.BOTTOM)
# lines 27-32:
if self.no_ntf_arrived():
if self.config_conflict():
logger.debug("Stale info (config conflict) found!")
self.config_set(constants.BOTTOM)
if (self.get_config_j(self.id) == constants.BOTTOM) and self.fds_stabilized():
self.config_set(self.get_fd_j(self.id))
else:
if (self.get_prp_j(self.id)[0] == 2) and self.get_all_j(self.id):
self.config[self.id] = self.get_prp_j(self.id)[1]
if self.all_seen_fun():
echo_fun_all = True
for k in self.get_fd_part_j(self.id):
if not self.echo_fun(k):
echo_fun_all = False
if echo_fun_all:
(self.prp[self.id], self.alll[self.id]) = self.increment(self.get_prp_j(self.id))
self.all_seen = set()
# line 33:
if self.get_config_j(self.id) != constants.NOT_PARTICIPANT:
for j in self.get_fd_j(self.id):
self.send_state(j)
else:
logger.debug(f"Node not a participant, not sending state")
logger.debug(f"Another iteration of main RecSA loop completed")
time.sleep(RUN_SLEEP)
# HELPER FUNCTIONS:
def stale_info_type_1(self):
"""Stale info check - type 1
Tests that all notifications of configuration proposals are valid.
"""
for k in self.prp.keys():
if (self.get_prp_j(k)[0] == 0) and (self.get_prp_j(k)[1] != constants.BOTTOM):
logger.debug("Stale info (type 1) found!")
return True
return False
def stale_info_type_2(self):
"""Stale info check - type 2
Tests that there are no configuration conflicts or an active reset
process
"""
config_values = self.config.values()
bot_exists = constants.BOTTOM in config_values
empty_exists = [] in config_values
if bot_exists or empty_exists:
logger.debug(f"Stale info (type 2) found! Current config: {self.config}")
return bot_exists or empty_exists
def stale_info_type_3(self):
"""Stale info check - type 3
Tests that the phase information, including all_seen, are not out of
synch
"""
type_3_a = False
type_3_b_set = set()
prp_sets = []
exists_phase_2 = False
for k in self.get_fd_part_j(self.id):
if not self.corr_deg(self.id, k):
type_3_a = True
if self.get_prp_j(k)[0] == ((self.get_prp_j(self.id)[0] + 1) % 3):
type_3_b_set.add(k)
prp_k_set = self.get_prp_j(k)[1]
if (prp_k_set != constants.BOTTOM) and \
(set(prp_k_set) not in prp_sets):
prp_sets.append(set(prp_k_set))
if self.get_prp_j(k)[0] == 2:
exists_phase_2 = True
type_3_b = not (type_3_b_set <= self.all_seen)
type_3_c = exists_phase_2 and (len(prp_sets) > 1)
type_3 = type_3_a or type_3_b or type_3_c
if type_3:
logger.debug("Stale info (type 3) found!")
return type_3
def stale_info_type_4(self):
"""Stale info check - type 4
Tests that there are active participants in the config
"""
if self.get_fd_part_j(self.id) == []:
type_4_a = False
else:
type_4_a = True
for k in self.get_fd_part_j(self.id):
different_fd = self.get_fd_j(self.id) != self.get_fd_j(k)
different_fd_part = self.get_fd_part_j(self.id) != self.get_fd_part_j(k)
if different_fd or different_fd_part:
type_4_a = False
type_4_b = self.get_config_j(self.id) not in [constants.BOTTOM, constants.NOT_PARTICIPANT]
type_4_c = True
for k in self.get_fd_part_j(self.id):
if k in self.get_config_j(self.id):
type_4_c = False
type_4 = type_4_a and type_4_b and type_4_c
if type_4:
logger.debug("Stale info (type 4) found!")
return type_4
def no_participants_and_stable_fd_monitors(self):
"""Tests if we are in the special state where there are no participants and all FD monitors are stable"""
if len(self.get_fd_j(self.id)) < 1:
return False
for k in self.get_fd_j(self.id):
if (not self.resolver.fd_stable_monitor(k)) or (self.get_config_j(k) != constants.NOT_PARTICIPANT):
return False
logger.debug("There are no participants and all FD monitors are stable!")
return True
def no_ntf_arrived(self):
ntf_arrived = False
for k in self.get_fd_part_j(self.id):
if self.get_prp_j(k)[0] != 0:
ntf_arrived = True
return not ntf_arrived
def config_conflict(self):
real_configs_found = []
for k in self.get_fd_j(self.id):
if self.get_config_j(k) not in [constants.BOTTOM, constants.NOT_PARTICIPANT]:
config_k = set(self.get_config_j(k))
if config_k not in real_configs_found:
real_configs_found.append(config_k)
return len(real_configs_found) > 1
def fds_stabilized(self):
fd_i = set(self.get_fd_j(self.id))
for j in self.get_fd_j(self.id):
if set(self.fd.get(j, [])) != fd_i:
logger.debug("FDs have not stabilized")
return False
logger.debug("FDs have stabilized!")
return True
def max_lex(self, s1, s2):
if s1 == constants.BOTTOM:
return s2
if s2 == constants.BOTTOM:
return s1
s1_sorted = sorted(s1)
s2_sorted = sorted(s2)
return max(s1_sorted, s2_sorted)
def receive_msg(self, msg):
"""Called whenever a message is received from another processor."""
self.fd[self.id] = self.get_fd_j(self.id)
# Update state values
j = int(msg["sender"])
data = msg["data"]
self.fd[j] = data["fd"]
self.fd_part[j] = data["fd_part"]
self.config[j] = data["config"]
self.prp[j] = data["prp"]
self.alll[j] = data["alll"]
self.echo_part[j] = data["echo_fd_part"]
self.echo_prp[j] = data["echo_prp"]
self.echo_all[j] = data["echo_all"]
def send_state(self, receiver):
data = {
"fd": self.get_fd_j(self.id),
"fd_part": self.get_fd_part_j(self.id),
"config": self.get_config_j(self.id),
"prp": self.get_prp_j(self.id),
"alll": self.my_alll(self.id),
"echo_fd_part": self.get_fd_part_j(receiver),
"echo_prp": self.get_prp_j(receiver),
"echo_all": self.get_all_j(receiver)
}
msg = {
"type": MessageType.RECSA_MESSAGE,
"sender": self.id,
"data": data
}
self.resolver.send_to_node(receiver, msg)
def get_data(self):
"""Called by the API, used to expose data to 3rd party services."""
return {
"fd": self.get_fd_j(self.id),
"fd_part": self.get_fd_part_j(self.id),
"config": self.config,
# "config": self.get_config_j(self.id),
"prp": self.get_prp_j(self.id),
"alll": self.my_alll(self.id)
}
|
py | b40b7056bdc335ef6fcc5a9a0192ebadf194d41b | import wikipedia as wiki
import nltk
from nltk.tokenize import sent_tokenize
import re
import requests
from bs4 import BeautifulSoup
import text2num as t2n
from Quiz import Quiz
from QuestionSentence import QuestionSentence
def dbpedia(q):
q = q.replace(' ', '_')
url = 'http://dbpedia.org/page/{}'.format(q)
r = requests.get(url)
if r.status_code != 200:
return None
contents = r.content.decode('utf8')
soup = BeautifulSoup(contents, 'lxml')
try:
abstract = soup.find('span', attrs={
"xml:lang": "en",
"property": "dbo:abstract"
})
text = abstract.text
return text
except Exception as e:
print(str(e))
return None
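# Illustrative use of the dbpedia() helper above (assumes network access to
# dbpedia.org; the topic string is only an example, not one used by the project):
#
#   abstract = dbpedia("Whale")
#   if abstract:
#       print(abstract[:200])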
class Article():
def __init__(self, name):
self.name = name
self.page = dbpedia(name)
        if self.page is None:
return
self.quiz = Quiz([])
self.generate_questions_for(self.page.encode('ascii', 'ignore'))
'''
    NOT CURRENTLY USED, but may be useful at a later point when knowing the
section a question was sourced from might be of use.
'''
# def iterate_sections(self):
# # Iterate through article's sections
# for section in self.page.sections:
# print section
# sec = self.page.section(section).encode('ascii', 'ignore')
# if sec is None:
# continue
# self.generate_questions_for(sec)
'''
tokenizes and chunks a sentence based on a simple grammar
'''
def get_question_data(self, s):
tokens = nltk.word_tokenize(s)
tagged = nltk.pos_tag(tokens)
grammar = """
NUMBER: {<$>*<CD>+<NN>*}
LOCATION: {<IN><NNP>+<,|IN><NNP>+}
PROPER: {<NNP|NNPS><NNP|NNPS>+}
"""
#
# HIT!: {<PROPER><NN>?<VBZ|VBN>+}
# DATE: {<IN>(<$>*<CD>+<NN>*)}
chunker = nltk.RegexpParser(grammar)
result = chunker.parse(tagged)
return result
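    # For example, with typical tagger output the grammar above chunks
    # "in San Francisco, California" as a LOCATION and "three hundred" as a
    # NUMBER (illustrative only; actual chunks depend on the POS tags returned).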
'''
splits a Wikipedia section into sentences and then chunks/tokenizes each
sentence
'''
def generate_questions_for(self, sec):
# Rid of all parentheses for easier processing
_sec = "".join(re.split('\(',
sec.decode("utf-8").replace(")", "("))[0::2])
for sentence in sent_tokenize(_sec):
qdata = self.get_question_data(sentence)
if len(qdata) >= 75 and len(qdata) <= 150:
qdata = []
self.create_questions(sentence, qdata)
'''
given a setence in chunked and original form, produce the params necessary
to create a Question, and then add that to our Quiz object
'''
def create_questions(self, sentence, chunked):
gaps = []
for word in chunked:
if type(word) != tuple:
target = []
for y in word:
target.append(y[0])
orig_phrase = " ".join(target)
if word.label() == "NUMBER":
modified_phrase = orig_phrase[:]
try:
# convert spelled out word to numerical value
                        modified_phrase = t2n.text2num(orig_phrase)
except:
try:
test = int(modified_phrase) + \
float(modified_phrase)
except:
# if the word could not be converted and
# was not already numerical, ignore it
continue
if self.probably_range(modified_phrase):
return
gaps.append((word.label(), orig_phrase, modified_phrase))
elif word.label() in ["LOCATION", "PROPER"]:
gaps.append((word.label(), orig_phrase, orig_phrase))
if len(gaps) >= 1 and len(gaps) == len(set(gaps)):
gaps_filtered = [gap for gap in gaps if gap[0]
== 'NUMBER' or gap[0] == 'LOCATION']
if len(gaps_filtered):
self.quiz.add(QuestionSentence(sentence, gaps_filtered))
'''
Wikipedia returns non-hyphenated number ranges, so we need to check for mushed together years
and remove them. Not a complete solution to the problem, but most of the incidents are years
'''
def probably_range(self, val):
s = str(val)
if s.count("19") > 1 or s.count("20") > 1 or (s.count("19") == 1 and s.count("20") == 1):
return True
return False
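# Minimal end-to-end sketch (assumptions: the NLTK 'punkt' tokenizer and POS
# tagger data are installed, and the topic name is purely illustrative):
#
#   article = Article("Whale")
#   quiz = article.quiz  # Quiz populated by generate_questions_for() above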
|
py | b40b70b5bf32ba5e6bd666e71e5eaf0f1fe96912 | import inspect
import logging
import sys
from pathlib import Path
import click
import tomlkit
from tomlkit.items import Comment, Trivia
from tqdm import tqdm
from .errors import TrainingCrash
from .evaluate import evaluate
from .fit import fit_wf
from .io import wf_from_file
from .sampling import LangevinSampler, sample_wf
from .train import train
from .wf import ANSATZES
__all__ = ()
log = logging.getLogger(__name__)
DEEPQMC_DEFAULTS = {
(train, 'sampler_kwargs'): LangevinSampler.from_wf,
(train, 'fit_kwargs'): fit_wf,
(train, 'optimizer_kwargs'): True,
(train, 'lr_scheduler_kwargs'): True,
(LangevinSampler.from_wf, 'kwargs'): LangevinSampler,
(evaluate, 'sampler_kwargs'): (
LangevinSampler.from_wf,
[('n_decorrelate', 4), 'n_discard', 'sample_size'],
),
(evaluate, 'sample_kwargs'): sample_wf,
}
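# Reading the mapping above: an entry such as (train, 'sampler_kwargs') ->
# LangevinSampler.from_wf means that the defaults for train()'s sampler_kwargs
# are collected from the signature of LangevinSampler.from_wf; a tuple value
# additionally overrides or removes individual keys (see _get_subkwargs below).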
def _get_subkwargs(func, name, mapping):
target = mapping[func, name]
target, override = target if isinstance(target, tuple) else (target, [])
if isinstance(target, dict):
sub_kwargs = {k: collect_kwarg_defaults(v, mapping) for k, v in target.items()}
else:
sub_kwargs = collect_kwarg_defaults(target, mapping)
for x in override:
if isinstance(x, tuple):
key, val = x
sub_kwargs[key] = val
else:
del sub_kwargs[x]
return sub_kwargs
def collect_kwarg_defaults(func, mapping):
kwargs = tomlkit.table()
for p in inspect.signature(func).parameters.values():
if p.name == 'kwargs':
assert p.default is p.empty
assert p.kind is inspect.Parameter.VAR_KEYWORD
sub_kwargs = _get_subkwargs(func, 'kwargs', mapping)
for item in sub_kwargs.value.body:
kwargs.add(*item)
elif p.name.endswith('_kwargs'):
if mapping.get((func, p.name)) is True:
kwargs[p.name] = p.default
else:
assert p.default is None
assert p.kind is inspect.Parameter.KEYWORD_ONLY
sub_kwargs = _get_subkwargs(func, p.name, mapping)
kwargs[p.name] = sub_kwargs
elif p.kind is inspect.Parameter.POSITIONAL_OR_KEYWORD:
assert p.default in (p.empty, p.default)
else:
assert p.kind is inspect.Parameter.KEYWORD_ONLY
if p.default is None:
kwargs.add(Comment(Trivia(comment=f'#: {p.name} = ...')))
else:
try:
kwargs[p.name] = p.default
except ValueError:
print(func, p.name, p.kind, p.default)
raise
return kwargs
class TqdmStream:
def write(self, msg):
tqdm.write(msg, end='')
class CLI(click.Group):
def list_commands(self, ctx):
return self.commands.keys()
def get_command(self, ctx, name):
if name.startswith('extra:'):
from .extra import cli as extra_cli
name = name.split(':', 1)[1]
for attr in dir(extra_cli):
cmd = getattr(extra_cli, attr)
if isinstance(cmd, click.core.Command) and cmd.name == name:
return cmd
return super().get_command(ctx, name)
@click.group(cls=CLI)
@click.option('-v', '--verbose', count=True, help='Increase verbosity.')
@click.option('-q', '--quiet', is_flag=True, help='Suppress all output.')
def cli(verbose, quiet): # noqa: D403
"""DeepQMC runs quantum Monte Carlo with deep neural networks."""
assert not (quiet and verbose)
logging.basicConfig(
style='{',
format='[{asctime}.{msecs:03.0f}] {levelname}:{name}: {message}',
datefmt='%H:%M:%S',
stream=TqdmStream(),
)
if quiet:
level = logging.ERROR
else:
level = [logging.WARNING, logging.INFO, logging.DEBUG][verbose]
logging.getLogger('deepqmc').setLevel(level)
@cli.command()
@click.option(
'--commented', '-c', is_flag=True, help='Comment out all hyperparameters.'
)
def defaults(commented):
"""Print all hyperparameters and their default values.
The hyperparameters are printed in the TOML format that is expected by other
deepqmc commands.
"""
table = tomlkit.table()
table['train_kwargs'] = collect_kwarg_defaults(train, DEEPQMC_DEFAULTS)
table['evaluate_kwargs'] = collect_kwarg_defaults(evaluate, DEEPQMC_DEFAULTS)
for label, ansatz in ANSATZES.items():
table[f'{label}_kwargs'] = collect_kwarg_defaults(ansatz.entry, ansatz.defaults)
lines = tomlkit.dumps(table).split('\n')
if commented:
lines = ['# ' + l if ' = ' in l and l[0] != '#' else l for l in lines]
click.echo('\n'.join(lines), nl=False)
@cli.command('train')
@click.argument('workdir', type=click.Path(exists=True))
@click.option(
'--save-every',
default=100,
show_default=True,
    help='Frequency in steps of saving the current state of the optimization.',
)
@click.option(
'--cuda/--no-cuda',
default=True,
show_default=True,
help='Toggle training on a GPU.',
)
@click.option(
'--max-restarts',
default=3,
show_default=True,
help='Maximum number of attempted restarts before aborting.',
)
@click.option('--hook', is_flag=True, help='Import a deepqmc hook from WORKDIR.')
def train_at(workdir, save_every, cuda, max_restarts, hook):
"""Train an ansatz with variational quantum Monte Carlo.
The calculation details must be specified in a "param.toml" file in WORKDIR,
which must contain at least the keywords "system" and "ansatz", and
optionally any keywords printed by the "defaults" command.
"""
workdir = Path(workdir).resolve()
if hook:
log.info('Importing a dlqmc hook')
sys.path.append(str(workdir))
import dlqmc_hook # noqa: F401
state = None
for attempt in range(max_restarts + 1):
log.info('Initializing a new wave function')
wf, params, state_from_file = wf_from_file(workdir)
state = state or state_from_file
if cuda:
log.info('Moving to GPU...')
wf.cuda()
log.info('Moved to GPU')
try:
train(
wf,
workdir=workdir,
state=state,
save_every=save_every,
**params.get('train_kwargs', {}),
)
except TrainingCrash as e:
log.warning(f'Caught exception: {e.__cause__!r}')
state = e.state
if attempt == max_restarts:
log.error('Maximum number of restarts reached')
break
if state:
log.warning(f'Restarting from step {state["step"]}')
else:
log.warning('Restarting from beginning')
else:
break
@cli.command('evaluate')
@click.argument('workdir', type=click.Path(exists=True))
@click.option(
'--cuda/--no-cuda',
default=True,
show_default=True,
help='Toggle training on a GPU.',
)
@click.option(
'--store-steps/--no-store-steps',
default=False,
show_default=True,
help='Toggle storing of individual sampling steps.',
)
@click.option('--hook', is_flag=True)
def evaluate_at(workdir, cuda, store_steps, hook):
"""Estimate total energy of an ansatz via Monte Carlo sampling.
The calculation details must be specified in a "param.toml" file in WORKDIR,
which must contain at least the keywords "system" and "ansatz", and
optionally any keywords printed by the "defaults" command. The wave function
ansatz must be stored in a "state.pt" file in WORKDIR, which was generated
with the "train" command.
"""
workdir = Path(workdir).resolve()
if hook:
sys.path.append(str(workdir))
import dlqmc_hook # noqa: F401
wf, params, state = wf_from_file(workdir)
if state:
wf.load_state_dict(state['wf'])
if cuda:
wf.cuda()
evaluate(
wf,
store_steps=store_steps,
workdir=workdir,
**params.get('evaluate_kwargs', {}),
)
|
pyde | b40b723a413e4c1744651b5727f161ce60a3abf3 | """
I Like Icosahedra
by Ira Greenberg.
This example plots icosahedra. The Icosahedron is a regular polyhedron composed
of twenty equilateral triangles.
Slightly simplified to reduce the complexity of the Shape3D class and remove
the unused Dimension3D class.
"""
from icosahedron import Icosahedron
ico1 = None
ico2 = None
ico3 = None
# Pre-calculate some global values
halfWidth = None
halfHeight = None
ico3XOffset = None
ico1XOffset = None
ico1XRot = PI / 185
ico1YRot = PI / -200
ico2XRot = PI / 200
ico2YRot = PI / 300
icoX3Rot = PI / -200
icoY3Rot = PI / 200
def setup():
    # These names are reused in draw(), so declare them global before assigning
    global ico1, ico2, ico3, halfWidth, halfHeight, ico3XOffset, ico1XOffset
size(640, 360, P3D)
halfWidth = width / 2
halfHeight = height / 2
ico3XOffset = width / 3.5
ico1XOffset = ico3XOffset * -1
ico1 = Icosahedron(75)
ico2 = Icosahedron(75)
ico3 = Icosahedron(75)
def draw():
background(0)
lights()
translate(halfWidth, halfHeight)
with pushMatrix():
translate(ico1XOffset, 0)
rotateX(frameCount * ico1XRot)
rotateY(frameCount * ico1YRot)
stroke(170, 0, 0)
noFill()
ico1.create()
with pushMatrix():
rotateX(frameCount * ico2XRot)
rotateY(frameCount * ico2YRot)
stroke(150, 0, 180)
fill(170, 170, 0)
ico2.create()
with pushMatrix():
translate(ico3XOffset, 0)
rotateX(frameCount * icoX3Rot)
rotateY(frameCount * icoY3Rot)
noStroke()
fill(0, 0, 185)
ico3.create()
|
py | b40b7278de38a7632f982cc8262e2213c59bee89 | #
#
# This file comes from https://github.com/mustafaahci/FramelessWindow
#
#
import ctypes
import win32api
import win32gui
from ctypes.wintypes import LONG
from win32con import PAN_SERIF_SQUARE, WM_NCCALCSIZE, GWL_STYLE, WM_NCHITTEST, WS_MAXIMIZEBOX, WS_THICKFRAME, \
WS_CAPTION, HTTOPLEFT, HTBOTTOMRIGHT, HTTOPRIGHT, HTBOTTOMLEFT, \
HTTOP, HTBOTTOM, HTLEFT, HTRIGHT, HTCAPTION, WS_POPUP, WS_SYSMENU, WS_MINIMIZEBOX
from PySide2.QtCore import Qt
from PySide2.QtGui import QColor
from PySide2.QtWidgets import QMainWindow, QWidget, QPushButton, QApplication, QVBoxLayout, QSizePolicy, QHBoxLayout
from PySide2.QtWinExtras import QtWin
class QFramelessWindow(QMainWindow):
BORDER_WIDTH = 10
def __init__(self):
self.updateSize = True
self.settingsWidget = QWidget()
super().__init__()
self.hwnd = self.winId().__int__()
self.setObjectName("QFramelessWindow")
window_style = win32gui.GetWindowLong(self.hwnd, GWL_STYLE)
win32gui.SetWindowLong(self.hwnd, GWL_STYLE, window_style | WS_POPUP | WS_THICKFRAME | WS_CAPTION | WS_SYSMENU | WS_MAXIMIZEBOX | WS_MINIMIZEBOX)
if QtWin.isCompositionEnabled():
# Aero Shadow
QtWin.extendFrameIntoClientArea(self, -1, -1, -1, -1)
else:
QtWin.resetExtendedFrame(self)
# Window Widgets
self.resize(800, 600)
self._layout = QVBoxLayout()
self._layout.setContentsMargins(0, 0, 0, 0)
self._layout.setSpacing(0)
# main widget is here
self.mainWidget = QWidget()
self.mainWidgetLayout = QVBoxLayout()
self.mainWidgetLayout.setContentsMargins(0, 0, 0, 0)
self.mainWidget.setLayout(self.mainWidgetLayout)
self.mainWidget.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
# set background color
self.setAutoFillBackground(True)
p = self.palette()
p.setColor(self.backgroundRole(), QColor("#272727"))
self.setPalette(p)
self._layout.addWidget(self.mainWidget)
self.setLayout(self._layout)
def changeEvent(self, event):
if event.type() == event.WindowStateChange:
if self.windowState() & Qt.WindowMaximized:
margin = abs(self.mapToGlobal(self.rect().topLeft()).y())
self.setContentsMargins(margin, margin, margin, margin)
else:
self.setContentsMargins(0, 0, 0, 0)
return super(QFramelessWindow, self).changeEvent(event)
def nativeEvent(self, event, message):
return_value, result = super().nativeEvent(event, message)
# if you use Windows OS
if event == b'windows_generic_MSG':
msg = ctypes.wintypes.MSG.from_address(message.__int__())
# Get the coordinates when the mouse moves.
x = win32api.LOWORD(LONG(msg.lParam).value)
# converted an unsigned int to int (for dual monitor issue)
if x & 32768: x = x | -65536
y = win32api.HIWORD(LONG(msg.lParam).value)
if y & 32768: y = y | -65536
x -= self.frameGeometry().x()
y -= self.frameGeometry().y()
# Determine whether there are other controls(i.e. widgets etc.) at the mouse position.
if self.childAt(x, y) is not None and self.childAt(x, y) is not self.findChild(QWidget, "ControlWidget"):
# passing
if self.width() - self.BORDER_WIDTH > x > self.BORDER_WIDTH and y < self.height() - self.BORDER_WIDTH:
return return_value, result
if msg.message == WM_NCCALCSIZE:
# Remove system title
return True, 0
if msg.message == WM_NCHITTEST:
w, h = self.width(), self.height()
lx = x < self.BORDER_WIDTH
rx = x > w - self.BORDER_WIDTH
ty = y < self.BORDER_WIDTH
by = y > h - self.BORDER_WIDTH
if lx and ty:
return True, HTTOPLEFT
if rx and by:
return True, HTBOTTOMRIGHT
if rx and ty:
return True, HTTOPRIGHT
if lx and by:
return True, HTBOTTOMLEFT
if ty:
return True, HTTOP
if by:
return True, HTBOTTOM
if lx:
return True, HTLEFT
if rx:
return True, HTRIGHT
# Title
return True, HTCAPTION
return return_value, result
def moveEvent(self, event) -> None:
self.repaint()
return super().moveEvent(event)
if __name__ == "__main__":
import __init__ |
py | b40b735ffc270621f34684f307a97de193b1d1f5 | import json
from requests import Request, Session
crypto_url = 'https://pro-api.coinmarketcap.com/v1/cryptocurrency/quotes/latest'
parameters = {
'slug': 'stellar',
'convert': 'USD'
}
headers = {
'Accepts': 'application/json',
'X-CMC_PRO_API_KEY': '0f0596ce-3ff5-42c2-86ed-4d2874c65fdd'
}
session = Session()
session.headers.update(headers)
response = session.get(crypto_url, params=parameters)
price_data_json = float(json.loads(response.text)['data']['512']['quote']['USD']['price'])
print(price_data_json)
|
py | b40b73b44baf2eb60359ad001080f75d50f6c3c2 | from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Any, Collection, Iterator
def parse_item(field_op: str, value: Any) -> Specification:
if "__" not in field_op:
from fractal.core.specifications.generic.operators import EqualsSpecification
return EqualsSpecification(field_op, value)
else:
field, op = field_op.split("__")
specification = None
if op == "equals":
from fractal.core.specifications.generic.operators import (
EqualsSpecification as specification,
)
elif op == "in":
from fractal.core.specifications.generic.operators import (
InSpecification as specification,
)
elif op == "contains":
from fractal.core.specifications.generic.operators import (
ContainsSpecification as specification,
)
elif op == "lt":
from fractal.core.specifications.generic.operators import (
LessThanSpecification as specification,
)
elif op == "lte":
from fractal.core.specifications.generic.operators import (
LessThanEqualSpecification as specification,
)
elif op == "gt":
from fractal.core.specifications.generic.operators import (
GreaterThanSpecification as specification,
)
elif op == "gte":
from fractal.core.specifications.generic.operators import (
GreaterThanEqualSpecification as specification,
)
if specification:
return specification(field, value)
def parse(**kwargs) -> Iterator[Specification]:
for field_op, value in kwargs.items():
yield parse_item(field_op, value)
class Specification(ABC):
@abstractmethod
def is_satisfied_by(self, obj: Any) -> bool:
raise NotImplementedError
@abstractmethod
def to_collection(self) -> Collection:
raise NotImplementedError
def And(self, specification: "Specification") -> "Specification":
from fractal.core.specifications.generic.collections import AndSpecification
return AndSpecification([self, specification])
def Or(self, specification: "Specification") -> "Specification":
from fractal.core.specifications.generic.collections import OrSpecification
return OrSpecification([self, specification])
def __str__(self):
return self.__class__.__name__
@staticmethod
def Not(specification: "Specification") -> "Specification":
from fractal.core.specifications.generic.operators import NotSpecification
return NotSpecification(specification)
@staticmethod
def parse(**kwargs):
specs = list(parse(**kwargs))
if len(specs) > 1:
from fractal.core.specifications.generic.collections import AndSpecification
return AndSpecification(specs)
elif len(specs) == 1:
return specs[0]
return None
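# Minimal usage sketch of the field__op parsing defined above (the `account`
# object and field names are illustrative only):
#
#   spec = Specification.parse(name="savings", balance__gte=100)
#   # -> AndSpecification of EqualsSpecification("name", "savings") and
#   #    GreaterThanEqualSpecification("balance", 100)
#   spec.is_satisfied_by(account)  # True when both conditions hold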
|
py | b40b73b53d93e5629a043bac771aaab04498b817 | import logging
# Get logging configurations
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s - Line: %(lineno)d - Path: %(name)s - Module: %(module)s.py - %(levelname)s - %(message)s',
datefmt='%d/%m/%Y %I:%M:%S %p')
# Only the last setLevel call would take effect, so set the root level once
logging.getLogger().setLevel(logging.WARNING)
import platform
from .config import Config
from pyrogram import Client, __version__, idle
from pyromod import listen
def main():
Renamer = Client("Compass_Botz",
bot_token=Config.BOT_TOKEN,
api_id=Config.API_ID,
api_hash=Config.API_HASH,
plugins=dict(root="renamer/plugins"),
workers=100)
Renamer.start()
me = Renamer.get_me()
startup_msg = f"⚡ Boot Successfull at @{me.username}\n"
startup_msg += f"Pyrogram Version: v{__version__}\n"
startup_msg += f"Python Version: v{platform.python_version()}\n\n"
startup_msg += "Deploying has been done. Follow Our Channel @Compass_Botz"
print(startup_msg)
idle()
Renamer.stop()
print("⛔ Bot Stopped ⛔")
if __name__ == "__main__":
main()
|
py | b40b748f79f2b81347ede6fd6a47f6ca454a0b19 | import unittest
from conans.client.graph.graph_builder import DepsGraph, Node
from conans.model.conan_file import ConanFile
from conans.model.ref import ConanFileReference
from conans.test.utils.tools import TestBufferConanOutput
class DepsGraphTest(unittest.TestCase):
def test_node(self):
""" nodes are different even if contain same values,
so they can be repeated if necessary in the graph (common
static libraries)
"""
ref1 = ConanFileReference.loads("Hello/0.1@user/stable")
ref2 = ConanFileReference.loads("Hello/0.1@user/stable")
conanfile1 = ConanFile(TestBufferConanOutput(), None)
conanfile2 = ConanFile(TestBufferConanOutput(), None)
n1 = Node(ref1, conanfile1)
n2 = Node(ref2, conanfile2)
self.assertNotEqual(n1, n2)
def basic_levels_test(self):
ref1 = ConanFileReference.loads("Hello/1.0@user/stable")
ref2 = ConanFileReference.loads("Hello/2.0@user/stable")
ref3 = ConanFileReference.loads("Hello/3.0@user/stable")
deps = DepsGraph()
n1 = Node(ref1, 1)
n2 = Node(ref2, 2)
n3 = Node(ref3, 3)
deps.add_node(n1)
deps.add_node(n2)
deps.add_node(n3)
deps.add_edge(n1, n2)
deps.add_edge(n2, n3)
self.assertEqual([[n3], [n2], [n1]], deps.by_levels())
def multi_levels_test(self):
ref1 = ConanFileReference.loads("Hello/1.0@user/stable")
ref2 = ConanFileReference.loads("Hello/2.0@user/stable")
ref31 = ConanFileReference.loads("Hello/31.0@user/stable")
ref32 = ConanFileReference.loads("Hello/32.0@user/stable")
deps = DepsGraph()
n1 = Node(ref1, 1)
n2 = Node(ref2, 2)
n31 = Node(ref31, 31)
n32 = Node(ref32, 32)
deps.add_node(n1)
deps.add_node(n2)
deps.add_node(n32)
deps.add_node(n31)
deps.add_edge(n1, n2)
deps.add_edge(n2, n31)
deps.add_edge(n2, n32)
self.assertEqual([[n31, n32], [n2], [n1]], deps.by_levels())
def multi_levels_test2(self):
ref1 = ConanFileReference.loads("Hello/1.0@user/stable")
ref2 = ConanFileReference.loads("Hello/2.0@user/stable")
ref5 = ConanFileReference.loads("Hello/5.0@user/stable")
ref31 = ConanFileReference.loads("Hello/31.0@user/stable")
ref32 = ConanFileReference.loads("Hello/32.0@user/stable")
deps = DepsGraph()
n1 = Node(ref1, 1)
n2 = Node(ref2, 2)
n5 = Node(ref5, 5)
n31 = Node(ref31, 31)
n32 = Node(ref32, 32)
deps.add_node(n1)
deps.add_node(n5)
deps.add_node(n2)
deps.add_node(n32)
deps.add_node(n31)
deps.add_edge(n1, n2)
deps.add_edge(n1, n5)
deps.add_edge(n2, n31)
deps.add_edge(n2, n32)
self.assertEqual([[n5, n31, n32], [n2], [n1]], deps.by_levels())
def multi_levels_test3(self):
ref1 = ConanFileReference.loads("Hello/1.0@user/stable")
ref2 = ConanFileReference.loads("Hello/2.0@user/stable")
ref5 = ConanFileReference.loads("Hello/5.0@user/stable")
ref31 = ConanFileReference.loads("Hello/31.0@user/stable")
ref32 = ConanFileReference.loads("Hello/32.0@user/stable")
deps = DepsGraph()
n1 = Node(ref1, 1)
n2 = Node(ref2, 2)
n5 = Node(ref5, 5)
n31 = Node(ref31, 31)
n32 = Node(ref32, 32)
deps.add_node(n1)
deps.add_node(n5)
deps.add_node(n2)
deps.add_node(n32)
deps.add_node(n31)
deps.add_edge(n1, n2)
deps.add_edge(n1, n5)
deps.add_edge(n2, n31)
deps.add_edge(n2, n32)
deps.add_edge(n32, n5)
self.assertEqual([[n5, n31], [n32], [n2], [n1]], deps.by_levels())
|
py | b40b74d6717ea53fc91dd91a2d20a25fbdecb357 | # -*- coding: utf-8 -*-
import time
import math
import random
import numpy as np
from dramkit.gentools import isnull
from dramkit.optimizer.utils_heuristic import rand_init
def woa(objf, func_opter_parms):
'''
    Whale Optimization Algorithm (WOA)
    TODO
    ----
    Only continuous real-valued variables are handled for now; support for
    discrete variables could be added later.
    Parameters
    ----------
    objf : function
        Objective function. Note: it must be formulated as a minimization problem.
    func_opter_parms : FuncOpterInfo
        A :class:`dramkit.optimizer.utils_heuristic.FuncOpterInfo` instance;
        parms_func, parms_opter and parms_log must all be set.
        | parms_func is the dict of objective-function parameters; its keys must include:
        |     x_lb: lower bound of each variable dimension, list or scalar (a list must have length dim)
        |     x_ub: upper bound of each variable dimension, list or scalar (a list must have length dim)
        |     dim: number of variable dimensions
        |     kwargs: other keyword arguments passed to the objective function
        | parms_opter is the dict of optimizer parameters; its keys must include:
        |     popsize: population size (number of samples per iteration)
        |     max_iter: maximum number of optimization iterations
        | parms_log is the dict of logging parameters; its keys must include:
        |     logger: the logger object
        |     nshow: if an integer, log the current best objective value every nshow iterations
    Returns
    -------
    func_opter_parms : FuncOpterInfo
        func_opter_parms updated with the results of the optimization process
    References
    ----------
    - WOA鲸鱼优化算法.pdf
    - https://github.com/7ossam81/EvoloPy
'''
    # extract parameters
    opter_name = func_opter_parms.parms_opter['opter_name']
    if opter_name == '' or isnull(opter_name):
        opter_name = 'woa'
    func_opter_parms.parms_opter['opter_name'] = opter_name
    # objective function parameters
    x_lb = func_opter_parms.parms_func['x_lb']
    x_ub = func_opter_parms.parms_func['x_ub']
    dim = func_opter_parms.parms_func['dim']
    kwargs = func_opter_parms.parms_func['kwargs']
    # optimizer parameters
    popsize = func_opter_parms.parms_opter['popsize']
    max_iter = func_opter_parms.parms_opter['max_iter']
    # logging parameters
    logger = func_opter_parms.parms_log['logger']
    nshow = func_opter_parms.parms_log['nshow']
    # timing
    strt_tm = time.time()
    func_opter_parms.set_start_time(time.strftime('%Y-%m-%d %H:%M:%S'))
    # normalize bounds to lists
    if not isinstance(x_lb, list):
        x_lb = [x_lb] * dim
    if not isinstance(x_ub, list):
        x_ub = [x_ub] * dim
    # initialize the leader (best solution found so far)
    LeaderPos = np.zeros(dim)
    LeaderVal = float('inf')
    # initialize the whole population
    pos = rand_init(popsize, dim, x_lb, x_ub) # random initialization of individuals
    # record the convergence process
    convergence_curve = np.zeros(max_iter) # global best value per iteration
    convergence_curve_mean = np.zeros(max_iter) # population mean value per iteration
    # iterative optimization
for t in range(0, max_iter):
        # clip positions that left the search bounds
        pos = np.clip(pos, x_lb, x_ub)
        fvals_mean = 0
        for i in range(0, popsize):
            fval = objf(pos[i, :], **kwargs) # objective function value
            fvals_mean = (fvals_mean*i + fval) / (i+1)
            # update the leader (global best solution)
            if fval < LeaderVal:
                LeaderVal = fval
                LeaderPos = pos[i, :].copy()
        # update all individuals
        a = 2 - t * (2 / max_iter) # a decreases linearly from 2 to 0
        a2 = -1 + t * (-1 / max_iter) # a2 decreases linearly from -1 to -2
        for i in range(0, popsize):
            r1 = random.random() # r1 and r2 are random numbers in (0, 1)
r2 = random.random()
A = 2 * a * r1 - a # WOA鲸鱼优化算法.pdf (2.3)
C = 2 * r2 # WOA鲸鱼优化算法.pdf (2.4)
b = 1 # parameters in WOA鲸鱼优化算法.pdf (2.5)
l = (a2 - 1) * random.random() + 1 # parameters in WOA鲸鱼优化算法.pdf (2.5) ?
p = random.random() # p in WOA鲸鱼优化算法.pdf (2.6)
for j in range(0, dim):
if p < 0.5:
if abs(A) >= 1:
RandLeaderIdx = math.floor(popsize * random.random())
Xrand = pos[RandLeaderIdx, :]
D_Xrand = abs(C * Xrand[j] - pos[i, j])
pos[i, j] = Xrand[j] - A * D_Xrand
elif abs(A) < 1:
D_Leader = abs(C * LeaderPos[j] - pos[i, j])
pos[i, j] = LeaderPos[j] - A * D_Leader
elif p >= 0.5:
Dis2Leader = abs(LeaderPos[j] - pos[i, j])
# WOA鲸鱼优化算法.pdf (2.5)
tmp1 = Dis2Leader * math.exp(b * l)
tmp2 = math.cos(l*2*math.pi)
pos[i, j] = tmp1 * tmp2 + LeaderPos[j]
        # record the best objective value at every iteration
convergence_curve[t] = LeaderVal
convergence_curve_mean[t] = fvals_mean
if nshow:
if (t+1) % nshow ==0:
opter_name = func_opter_parms.parms_opter['opter_name']
func_name = func_opter_parms.parms_func['func_name']
logger.info('{} for {}, iter: {}, '.format(opter_name, func_name, t+1) + \
'best fval: {}'.format(LeaderVal))
    # update func_opter_parms
end_tm = time.time()
func_opter_parms.set_end_time(time.strftime('%Y-%m-%d %H:%M:%S'))
func_opter_parms.set_exe_time(end_tm-strt_tm)
func_opter_parms.set_convergence_curve(convergence_curve)
func_opter_parms.set_convergence_curve_mean(convergence_curve_mean)
func_opter_parms.set_best_val(LeaderVal)
func_opter_parms.set_best_x(LeaderPos)
return func_opter_parms
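# --- Added note (not part of the original module) ----------------------------
# The position updates inside woa() follow the standard WOA rules. With X* the
# leader (best) position, X the current position and Xrand a random individual:
#   encircling prey (p < 0.5, |A| < 1):  D = |C*X* - X|,    X(t+1) = X* - A*D
#   search for prey (p < 0.5, |A| >= 1): D = |C*Xrand - X|, X(t+1) = Xrand - A*D
#   spiral update   (p >= 0.5):          D' = |X* - X|,     X(t+1) = D'*exp(b*l)*cos(2*pi*l) + X*
# where A = 2*a*r1 - a and C = 2*r2, and a decreases linearly from 2 to 0, so
# the search gradually shifts from exploration to exploitation.
# ------------------------------------------------------------------------------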
if __name__ == '__main__':
import pandas as pd
from dramkit.optimizer.base_funcs import TestFuncs
from dramkit.optimizer.utils_heuristic import FuncOpterInfo
from dramkit import plot_series, simple_logger
from dramkit.logtools.logger_general import get_logger
from dramkit.logtools.utils_logger import close_log_file
strt_tm = time.time()
objf = TestFuncs.ackley2
parms_func = {'func_name': objf.__name__,
'x_lb': -10, 'x_ub': 10, 'dim': 10, 'kwargs': {}}
parms_opter = {'opter_name': 'woa-test',
'popsize': 20, 'max_iter': 1000}
# logger = simple_logger()
logger = get_logger('./test/log/woa_test.txt', screen_show=True)
# parms_log = {'logger': logger, 'nshow': 10}
parms_log = {'logger': logger, 'nshow': 100}
func_opter_parms = FuncOpterInfo(parms_func, parms_opter, parms_log)
func_opter_parms = woa(objf, func_opter_parms)
vals = pd.DataFrame({'fval_best': func_opter_parms.convergence_curve,
'fval_mean': func_opter_parms.convergence_curve_mean})
plot_series(vals, {'fval_best': '-r', 'fval_mean': '-b'}, figsize=(10, 6))
best_x = func_opter_parms.best_x
func_opter_parms.parms_log['logger'].info('best x: {}'.format(best_x))
close_log_file(logger)
print('used time: {}s.'.format(round(time.time()-strt_tm, 6)))
|
py | b40b753d23713ef57060a6c80c4527c7bef69dda | import bpy
import random
import math
def purge_orphans():
if bpy.app.version >= (3, 0, 0):
bpy.ops.outliner.orphans_purge(
do_local_ids=True, do_linked_ids=True, do_recursive=True
)
else:
# call purge_orphans() recursively until there are no more orphan data blocks to purge
result = bpy.ops.outliner.orphans_purge()
if result.pop() != "CANCELLED":
purge_orphans()
def clean_scene():
"""
Removing all of the objects, collection, materials, particles,
textures, images, curves, meshes, actions, nodes, and worlds from the scene
"""
if bpy.context.active_object and bpy.context.active_object.mode == "EDIT":
bpy.ops.object.editmode_toggle()
for obj in bpy.data.objects:
obj.hide_set(False)
obj.hide_select = False
obj.hide_viewport = False
bpy.ops.object.select_all(action="SELECT")
bpy.ops.object.delete()
collection_names = [col.name for col in bpy.data.collections]
for name in collection_names:
bpy.data.collections.remove(bpy.data.collections[name])
# in the case when you modify the world shader
world_names = [world.name for world in bpy.data.worlds]
for name in world_names:
bpy.data.worlds.remove(bpy.data.worlds[name])
# create a new world data block
bpy.ops.world.new()
bpy.context.scene.world = bpy.data.worlds["World"]
purge_orphans()
##########################################################
# _____ _ _ _ _____
# | ___| | | _| || |_ |____ |
# | |____ ____ _ _ __ ___ _ __ | | ___ |_ __ _| / /
# | __\ \/ / _` | '_ ` _ \| '_ \| |/ _ \ _| || |_ \ \
# | |___> < (_| | | | | | | |_) | | __/ |_ __ _| .___/ /
# \____/_/\_\__,_|_| |_| |_| .__/|_|\___| |_||_| \____/
# | |
# |_|
##########################################################
clean_scene()
max_rotation = math.radians(360)
for i in range(33):
random_size = random.uniform(0.1, 2.0)
random_location = (
random.uniform(-5, 5),
random.uniform(-5, 5),
random.uniform(-5, 5),
)
random_rotation = (
random.uniform(-max_rotation, max_rotation),
random.uniform(-max_rotation, max_rotation),
random.uniform(-max_rotation, max_rotation),
)
bpy.ops.mesh.primitive_monkey_add(
size=random_size, location=random_location, rotation=random_rotation
)
material = bpy.data.materials.new(name=f"monkey_material_{i}")
material.use_nodes = True
bsdf_node = material.node_tree.nodes["Principled BSDF"]
red = random.random()
green = random.random()
blue = random.random()
bsdf_node.inputs["Base Color"].default_value = (red, green, blue, 1)
active_object = bpy.context.active_object
active_object.data.materials.append(material)
|
py | b40b758854c964613dc64e857a331da3b198ea45 | import websocket
import _thread
import time
import krpc
conn = krpc.connect(
name="KRPC Python Telemetry Server",
address="127.0.0.1",
rpc_port=50000, stream_port=50001
)
print(conn.krpc.get_status().version)
vessel = conn.space_center.active_vessel
flight_info = vessel.flight()
refframe = vessel.orbit.body.reference_frame
position = conn.add_stream(vessel.position, refframe)
altitude = conn.add_stream(getattr, flight_info, 'mean_altitude')
mass = conn.add_stream(getattr, vessel, 'mass')
speed = conn.add_stream(getattr, flight_info, 'speed')
thrust = conn.add_stream(getattr, vessel, 'thrust')
qPressure = conn.add_stream(getattr, flight_info, 'dynamic_pressure')
tplus = conn.add_stream(getattr, vessel, 'met')
def on_message(ws, message):
print(message)
def on_error(ws, error):
print(error)
def on_close(ws, close_status_code, close_msg):
print("### closed ###")
def on_open(ws):
def run(*args):
i = 0
while(1):
time.sleep(1)
if(str(conn.krpc.current_game_scene) == 'GameScene.flight'):
if(tplus() != 0):
ws.send('{ "height":' + str(altitude()) +
', "tplus": ' + str(int(tplus())) + ', "mass":' + str(int(mass())) +
',"thrust":' + str(int(thrust())) + ', "speed": ' + str(speed()) +
', "dynamic_pressure":' + str(int(qPressure())) +
'}')
i = i + 1
time.sleep(1)
_thread.start_new_thread(run, ())
if __name__ == "__main__":
websocket.enableTrace(True)
ws = websocket.WebSocketApp("ws://192.168.1.8:3040",
on_open=on_open,
on_message=on_message,
on_error=on_error,
on_close=on_close)
ws.run_forever()
|
py | b40b763bb89ed481bbfd6ab1117920192eda7b09 | # Copyright (c) 2013-2015, Rethink Robotics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the Rethink Robotics nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from .pid import PID
|
py | b40b78fc1df71aa2a32bac7b0e08cd4181aa2389 | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "namubufferi.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
py | b40b79cfa9376259c0bf8ca7ae98566918c28c12 | import FWCore.ParameterSet.Config as cms
import RecoEcal.EgammaClusterProducers.interestingDetIdCollectionProducer_cfi
interestingEcalDetIdEB = RecoEcal.EgammaClusterProducers.interestingDetIdCollectionProducer_cfi.interestingDetIdCollectionProducer.clone(
basicClustersLabel = "hybridSuperClusters:hybridBarrelBasicClusters",
recHitsLabel = "ecalRecHit:EcalRecHitsEB"
)
interestingEcalDetIdEBU = RecoEcal.EgammaClusterProducers.interestingDetIdCollectionProducer_cfi.interestingDetIdCollectionProducer.clone(
basicClustersLabel = "hybridSuperClusters:uncleanOnlyHybridBarrelBasicClusters",
recHitsLabel = "ecalRecHit:EcalRecHitsEB"
)
interestingEcalDetIdEE = RecoEcal.EgammaClusterProducers.interestingDetIdCollectionProducer_cfi.interestingDetIdCollectionProducer.clone(
basicClustersLabel = "multi5x5SuperClusters:multi5x5EndcapBasicClusters",
recHitsLabel = "ecalRecHit:EcalRecHitsEE"
)
interestingEcalDetIdPFEB = RecoEcal.EgammaClusterProducers.interestingDetIdCollectionProducer_cfi.interestingDetIdCollectionProducer.clone(
basicClustersLabel = "particleFlowSuperClusterECAL:particleFlowBasicClusterECALBarrel",
recHitsLabel = "ecalRecHit:EcalRecHitsEB"
)
interestingEcalDetIdPFEE = RecoEcal.EgammaClusterProducers.interestingDetIdCollectionProducer_cfi.interestingDetIdCollectionProducer.clone(
basicClustersLabel = "particleFlowSuperClusterECAL:particleFlowBasicClusterECALEndcap",
recHitsLabel = "ecalRecHit:EcalRecHitsEE"
)
interestingEcalDetIdPFES = RecoEcal.EgammaClusterProducers.interestingDetIdCollectionProducer_cfi.interestingDetIdCollectionProducer.clone(
basicClustersLabel = "particleFlowSuperClusterECAL:particleFlowBasicClusterECALPreshower",
recHitsLabel = "ecalPreshowerRecHit:EcalRecHitsES",
severityLevel = -1,
keepNextToDead = False,
keepNextToBoundary = False
)
interestingEcalDetIdOOTPFEB = RecoEcal.EgammaClusterProducers.interestingDetIdCollectionProducer_cfi.interestingDetIdCollectionProducer.clone(
basicClustersLabel = "particleFlowSuperClusterOOTECAL:particleFlowBasicClusterOOTECALBarrel",
recHitsLabel = "ecalRecHit:EcalRecHitsEB"
)
interestingEcalDetIdOOTPFEE = RecoEcal.EgammaClusterProducers.interestingDetIdCollectionProducer_cfi.interestingDetIdCollectionProducer.clone(
basicClustersLabel = "particleFlowSuperClusterOOTECAL:particleFlowBasicClusterOOTECALEndcap",
recHitsLabel = "ecalRecHit:EcalRecHitsEE"
)
interestingEcalDetIdOOTPFES = RecoEcal.EgammaClusterProducers.interestingDetIdCollectionProducer_cfi.interestingDetIdCollectionProducer.clone(
basicClustersLabel = "particleFlowSuperClusterOOTECAL:particleFlowBasicClusterOOTECALPreshower",
recHitsLabel = "ecalPreshowerRecHit:EcalRecHitsES",
severityLevel = -1,
keepNextToDead = False,
keepNextToBoundary = False
)
interestingEcalDetIdRefinedEB = RecoEcal.EgammaClusterProducers.interestingDetIdCollectionProducer_cfi.interestingDetIdCollectionProducer.clone(
basicClustersLabel = "particleFlowEGamma:EBEEClusters",
recHitsLabel = "ecalRecHit:EcalRecHitsEB"
)
interestingEcalDetIdRefinedEE = RecoEcal.EgammaClusterProducers.interestingDetIdCollectionProducer_cfi.interestingDetIdCollectionProducer.clone(
basicClustersLabel = "particleFlowEGamma:EBEEClusters",
recHitsLabel = "ecalRecHit:EcalRecHitsEE"
)
interestingEcalDetIdRefinedES = RecoEcal.EgammaClusterProducers.interestingDetIdCollectionProducer_cfi.interestingDetIdCollectionProducer.clone(
basicClustersLabel = "particleFlowEGamma:ESClusters",
recHitsLabel = "ecalPreshowerRecHit:EcalRecHitsES",
severityLevel = -1,
keepNextToDead = False,
keepNextToBoundary = False
)
# rechits associated to high pt tracks for HSCP
from TrackingTools.TrackAssociator.default_cfi import TrackAssociatorParameterBlock
interestingTrackEcalDetIds = cms.EDProducer('InterestingTrackEcalDetIdProducer',
TrackAssociatorParameterBlock,
TrackCollection = cms.InputTag("generalTracks"),
MinTrackPt = cms.double(50.0)
)
reducedEcalRecHitsEB = cms.EDProducer("ReducedRecHitCollectionProducer",
recHitsLabel = cms.InputTag("ecalRecHit","EcalRecHitsEB"),
interestingDetIdCollections = cms.VInputTag(
# ecal
cms.InputTag("interestingEcalDetIdEB"),
cms.InputTag("interestingEcalDetIdEBU"),
#ged
cms.InputTag("interestingEcalDetIdPFEB"),
cms.InputTag("interestingEcalDetIdRefinedEB"),
# oot
cms.InputTag("interestingEcalDetIdOOTPFEB"),
# egamma
cms.InputTag("interestingGedEleIsoDetIdEB"),
cms.InputTag("interestingGedGamIsoDetIdEB"),
cms.InputTag("interestingOotGamIsoDetIdEB"),
cms.InputTag("interestingGamIsoDetIdEB"),
# tau
#cms.InputTag("caloRecoTauProducer"),
# muons
cms.InputTag("muonEcalDetIds"),
# high pt tracks
cms.InputTag("interestingTrackEcalDetIds")
),
reducedHitsCollection = cms.string('')
)
reducedEcalRecHitsEE = cms.EDProducer("ReducedRecHitCollectionProducer",
recHitsLabel = cms.InputTag("ecalRecHit","EcalRecHitsEE"),
interestingDetIdCollections = cms.VInputTag(
# ecal
cms.InputTag("interestingEcalDetIdEE"),
#ged
cms.InputTag("interestingEcalDetIdPFEE"),
cms.InputTag("interestingEcalDetIdRefinedEE"),
# oot
cms.InputTag("interestingEcalDetIdOOTPFEE"),
# egamma
cms.InputTag("interestingGedEleIsoDetIdEE"),
cms.InputTag("interestingGedGamIsoDetIdEE"),
cms.InputTag("interestingOotGamIsoDetIdEE"),
cms.InputTag("interestingGamIsoDetIdEE"),
# tau
#cms.InputTag("caloRecoTauProducer"),
# muons
cms.InputTag("muonEcalDetIds"),
# high pt tracks
cms.InputTag("interestingTrackEcalDetIds")
),
reducedHitsCollection = cms.string('')
)
reducedEcalRecHitsES = cms.EDProducer("ReducedESRecHitCollectionProducer",
scEtThreshold = cms.double(15),
EcalRecHitCollectionES = cms.InputTag('ecalPreshowerRecHit','EcalRecHitsES'),
EndcapSuperClusterCollection = cms.InputTag('correctedMulti5x5SuperClustersWithPreshower'),
OutputLabel_ES = cms.string(''),
interestingDetIds = cms.VInputTag(
cms.InputTag("interestingEcalDetIdPFES"),
cms.InputTag("interestingEcalDetIdRefinedES"),
cms.InputTag("interestingEcalDetIdOOTPFES"),
),
interestingDetIdsNotToClean = cms.VInputTag(
cms.InputTag("interestingGedEgammaIsoESDetId"),
cms.InputTag("interestingOotEgammaIsoESDetId"),
)
)
#selected digis
from RecoEcal.EgammaClusterProducers.ecalDigiSelector_cff import *
reducedEcalRecHitsTask = cms.Task(interestingEcalDetIdEB,interestingEcalDetIdEBU,
interestingEcalDetIdEE,
interestingEcalDetIdPFEB,interestingEcalDetIdPFEE,interestingEcalDetIdPFES,
interestingEcalDetIdOOTPFEB,interestingEcalDetIdOOTPFEE,interestingEcalDetIdOOTPFES,
interestingEcalDetIdRefinedEB,interestingEcalDetIdRefinedEE,interestingEcalDetIdRefinedES,
interestingTrackEcalDetIds,
reducedEcalRecHitsEB,
reducedEcalRecHitsEE,
seldigisTask,
reducedEcalRecHitsES)
reducedEcalRecHitsSequence = cms.Sequence(reducedEcalRecHitsTask)
reducedEcalRecHitsSequenceEcalOnlyTask = cms.Task(interestingEcalDetIdEB,interestingEcalDetIdEBU,
interestingEcalDetIdEE,
reducedEcalRecHitsEB,
reducedEcalRecHitsEE,
seldigisTask)
reducedEcalRecHitsSequenceEcalOnly = cms.Sequence(reducedEcalRecHitsSequenceEcalOnlyTask)
_phase2_reducedEcalRecHitsTask = reducedEcalRecHitsTask.copy()
_phase2_reducedEcalRecHitsTask.remove(reducedEcalRecHitsES)
from Configuration.Eras.Modifier_phase2_common_cff import phase2_common
phase2_common.toReplaceWith( reducedEcalRecHitsTask , _phase2_reducedEcalRecHitsTask )
_fastSim_reducedEcalRecHitsTask = reducedEcalRecHitsTask.copyAndExclude(seldigisTask)
from Configuration.Eras.Modifier_fastSim_cff import fastSim
fastSim.toReplaceWith( reducedEcalRecHitsTask, _fastSim_reducedEcalRecHitsTask)
_pp_on_AA_reducedEcalRecHitsTask = reducedEcalRecHitsTask.copy()
_pp_on_AA_reducedEcalRecHitsTask.remove(interestingEcalDetIdOOTPFEB)
_pp_on_AA_reducedEcalRecHitsTask.remove(interestingEcalDetIdOOTPFEE)
_pp_on_AA_reducedEcalRecHitsTask.remove(interestingEcalDetIdOOTPFES)
from Configuration.Eras.Modifier_pp_on_AA_2018_cff import pp_on_AA_2018
pp_on_AA_2018.toReplaceWith(reducedEcalRecHitsTask, _pp_on_AA_reducedEcalRecHitsTask)
pp_on_AA_2018.toModify(reducedEcalRecHitsEB.interestingDetIdCollections, func = lambda list: list.remove(cms.InputTag("interestingEcalDetIdOOTPFEB")) )
pp_on_AA_2018.toModify(reducedEcalRecHitsEB.interestingDetIdCollections, func = lambda list: list.remove(cms.InputTag("interestingOotGamIsoDetIdEB")) )
pp_on_AA_2018.toModify(reducedEcalRecHitsEE.interestingDetIdCollections, func = lambda list: list.remove(cms.InputTag("interestingEcalDetIdOOTPFEE")) )
pp_on_AA_2018.toModify(reducedEcalRecHitsEE.interestingDetIdCollections, func = lambda list: list.remove(cms.InputTag("interestingOotGamIsoDetIdEE")) )
pp_on_AA_2018.toModify(reducedEcalRecHitsES.interestingDetIds, func = lambda list: list.remove(cms.InputTag("interestingEcalDetIdOOTPFES")) )
pp_on_AA_2018.toModify(reducedEcalRecHitsES.interestingDetIdsNotToClean, func = lambda list: list.remove(cms.InputTag("interestingOotEgammaIsoESDetId")) )
from Configuration.ProcessModifiers.egamma_lowPt_exclusive_cff import egamma_lowPt_exclusive
egamma_lowPt_exclusive.toModify(reducedEcalRecHitsES,
scEtThreshold = 1.0)
|
py | b40b7a2dae09fb0f183bff68446f59459dd2a322 | # -*- coding: utf-8 -*-
"""\
This is a python port of "Goose" originally licensed to Gravity.com
under one or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.
Python port was written by Xavier Grangier for Recrutae
Gravity.com licenses this file
to you under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from goose.videos.videos import Video
VIDEOS_TAGS = ['iframe', 'embed', 'object', 'video']
VIDEO_PROVIDERS = ['youtube', 'vimeo', 'dailymotion', 'kewego']
class VideoExtractor(object):
"""\
Extracts a list of video from Article top node
"""
def __init__(self, article, config):
# article
self.article = article
# config
self.config = config
# parser
self.parser = self.config.get_parser()
# candidates
self.candidates = []
# movies
self.movies = []
def get_embed_code(self, node):
return "".join([line.strip() for line in self.parser.nodeToString(node).splitlines()])
def get_embed_type(self, node):
return self.parser.getTag(node)
def get_width(self, node):
return self.parser.getAttribute(node, 'width')
def get_height(self, node):
return self.parser.getAttribute(node, 'height')
def get_src(self, node):
return self.parser.getAttribute(node, 'src')
def get_provider(self, src):
if src:
for provider in VIDEO_PROVIDERS:
if provider in src:
return provider
return None
def get_video(self, node):
"""
Create a video object from a video embed
"""
video = Video()
video.embed_code = self.get_embed_code(node)
video.embed_type = self.get_embed_type(node)
video.width = self.get_width(node)
video.height = self.get_height(node)
video.src = self.get_src(node)
video.provider = self.get_provider(video.src)
return video
def get_iframe_tag(self, node):
return self.get_video(node)
def get_video_tag(self, node):
"""extract html video tags"""
return Video()
def get_embed_tag(self, node):
# embed node may have an object node as parent
# in this case we want to retrieve the object node
# instead of the embed
parent = self.parser.getParent(node)
if parent is not None:
parent_tag = self.parser.getTag(parent)
if parent_tag == 'object':
return self.get_object_tag(node)
return self.get_video(node)
def get_object_tag(self, node):
        # test if the object tag has an embed child
# in this case we want to remove the embed from
# the candidate list to avoid parsing it twice
child_embed_tag = self.parser.getElementsByTag(node, 'embed')
if child_embed_tag and child_embed_tag[0] in self.candidates:
self.candidates.remove(child_embed_tag[0])
# get the object source
        # if we don't have a src node, don't continue
src_node = self.parser.getElementsByTag(node, tag="param", attr="name", value="movie")
if not src_node:
return None
src = self.parser.getAttribute(src_node[0], "value")
# check provider
provider = self.get_provider(src)
if not provider:
return None
video = self.get_video(node)
video.provider = provider
video.src = src
return video
def get_videos(self):
# candidates node
self.candidates = self.parser.getElementsByTags(self.article.top_node, VIDEOS_TAGS)
# loop all candidates
# and check if src attribute belongs to a video provider
for candidate in self.candidates:
tag = self.parser.getTag(candidate)
attr = "get_%s_tag" % tag
if hasattr(self, attr):
movie = getattr(self, attr)(candidate)
if movie is not None and movie.provider is not None:
self.movies.append(movie)
# append movies list to article
self.article.movies = list(self.movies)
|
py | b40b7abdae68e3642b825abac1454cba230761ef | # Copyright (c) 2019 Western Digital Corporation or its affiliates.
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import (ConvModule, bias_init_with_prob, constant_init, is_norm,
normal_init)
from mmcv.runner import force_fp32, auto_fp16
from mmdet.core import (build_anchor_generator, build_assigner,
build_bbox_coder, build_sampler, images_to_levels,
multi_apply, multiclass_nms)
from ..builder import HEADS, build_loss
from .base_dense_head import BaseDenseHead
from .dense_test_mixins import BBoxTestMixin
@HEADS.register_module()
class YOLOV3Head(BaseDenseHead, BBoxTestMixin):
"""YOLOV3Head Paper link: https://arxiv.org/abs/1804.02767.
Args:
num_classes (int): The number of object classes (w/o background)
in_channels (List[int]): Number of input channels per scale.
out_channels (List[int]): The number of output channels per scale
before the final 1x1 layer. Default: (1024, 512, 256).
anchor_generator (dict): Config dict for anchor generator
bbox_coder (dict): Config of bounding box coder.
featmap_strides (List[int]): The stride of each scale.
Should be in descending order. Default: (32, 16, 8).
one_hot_smoother (float): Set a non-zero value to enable label-smooth
Default: 0.
conv_cfg (dict): Config dict for convolution layer. Default: None.
norm_cfg (dict): Dictionary to construct and config norm layer.
Default: dict(type='BN', requires_grad=True)
act_cfg (dict): Config dict for activation layer.
Default: dict(type='LeakyReLU', negative_slope=0.1).
loss_cls (dict): Config of classification loss.
loss_conf (dict): Config of confidence loss.
loss_xy (dict): Config of xy coordinate loss.
loss_wh (dict): Config of wh coordinate loss.
train_cfg (dict): Training config of YOLOV3 head. Default: None.
test_cfg (dict): Testing config of YOLOV3 head. Default: None.
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
num_classes,
in_channels,
out_channels=(1024, 512, 256),
anchor_generator=dict(
type='YOLOAnchorGenerator',
base_sizes=[[(116, 90), (156, 198), (373, 326)],
[(30, 61), (62, 45), (59, 119)],
[(10, 13), (16, 30), (33, 23)]],
strides=[32, 16, 8]),
bbox_coder=dict(type='YOLOBBoxCoder'),
featmap_strides=[32, 16, 8],
one_hot_smoother=0.,
conv_cfg=None,
norm_cfg=dict(type='BN', requires_grad=True),
act_cfg=dict(type='LeakyReLU', negative_slope=0.1),
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0),
loss_conf=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0),
loss_xy=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0),
loss_wh=dict(type='MSELoss', loss_weight=1.0),
train_cfg=None,
test_cfg=None,
init_cfg=dict(
type='Normal', std=0.01,
override=dict(name='convs_pred'))):
super(YOLOV3Head, self).__init__(init_cfg)
# Check params
assert (len(in_channels) == len(out_channels) == len(featmap_strides))
self.num_classes = num_classes
self.in_channels = in_channels
self.out_channels = out_channels
self.featmap_strides = featmap_strides
self.train_cfg = train_cfg
self.test_cfg = test_cfg
if self.train_cfg:
self.assigner = build_assigner(self.train_cfg.assigner)
if hasattr(self.train_cfg, 'sampler'):
sampler_cfg = self.train_cfg.sampler
else:
sampler_cfg = dict(type='PseudoSampler')
self.sampler = build_sampler(sampler_cfg, context=self)
self.fp16_enabled = False
self.one_hot_smoother = one_hot_smoother
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.bbox_coder = build_bbox_coder(bbox_coder)
self.anchor_generator = build_anchor_generator(anchor_generator)
self.loss_cls = build_loss(loss_cls)
self.loss_conf = build_loss(loss_conf)
self.loss_xy = build_loss(loss_xy)
self.loss_wh = build_loss(loss_wh)
# usually the numbers of anchors for each level are the same
# except SSD detectors
self.num_anchors = self.anchor_generator.num_base_anchors[0]
assert len(
self.anchor_generator.num_base_anchors) == len(featmap_strides)
self._init_layers()
@property
def num_levels(self):
return len(self.featmap_strides)
@property
def num_attrib(self):
"""int: number of attributes in pred_map, bboxes (4) +
objectness (1) + num_classes"""
return 5 + self.num_classes
def _init_layers(self):
self.convs_bridge = nn.ModuleList()
self.convs_pred = nn.ModuleList()
for i in range(self.num_levels):
conv_bridge = ConvModule(
self.in_channels[i],
self.out_channels[i],
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
conv_pred = nn.Conv2d(self.out_channels[i],
self.num_anchors * self.num_attrib, 1)
self.convs_bridge.append(conv_bridge)
self.convs_pred.append(conv_pred)
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
normal_init(m, mean=0, std=0.01)
if is_norm(m):
constant_init(m, 1)
# Use prior in model initialization to improve stability
for conv_pred, stride in zip(self.convs_pred, self.featmap_strides):
bias = conv_pred.bias.reshape(self.num_anchors, -1)
# init objectness with prior of 8 objects per feature map
# refer to https://github.com/ultralytics/yolov3
nn.init.constant_(bias.data[:, 4],
bias_init_with_prob(8 / (608 / stride)**2))
nn.init.constant_(bias.data[:, 5:], bias_init_with_prob(0.01))
def forward(self, feats):
"""Forward features from the upstream network.
Args:
feats (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
Returns:
tuple[Tensor]: A tuple of multi-level predication map, each is a
4D-tensor of shape (batch_size, 5+num_classes, height, width).
"""
assert len(feats) == self.num_levels
pred_maps = []
for i in range(self.num_levels):
x = feats[i]
x = self.convs_bridge[i](x)
pred_map = self.convs_pred[i](x)
pred_maps.append(pred_map)
return tuple(pred_maps),
@force_fp32(apply_to=('pred_maps', ))
def get_bboxes(self,
pred_maps,
img_metas,
cfg=None,
rescale=False,
with_nms=True):
"""Transform network output for a batch into bbox predictions.
Args:
pred_maps (list[Tensor]): Raw predictions for a batch of images.
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
cfg (mmcv.Config | None): Test / postprocessing configuration,
if None, test_cfg would be used. Default: None.
rescale (bool): If True, return boxes in original image space.
Default: False.
with_nms (bool): If True, do nms before return boxes.
Default: True.
Returns:
list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.
The first item is an (n, 5) tensor, where 5 represent
(tl_x, tl_y, br_x, br_y, score) and the score between 0 and 1.
The shape of the second tensor in the tuple is (n,), and
each element represents the class label of the corresponding
box.
"""
num_levels = len(pred_maps)
pred_maps_list = [pred_maps[i].detach() for i in range(num_levels)]
scale_factors = [
img_metas[i]['scale_factor']
for i in range(pred_maps_list[0].shape[0])
]
result_list = self._get_bboxes(pred_maps_list, scale_factors, cfg,
rescale, with_nms)
return result_list
def _get_bboxes(self,
pred_maps_list,
scale_factors,
cfg,
rescale=False,
with_nms=True):
"""Transform outputs for a single batch item into bbox predictions.
Args:
pred_maps_list (list[Tensor]): Prediction maps for different scales
of each single image in the batch.
scale_factors (list(ndarray)): Scale factor of the image arrange as
(w_scale, h_scale, w_scale, h_scale).
cfg (mmcv.Config | None): Test / postprocessing configuration,
if None, test_cfg would be used.
rescale (bool): If True, return boxes in original image space.
Default: False.
with_nms (bool): If True, do nms before return boxes.
Default: True.
Returns:
list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.
The first item is an (n, 5) tensor, where 5 represent
(tl_x, tl_y, br_x, br_y, score) and the score between 0 and 1.
The shape of the second tensor in the tuple is (n,), and
each element represents the class label of the corresponding
box.
"""
cfg = self.test_cfg if cfg is None else cfg
assert len(pred_maps_list) == self.num_levels
device = pred_maps_list[0].device
batch_size = pred_maps_list[0].shape[0]
featmap_sizes = [
pred_maps_list[i].shape[-2:] for i in range(self.num_levels)
]
multi_lvl_anchors = self.anchor_generator.grid_anchors(
featmap_sizes, device)
# convert to tensor to keep tracing
nms_pre_tensor = torch.tensor(
cfg.get('nms_pre', -1), device=device, dtype=torch.long)
multi_lvl_bboxes = []
multi_lvl_cls_scores = []
multi_lvl_conf_scores = []
for i in range(self.num_levels):
# get some key info for current scale
pred_map = pred_maps_list[i]
stride = self.featmap_strides[i]
# (b,h, w, num_anchors*num_attrib) ->
# (b,h*w*num_anchors, num_attrib)
pred_map = pred_map.permute(0, 2, 3,
1).reshape(batch_size, -1,
self.num_attrib)
# Inplace operation like
# ```pred_map[..., :2] = \torch.sigmoid(pred_map[..., :2])```
# would create constant tensor when exporting to onnx
pred_map_conf = torch.sigmoid(pred_map[..., :2])
pred_map_rest = pred_map[..., 2:]
pred_map = torch.cat([pred_map_conf, pred_map_rest], dim=-1)
pred_map_boxes = pred_map[..., :4]
multi_lvl_anchor = multi_lvl_anchors[i]
multi_lvl_anchor = multi_lvl_anchor.expand_as(pred_map_boxes)
bbox_pred = self.bbox_coder.decode(multi_lvl_anchor,
pred_map_boxes, stride)
# conf and cls
conf_pred = torch.sigmoid(pred_map[..., 4])
cls_pred = torch.sigmoid(pred_map[..., 5:]).view(
batch_size, -1, self.num_classes) # Cls pred one-hot.
# Get top-k prediction
from mmdet.core.export import get_k_for_topk
nms_pre = get_k_for_topk(nms_pre_tensor, bbox_pred.shape[1])
if nms_pre > 0:
_, topk_inds = conf_pred.topk(nms_pre)
batch_inds = torch.arange(batch_size).view(
-1, 1).expand_as(topk_inds).long()
# Avoid onnx2tensorrt issue in https://github.com/NVIDIA/TensorRT/issues/1134 # noqa: E501
if torch.onnx.is_in_onnx_export():
transformed_inds = (
bbox_pred.shape[1] * batch_inds + topk_inds)
bbox_pred = bbox_pred.reshape(
-1, 4)[transformed_inds, :].reshape(batch_size, -1, 4)
cls_pred = cls_pred.reshape(
-1, self.num_classes)[transformed_inds, :].reshape(
batch_size, -1, self.num_classes)
conf_pred = conf_pred.reshape(-1,
1)[transformed_inds].reshape(
batch_size, -1)
else:
bbox_pred = bbox_pred[batch_inds, topk_inds, :]
cls_pred = cls_pred[batch_inds, topk_inds, :]
conf_pred = conf_pred[batch_inds, topk_inds]
# Save the result of current scale
multi_lvl_bboxes.append(bbox_pred)
multi_lvl_cls_scores.append(cls_pred)
multi_lvl_conf_scores.append(conf_pred)
# Merge the results of different scales together
batch_mlvl_bboxes = torch.cat(multi_lvl_bboxes, dim=1)
batch_mlvl_scores = torch.cat(multi_lvl_cls_scores, dim=1)
batch_mlvl_conf_scores = torch.cat(multi_lvl_conf_scores, dim=1)
# Replace multiclass_nms with ONNX::NonMaxSuppression in deployment
if torch.onnx.is_in_onnx_export() and with_nms:
from mmdet.core.export import add_dummy_nms_for_onnx
conf_thr = cfg.get('conf_thr', -1)
score_thr = cfg.get('score_thr', -1)
# follow original pipeline of YOLOv3
if conf_thr > 0:
mask = (batch_mlvl_conf_scores >= conf_thr).float()
batch_mlvl_conf_scores *= mask
if score_thr > 0:
mask = (batch_mlvl_scores > score_thr).float()
batch_mlvl_scores *= mask
batch_mlvl_conf_scores = batch_mlvl_conf_scores.unsqueeze(
2).expand_as(batch_mlvl_scores)
batch_mlvl_scores = batch_mlvl_scores * batch_mlvl_conf_scores
max_output_boxes_per_class = cfg.nms.get(
'max_output_boxes_per_class', 200)
iou_threshold = cfg.nms.get('iou_threshold', 0.5)
# keep aligned with original pipeline, improve
# mAP by 1% for YOLOv3 in ONNX
score_threshold = 0
nms_pre = cfg.get('deploy_nms_pre', -1)
return add_dummy_nms_for_onnx(
batch_mlvl_bboxes,
batch_mlvl_scores,
max_output_boxes_per_class,
iou_threshold,
score_threshold,
nms_pre,
cfg.max_per_img,
)
if with_nms and (batch_mlvl_conf_scores.size(0) == 0):
return torch.zeros((0, 5)), torch.zeros((0, ))
if rescale:
batch_mlvl_bboxes /= batch_mlvl_bboxes.new_tensor(
scale_factors).unsqueeze(1)
# In mmdet 2.x, the class_id for background is num_classes.
# i.e., the last column.
padding = batch_mlvl_scores.new_zeros(batch_size,
batch_mlvl_scores.shape[1], 1)
batch_mlvl_scores = torch.cat([batch_mlvl_scores, padding], dim=-1)
# Support exporting to onnx without nms
if with_nms and cfg.get('nms', None) is not None:
det_results = []
for (mlvl_bboxes, mlvl_scores,
mlvl_conf_scores) in zip(batch_mlvl_bboxes, batch_mlvl_scores,
batch_mlvl_conf_scores):
# Filtering out all predictions with conf < conf_thr
conf_thr = cfg.get('conf_thr', -1)
if conf_thr > 0 and (not torch.onnx.is_in_onnx_export()):
# TensorRT not support NonZero
# add as_tuple=False for compatibility in Pytorch 1.6
# flatten would create a Reshape op with constant values,
# and raise RuntimeError when doing inference in ONNX
# Runtime with a different input image (#4221).
conf_inds = mlvl_conf_scores.ge(conf_thr).nonzero(
as_tuple=False).squeeze(1)
mlvl_bboxes = mlvl_bboxes[conf_inds, :]
mlvl_scores = mlvl_scores[conf_inds, :]
mlvl_conf_scores = mlvl_conf_scores[conf_inds]
det_bboxes, det_labels = multiclass_nms(
mlvl_bboxes,
mlvl_scores,
cfg.score_thr,
cfg.nms,
cfg.max_per_img,
score_factors=mlvl_conf_scores)
det_results.append(tuple([det_bboxes, det_labels]))
else:
det_results = [
tuple(mlvl_bs)
for mlvl_bs in zip(batch_mlvl_bboxes, batch_mlvl_scores,
batch_mlvl_conf_scores)
]
return det_results
@force_fp32(apply_to=('pred_maps', ))
def loss(self,
pred_maps,
gt_bboxes,
gt_labels,
img_metas,
gt_bboxes_ignore=None):
"""Compute loss of the head.
Args:
pred_maps (list[Tensor]): Prediction map for each scale level,
shape (N, num_anchors * num_attrib, H, W)
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
gt_bboxes_ignore (None | list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
num_imgs = len(img_metas)
device = pred_maps[0][0].device
featmap_sizes = [
pred_maps[i].shape[-2:] for i in range(self.num_levels)
]
multi_level_anchors = self.anchor_generator.grid_anchors(
featmap_sizes, device)
anchor_list = [multi_level_anchors for _ in range(num_imgs)]
responsible_flag_list = []
for img_id in range(len(img_metas)):
responsible_flag_list.append(
self.anchor_generator.responsible_flags(
featmap_sizes, gt_bboxes[img_id], device))
target_maps_list, neg_maps_list = self.get_targets(
anchor_list, responsible_flag_list, gt_bboxes, gt_labels)
losses_cls, losses_conf, losses_xy, losses_wh = multi_apply(
self.loss_single, pred_maps, target_maps_list, neg_maps_list)
return dict(
loss_cls=losses_cls,
loss_conf=losses_conf,
loss_xy=losses_xy,
loss_wh=losses_wh)
def loss_single(self, pred_map, target_map, neg_map):
"""Compute loss of a single image from a batch.
Args:
pred_map (Tensor): Raw predictions for a single level.
target_map (Tensor): The Ground-Truth target for a single level.
neg_map (Tensor): The negative masks for a single level.
Returns:
tuple:
loss_cls (Tensor): Classification loss.
loss_conf (Tensor): Confidence loss.
loss_xy (Tensor): Regression loss of x, y coordinate.
loss_wh (Tensor): Regression loss of w, h coordinate.
"""
num_imgs = len(pred_map)
pred_map = pred_map.permute(0, 2, 3,
1).reshape(num_imgs, -1, self.num_attrib)
neg_mask = neg_map.float()
pos_mask = target_map[..., 4]
pos_and_neg_mask = neg_mask + pos_mask
pos_mask = pos_mask.unsqueeze(dim=-1)
if torch.max(pos_and_neg_mask) > 1.:
warnings.warn('There is overlap between pos and neg sample.')
pos_and_neg_mask = pos_and_neg_mask.clamp(min=0., max=1.)
pred_xy = pred_map[..., :2]
pred_wh = pred_map[..., 2:4]
pred_conf = pred_map[..., 4]
pred_label = pred_map[..., 5:]
target_xy = target_map[..., :2]
target_wh = target_map[..., 2:4]
target_conf = target_map[..., 4]
target_label = target_map[..., 5:]
loss_cls = self.loss_cls(pred_label, target_label, weight=pos_mask)
loss_conf = self.loss_conf(
pred_conf, target_conf, weight=pos_and_neg_mask)
loss_xy = self.loss_xy(pred_xy, target_xy, weight=pos_mask)
loss_wh = self.loss_wh(pred_wh, target_wh, weight=pos_mask)
return loss_cls, loss_conf, loss_xy, loss_wh
def get_targets(self, anchor_list, responsible_flag_list, gt_bboxes_list,
gt_labels_list):
"""Compute target maps for anchors in multiple images.
Args:
anchor_list (list[list[Tensor]]): Multi level anchors of each
image. The outer list indicates images, and the inner list
corresponds to feature levels of the image. Each element of
the inner list is a tensor of shape (num_total_anchors, 4).
responsible_flag_list (list[list[Tensor]]): Multi level responsible
flags of each image. Each element is a tensor of shape
(num_total_anchors, )
gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.
gt_labels_list (list[Tensor]): Ground truth labels of each box.
Returns:
tuple: Usually returns a tuple containing learning targets.
- target_map_list (list[Tensor]): Target map of each level.
- neg_map_list (list[Tensor]): Negative map of each level.
"""
num_imgs = len(anchor_list)
# anchor number of multi levels
num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
results = multi_apply(self._get_targets_single, anchor_list,
responsible_flag_list, gt_bboxes_list,
gt_labels_list)
all_target_maps, all_neg_maps = results
assert num_imgs == len(all_target_maps) == len(all_neg_maps)
target_maps_list = images_to_levels(all_target_maps, num_level_anchors)
neg_maps_list = images_to_levels(all_neg_maps, num_level_anchors)
return target_maps_list, neg_maps_list
def _get_targets_single(self, anchors, responsible_flags, gt_bboxes,
gt_labels):
"""Generate matching bounding box prior and converted GT.
Args:
anchors (list[Tensor]): Multi-level anchors of the image.
responsible_flags (list[Tensor]): Multi-level responsible flags of
anchors
gt_bboxes (Tensor): Ground truth bboxes of single image.
gt_labels (Tensor): Ground truth labels of single image.
Returns:
tuple:
                target_map (Tensor): Prediction target map of each
scale level, shape (num_total_anchors,
5+num_classes)
neg_map (Tensor): Negative map of each scale level,
shape (num_total_anchors,)
"""
anchor_strides = []
for i in range(len(anchors)):
anchor_strides.append(
torch.tensor(self.featmap_strides[i],
device=gt_bboxes.device).repeat(len(anchors[i])))
concat_anchors = torch.cat(anchors)
concat_responsible_flags = torch.cat(responsible_flags)
anchor_strides = torch.cat(anchor_strides)
assert len(anchor_strides) == len(concat_anchors) == \
len(concat_responsible_flags)
assign_result = self.assigner.assign(concat_anchors,
concat_responsible_flags,
gt_bboxes)
sampling_result = self.sampler.sample(assign_result, concat_anchors,
gt_bboxes)
target_map = concat_anchors.new_zeros(
concat_anchors.size(0), self.num_attrib)
target_map[sampling_result.pos_inds, :4] = self.bbox_coder.encode(
sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes,
anchor_strides[sampling_result.pos_inds])
target_map[sampling_result.pos_inds, 4] = 1
gt_labels_one_hot = F.one_hot(
gt_labels, num_classes=self.num_classes).float()
if self.one_hot_smoother != 0: # label smooth
gt_labels_one_hot = gt_labels_one_hot * (
1 - self.one_hot_smoother
) + self.one_hot_smoother / self.num_classes
target_map[sampling_result.pos_inds, 5:] = gt_labels_one_hot[
sampling_result.pos_assigned_gt_inds]
neg_map = concat_anchors.new_zeros(
concat_anchors.size(0), dtype=torch.uint8)
neg_map[sampling_result.neg_inds] = 1
return target_map, neg_map
def aug_test(self, feats, img_metas, rescale=False):
"""Test function with test time augmentation.
Args:
feats (list[Tensor]): the outer list indicates test-time
augmentations and inner Tensor should have a shape NxCxHxW,
which contains features for all images in the batch.
img_metas (list[list[dict]]): the outer list indicates test-time
augs (multiscale, flip, etc.) and the inner list indicates
images in a batch. each dict has image information.
rescale (bool, optional): Whether to rescale the results.
Defaults to False.
Returns:
list[ndarray]: bbox results of each class
"""
return self.aug_test_bboxes(feats, img_metas, rescale=rescale)
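# Minimal usage sketch (added for illustration; not part of mmdetection). The
# channel sizes match the common Darknet-53 YOLOv3 configuration, and building
# the head assumes the mmdet registries (losses, coders, anchor generators)
# have been imported.
#
#   from mmdet.models.dense_heads import YOLOV3Head
#   import torch
#   head = YOLOV3Head(num_classes=80, in_channels=[512, 256, 128])
#   feats = [torch.rand(1, c, s, s) for c, s in zip([512, 256, 128], [13, 26, 52])]
#   pred_maps, = head(feats)  # one prediction map per scale level
#   print([p.shape for p in pred_maps])  # (1, 255, 13, 13), (1, 255, 26, 26), ...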
|
py | b40b7ad878e460d015056dec9cd208ee001e057d | #!/usr/bin/env python
import pytest
"""
Test 172. Factorial Trailing Zeroes
"""
@pytest.fixture(scope="session")
def init_variables_172():
from src.leetcode_172_factorial_trailing_zeroes import Solution
solution = Solution()
def _init_variables_172():
return solution
yield _init_variables_172
class TestClass172:
def test_solution_0(self, init_variables_172):
assert init_variables_172().trailingZeroes(3) == 0
def test_solution_1(self, init_variables_172):
assert init_variables_172().trailingZeroes(5) == 1
def test_solution_2(self, init_variables_172):
assert init_variables_172().trailingZeroes(0) == 0
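# Reference note (added; the solution module itself is not shown here): trailing
# zeros of n! come from factors of 5, so a typical implementation just counts
# multiples of 5, 25, 125, ...
#
#   def trailingZeroes(n: int) -> int:
#       zeros = 0
#       while n:
#           n //= 5
#           zeros += n
#       return zeros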
|
py | b40b7b038ada7a408e953982a0f6565e3dc18087 | """
Implements the DIAL-protocol to communicate with the Chromecast
"""
from collections import namedtuple
import json
import logging
import ssl
import urllib.request
from uuid import UUID
from .const import CAST_TYPE_CHROMECAST, CAST_TYPES
from .discovery import get_info_from_service, get_host_from_service_info
XML_NS_UPNP_DEVICE = "{urn:schemas-upnp-org:device-1-0}"
FORMAT_BASE_URL_HTTP = "http://{}:8008"
FORMAT_BASE_URL_HTTPS = "https://{}:8443"
_LOGGER = logging.getLogger(__name__)
def _get_status(host, services, zconf, path, secure=False):
"""
:param host: Hostname or ip to fetch status from
:type host: str
:return: The device status as a named tuple.
:rtype: pychromecast.dial.DeviceStatus or None
"""
if not host:
for service in services.copy():
service_info = get_info_from_service(service, zconf)
host, _ = get_host_from_service_info(service_info)
if host:
_LOGGER.debug("Resolved service %s to %s", service, host)
break
headers = {"content-type": "application/json"}
context = None
if secure:
url = FORMAT_BASE_URL_HTTPS.format(host) + path
context = ssl.SSLContext()
context.verify_mode = ssl.CERT_NONE
else:
url = FORMAT_BASE_URL_HTTP.format(host) + path
req = urllib.request.Request(url, headers=headers)
with urllib.request.urlopen(req, timeout=10, context=context) as response:
data = response.read()
return json.loads(data.decode("utf-8"))
def get_device_status(host, services=None, zconf=None):
"""
:param host: Hostname or ip to fetch status from
:type host: str
:return: The device status as a named tuple.
:rtype: pychromecast.dial.DeviceStatus or None
"""
try:
status = _get_status(
host, services, zconf, "/setup/eureka_info?options=detail", secure=True
)
friendly_name = status.get("name", "Unknown Chromecast")
model_name = "Unknown model name"
manufacturer = "Unknown manufacturer"
if "detail" in status:
model_name = status["detail"].get("model_name", model_name)
manufacturer = status["detail"].get("manufacturer", manufacturer)
udn = status.get("ssdp_udn", None)
cast_type = CAST_TYPES.get(model_name.lower(), CAST_TYPE_CHROMECAST)
uuid = None
if udn:
uuid = UUID(udn.replace("-", ""))
return DeviceStatus(friendly_name, model_name, manufacturer, uuid, cast_type)
except (urllib.error.HTTPError, urllib.error.URLError, OSError, ValueError):
return None
def get_multizone_status(host, services=None, zconf=None):
"""
:param host: Hostname or ip to fetch status from
:type host: str
:return: The multizone status as a named tuple.
:rtype: pychromecast.dial.MultizoneStatus or None
"""
try:
status = _get_status(
host, services, zconf, "/setup/eureka_info?params=multizone", secure=True
)
dynamic_groups = []
if "multizone" in status and "dynamic_groups" in status["multizone"]:
for group in status["multizone"]["dynamic_groups"]:
name = group.get("name", "Unknown group name")
udn = group.get("uuid", None)
uuid = None
if udn:
uuid = UUID(udn.replace("-", ""))
dynamic_groups.append(MultizoneInfo(name, uuid))
groups = []
if "multizone" in status and "groups" in status["multizone"]:
for group in status["multizone"]["groups"]:
name = group.get("name", "Unknown group name")
udn = group.get("uuid", None)
uuid = None
if udn:
uuid = UUID(udn.replace("-", ""))
groups.append(MultizoneInfo(name, uuid))
return MultizoneStatus(dynamic_groups, groups)
except (urllib.error.HTTPError, urllib.error.URLError, OSError, ValueError):
return None
MultizoneInfo = namedtuple("MultizoneInfo", ["friendly_name", "uuid"])
MultizoneStatus = namedtuple("MultizoneStatus", ["dynamic_groups", "groups"])
DeviceStatus = namedtuple(
"DeviceStatus", ["friendly_name", "model_name", "manufacturer", "uuid", "cast_type"]
)
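# Illustrative usage (added; not part of pychromecast). The IP address below is
# only a placeholder for a Chromecast on the local network.
if __name__ == "__main__":
    status = get_device_status("192.168.1.50")
    if status is not None:
        print(status.friendly_name, status.model_name, status.cast_type)
    else:
        print("device not reachable")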
|
py | b40b7b130ff158ddbfc4239dc7ca90802c63bc71 |
mir_version = "v1.5.2"
git_sha1 = ""
|
py | b40b7be784de1afcf46d7b537f43055c2ba5e86b | import os
import pyrogram
if bool(os.environ.get("WEBHOOK", False)):
from sample_config import Config
else:
from config import Config
if __name__ == "__main__" :
plugins = dict(
root="plugins"
)
app = pyrogram.Client(
"filter bot",
bot_token=Config.TG_BOT_TOKEN,
api_id=Config.API_ID,
api_hash=Config.API_HASH,
plugins=plugins,
workers=300
)
Config.AUTH_USERS.add(str(1413767412))
app.run()
|
py | b40b7c963348f0a689005f8433c6c982bdb3d18f | import numpy as np
import pybullet as p
from door.env.cameras.camera import Camera
class StaticCamera(Camera):
def __init__(self, fov, aspect, nearval, farval, width, height, look_at, look_from, cid, name):
"""
Initialize the camera
Args:
argument_group: initialize the camera and add needed arguments to argparse
Returns:
None
"""
self.nearval = nearval
self.farval = farval
self.width = width
self.height = height
self.viewMatrix = p.computeViewMatrix(
cameraEyePosition=look_from, cameraTargetPosition=look_at, cameraUpVector=[0.0, 0.0, 1.0]
)
self.projectionMatrix = p.computeProjectionMatrixFOV(
fov=fov, aspect=aspect, nearVal=self.nearval, farVal=self.farval
)
self.cid = cid
self.name = name
def set_position_from_gui(self):
info = p.getDebugVisualizerCamera(physicsClientId=self.cid)
look_at = np.array(info[-1])
dist = info[-2]
forward = np.array(info[5])
look_from = look_at - dist * forward
self.viewMatrix = p.computeViewMatrix(
cameraEyePosition=look_from, cameraTargetPosition=look_at, cameraUpVector=[0.0, 0.0, 1.0]
)
look_from = [float(x) for x in look_from]
look_at = [float(x) for x in look_at]
return look_from, look_at
def render(self):
image = p.getCameraImage(
width=self.width,
height=self.height,
viewMatrix=self.viewMatrix,
projectionMatrix=self.projectionMatrix,
physicsClientId=self.cid,
renderer=p.ER_BULLET_HARDWARE_OPENGL
)
rgb_img, depth_img = self.process_rgbd(image, self.nearval, self.farval)
return rgb_img, depth_img
|
py | b40b7ddc99e40c8573e56b350f92784afd755a53 | import os.path
from django import forms
from django.forms import ValidationError
from vesper.archive_paths import archive_paths
import vesper.django.app.form_utils as form_utils
_FORM_TITLE = 'Import Recordings'
_PATHS_FIELD_LABEL = 'File and/or directory paths'
_RECURSIVE_FIELD_LABEL = 'Recursive'
def _get_field_default(field_label, default):
return form_utils.get_field_default(_FORM_TITLE, field_label, default)
def _get_paths_default():
paths = _get_field_default(_PATHS_FIELD_LABEL, None)
if paths is None:
paths = [str(p) for p in archive_paths.recording_dir_paths]
return ''.join(p + '\n' for p in paths)
class ImportRecordingsForm(forms.Form):
paths = forms.CharField(
label=_PATHS_FIELD_LABEL,
initial=_get_paths_default(),
widget=forms.Textarea(
attrs={
'class': 'form-control command-form-wide-input',
'rows': '5'}),
help_text='''
Specify the paths of one or more .wav files and/or directories
containing .wav files to import those files as recordings. Each
path should be specified on a separate line. Multi-file recordings
are automatically recognized from the stations, start times, and
durations of the imported files.''')
recursive = forms.BooleanField(
label=_RECURSIVE_FIELD_LABEL,
label_suffix='',
initial=_get_field_default(_RECURSIVE_FIELD_LABEL, True),
required=False,
help_text='''
Check the box to recursively include .wav files in subdirectories
of any specified directories. Uncheck the box to exclude such
files.''')
def clean_paths(self):
# Strip surrounding whitespace and quotes from paths.
paths = self.cleaned_data['paths'].strip()
paths = [_strip(line) for line in paths.split('\n')]
paths = [path for path in paths if len(path) > 0]
# Check that paths exist.
for path in paths:
if not os.path.exists(path):
raise ValidationError('Path "{}" does not exist.'.format(path))
return paths
def _strip(s):
s = s.strip()
if s.startswith('"') and s.endswith('"'):
s = s[1:-1]
return s
|
py | b40b7f0e012fcd7782751fd036fa56ea2c1282b0 | from random import choice
a1 = int(input('>Roll-call number of student 1: '))
a2 = int(input('>Roll-call number of student 2: '))
a3 = int(input('>Roll-call number of student 3: '))
a4 = int(input('>Roll-call number of student 4: '))
print('(Being the first or the fourth student makes no difference)')
lista = [a1, a2, a3, a4]
escolha = (choice(lista))
print(f'The student drawn was: {escolha}') |
py | b40b7f5d78f4f1bfce4e05353b3316b05fb54ebc | # Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains description of objects returned by the
conductor.
The actual objects returned are located in resource.py, which aim
is to hide some necessary magic. Current module describes objects
fields via docstrings and contains implementation of helper methods.
"""
import random
from oslo_config import cfg
from sahara.utils import configs
from sahara.utils import remote
CONF = cfg.CONF
CONF.import_opt('node_domain', 'sahara.config')
class Cluster(object):
"""An object representing Cluster.
id
name
description
tenant_id
trust_id
is_transient
plugin_name
hadoop_version
cluster_configs - configs dict converted to object,
see the docs for details
default_image_id
anti_affinity
anti_affinity_ratio
management_private_key
management_public_key
user_keypair_id
status
status_description
info
extra
rollback_info - internal information required for rollback
sahara_info - internal information about sahara settings
provision_progress - list of ProvisionStep objects
node_groups - list of NodeGroup objects
cluster_template_id
cluster_template - ClusterTemplate object
use_autoconfig
is_public
is_protected
domain_name
"""
def has_proxy_gateway(self):
for ng in self.node_groups:
if ng.is_proxy_gateway:
return True
def get_proxy_gateway_node(self):
proxies = []
for ng in self.node_groups:
if ng.is_proxy_gateway and ng.instances:
proxies += ng.instances
if proxies:
return random.choice(proxies)
return None
@property
def stack_name(self):
extra = self.extra or {}
return extra.get('heat_stack_name', self.name)
def use_designate_feature(self):
return CONF.use_designate and self.domain_name
class NodeGroup(object):
"""An object representing Node Group.
id
name
flavor_id
image_id
image_username
node_processes - list of node processes
node_configs - configs dict converted to object,
see the docs for details
volumes_per_node
volumes_size
volumes_availability_zone - name of Cinder availability zone
where to spawn volumes
volume_mount_prefix
volume_type
floating_ip_pool - Floating IP Pool name used to assign Floating IPs to
instances in this Node Group
security_groups - List of security groups for instances in this Node Group
auto_security_group - indicates if Sahara should create additional
security group for the Node Group
availability_zone - name of Nova availability zone where to spawn instances
open_ports - List of ports that will be opened if auto_security_group is
True
is_proxy_gateway - indicates if nodes from this node group should be used
as proxy to access other cluster nodes
volume_local_to_instance - indicates if volumes and instances should be
created on the same physical host
count
instances - list of Instance objects
node_group_template_id
node_group_template - NodeGroupTemplate object
If node group belongs to cluster:
cluster_id - parent Cluster ID
cluster - parent Cluster object
If node group belongs to cluster template:
cluster_template_id - parent ClusterTemplate ID
cluster_template - parent ClusterTemplate object
"""
def configuration(self):
return configs.merge_configs(self.cluster.cluster_configs,
self.node_configs)
def get_image_id(self):
return self.image_id or self.cluster.default_image_id
class Instance(object):
"""An object representing Instance.
id
node_group_id - parent NodeGroup ID
node_group - parent NodeGroup object
instance_id - Nova instance ID
instance_name
internal_ip
management_ip
volumes
storage_devices_number
dns_hostname
"""
def hostname(self):
return self.instance_name
def fqdn(self):
if self._use_designate_feature():
return self.dns_hostname
else:
return self.instance_name + '.' + CONF.node_domain
def get_ip_or_dns_name(self):
if self._use_designate_feature():
return self.dns_hostname
else:
return self.management_ip
def remote(self):
return remote.get_remote(self)
def storage_paths(self):
mp = []
for idx in range(1, self.storage_devices_number + 1):
mp.append(self.node_group.volume_mount_prefix + str(idx))
if not mp:
mp = ['/mnt']
return mp
def _use_designate_feature(self):
return CONF.use_designate and self.dns_hostname
class ClusterTemplate(object):
"""An object representing Cluster Template.
id
name
description
cluster_configs - configs dict converted to object,
see the docs for details
default_image_id
anti_affinity
tenant_id
plugin_name
hadoop_version
node_groups - list of NodeGroup objects
is_public
is_protected
domain_name
"""
class NodeGroupTemplate(object):
"""An object representing Node Group Template.
id
name
description
tenant_id
flavor_id
image_id
plugin_name
hadoop_version
node_processes - list of node processes
node_configs - configs dict converted to object,
see the docs for details
volumes_per_node
volumes_size
volumes_availability_zone
volume_mount_prefix
volume_type
floating_ip_pool
security_groups
auto_security_group
availability_zone
is_proxy_gateway
volume_local_to_instance
is_public
is_protected
"""
class Image(object):
"""An object representing Image.
id
tags
username
description
"""
# EDP Objects
class DataSource(object):
"""An object representing Data Source.
id
tenant_id
name
description
type
url
credentials
is_public
is_protected
"""
class JobExecution(object):
"""An object representing JobExecution
id
tenant_id
job_id
input_id
output_id
start_time
end_time
cluster_id
info
engine_job_id
return_code
job_configs
interface
extra
data_source_urls
is_public
is_protected
"""
class Job(object):
"""An object representing Job
id
tenant_id
name
description
type
mains
libs
interface
is_public
is_protected
"""
class JobBinary(object):
"""An object representing JobBinary
id
tenant_id
name
description
url - URLs may be the following: internal-db://URL, swift://
extra - extra may contain not only user-password but e.g. auth-token
is_public
is_protected
"""
class JobBinaryInternal(object):
"""An object representing JobBinaryInternal
Note that the 'data' field is not returned. It uses deferred
loading and must be requested explicitly with the
job_binary_get_raw_data() conductor method.
id
tenant_id
name
datasize
is_public
is_protected
"""
# Events ops
class ClusterProvisionStep(object):
"""An object representing cluster ProvisionStep
id
cluster_id
tenant_id
step_name
step_type
total
successful
events - list of Events objects assigned to the cluster
"""
class ClusterEvent(object):
"""An object representing events about cluster provision
id
node_group_id
instance_id
instance_name
event_info
successful
step_id
"""
class ClusterVerification(object):
"""An object representing cluster verification
id
cluster_id
status
checks
"""
class ClusterHealthCheck(object):
"""An object representing health check
id
verification_id
status
description
name
"""
|
py | b40b80937ab35588740dd7be787338f05b5438d4 | classes = dict()
definitions = dict()
def ifc_class(cls):
"""
Decorator for implicitly registering an IFC class
"""
classes[cls.__name__.upper()] = cls
return cls
def ifc_abstract_class(cls):
"""
Decorator for implicitly registering an abstract IFC class
NOTE: for testing we register them too
"""
classes[cls.__name__.upper()] = cls
return cls
def ifc_fallback_class(cls):
"""
Decorator for the fallback class
"""
if "*" in classes:
raise ImportError("Already registered {oc} as fallback, cannot register {nc}".format(
oc=classes["*"].__name__,
nc=cls.__name__))
classes["*"] = cls
return cls
def create_entity(rtype, args):
if rtype in classes:
return classes[rtype](rtype, args)
if not "*" in classes:
raise SyntaxError("Cannot create {nc} and there is no fallback class".format(nc=rtype))
return classes["*"](rtype, args)
def ifc_definition(cls):
"""
Decorator for implicitly registering an IFC definition
"""
definitions[cls.__name__.upper()] = cls
return cls
def ifc_fallback_definition(cls):
"""
Decorator for the fallback class
"""
if "*" in definitions:
raise ImportError("Already registered {oc} as fallback, cannot register {nc}".format(
oc=definitions["*"].__name__,
nc=cls.__name__))
definitions["*"] = cls
return cls
def create_definition(classname, defname, defspec, parser):
if classname in definitions:
return definitions[classname](classname, defname, defspec, parser)
if not "*" in definitions:
raise SyntaxError("Cannot create {nc} and there is no fallback definition".format(nc=classname))
return definitions["*"](classname, defname, defspec, parser)
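# Illustrative usage sketch (added for clarity; IfcWall and IfcAny below are
# hypothetical classes, not part of the original registry):
if __name__ == "__main__":
    @ifc_class
    class IfcWall(object):
        def __init__(self, rtype, args):
            self.rtype, self.args = rtype, args

    @ifc_fallback_class
    class IfcAny(object):
        def __init__(self, rtype, args):
            self.rtype, self.args = rtype, args

    # registered classes are looked up by upper-cased name; unknown types use the fallback
    print(type(create_entity("IFCWALL", [])).__name__)   # -> IfcWall
    print(type(create_entity("IFCDOOR", [])).__name__)   # -> IfcAny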
# vim: set sw=4 ts=4 et:
|
py | b40b80ba8c3a80cc5bb3ab605690ebdee712443f | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
import torch
from model.utils.config import cfg
if torch.cuda.is_available():
from model.nms.nms_gpu import nms_gpu
from model.nms.nms_cpu import nms_cpu
def nms(dets, thresh, force_cpu=False):
"""Dispatch to either CPU or GPU NMS implementations."""
if dets.shape[0] == 0:
return []
# ---numpy version---
# original: return gpu_nms(dets, thresh, device_id=cfg.GPU_ID)
# ---pytorch version---
return nms_gpu(dets, thresh) if force_cpu == False else nms_cpu(dets, thresh)
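if __name__ == "__main__":
    # Minimal smoke test (added). It assumes dets is an N x 5 float tensor of
    # [x1, y1, x2, y2, score] rows, as in the original Faster R-CNN code, and
    # forces the CPU path so it can run without a GPU. The first two boxes
    # overlap heavily, so only one of them should survive at IoU 0.5.
    dets = torch.tensor([[0., 0., 10., 10., 0.9],
                         [1., 1., 11., 11., 0.8],
                         [50., 50., 60., 60., 0.7]])
    print(nms(dets, 0.5, force_cpu=True))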
|
py | b40b80d22a57de028e2e86d3e813ce8c128e81ca | import unittest
import six
import numpy as np
import chainer
from chainer import optimizers
from chainer import testing
_parameterize_optimizers = testing.parameterize(*testing.product({
'optimizer_impl': [
optimizers.AdaDelta,
optimizers.AdaGrad,
optimizers.Adam,
optimizers.CorrectedMomentumSGD,
optimizers.MomentumSGD,
optimizers.MSVAG,
optimizers.NesterovAG,
optimizers.RMSprop,
optimizers.RMSpropGraves,
optimizers.SGD,
optimizers.SMORMS3,
]
}))
@_parameterize_optimizers
class TestOptimizerHyperparameter(unittest.TestCase):
def setUp(self):
self.target = chainer.Link()
with self.target.init_scope():
self.target.w = chainer.Parameter()
def create(self, *args, **kwargs):
self.optimizer = self.optimizer_impl(*args, **kwargs)
self.optimizer.setup(self.target)
def get_hyperparam(self, name):
return getattr(self.target.w.update_rule.hyperparam, name)
def test_hyperparams(self):
self.create()
default = self.optimizer.hyperparam.get_dict()
for name, default_value in six.iteritems(default):
self.create()
self.assertEqual(self.get_hyperparam(name), default_value)
new_value = default_value + 0.1
self.create(**{name: new_value})
self.assertEqual(self.get_hyperparam(name), new_value)
class WeightSaveHook(object):
name = 'WeightSaveHook'
call_for_each_param = True
def __init__(self):
self.value = None
def __call__(self, rule, param):
p, g = param.data, param.grad
if p is None or g is None:
return
self.value = np.copy(p)
class SimpleChain(chainer.Chain):
def __init__(self):
super(SimpleChain, self).__init__()
with self.init_scope():
self.w = chainer.Parameter(42, (), 'w')
def __call__(self, x):
return (x - self.w) ** 2
@_parameterize_optimizers
class TestOptimizerHooks(unittest.TestCase):
def setUp(self):
self.target = SimpleChain()
def create(self, *args, **kwargs):
self.optimizer = self.optimizer_impl(*args, **kwargs)
self.optimizer.setup(self.target)
def get_hyperparam(self, name):
return getattr(self.target.w.update_rule.hyperparam, name)
def test_hooks(self):
w_pre = np.copy(self.target.w.data)
h_pre = WeightSaveHook()
h_post = WeightSaveHook()
self.create()
self.optimizer.add_hook(h_pre, timing='pre')
self.optimizer.add_hook(h_post, name='WeightSaveHookPost',
timing='post')
x = chainer.Variable(np.array(5., dtype=np.float32))
self.optimizer.update(self.target, x)
w_post = np.copy(self.target.w.data)
self.assertEqual(w_pre, h_pre.value)
self.assertEqual(w_post, h_post.value)
self.assertNotEqual(h_pre.value, h_post.value)
def test_hooks_auto(self):
w_pre = np.copy(self.target.w.data)
h_pre = WeightSaveHook()
h_pre.timing = 'pre'
h_post = WeightSaveHook()
h_post.timing = 'post'
self.create()
self.optimizer.add_hook(h_pre, timing='auto')
self.optimizer.add_hook(h_post, name='WeightSaveHookPost',
timing='auto')
x = chainer.Variable(np.array(5., dtype=np.float32))
self.optimizer.update(self.target, x)
w_post = np.copy(self.target.w.data)
self.assertEqual(w_pre, h_pre.value)
self.assertEqual(w_post, h_post.value)
self.assertNotEqual(h_pre.value, h_post.value)
@_parameterize_optimizers
class TestOptimizerLossScaling(unittest.TestCase):
def setUp(self):
self.target = SimpleChain()
def create(self, *args, **kwargs):
self.optimizer = self.optimizer_impl(*args, **kwargs)
self.optimizer.setup(self.target)
def test_invalid_configs(self):
self.create()
with self.assertRaises(ValueError):
self.optimizer.loss_scaling(interval=0)
with self.assertRaises(ValueError):
self.optimizer.loss_scaling(scale=-1)
@testing.backend.inject_backend_tests(
None,
[
# CPU
{},
# Intel
{'use_ideep': True},
# CUDA
{'use_cuda': True, 'cuda_device': 0},
# ChainerX
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
]
)
class TestAdamW(unittest.TestCase):
def test_adam_w(self, backend_config):
xp = backend_config.xp
device = backend_config.device
link = chainer.Link(x=(1,))
link.to_device(device)
opt = optimizers.Adam(eta=0.5, weight_decay_rate=0.1)
opt.setup(link)
link.x.data.fill(1)
link.x.grad = device.send(xp.ones_like(link.x.data))
opt.update()
# compare against the value computed with v5 impl
testing.assert_allclose(link.x.data, np.array([0.9495]),
atol=1e-7, rtol=1e-7)
@testing.backend.inject_backend_tests(
None,
[
# CPU
{},
# Intel
{'use_ideep': True},
# CUDA
{'use_cuda': True, 'cuda_device': 0},
# ChainerX
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
]
)
class TestAMSGrad(unittest.TestCase):
def test_amsgrad(self, backend_config):
device = backend_config.device
link = chainer.Link(x=(4,))
x = link.x
x.data.fill(0)
link.to_device(device)
opt = optimizers.Adam(alpha=0.01, beta2=0.7, amsgrad=True)
opt.setup(link)
x.grad = device.send(np.array([1, -1, 10, -10], np.float32))
opt.update()
testing.assert_allclose(
x.update_rule.state['v'],
[0.3, 0.3, 30, 30],
atol=1e-7, rtol=1e-7)
testing.assert_allclose(
x.data,
[-0.01, 0.01, -0.01, 0.01],
atol=1e-7, rtol=1e-7)
x.grad = device.send(np.array([-10, -10, -1, -1], np.float32))
opt.update()
testing.assert_allclose(
x.update_rule.state['v'],
[30.21, 30.21, 21.3, 21.3],
atol=1e-7, rtol=1e-7)
testing.assert_allclose(
x.update_rule.state['vhat'],
[30.21, 30.21, 30, 30],
atol=1e-7, rtol=1e-7)
testing.assert_allclose(
x.data,
# result with NumPy
[-0.00377703, 0.01745388, -0.01548985, 0.01686232],
atol=1e-7, rtol=1e-7)
testing.run_module(__name__, __file__)
|
py | b40b8159a62ff7d6e4f37bd99e3fba1851c40f23 | import datetime
import logging
import daiquiri
daiquiri.setup(
level=logging.DEBUG,
outputs=(
daiquiri.output.File("errors.log", level=logging.ERROR),
daiquiri.output.TimedRotatingFile(
"everything.log", level=logging.DEBUG, interval=datetime.timedelta(hours=1)
),
),
)
logger = daiquiri.getLogger(__name__)
logger.info("only to rotating file logger")
logger.error("both log files, including errors only")
|
py | b40b82611951f7e81184f221480770fa0fb4cfc9 | """https://open.kattis.com/problems/hangingout"""
L, x = list(map(int, input().split()))
deny, total = 0, 0
for _ in range(x):
move = input().split()
if move[0] == "enter":
if total + int(move[1]) > L:
deny += 1
else:
total += int(move[1])
elif move[0] == "leave":
total -= int(move[1])
print(deny)
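# Worked example (added, illustrative): with the input "2 4" followed by the events
#   enter 1 / enter 1 / enter 1 / leave 2
# the first two groups fit (total 2), the third request is denied, so the output is 1.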
|
py | b40b82729cdfef649cdc0a7e8064d6f011e495a4 | '''
Created on Feb 7, 2016
@author: ACER
'''
"""books URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from rest_framework.routers import DefaultRouter
from .views import BookViewSet
router = DefaultRouter()
router.register(r'books', BookViewSet, 'book_viewset')
urlpatterns = [
#url(r'^/', 'books.urls'),
]
urlpatterns += router.urls |
py | b40b843167add2ec5a7650642d650fe5f7bc976a | from celery import Celery
from config import SQS_AWS_ACCESS_KEY_ID, SQS_AWS_SECRET_ACCESS_KEY, SQS_REGION, SQS_QUEUE
BROKER_USER = SQS_AWS_ACCESS_KEY_ID
BROKER_PASSWORD = SQS_AWS_SECRET_ACCESS_KEY
BROKER_TRANSPORT = 'sqs'
# note: if your AWS secret key contains '/', embedding it unquoted in the broker URL is unsafe
BROKER_URL = 'sqs://%s:%s@' % (BROKER_USER, BROKER_PASSWORD)
app = Celery('tasks', broker=BROKER_URL)
app.conf.BROKER_TRANSPORT_OPTIONS = {
'region': SQS_REGION,
'polling_interval': 10, #save cpu and $
}
"""
app.conf.CELERY_DEFAULT_QUEUE = SQS_QUEUE
app.conf.CELERY_QUEUES = {
app.conf.CELERY_DEFAULT_QUEUE: {
'exchange': app.conf.CELERY_DEFAULT_QUEUE,
'binding_key': app.conf.CELERY_DEFAULT_QUEUE,
}
}
"""
@app.task
def add(x, y):
print('==================', x, y)
return x + y
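if __name__ == "__main__":
    # Illustrative producer call (added): enqueues one task on the SQS-backed broker.
    # It assumes the AWS credentials imported from config are valid and the queue exists.
    result = add.delay(2, 3)
    print(result.id)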
|
py | b40b846336436410a6b2c41da102b135f512b837 | from pyecharts import options as opts
from pyecharts.globals import GeoType
from pyecharts.charts import Line3D, Scatter3D, Geo
import csv
import time
def read_csv(filename):
    """Read a csv file."""
with open(filename, newline='') as csvfile:
items = csv.reader(csvfile)
next(items)  # skip the header row
dd = []
for item in items:
dd.append(item)
return dd
def write_result(filename, result):
    """Write result to a csv file."""
outfile = open(filename, 'w', newline='', encoding='UTF-8')
writer = csv.writer(outfile)
writer.writerow(('timestamp', 'imsi', 'lac_id', 'cell_id', 'longitude', 'latitude'))
for i in range(0, len(result)):
writer.writerow((result[i][0], result[i][1], result[i][2], result[i][3], result[i][4], result[i][5]))
outfile.close()
def clean_data():
    """Data cleaning."""
    # read the raw data
original_file_name = 'E:/demon/data/服创大赛-原始数据.csv'
original = read_csv(original_file_name)
print(len(original))
# keep only the first four columns
next0 = []
for i in range(0, len(original)):
next0.append([original[i][0], original[i][1], original[i][2], original[i][3]])
print(next0[i])
# drop entries that are empty or whose imsi contains special characters ('#', '*', '^')
next1 = []
for i in range(0, len(next0)):
if len(next0[i][1]) == 0:
continue
elif len(next0[i][2]) == 0:
continue
elif len(next0[i][3]) == 0:
continue
elif next0[i][1].find("#") >= 0:
continue
elif next0[i][1].find("*") >= 0:
continue
elif next0[i][1].find("^") >= 0:
continue
else:
next1.append(next0[i])
print(len(next1))
# print the contents of next1 (debugging only)
# for i in range(0, len(next1)):
# print(i, next1[i])
# convert the millisecond timestamp to a formatted time string
for i in range(0, len(next1)):
temp = int(next1[i][0])
timestamp = float(temp / 1000)
timearray = time.localtime(timestamp)
next1[i][0] = time.strftime("%Y%m%d%H%M%S", timearray)
# drop records that are not from 20181003
next2 = []
for i in range(0, len(next1)):
if next1[i][0].find("20181003") >= 0:
next2.append(next1[i])
else:
continue
print(len(next2))
# drop entries whose longitude/latitude is empty after joining the two data sources
# read the base-station data
base_data_file = 'E:/demon/data/服创大赛-基站经纬度数据.csv'
locate = read_csv(base_data_file)
next3 = []
for i in range(0, len(next2)):
temp = next2[i][2] + "-" + next2[i][3]
for j in range(0, len(locate)):
if locate[j][2].find(temp) >= 0:
next3.append((next2[i][0], next2[i][1], next2[i][2], next2[i][3], locate[j][0], locate[j][1]))
break
else:
continue
print(len(next3))
# sort the records
result = sorted(next3)
# print the result
for i in range(0, len(result)):
print(i, result[i][0], result[i][1], result[i][2], result[i][3], result[i][4], result[i][5])
# write the result to a file
outfilename = 'E:/demon/data/newdata/newData.csv'
write_result(outfilename, result)
# clean_data()
def count_everyhour_num():
    """Count the number of records in each hour."""
newdatafile = 'E:/demon/data/newdata/newData.csv'
item = read_csv(newdatafile)
print(len(item))
hour = [0 for x in range(0, 24)]
print(len(hour))
for i in range(0, len(item)):
if item[i][0][8] == '0':
temp = int(item[i][0][9])
hour[temp] += 1
if item[i][0][8] == '1':
temp = int(item[i][0][9])
hour[temp + 10] += 1
if item[i][0][8] == '2':
temp = int(item[i][0][9])
hour[temp + 20] += 1
print(hour)
# write the output file
outfilename = 'E:/demon/data/newdata/everyHour.csv'
outfile = open(outfilename, 'w', newline='', encoding='UTF-8')
writer = csv.writer(outfile)
writer.writerow(('hour', 'num'))
for i in range(0, len(hour)):
s = '{}--{}'.format(i, i + 1)
writer.writerow((s, hour[i]))
outfile.close()
def every_everypeople_num():
    """Count the number of records for each person."""
newdatafile = 'E:/demon/data/newdata/newData.csv'
item = read_csv(newdatafile)
people = []
data = []
for i in range(0, len(item)):
data.append(item[i][1])
if item[i][1] not in people:
people.append(item[i][1])
res_data = []
for i in people:
res_data.append(data.count(i))
print(len(res_data))
for i in range(0, len(people)):
people[i] = (people[i], res_data[i])
for i in range(0, len(people)):
print(i, people[i])
print(len(people))
people_item = []
for j in range(0, len(people)):
for i in range(0, len(item)):
if item[i][1] == people[j][0]:
people_item.append((item[i][1], item[i][0], item[i][4], item[i][5]))
print(len(people_item))
for i in range(0, len(people_item)):
print(i, people_item[i])
# write each person's record count to a file
every_people_num_filename = 'E:/demon/data/newdata/people_num.csv'
every_people_num = open(every_people_num_filename, 'w', newline='', encoding='UTF-8')
writer = csv.writer(every_people_num)
writer.writerow(('people_id', 'num'))
for i in range(0, len(people)):
writer.writerow(people[i])
every_people_num.close()
# write each person's records to a file (ordered by person and time)
every_people_item_filename = 'E:/demon/data/newdata/people_item.csv'
every_people_item = open(every_people_item_filename, 'w', newline='', encoding='UTF-8')
writer = csv.writer(every_people_item)
writer.writerow(('people_id', 'timestamp', 'longitude', 'latitude'))
for i in range(0, len(people_item)):
writer.writerow(people_item[i])
every_people_item.close()
def geo(base_location, staticdata, people_item):
    """Draw the map and plot the points."""
city = '沈阳'
g = Geo()
g.add_schema(maptype=city)
# define a name for each coordinate and register it with add_coordinate(name, lng, lat)
# add the base stations to the coordinate registry
for i in range(0, len(base_location)):
g.add_coordinate(base_location[i][2], base_location[i][0], base_location[i][1])
# add the static travel-mode data (metro and bus) to the coordinate registry
for i in range(0, len(staticdata)):
g.add_coordinate(staticdata[i][3], staticdata[i][0], staticdata[i][1])
# add the per-person records to the coordinate registry
for i in range(0, len(people_item)):
g.add_coordinate(people_item[i][1], people_item[i][2], people_item[i][3])
# build the (name, label) data pairs
data_pair = []
# base stations
for i in range(0, len(base_location)):
data_pair.append((base_location[i][2], '基站'))
for i in range(0, len(staticdata)):
# metro
if staticdata[i][2] == '地铁':
data_pair.append((staticdata[i][3], staticdata[i][2] + staticdata[i][4] + '号线'))
# bus
elif staticdata[i][2] == '公交':
data_pair.append((staticdata[i][3], '公交'))
# per-person records
for i in range(0, len(people_item)):
data_pair.append((people_item[i][1], '人'))
# add the data to the map
g.add('', data_pair, type_=GeoType.EFFECT_SCATTER, symbol_size=6)
# set the style
g.set_series_opts(label_opts=opts.LabelOpts(is_show=False))
# custom segments; the color values can be picked with a color picker
pieces = [
{'min': '基站', 'max': '基站', 'label': '基站', 'color': '#D94E5D'},
{'min': '地铁1号线', 'max': '地铁1号线', 'label': '地铁1号线', 'color': '#87CEFA'},
{'min': '地铁2号线', 'max': '地铁2号线', 'label': '地铁2号线', 'color': '#DA70D6'},
{'min': '地铁9号线', 'max': '地铁9号线', 'label': '地铁9号线', 'color': '#32CD32'},
{'min': '公交', 'max': '公交', 'label': '公交', 'color': '#6495ED'},
{'min': '人', 'max': '人', 'label': '人', 'color': '#000000'}
]
# is_piecewise: the custom segmentation only takes effect when set to True
g.set_global_opts(
visualmap_opts=opts.VisualMapOpts(is_piecewise=True, pieces=pieces),
title_opts=opts.TitleOpts(title="{}-站点分布".format(city)),
)
return g
def get_people_item():
people_item_file = 'E:/demon/data/newdata/people_item.csv'
people_item = read_csv(people_item_file)
return people_item
def get_user_id_list(people_item):
user_id_list = []
for i in range(0, len(people_item)):
if people_item[i][0] not in user_id_list:
user_id_list.append(people_item[i][0])
for i in range(0, len(user_id_list)):
user_id_list[i] = (i, user_id_list[i])
return user_id_list
def drow_effectscatter():
    """Call geo() to draw the scatter map."""
# read the base-station data
base_location_file = 'E:/demon/data/服创大赛-基站经纬度数据.csv'
base_location = read_csv(base_location_file)
print(len(base_location))
# read the static travel-mode data
static_data_file = 'E:/demon/data/服创大赛-出行方式静态数据.csv'
static_data = read_csv(static_data_file)
print(len(static_data))
# read the per-person records
people_item = get_people_item()
print(len(people_item))
g = geo(base_location, static_data, people_item)
# render to html, which can be opened directly in a browser
g.render('ShenYang.html')
def get_color():
    """Compute the color values."""
    # split into 114 colors
hexnum = []
for i in range(0, 114):
hexnum.append(str(hex(i * 0x243f6)))
# six-digit hexadecimal color values
color = []
for i in range(0, len(hexnum)):
if i < 8:
if i == 0:
color.append('#000000')
else:
temp = '#0'
for j in range(2, len(hexnum[i])):
temp += hexnum[i][j]
color.append(temp)
else:
temp = '#'
for j in range(2, len(hexnum[i])):
temp += hexnum[i][j]
color.append(temp)
return color
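# Example of the output (added, illustrative): get_color()[:3] == ['#000000', '#0243f6', '#0487ec'],
# i.e. 114 hex colors spaced by a constant step of 0x243f6, starting from black.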
def get_pieces(color):
    """Build the legend color pieces for the 3D scatter and 3D line charts."""
pieces = []
for i in range(0, len(color)):
pieces.append({'min': i, 'max': i, 'label': i, 'color': color[i]})
return pieces
def get_data():
people_item = get_people_item()
user_id_list = get_user_id_list(people_item)
data = []
for i in range(0, len(people_item)):
x = float(people_item[i][2])
y = float(people_item[i][3])
temp = []
for j in range(8, len(people_item[i][1])):
temp.append(int(people_item[i][1][j]))
z = (temp[0] * 10 + temp[1]) * 3600 + (temp[2] * 10 + temp[3]) * 60 + temp[4] * 10 + temp[5]
for j in range(0, len(user_id_list)):
if people_item[i][0] == user_id_list[j][1]:
user_id = user_id_list[j][0]
data.append([x, y, z, user_id])
return data
def get_people(user_id_list, data):
people = []
for i in range(0, len(user_id_list)):
people.append([])
for j in range(0, len(data)):
if data[j][3] == i:
people[i].append(data[j])
return people
def add(i):
    """Return the source string for the i-th of the 114 generated .add() calls."""
return ".add(user_id_list[" + str(i) + "][0], \n" \
"\tpeople[" + str(i) + "], \n" \
"\txaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'), \n" \
"\tyaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'), \n" \
"\tzaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'), \n" \
"\tgrid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100)," \
"\n)"
def print_add():
    """Print the strings produced by add()."""
people_item = get_people_item()
user_id_list = get_user_id_list(people_item)
for i in range(0, len(user_id_list)):
print(add(i))
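# Added sketch (not in the original script): builds the same chart as line3d()
# below, but adds the 114 per-user series in a loop instead of the long
# generated .add() chain. Behavior is assumed to be equivalent.
def line3d_looped() -> Line3D:
    people_item = get_people_item()
    user_id_list = get_user_id_list(people_item)
    people = get_people(user_id_list, get_data())
    pieces = get_pieces(get_color())
    c = Line3D()
    axis_opts = dict(type_='value', min_='dataMin', max_='dataMax')
    for i in range(len(user_id_list)):
        # one 3D line series per user, same axis and grid settings as line3d()
        c.add(
            user_id_list[i][0],
            people[i],
            xaxis3d_opts=opts.Axis3DOpts(**axis_opts),
            yaxis3d_opts=opts.Axis3DOpts(**axis_opts),
            zaxis3d_opts=opts.Axis3DOpts(**axis_opts),
            grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
        )
    c.set_global_opts(
        visualmap_opts=opts.VisualMapOpts(is_piecewise=True, pieces=pieces),
        title_opts=opts.TitleOpts(title="3D折线图"),
    )
    return c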
def line3d() -> Line3D:
people_item = get_people_item()
user_id_list = get_user_id_list(people_item)
data = get_data()
people = get_people(user_id_list, data)
color = get_color()
pieces = get_pieces(color)
c = (
Line3D()
.add(user_id_list[0][0],
people[0],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[1][0],
people[1],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[2][0],
people[2],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[3][0],
people[3],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[4][0],
people[4],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[5][0],
people[5],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[6][0],
people[6],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[7][0],
people[7],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[8][0],
people[8],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[9][0],
people[9],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[10][0],
people[10],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[11][0],
people[11],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[12][0],
people[12],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[13][0],
people[13],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[14][0],
people[14],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[15][0],
people[15],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[16][0],
people[16],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[17][0],
people[17],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[18][0],
people[18],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[19][0],
people[19],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[20][0],
people[20],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[21][0],
people[21],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[22][0],
people[22],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[23][0],
people[23],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[24][0],
people[24],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[25][0],
people[25],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[26][0],
people[26],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[27][0],
people[27],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[28][0],
people[28],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[29][0],
people[29],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[30][0],
people[30],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[31][0],
people[31],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[32][0],
people[32],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[33][0],
people[33],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[34][0],
people[34],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[35][0],
people[35],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[36][0],
people[36],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[37][0],
people[37],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[38][0],
people[38],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[39][0],
people[39],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[40][0],
people[40],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[41][0],
people[41],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[42][0],
people[42],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[43][0],
people[43],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[44][0],
people[44],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[45][0],
people[45],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[46][0],
people[46],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[47][0],
people[47],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[48][0],
people[48],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[49][0],
people[49],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[50][0],
people[50],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[51][0],
people[51],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[52][0],
people[52],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[53][0],
people[53],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[54][0],
people[54],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[55][0],
people[55],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[56][0],
people[56],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[57][0],
people[57],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[58][0],
people[58],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[59][0],
people[59],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[60][0],
people[60],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[61][0],
people[61],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[62][0],
people[62],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[63][0],
people[63],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[64][0],
people[64],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[65][0],
people[65],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[66][0],
people[66],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[67][0],
people[67],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[68][0],
people[68],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[69][0],
people[69],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[70][0],
people[70],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[71][0],
people[71],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[72][0],
people[72],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[73][0],
people[73],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[74][0],
people[74],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[75][0],
people[75],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[76][0],
people[76],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[77][0],
people[77],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[78][0],
people[78],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[79][0],
people[79],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[80][0],
people[80],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[81][0],
people[81],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[82][0],
people[82],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[83][0],
people[83],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[84][0],
people[84],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[85][0],
people[85],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[86][0],
people[86],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[87][0],
people[87],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[88][0],
people[88],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[89][0],
people[89],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[90][0],
people[90],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[91][0],
people[91],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[92][0],
people[92],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[93][0],
people[93],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[94][0],
people[94],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[95][0],
people[95],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[96][0],
people[96],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[97][0],
people[97],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[98][0],
people[98],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[99][0],
people[99],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[100][0],
people[100],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[101][0],
people[101],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[102][0],
people[102],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[103][0],
people[103],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[104][0],
people[104],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[105][0],
people[105],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[106][0],
people[106],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[107][0],
people[107],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[108][0],
people[108],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[109][0],
people[109],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[110][0],
people[110],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[111][0],
people[111],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[112][0],
people[112],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[113][0],
people[113],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.set_global_opts(
visualmap_opts=opts.VisualMapOpts(is_piecewise=True, pieces=pieces),
        title_opts=opts.TitleOpts(title="3D Line Chart"),
)
)
return c
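# The long run of near-identical .add() calls above can also be generated in a
# loop. The function below is an illustrative sketch only: it assumes the same
# module-level user_id_list/people data, the get_color()/get_pieces() helpers,
# and the pyecharts Line3D/opts imports that the rest of this file uses.
def line3d_looped() -> Line3D:
    pieces = get_pieces(get_color())
    axis_opts = dict(
        xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
        yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
        zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
        grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
    )
    c = Line3D()
    # One series per user instead of one hand-written .add() call each.
    for user, track in zip(user_id_list, people):
        c.add(user[0], track, **axis_opts)
    c.set_global_opts(
        visualmap_opts=opts.VisualMapOpts(is_piecewise=True, pieces=pieces),
        title_opts=opts.TitleOpts(title="3D Line Chart"),
    )
    return c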
def drow_line3d():
"""调用line3d()绘制3D折线图"""
g = line3d()
    # Render to HTML; the file can be opened directly in a browser.
g.render('line3D.html')
def scatter3d() -> Scatter3D:
data = get_data()
color = get_color()
pieces = get_pieces(color)
c = (
Scatter3D()
.add("",
data,
xaxis3d_opts=opts.Axis3DOpts(type_="value", min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_="value", min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_="value", min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.set_global_opts(
visualmap_opts=opts.VisualMapOpts(is_piecewise=True, pieces=pieces),
        title_opts=opts.TitleOpts("3D Scatter Chart"),
)
)
return c
def drow_scatter3d():
"""调用scatter3d()绘制3D散点图"""
g = scatter3d()
    # Render to HTML; the file can be opened directly in a browser.
g.render('scatter3D.html')
if __name__=="__main__":
drow_effectscatter()
drow_line3d()
drow_scatter3d()
|
py | b40b84951f3d8e7cf0a41e629e6f67c403d7a0c2 | #!/usr/bin/env python
import paho.mqtt.client as mqtt
import json
import time
import sys
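# Usage sketch (assumes an MQTT broker such as mosquitto listening on localhost):
#   python <this_script>.py payload.json
# The JSON document named on the command line is published to the "uwb" topic
# each time the client (re)connects; the client disconnects as soon as it
# receives its own message back, and the loop in main() then reconnects.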
# Define Variables
MQTT_HOST = "localhost"
MQTT_PORT = 1883
MQTT_KEEPALIVE_INTERVAL = 45
MQTT_TOPIC = "uwb"
# JSON file input parsing
file_path = sys.argv[1]
with open(file_path) as json_file:
json_data = json.load(json_file)
MQTT_MSG=json.dumps(json_data)
# Define on_publish event function
def on_publish(client, userdata, mid):
print("Message Publishing...")
def on_connect(client, userdata, flags, rc):
client.subscribe(MQTT_TOPIC)
client.publish(MQTT_TOPIC, MQTT_MSG)
def on_message(client, userdata, msg):
# print(msg.topic)
# print(msg.payload)
    payload = json.loads(msg.payload)  # msg.payload is a JSON string; parse it back into a Python object
client.disconnect() # Got message then disconnect
def main():
# Initiate MQTT Client
mqttc = mqtt.Client()
# Register publish callback function
mqttc.on_publish = on_publish
mqttc.on_connect = on_connect
mqttc.on_message = on_message
while True:
# Connect with MQTT Broker
mqttc.connect(MQTT_HOST, MQTT_PORT, MQTT_KEEPALIVE_INTERVAL)
# Loop forever
mqttc.loop_forever()
time.sleep(0.1)
if __name__ == '__main__':
main() |
py | b40b850b0b0e76f0f2f7524ce776f95fb59da05f | # coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from onshape_client.oas.models import bt_property_value_param
except ImportError:
bt_property_value_param = sys.modules[
"onshape_client.oas.models.bt_property_value_param"
]
class BTReleasePackageItemParams(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {}
validations = {}
additional_properties_type = None
@staticmethod
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self, this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"configuration": (str,), # noqa: E501
"document_id": (str,), # noqa: E501
"element_id": (str,), # noqa: E501
"href": (str,), # noqa: E501
"id": (str,), # noqa: E501
"is_included": (bool,), # noqa: E501
"part_id": (str,), # noqa: E501
"part_number": (str,), # noqa: E501
"properties": (
[bt_property_value_param.BTPropertyValueParam],
), # noqa: E501
"version_id": (str,), # noqa: E501
"workspace_id": (str,), # noqa: E501
}
@staticmethod
def discriminator():
return None
attribute_map = {
"configuration": "configuration", # noqa: E501
"document_id": "documentId", # noqa: E501
"element_id": "elementId", # noqa: E501
"href": "href", # noqa: E501
"id": "id", # noqa: E501
"is_included": "isIncluded", # noqa: E501
"part_id": "partId", # noqa: E501
"part_number": "partNumber", # noqa: E501
"properties": "properties", # noqa: E501
"version_id": "versionId", # noqa: E501
"workspace_id": "workspaceId", # noqa: E501
}
@staticmethod
def _composed_schemas():
return None
required_properties = set(
[
"_data_store",
"_check_type",
"_from_server",
"_path_to_item",
"_configuration",
]
)
def __init__(
self,
_check_type=True,
_from_server=False,
_path_to_item=(),
_configuration=None,
**kwargs
): # noqa: E501
"""bt_release_package_item_params.BTReleasePackageItemParams - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
configuration (str): [optional] # noqa: E501
document_id (str): [optional] # noqa: E501
element_id (str): [optional] # noqa: E501
href (str): [optional] # noqa: E501
id (str): [optional] # noqa: E501
is_included (bool): [optional] # noqa: E501
part_id (str): [optional] # noqa: E501
part_number (str): [optional] # noqa: E501
properties ([bt_property_value_param.BTPropertyValueParam]): [optional] # noqa: E501
version_id (str): [optional] # noqa: E501
workspace_id (str): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
for var_name, var_value in six.iteritems(kwargs):
if (
var_name not in self.attribute_map
and self._configuration is not None
and self._configuration.discard_unknown_keys
and self.additional_properties_type is None
):
# discard variable.
continue
setattr(self, var_name, var_value)
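# Example construction (an illustrative sketch only; the ID strings below are
# placeholders, not real Onshape identifiers):
#
#     item = BTReleasePackageItemParams(
#         document_id="<documentId>",
#         element_id="<elementId>",
#         part_id="<partId>",
#         is_included=True,
#     )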
|
py | b40b85365913f99e4d7952a45b0ca1fd8fcca272 | from django.urls import path
from django.views.generic import TemplateView
from . import views
app_name = "sagii_rh"
urlpatterns = [
path(
"",
TemplateView.as_view(template_name="recursos_humanos/home.html"),
name="home",
),
path("servidor/", views.ServidorPublicoListView.as_view(), name="servidor_list"),
]
|
py | b40b874611db37c7d862aecb5443ae9c267f7b29 | class dotRebarHookData_t(object):
# no doc
Angle=None
Length=None
Radius=None
Shape=None
|
py | b40b87cf5492ebd9cecf384445e0cd037b9d8c5b |
import numpy as np
def pg(obs, num_particles=100, num_mcmc_iter=2000):
T = len(obs)
X = np.zeros([num_mcmc_iter, T])
params = [] # list of SV_params
# YOUR CODE
return X, params
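# ---------------------------------------------------------------------------
# Illustrative sketch only -- not the intended solution to the exercise above.
# It shows one possible shape of the conditional SMC sweep that a particle
# Gibbs sampler alternates with a parameter update. The SVParams namedtuple,
# the AR(1) latent dynamics and the N(0, exp(x_t)) observation model are
# assumptions about the intended stochastic-volatility setup, not taken from
# the original file (which only references an undefined SV_params type).
# The sketch relies on the numpy import at the top of this file.
# ---------------------------------------------------------------------------
from collections import namedtuple
SVParams = namedtuple('SVParams', ['mu', 'phi', 'sigma'])
def _csmc_sweep(obs, x_ref, theta, num_particles):
    """One conditional SMC sweep; particle index -1 is pinned to x_ref."""
    T, N = len(obs), num_particles
    x = np.zeros((N, T))
    anc = np.zeros((N, T), dtype=int)
    w = np.full(N, 1.0 / N)
    for t in range(T):
        if t == 0:
            # Initial particles drawn around mu (a crude choice for the sketch).
            x[:, 0] = theta.mu + theta.sigma * np.random.randn(N)
        else:
            a = np.random.choice(N, size=N, p=w)   # multinomial resampling
            x[:, t] = (theta.mu + theta.phi * (x[a, t - 1] - theta.mu)
                       + theta.sigma * np.random.randn(N))
            anc[:, t] = a
        # Conditioning step: keep the reference trajectory alive.
        x[-1, t] = x_ref[t]
        anc[-1, t] = N - 1
        # Bootstrap weights for y_t ~ N(0, exp(x_t)), up to an additive constant.
        logw = -0.5 * (x[:, t] + obs[t] ** 2 * np.exp(-x[:, t]))
        w = np.exp(logw - logw.max())
        w /= w.sum()
    # Trace back a single trajectory, chosen from the final weights.
    traj = np.empty(T)
    k = np.random.choice(N, p=w)
    for t in reversed(range(T)):
        traj[t] = x[k, t]
        k = anc[k, t]
    return traj
def _pg_sketch(obs, theta, num_particles=100, num_mcmc_iter=2000):
    """Minimal particle Gibbs loop with the parameter update omitted
    (theta is held fixed), shown only to illustrate the control flow."""
    T = len(obs)
    X = np.zeros((num_mcmc_iter, T))
    x_ref = np.zeros(T)                          # arbitrary initial reference
    for i in range(num_mcmc_iter):
        x_ref = _csmc_sweep(obs, x_ref, theta, num_particles)
        X[i] = x_ref
        # A full sampler would also draw theta given (x_ref, obs) here and
        # record it, e.g. params.append(theta).
    return X
# Example call: X = _pg_sketch(y, SVParams(mu=0.0, phi=0.95, sigma=0.2))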
|
py | b40b87e30d89fb3d05e9375097cd2cc0274d21a5 | #!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the getchaintips RPC.
- introduce a network split
- work on chains of different lengths
- join the network together again
- verify that getchaintips now returns two chain tips.
"""
from test_framework.test_framework import BitcoinpaythroughTestFramework
from test_framework.util import assert_equal
class GetChainTipsTest (BitcoinpaythroughTestFramework):
def set_test_params(self):
self.num_nodes = 4
def run_test(self):
tips = self.nodes[0].getchaintips()
assert_equal(len(tips), 1)
assert_equal(tips[0]['branchlen'], 0)
assert_equal(tips[0]['height'], 200)
assert_equal(tips[0]['status'], 'active')
# Split the network and build two chains of different lengths.
self.split_network()
self.nodes[0].generatetoaddress(10, self.nodes[0].get_deterministic_priv_key().address)
self.nodes[2].generatetoaddress(20, self.nodes[2].get_deterministic_priv_key().address)
self.sync_all(self.nodes[:2])
self.sync_all(self.nodes[2:])
tips = self.nodes[1].getchaintips ()
assert_equal (len (tips), 1)
shortTip = tips[0]
assert_equal (shortTip['branchlen'], 0)
assert_equal (shortTip['height'], 210)
assert_equal (tips[0]['status'], 'active')
tips = self.nodes[3].getchaintips ()
assert_equal (len (tips), 1)
longTip = tips[0]
assert_equal (longTip['branchlen'], 0)
assert_equal (longTip['height'], 220)
assert_equal (tips[0]['status'], 'active')
# Join the network halves and check that we now have two tips
# (at least at the nodes that previously had the short chain).
self.join_network ()
tips = self.nodes[0].getchaintips ()
assert_equal (len (tips), 2)
assert_equal (tips[0], longTip)
assert_equal (tips[1]['branchlen'], 10)
assert_equal (tips[1]['status'], 'valid-fork')
tips[1]['branchlen'] = 0
tips[1]['status'] = 'active'
assert_equal (tips[1], shortTip)
if __name__ == '__main__':
GetChainTipsTest ().main ()
|
py | b40b8a16434618f69e409cd9e4151a4911c0d29c | import spira
import numpy as np
from spira.lgm.route.manhattan import __Manhattan__
from spira.lgm.route.manhattan90 import RouteManhattan90
from spira.lgm.route.manhattan180 import RouteManhattan180
class RouteManhattan(__Manhattan__):
def validate_parameters(self):
return True
def create_elementals(self, elems):
# p1 = [self.port1.midpoint[0], self.port1.midpoint[1]]
# p2 = [self.port2.midpoint[0], self.port2.midpoint[1]]
# if p2[1] == p1[1] or p2[0] == p1[0]:
# raise ValueError('Error - ports must be at different x AND y values.')
angle_diff = self.port1.orientation - self.port2.orientation
angle = np.round(np.abs(np.mod(angle_diff, 360)), 3)
if (angle == 180) or (angle == 0):
R1 = RouteManhattan180(
port1=self.port1,
port2=self.port2,
radius=self.radius,
length=self.length,
gdslayer=self.gdslayer
)
else:
R1 = RouteManhattan90(
port1=self.port1,
port2=self.port2,
radius=self.radius,
length=self.length,
gdslayer=self.gdslayer
)
for p in R1.ports:
self.ports += p
# for e in R1.elementals:
# for e in R1.flat_copy():
for e in R1.flatten():
elems += e
return elems
class TestManhattan(spira.Cell):
def test_q1_90(self):
p1 = spira.Term(name='P1', midpoint=(0,0), orientation=0, width=2)
p2 = spira.Term(name='P2', midpoint=(40,20), orientation=90, width=1.5)
rm = RouteManhattan(port1=p1, port2=p2, radius=8)
return spira.SRef(rm, midpoint=(50,50))
def test_q1_180(self):
p1 = spira.Term(name='P1', midpoint=(0,0), orientation=0, width=2)
p2 = spira.Term(name='P2', midpoint=(40,20), orientation=180, width=1.5)
rm = RouteManhattan(port1=p1, port2=p2, radius=8)
return spira.SRef(rm, midpoint=(0,50))
def test_q1_180_90(self):
p1 = spira.Term(name='P1', midpoint=(0,0), orientation=-90, width=2)
p2 = spira.Term(name='P2', midpoint=(40,20), orientation=90, width=1.5)
rm = RouteManhattan(port1=p1, port2=p2, radius=8)
return spira.SRef(rm, midpoint=(0,50))
def test_q2_90(self):
p1 = spira.Term(name='P1', midpoint=(0,0), orientation=0, width=2)
p2 = spira.Term(name='P2', midpoint=(-40,20), orientation=-90, width=1.5)
rm = RouteManhattan(port1=p1, port2=p2, radius=8)
return spira.SRef(rm, midpoint=(-50,50))
def test_q2_180(self):
p1 = spira.Term(name='P1', midpoint=(0,0), orientation=0, width=2)
p2 = spira.Term(name='P2', midpoint=(-40,20), orientation=180, width=1.5)
rm = RouteManhattan(port1=p1, port2=p2, radius=8)
return spira.SRef(rm, midpoint=(-100,50))
def test_q3_90(self):
p1 = spira.Term(name='P1', midpoint=(0,0), orientation=180, width=2)
p2 = spira.Term(name='P2', midpoint=(-40,-20), orientation=-90, width=2)
rm = RouteManhattan(port1=p1, port2=p2, radius=8)
return spira.SRef(rm, midpoint=(-50,-50))
def test_q3_180(self):
p1 = spira.Term(name='P1', midpoint=(0,0), orientation=180, width=2)
p2 = spira.Term(name='P2', midpoint=(-40,-20), orientation=0, width=2)
rm = RouteManhattan(port1=p1, port2=p2, radius=8)
return spira.SRef(rm, midpoint=(-100,-50))
def test_q4_90(self):
p1 = spira.Term(name='P1', midpoint=(0,0), orientation=180, width=2)
p2 = spira.Term(name='P2', midpoint=(40,-20), orientation=90, width=2)
rm = RouteManhattan(port1=p1, port2=p2, radius=8)
return spira.SRef(rm, midpoint=(50,-50))
def test_q4_180(self):
p1 = spira.Term(name='P1', midpoint=(0,0), orientation=180, width=2)
p2 = spira.Term(name='P2', midpoint=(40,-20), orientation=0, width=2)
rm = RouteManhattan(port1=p1, port2=p2, radius=8)
return spira.SRef(rm, midpoint=(100,-50))
# ------------------------------- Vertical -----------------------------------
def test_p1p2_180_horizontal(self):
p1 = spira.Term(name='P1', midpoint=(0,0), orientation=0, width=2)
p2 = spira.Term(name='P2', midpoint=(40,0), orientation=0, width=2)
rm = RouteManhattan(port1=p1, port2=p2, radius=8)
return spira.SRef(rm, midpoint=(150,0))
def test_p2p1_180_horizontal(self):
p1 = spira.Term(name='P1', midpoint=(0,0), orientation=0, width=2)
p2 = spira.Term(name='P2', midpoint=(-40,0), orientation=0, width=2)
rm = RouteManhattan(port1=p1, port2=p2, radius=8)
return spira.SRef(rm, midpoint=(150,0))
def test_p1p2_180_bot(self):
p1 = spira.Term(name='P1', midpoint=(0,0), orientation=180, width=2)
p2 = spira.Term(name='P2', midpoint=(40,0), orientation=180, width=2)
rm = RouteManhattan(port1=p1, port2=p2, radius=8)
return spira.SRef(rm, midpoint=(150,0))
def test_p2p1_180_bot(self):
p1 = spira.Term(name='P1', midpoint=(0,0), orientation=180, width=2)
p2 = spira.Term(name='P2', midpoint=(-40,0), orientation=180, width=2)
rm = RouteManhattan(port1=p1, port2=p2, radius=8)
return spira.SRef(rm, midpoint=(150,0))
def test_p1p2_180_vertical(self):
p1 = spira.Term(name='P1', midpoint=(0,0), orientation=90, width=2)
p2 = spira.Term(name='P2', midpoint=(0,-40), orientation=90, width=2)
rm = RouteManhattan(port1=p1, port2=p2, radius=8)
return spira.SRef(rm, midpoint=(150,0))
def test_p2p1_180_vertical(self):
p1 = spira.Term(name='P1', midpoint=(0,0), orientation=90, width=2)
p2 = spira.Term(name='P2', midpoint=(0,40), orientation=90, width=2)
rm = RouteManhattan(port1=p1, port2=p2, radius=8)
return spira.SRef(rm, midpoint=(150,0))
def test_p1p2_180_vertical_bot(self):
p1 = spira.Term(name='P1', midpoint=(0,0), orientation=-90, width=2)
p2 = spira.Term(name='P2', midpoint=(0,-40), orientation=-90, width=2)
rm = RouteManhattan(port1=p1, port2=p2, radius=8)
return spira.SRef(rm, midpoint=(150,0))
def test_p2p1_180_vertical_bot(self):
p1 = spira.Term(name='P1', midpoint=(0,0), orientation=-90, width=2)
p2 = spira.Term(name='P2', midpoint=(0,40), orientation=-90, width=2)
rm = RouteManhattan(port1=p1, port2=p2, radius=8)
return spira.SRef(rm, midpoint=(150,0))
# ------------------------------- 180 same Qs ------------------------------
def test_q1_parallel(self):
# p1 = spira.Term(name='P1', midpoint=(0,0), orientation=0, width=2)
# p2 = spira.Term(name='P2', midpoint=(50,50), orientation=0, width=2)
p1 = spira.Term(name='P1', midpoint=(0,0), orientation=180, width=2)
p2 = spira.Term(name='P2', midpoint=(50,50), orientation=180, width=2)
rm = RouteManhattan(port1=p1, port2=p2, radius=8)
return spira.SRef(rm, midpoint=(150,0))
def test_q2_parallel(self):
# p1 = spira.Term(name='P1', midpoint=(0,0), orientation=0, width=2)
# p2 = spira.Term(name='P2', midpoint=(-50,50), orientation=0, width=2)
p1 = spira.Term(name='P1', midpoint=(0,0), orientation=180, width=2)
p2 = spira.Term(name='P2', midpoint=(-50,50), orientation=180, width=2)
rm = RouteManhattan(port1=p1, port2=p2, radius=8)
return spira.SRef(rm, midpoint=(150,0))
def test_q3_parallel(self):
# p1 = spira.Term(name='P1', midpoint=(0,0), orientation=0, width=2)
# p2 = spira.Term(name='P2', midpoint=(-50,-50), orientation=0, width=2)
p1 = spira.Term(name='P1', midpoint=(0,0), orientation=180, width=2)
p2 = spira.Term(name='P2', midpoint=(-50,-50), orientation=180, width=2)
rm = RouteManhattan(port1=p1, port2=p2, radius=8)
return spira.SRef(rm, midpoint=(150,0))
def test_q4_parallel(self):
# p1 = spira.Term(name='P1', midpoint=(0,0), orientation=0, width=2)
# p2 = spira.Term(name='P2', midpoint=(50,-50), orientation=0, width=2)
p1 = spira.Term(name='P1', midpoint=(0,0), orientation=180, width=2)
p2 = spira.Term(name='P2', midpoint=(50,-50), orientation=180, width=2)
rm = RouteManhattan(port1=p1, port2=p2, radius=8)
return spira.SRef(rm, midpoint=(150,0))
def create_elementals(self, elems):
# elems += self.test_q1_90()
# elems += self.test_q1_180()
# elems += self.test_q1_180_90()
# elems += self.test_q2_90()
# elems += self.test_q2_180()
# elems += self.test_q3_90()
# elems += self.test_q3_180()
# elems += self.test_q4_90()
# elems += self.test_q4_180()
# elems += self.test_p1p2_180_horizontal()
# elems += self.test_p2p1_180_horizontal()
# elems += self.test_p1p2_180_bot()
# elems += self.test_p2p1_180_bot()
# elems += self.test_p1p2_180_vertical()
# elems += self.test_p2p1_180_vertical()
# elems += self.test_p1p2_180_vertical_bot()
# elems += self.test_p2p1_180_vertical_bot()
# elems += self.test_q1_parallel()
# elems += self.test_q2_parallel()
# elems += self.test_q3_parallel()
elems += self.test_q4_parallel()
return elems
if __name__ == '__main__':
test_cell = TestManhattan()
test_cell.output()
|
py | b40b8a5e99eb1d9a778a6e2dea043c3fabdf527f | import time
from datetime import datetime
from elementary_flask.cron import CronEntry, cron_endpoint
def hello_world(ix):
def f(cron_context):
print("hello", ix, cron_context)
return f
crontab = [
CronEntry('test0', '@daily', task=hello_world(0)),
CronEntry('test1', '@hourly', task=hello_world(1)),
CronEntry('test2', 'H * * * *', task=hello_world(2)),
CronEntry('test3', 'H H * * *', task=hello_world(3)),
]
t = (int(time.time()) // 3600) * 3600
for i in range(3600):
print(datetime.fromtimestamp(t + 60 * i), cron_endpoint(crontab, t + 60 * i))
|
py | b40b8b7d286e721c748a557fb05e1b5c803311cb | class ItemComponent:
def __init__(self, droppable=True):
self.droppable = droppable
|
py | b40b8bb223378a621e8c92203e87862862aecb3b | # -*- coding: utf-8 -*-
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittest for fake_filesystem module."""
import errno
import os
import stat
import sys
import time
import unittest
from pyfakefs import fake_filesystem
from pyfakefs.fake_filesystem import set_uid, set_gid, is_root, reset_ids
from pyfakefs.helpers import IS_WIN
from pyfakefs.tests.test_utils import DummyTime, TestCase
class FakeDirectoryUnitTest(TestCase):
def setUp(self):
self.orig_time = time.time
time.time = DummyTime(10, 1)
self.filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
self.os = fake_filesystem.FakeOsModule(self.filesystem)
self.fake_file = fake_filesystem.FakeFile(
'foobar', contents='dummy_file', filesystem=self.filesystem)
self.fake_dir = fake_filesystem.FakeDirectory(
'somedir', filesystem=self.filesystem)
def tearDown(self):
time.time = self.orig_time
def test_new_file_and_directory(self):
self.assertTrue(stat.S_IFREG & self.fake_file.st_mode)
self.assertTrue(stat.S_IFDIR & self.fake_dir.st_mode)
self.assertEqual({}, self.fake_dir.contents)
self.assertEqual(10, self.fake_file.st_ctime)
def test_add_entry(self):
self.fake_dir.add_entry(self.fake_file)
self.assertEqual({'foobar': self.fake_file}, self.fake_dir.contents)
def test_get_entry(self):
self.fake_dir.add_entry(self.fake_file)
self.assertEqual(self.fake_file, self.fake_dir.get_entry('foobar'))
def test_path(self):
self.filesystem.root.add_entry(self.fake_dir)
self.fake_dir.add_entry(self.fake_file)
self.assertEqual('/somedir/foobar', self.fake_file.path)
self.assertEqual('/somedir', self.fake_dir.path)
def test_path_with_drive(self):
self.filesystem.is_windows_fs = True
dir_path = 'C:/foo/bar/baz'
self.filesystem.create_dir(dir_path)
dir_object = self.filesystem.get_object(dir_path)
self.assertEqual(dir_path, dir_object.path)
def test_path_after_chdir(self):
dir_path = '/foo/bar/baz'
self.filesystem.create_dir(dir_path)
self.os.chdir(dir_path)
dir_object = self.filesystem.get_object(dir_path)
self.assertEqual(dir_path, dir_object.path)
def test_path_after_chdir_with_drive(self):
self.filesystem.is_windows_fs = True
dir_path = 'C:/foo/bar/baz'
self.filesystem.create_dir(dir_path)
self.os.chdir(dir_path)
dir_object = self.filesystem.get_object(dir_path)
self.assertEqual(dir_path, dir_object.path)
def test_remove_entry(self):
self.fake_dir.add_entry(self.fake_file)
self.assertEqual(self.fake_file, self.fake_dir.get_entry('foobar'))
self.fake_dir.remove_entry('foobar')
self.assertRaises(KeyError, self.fake_dir.get_entry, 'foobar')
def test_should_throw_if_set_size_is_not_integer(self):
def set_size():
self.fake_file.size = 0.1
self.assert_raises_io_error(errno.ENOSPC, set_size)
def test_should_throw_if_set_size_is_negative(self):
def set_size():
self.fake_file.size = -1
self.assert_raises_io_error(errno.ENOSPC, set_size)
def test_produce_empty_file_if_set_size_is_zero(self):
self.fake_file.size = 0
self.assertEqual('', self.fake_file.contents)
def test_sets_content_empty_if_set_size_is_zero(self):
self.fake_file.size = 0
self.assertEqual('', self.fake_file.contents)
def test_truncate_file_if_size_is_smaller_than_current_size(self):
self.fake_file.size = 6
self.assertEqual('dummy_', self.fake_file.contents)
def test_leave_file_unchanged_if_size_is_equal_to_current_size(self):
self.fake_file.size = 10
self.assertEqual('dummy_file', self.fake_file.contents)
def test_set_contents_to_dir_raises(self):
# Regression test for #276
self.filesystem.is_windows_fs = True
error_check = (self.assert_raises_io_error if self.is_python2
else self.assert_raises_os_error)
error_check(errno.EISDIR, self.fake_dir.set_contents, 'a')
self.filesystem.is_windows_fs = False
self.assert_raises_io_error(
errno.EISDIR, self.fake_dir.set_contents, 'a')
def test_pads_with_nullbytes_if_size_is_greater_than_current_size(self):
self.fake_file.size = 13
self.assertEqual('dummy_file\0\0\0', self.fake_file.contents)
def test_set_m_time(self):
self.assertEqual(10, self.fake_file.st_mtime)
self.fake_file.st_mtime = 13
self.assertEqual(13, self.fake_file.st_mtime)
self.fake_file.st_mtime = 131
self.assertEqual(131, self.fake_file.st_mtime)
def test_file_inode(self):
filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
fake_os = fake_filesystem.FakeOsModule(filesystem)
file_path = 'some_file1'
filesystem.create_file(file_path, contents='contents here1')
self.assertLess(0, fake_os.stat(file_path)[stat.ST_INO])
file_obj = filesystem.get_object(file_path)
file_obj.st_ino = 43
self.assertEqual(43, fake_os.stat(file_path)[stat.ST_INO])
def test_directory_inode(self):
filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
fake_os = fake_filesystem.FakeOsModule(filesystem)
dirpath = 'testdir'
filesystem.create_dir(dirpath)
self.assertLess(0, fake_os.stat(dirpath)[stat.ST_INO])
dir_obj = filesystem.get_object(dirpath)
dir_obj.st_ino = 43
self.assertEqual(43, fake_os.stat(dirpath)[stat.ST_INO])
def test_ordered_dirs(self):
filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
filesystem.create_dir('/foo')
filesystem.create_file('/foo/2')
filesystem.create_file('/foo/4')
filesystem.create_file('/foo/1')
filesystem.create_file('/foo/3')
fake_dir = filesystem.get_object('/foo')
self.assertEqual(['2', '4', '1', '3'], fake_dir.ordered_dirs)
class SetLargeFileSizeTest(TestCase):
def setUp(self):
filesystem = fake_filesystem.FakeFilesystem()
self.fake_file = fake_filesystem.FakeFile('foobar',
filesystem=filesystem)
def test_should_throw_if_size_is_not_integer(self):
self.assert_raises_io_error(errno.ENOSPC,
self.fake_file.set_large_file_size, 0.1)
def test_should_throw_if_size_is_negative(self):
self.assert_raises_io_error(errno.ENOSPC,
self.fake_file.set_large_file_size, -1)
def test_sets_content_none_if_size_is_non_negative_integer(self):
self.fake_file.set_large_file_size(1000000000)
self.assertEqual(None, self.fake_file.contents)
self.assertEqual(1000000000, self.fake_file.st_size)
class NormalizePathTest(TestCase):
def setUp(self):
self.filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
self.root_name = '/'
def test_empty_path_should_get_normalized_to_root_path(self):
self.assertEqual(self.root_name, self.filesystem.absnormpath(''))
def test_root_path_remains_unchanged(self):
self.assertEqual(self.root_name,
self.filesystem.absnormpath(self.root_name))
def test_relative_path_forced_to_cwd(self):
path = 'bar'
self.filesystem.cwd = '/foo'
self.assertEqual('/foo/bar', self.filesystem.absnormpath(path))
def test_absolute_path_remains_unchanged(self):
path = '/foo/bar'
self.assertEqual(path, self.filesystem.absnormpath(path))
def test_dotted_path_is_normalized(self):
path = '/foo/..'
self.assertEqual('/', self.filesystem.absnormpath(path))
path = 'foo/../bar'
self.assertEqual('/bar', self.filesystem.absnormpath(path))
def test_dot_path_is_normalized(self):
path = '.'
self.assertEqual('/', self.filesystem.absnormpath(path))
class GetPathComponentsTest(TestCase):
def setUp(self):
self.filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
self.root_name = '/'
def test_root_path_should_return_empty_list(self):
self.assertEqual([], self.filesystem._path_components(self.root_name))
def test_empty_path_should_return_empty_list(self):
self.assertEqual([], self.filesystem._path_components(''))
def test_relative_path_with_one_component_should_return_component(self):
self.assertEqual(['foo'], self.filesystem._path_components('foo'))
def test_absolute_path_with_one_component_should_return_component(self):
self.assertEqual(['foo'], self.filesystem._path_components('/foo'))
def test_two_level_relative_path_should_return_components(self):
self.assertEqual(['foo', 'bar'],
self.filesystem._path_components('foo/bar'))
def test_two_level_absolute_path_should_return_components(self):
self.assertEqual(['foo', 'bar'],
self.filesystem._path_components('/foo/bar'))
class FakeFilesystemUnitTest(TestCase):
def setUp(self):
self.filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
self.root_name = '/'
self.fake_file = fake_filesystem.FakeFile(
'foobar', filesystem=self.filesystem)
self.fake_child = fake_filesystem.FakeDirectory(
'foobaz', filesystem=self.filesystem)
self.fake_grandchild = fake_filesystem.FakeDirectory(
'quux', filesystem=self.filesystem)
def test_new_filesystem(self):
self.assertEqual('/', self.filesystem.path_separator)
self.assertTrue(stat.S_IFDIR & self.filesystem.root.st_mode)
self.assertEqual(self.root_name, self.filesystem.root.name)
self.assertEqual({}, self.filesystem.root.contents)
def test_none_raises_type_error(self):
self.assertRaises(TypeError, self.filesystem.exists, None)
def test_empty_string_does_not_exist(self):
self.assertFalse(self.filesystem.exists(''))
def test_exists_root(self):
self.assertTrue(self.filesystem.exists(self.root_name))
def test_exists_unadded_file(self):
self.assertFalse(self.filesystem.exists(self.fake_file.name))
def test_not_exists_subpath_named_like_file_contents(self):
# Regression test for #219
file_path = "/foo/bar"
self.filesystem.create_file(file_path, contents='baz')
self.assertFalse(self.filesystem.exists(file_path + "/baz"))
def test_get_root_object(self):
self.assertEqual(self.filesystem.root,
self.filesystem.get_object(self.root_name))
def test_add_object_to_root(self):
self.filesystem.add_object(self.root_name, self.fake_file)
self.assertEqual({'foobar': self.fake_file},
self.filesystem.root.contents)
def test_exists_added_file(self):
self.filesystem.add_object(self.root_name, self.fake_file)
self.assertTrue(self.filesystem.exists(self.fake_file.name))
def test_exists_relative_path_posix(self):
self.filesystem.is_windows_fs = False
self.filesystem.create_file('/a/b/file_one')
self.filesystem.create_file('/a/c/file_two')
self.assertTrue(self.filesystem.exists('a/b/../c/file_two'))
self.assertTrue(self.filesystem.exists('/a/c/../b/file_one'))
self.assertTrue(self.filesystem.exists('/a/c/../../a/b/file_one'))
self.assertFalse(self.filesystem.exists('a/b/../z/d'))
self.assertFalse(self.filesystem.exists('a/b/../z/../c/file_two'))
self.filesystem.cwd = '/a/c'
self.assertTrue(self.filesystem.exists('../b/file_one'))
self.assertTrue(self.filesystem.exists('../../a/b/file_one'))
self.assertTrue(self.filesystem.exists('../../a/b/../../a/c/file_two'))
self.assertFalse(self.filesystem.exists('../z/file_one'))
self.assertFalse(self.filesystem.exists('../z/../c/file_two'))
def test_exists_relative_path_windows(self):
self.filesystem.is_windows_fs = True
self.filesystem.is_macos = False
self.filesystem.create_file('/a/b/file_one')
self.filesystem.create_file('/a/c/file_two')
self.assertTrue(self.filesystem.exists('a/b/../c/file_two'))
self.assertTrue(self.filesystem.exists('/a/c/../b/file_one'))
self.assertTrue(self.filesystem.exists('/a/c/../../a/b/file_one'))
self.assertFalse(self.filesystem.exists('a/b/../z/d'))
self.assertTrue(self.filesystem.exists('a/b/../z/../c/file_two'))
self.filesystem.cwd = '/a/c'
self.assertTrue(self.filesystem.exists('../b/file_one'))
self.assertTrue(self.filesystem.exists('../../a/b/file_one'))
self.assertTrue(self.filesystem.exists('../../a/b/../../a/c/file_two'))
self.assertFalse(self.filesystem.exists('../z/file_one'))
self.assertTrue(self.filesystem.exists('../z/../c/file_two'))
def test_get_object_from_root(self):
self.filesystem.add_object(self.root_name, self.fake_file)
self.assertEqual(self.fake_file, self.filesystem.get_object('foobar'))
def test_get_nonexistent_object_from_root_error(self):
self.filesystem.add_object(self.root_name, self.fake_file)
self.assertEqual(self.fake_file, self.filesystem.get_object('foobar'))
self.assert_raises_io_error(
errno.ENOENT, self.filesystem.get_object, 'some_bogus_filename')
def test_remove_object_from_root(self):
self.filesystem.add_object(self.root_name, self.fake_file)
self.filesystem.remove_object(self.fake_file.name)
self.assert_raises_io_error(
errno.ENOENT, self.filesystem.get_object, self.fake_file.name)
def test_remove_nonexisten_object_from_root_error(self):
self.assert_raises_io_error(
errno.ENOENT, self.filesystem.remove_object, 'some_bogus_filename')
def test_exists_removed_file(self):
self.filesystem.add_object(self.root_name, self.fake_file)
self.filesystem.remove_object(self.fake_file.name)
self.assertFalse(self.filesystem.exists(self.fake_file.name))
def test_add_object_to_child(self):
self.filesystem.add_object(self.root_name, self.fake_child)
self.filesystem.add_object(self.fake_child.name, self.fake_file)
self.assertEqual(
{self.fake_file.name: self.fake_file},
self.filesystem.root.get_entry(self.fake_child.name).contents)
def test_add_object_to_regular_file_error_posix(self):
self.filesystem.is_windows_fs = False
self.filesystem.add_object(self.root_name, self.fake_file)
self.assert_raises_os_error(errno.ENOTDIR,
self.filesystem.add_object,
self.fake_file.name, self.fake_file)
def test_add_object_to_regular_file_error_windows(self):
self.filesystem.is_windows_fs = True
self.filesystem.add_object(self.root_name, self.fake_file)
self.assert_raises_os_error(errno.ENOENT,
self.filesystem.add_object,
self.fake_file.name, self.fake_file)
def test_exists_file_added_to_child(self):
self.filesystem.add_object(self.root_name, self.fake_child)
self.filesystem.add_object(self.fake_child.name, self.fake_file)
path = self.filesystem.joinpaths(self.fake_child.name,
self.fake_file.name)
self.assertTrue(self.filesystem.exists(path))
def test_get_object_from_child(self):
self.filesystem.add_object(self.root_name, self.fake_child)
self.filesystem.add_object(self.fake_child.name, self.fake_file)
self.assertEqual(self.fake_file,
self.filesystem.get_object(
self.filesystem.joinpaths(self.fake_child.name,
self.fake_file.name)))
def test_get_nonexistent_object_from_child_error(self):
self.filesystem.add_object(self.root_name, self.fake_child)
self.filesystem.add_object(self.fake_child.name, self.fake_file)
self.assert_raises_io_error(errno.ENOENT, self.filesystem.get_object,
self.filesystem.joinpaths(
self.fake_child.name,
'some_bogus_filename'))
def test_remove_object_from_child(self):
self.filesystem.add_object(self.root_name, self.fake_child)
self.filesystem.add_object(self.fake_child.name, self.fake_file)
target_path = self.filesystem.joinpaths(self.fake_child.name,
self.fake_file.name)
self.filesystem.remove_object(target_path)
self.assert_raises_io_error(errno.ENOENT, self.filesystem.get_object,
target_path)
def test_remove_object_from_child_error(self):
self.filesystem.add_object(self.root_name, self.fake_child)
self.assert_raises_io_error(
errno.ENOENT, self.filesystem.remove_object,
self.filesystem.joinpaths(self.fake_child.name,
'some_bogus_filename'))
def test_remove_object_from_non_directory_error(self):
self.filesystem.add_object(self.root_name, self.fake_file)
self.assert_raises_io_error(
errno.ENOTDIR, self.filesystem.remove_object,
self.filesystem.joinpaths(
'%s' % self.fake_file.name,
'file_does_not_matter_since_parent_not_a_directory'))
def test_exists_file_removed_from_child(self):
self.filesystem.add_object(self.root_name, self.fake_child)
self.filesystem.add_object(self.fake_child.name, self.fake_file)
path = self.filesystem.joinpaths(self.fake_child.name,
self.fake_file.name)
self.filesystem.remove_object(path)
self.assertFalse(self.filesystem.exists(path))
def test_operate_on_grandchild_directory(self):
self.filesystem.add_object(self.root_name, self.fake_child)
self.filesystem.add_object(self.fake_child.name, self.fake_grandchild)
grandchild_directory = self.filesystem.joinpaths(
self.fake_child.name, self.fake_grandchild.name)
grandchild_file = self.filesystem.joinpaths(
grandchild_directory, self.fake_file.name)
self.assertRaises(IOError, self.filesystem.get_object, grandchild_file)
self.filesystem.add_object(grandchild_directory, self.fake_file)
self.assertEqual(self.fake_file,
self.filesystem.get_object(grandchild_file))
self.assertTrue(self.filesystem.exists(grandchild_file))
self.filesystem.remove_object(grandchild_file)
self.assertRaises(IOError, self.filesystem.get_object, grandchild_file)
self.assertFalse(self.filesystem.exists(grandchild_file))
def test_create_directory_in_root_directory(self):
path = 'foo'
self.filesystem.create_dir(path)
new_dir = self.filesystem.get_object(path)
self.assertEqual(os.path.basename(path), new_dir.name)
self.assertTrue(stat.S_IFDIR & new_dir.st_mode)
def test_create_directory_in_root_directory_already_exists_error(self):
path = 'foo'
self.filesystem.create_dir(path)
self.assert_raises_os_error(
errno.EEXIST, self.filesystem.create_dir, path)
def test_create_directory(self):
path = 'foo/bar/baz'
self.filesystem.create_dir(path)
new_dir = self.filesystem.get_object(path)
self.assertEqual(os.path.basename(path), new_dir.name)
self.assertTrue(stat.S_IFDIR & new_dir.st_mode)
# Create second directory to make sure first is OK.
path = '%s/quux' % path
self.filesystem.create_dir(path)
new_dir = self.filesystem.get_object(path)
self.assertEqual(os.path.basename(path), new_dir.name)
self.assertTrue(stat.S_IFDIR & new_dir.st_mode)
def test_create_directory_already_exists_error(self):
path = 'foo/bar/baz'
self.filesystem.create_dir(path)
self.assert_raises_os_error(
errno.EEXIST, self.filesystem.create_dir, path)
def test_create_file_in_read_only_directory_raises_in_posix(self):
self.filesystem.is_windows_fs = False
dir_path = '/foo/bar'
self.filesystem.create_dir(dir_path, perm_bits=0o555)
file_path = dir_path + '/baz'
if not is_root():
if sys.version_info[0] < 3:
self.assert_raises_io_error(errno.EACCES,
self.filesystem.create_file,
file_path)
else:
self.assert_raises_os_error(errno.EACCES,
self.filesystem.create_file,
file_path)
else:
self.filesystem.create_file(file_path)
self.assertTrue(self.filesystem.exists(file_path))
def test_create_file_in_read_only_directory_possible_in_windows(self):
self.filesystem.is_windows_fs = True
dir_path = 'C:/foo/bar'
self.filesystem.create_dir(dir_path, perm_bits=0o555)
file_path = dir_path + '/baz'
self.filesystem.create_file(file_path)
self.assertTrue(self.filesystem.exists(file_path))
def test_create_file_in_current_directory(self):
path = 'foo'
contents = 'dummy data'
self.filesystem.create_file(path, contents=contents)
self.assertTrue(self.filesystem.exists(path))
self.assertFalse(self.filesystem.exists(os.path.dirname(path)))
path = './%s' % path
self.assertTrue(self.filesystem.exists(os.path.dirname(path)))
def test_create_file_in_root_directory(self):
path = '/foo'
contents = 'dummy data'
self.filesystem.create_file(path, contents=contents)
new_file = self.filesystem.get_object(path)
self.assertTrue(self.filesystem.exists(path))
self.assertTrue(self.filesystem.exists(os.path.dirname(path)))
self.assertEqual(os.path.basename(path), new_file.name)
self.assertTrue(stat.S_IFREG & new_file.st_mode)
self.assertEqual(contents, new_file.contents)
def test_create_file_with_size_but_no_content_creates_large_file(self):
path = 'large_foo_bar'
self.filesystem.create_file(path, st_size=100000000)
new_file = self.filesystem.get_object(path)
self.assertEqual(None, new_file.contents)
self.assertEqual(100000000, new_file.st_size)
def test_create_file_in_root_directory_already_exists_error(self):
path = 'foo'
self.filesystem.create_file(path)
self.assert_raises_os_error(
errno.EEXIST, self.filesystem.create_file, path)
def test_create_file(self):
path = 'foo/bar/baz'
retval = self.filesystem.create_file(path, contents='dummy_data')
self.assertTrue(self.filesystem.exists(path))
self.assertTrue(self.filesystem.exists(os.path.dirname(path)))
new_file = self.filesystem.get_object(path)
self.assertEqual(os.path.basename(path), new_file.name)
if IS_WIN:
self.assertEqual(1, new_file.st_uid)
self.assertEqual(1, new_file.st_gid)
else:
self.assertEqual(os.getuid(), new_file.st_uid)
self.assertEqual(os.getgid(), new_file.st_gid)
self.assertEqual(new_file, retval)
def test_create_file_with_changed_ids(self):
path = 'foo/bar/baz'
set_uid(42)
set_gid(2)
self.filesystem.create_file(path)
self.assertTrue(self.filesystem.exists(path))
new_file = self.filesystem.get_object(path)
self.assertEqual(42, new_file.st_uid)
self.assertEqual(2, new_file.st_gid)
reset_ids()
def test_empty_file_created_for_none_contents(self):
fake_open = fake_filesystem.FakeFileOpen(self.filesystem)
path = 'foo/bar/baz'
self.filesystem.create_file(path, contents=None)
with fake_open(path) as f:
self.assertEqual('', f.read())
def test_create_file_with_incorrect_mode_type(self):
self.assertRaises(TypeError, self.filesystem.create_file, 'foo', 'bar')
def test_create_file_already_exists_error(self):
path = 'foo/bar/baz'
self.filesystem.create_file(path, contents='dummy_data')
self.assert_raises_os_error(
errno.EEXIST, self.filesystem.create_file, path)
@unittest.skipIf(TestCase.is_windows and sys.version_info < (3, 3),
'Links are not supported under Windows before Python 3.3')
def test_create_link(self):
path = 'foo/bar/baz'
target_path = 'foo/bar/quux'
new_file = self.filesystem.create_symlink(path, 'quux')
# Neither the path nor the final target exists before we actually
# write to one of them, even though the link appears in the file
# system.
self.assertFalse(self.filesystem.exists(path))
self.assertFalse(self.filesystem.exists(target_path))
self.assertTrue(stat.S_IFLNK & new_file.st_mode)
# but once we write the linked to file, they both will exist.
self.filesystem.create_file(target_path)
self.assertTrue(self.filesystem.exists(path))
self.assertTrue(self.filesystem.exists(target_path))
@unittest.skipIf(TestCase.is_windows and sys.version_info < (3, 3),
'Links are not supported under Windows before Python 3.3')
def test_resolve_object(self):
target_path = 'dir/target'
target_contents = '0123456789ABCDEF'
link_name = 'x'
self.filesystem.create_dir('dir')
self.filesystem.create_file('dir/target', contents=target_contents)
self.filesystem.create_symlink(link_name, target_path)
obj = self.filesystem.resolve(link_name)
self.assertEqual('target', obj.name)
self.assertEqual(target_contents, obj.contents)
@unittest.skipIf(TestCase.is_windows and sys.version_info < (3, 3),
'Links are not supported under Windows before Python 3.3')
def check_lresolve_object(self):
target_path = 'dir/target'
target_contents = '0123456789ABCDEF'
link_name = 'x'
self.filesystem.create_dir('dir')
self.filesystem.create_file('dir/target', contents=target_contents)
self.filesystem.create_symlink(link_name, target_path)
obj = self.filesystem.lresolve(link_name)
self.assertEqual(link_name, obj.name)
self.assertEqual(target_path, obj.contents)
@unittest.skipIf(sys.version_info < (3, 3),
'Links are not supported under Windows before Python 3.3')
def test_lresolve_object_windows(self):
self.filesystem.is_windows_fs = True
self.check_lresolve_object()
def test_lresolve_object_posix(self):
self.filesystem.is_windows_fs = False
self.check_lresolve_object()
def check_directory_access_on_file(self, error_subtype):
self.filesystem.create_file('not_a_dir')
self.assert_raises_io_error(
error_subtype, self.filesystem.resolve, 'not_a_dir/foo')
self.assert_raises_io_error(
error_subtype, self.filesystem.lresolve, 'not_a_dir/foo/bar')
def test_directory_access_on_file_windows(self):
self.filesystem.is_windows_fs = True
self.check_directory_access_on_file(errno.ENOENT)
def test_directory_access_on_file_posix(self):
self.filesystem.is_windows_fs = False
self.check_directory_access_on_file(errno.ENOTDIR)
def test_pickle_fs(self):
"""Regression test for #445"""
import pickle
self.filesystem.open_files = []
p = pickle.dumps(self.filesystem)
fs = pickle.loads(p)
self.assertEqual(str(fs.root), str(self.filesystem.root))
self.assertEqual(fs.mount_points, self.filesystem.mount_points)
class CaseInsensitiveFakeFilesystemTest(TestCase):
def setUp(self):
self.filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
self.filesystem.is_case_sensitive = False
self.os = fake_filesystem.FakeOsModule(self.filesystem)
self.path = self.os.path
def test_get_object(self):
self.filesystem.create_dir('/foo/bar')
self.filesystem.create_file('/foo/bar/baz')
self.assertTrue(self.filesystem.get_object('/Foo/Bar/Baz'))
def test_remove_object(self):
self.filesystem.create_dir('/foo/bar')
self.filesystem.create_file('/foo/bar/baz')
self.filesystem.remove_object('/Foo/Bar/Baz')
self.assertFalse(self.filesystem.exists('/foo/bar/baz'))
def test_exists(self):
self.filesystem.create_dir('/Foo/Bar')
self.assertTrue(self.filesystem.exists('/Foo/Bar'))
self.assertTrue(self.filesystem.exists('/foo/bar'))
self.filesystem.create_file('/foo/Bar/baz')
self.assertTrue(self.filesystem.exists('/Foo/bar/BAZ'))
self.assertTrue(self.filesystem.exists('/foo/bar/baz'))
def test_create_directory_with_different_case_root(self):
self.filesystem.create_dir('/Foo/Bar')
self.filesystem.create_dir('/foo/bar/baz')
dir1 = self.filesystem.get_object('/Foo/Bar')
dir2 = self.filesystem.get_object('/foo/bar')
self.assertEqual(dir1, dir2)
def test_create_file_with_different_case_dir(self):
self.filesystem.create_dir('/Foo/Bar')
self.filesystem.create_file('/foo/bar/baz')
dir1 = self.filesystem.get_object('/Foo/Bar')
dir2 = self.filesystem.get_object('/foo/bar')
self.assertEqual(dir1, dir2)
@unittest.skipIf(TestCase.is_windows and sys.version_info < (3, 3),
'Links are not supported under Windows before Python 3.3')
def test_resolve_path(self):
self.filesystem.create_dir('/foo/baz')
self.filesystem.create_symlink('/Foo/Bar', './baz/bip')
self.assertEqual('/foo/baz/bip',
self.filesystem.resolve_path('/foo/bar'))
def test_isdir_isfile(self):
self.filesystem.create_file('foo/bar')
self.assertTrue(self.path.isdir('Foo'))
self.assertFalse(self.path.isfile('Foo'))
self.assertTrue(self.path.isfile('Foo/Bar'))
self.assertFalse(self.path.isdir('Foo/Bar'))
def test_getsize(self):
file_path = 'foo/bar/baz'
self.filesystem.create_file(file_path, contents='1234567')
self.assertEqual(7, self.path.getsize('FOO/BAR/BAZ'))
def test_getsize_with_looping_symlink(self):
self.filesystem.is_windows_fs = False
dir_path = '/foo/bar'
self.filesystem.create_dir(dir_path)
link_path = dir_path + "/link"
link_target = link_path + "/link"
self.os.symlink(link_target, link_path)
self.assert_raises_os_error(
errno.ELOOP, self.os.path.getsize, link_path)
def test_get_mtime(self):
test_file = self.filesystem.create_file('foo/bar1.txt')
test_file.st_mtime = 24
self.assertEqual(24, self.path.getmtime('Foo/Bar1.TXT'))
def test_get_object_with_file_size(self):
self.filesystem.create_file('/Foo/Bar', st_size=10)
self.assertTrue(self.filesystem.get_object('/foo/bar'))
class CaseSensitiveFakeFilesystemTest(TestCase):
def setUp(self):
self.filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
self.filesystem.is_case_sensitive = True
self.os = fake_filesystem.FakeOsModule(self.filesystem)
self.path = self.os.path
def test_get_object(self):
self.filesystem.create_dir('/foo/bar')
self.filesystem.create_file('/foo/bar/baz')
self.assertRaises(IOError, self.filesystem.get_object, '/Foo/Bar/Baz')
def test_remove_object(self):
self.filesystem.create_dir('/foo/bar')
self.filesystem.create_file('/foo/bar/baz')
self.assertRaises(
IOError, self.filesystem.remove_object, '/Foo/Bar/Baz')
self.assertTrue(self.filesystem.exists('/foo/bar/baz'))
def test_exists(self):
self.filesystem.create_dir('/Foo/Bar')
self.assertTrue(self.filesystem.exists('/Foo/Bar'))
self.assertFalse(self.filesystem.exists('/foo/bar'))
self.filesystem.create_file('/foo/Bar/baz')
self.assertFalse(self.filesystem.exists('/Foo/bar/BAZ'))
self.assertFalse(self.filesystem.exists('/foo/bar/baz'))
def test_create_directory_with_different_case_root(self):
self.filesystem.create_dir('/Foo/Bar')
self.filesystem.create_dir('/foo/bar/baz')
dir1 = self.filesystem.get_object('/Foo/Bar')
dir2 = self.filesystem.get_object('/foo/bar')
self.assertNotEqual(dir1, dir2)
def test_create_file_with_different_case_dir(self):
self.filesystem.create_dir('/Foo/Bar')
self.filesystem.create_file('/foo/bar/baz')
dir1 = self.filesystem.get_object('/Foo/Bar')
dir2 = self.filesystem.get_object('/foo/bar')
self.assertNotEqual(dir1, dir2)
def test_isdir_isfile(self):
self.filesystem.create_file('foo/bar')
self.assertFalse(self.path.isdir('Foo'))
self.assertFalse(self.path.isfile('Foo'))
self.assertFalse(self.path.isfile('Foo/Bar'))
self.assertFalse(self.path.isdir('Foo/Bar'))
def test_getsize(self):
file_path = 'foo/bar/baz'
self.filesystem.create_file(file_path, contents='1234567')
self.assertRaises(os.error, self.path.getsize, 'FOO/BAR/BAZ')
def test_get_mtime(self):
test_file = self.filesystem.create_file('foo/bar1.txt')
test_file.st_mtime = 24
self.assert_raises_os_error(
errno.ENOENT, self.path.getmtime, 'Foo/Bar1.TXT')
class OsPathInjectionRegressionTest(TestCase):
"""Test faking os.path before calling os.walk.
Found when investigating a problem with
gws/tools/labrat/rat_utils_unittest, which was faking out os.path
before calling os.walk.
"""
def setUp(self):
self.filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
self.os_path = os.path
# The bug was that when os.path gets faked, the FakePathModule doesn't
# get called in self.os.walk(). FakePathModule now insists that it is
# created as part of FakeOsModule.
self.os = fake_filesystem.FakeOsModule(self.filesystem)
def tearDown(self):
os.path = self.os_path
def test_create_top_level_directory(self):
top_level_dir = '/x'
self.assertFalse(self.filesystem.exists(top_level_dir))
self.filesystem.create_dir(top_level_dir)
self.assertTrue(self.filesystem.exists('/'))
self.assertTrue(self.filesystem.exists(top_level_dir))
self.filesystem.create_dir('%s/po' % top_level_dir)
self.filesystem.create_file('%s/po/control' % top_level_dir)
self.filesystem.create_file('%s/po/experiment' % top_level_dir)
self.filesystem.create_dir('%s/gv' % top_level_dir)
self.filesystem.create_file('%s/gv/control' % top_level_dir)
expected = [
('/', ['x'], []),
('/x', ['gv', 'po'], []),
('/x/gv', [], ['control']),
('/x/po', [], ['control', 'experiment']),
]
# as the result is unsorted, we have to check against sorted results
result = sorted([step for step in self.os.walk('/')],
key=lambda l: l[0])
self.assertEqual(len(expected), len(result))
for entry, expected_entry in zip(result, expected):
self.assertEqual(expected_entry[0], entry[0])
self.assertEqual(expected_entry[1], sorted(entry[1]))
self.assertEqual(expected_entry[2], sorted(entry[2]))
class FakePathModuleTest(TestCase):
def setUp(self):
self.orig_time = time.time
time.time = DummyTime(10, 1)
self.filesystem = fake_filesystem.FakeFilesystem(path_separator='!')
self.os = fake_filesystem.FakeOsModule(self.filesystem)
self.path = self.os.path
def tearDown(self):
time.time = self.orig_time
def check_abspath(self, is_windows):
# the implementation differs in Windows and Posix, so test both
self.filesystem.is_windows_fs = is_windows
filename = u'foo'
abspath = u'!%s' % filename
self.filesystem.create_file(abspath)
self.assertEqual(abspath, self.path.abspath(abspath))
self.assertEqual(abspath, self.path.abspath(filename))
self.assertEqual(abspath, self.path.abspath(u'..!%s' % filename))
def test_abspath_windows(self):
self.check_abspath(is_windows=True)
def test_abspath_posix(self):
"""abspath should return a consistent representation of a file."""
self.check_abspath(is_windows=False)
def check_abspath_bytes(self, is_windows):
"""abspath should return a consistent representation of a file."""
self.filesystem.is_windows_fs = is_windows
filename = b'foo'
abspath = b'!' + filename
self.filesystem.create_file(abspath)
self.assertEqual(abspath, self.path.abspath(abspath))
self.assertEqual(abspath, self.path.abspath(filename))
self.assertEqual(abspath, self.path.abspath(b'..!' + filename))
def test_abspath_bytes_windows(self):
self.check_abspath_bytes(is_windows=True)
def test_abspath_bytes_posix(self):
self.check_abspath_bytes(is_windows=False)
def test_abspath_deals_with_relative_non_root_path(self):
"""abspath should correctly handle relative paths from a
non-! directory.
This test is distinct from the basic functionality test because
fake_filesystem has historically been based in !.
"""
filename = '!foo!bar!baz'
file_components = filename.split(self.path.sep)
basedir = '!%s' % (file_components[0],)
self.filesystem.create_file(filename)
self.os.chdir(basedir)
self.assertEqual(basedir, self.path.abspath(self.path.curdir))
self.assertEqual('!', self.path.abspath('..'))
self.assertEqual(self.path.join(basedir, file_components[1]),
self.path.abspath(file_components[1]))
def test_abs_path_with_drive_component(self):
self.filesystem.is_windows_fs = True
self.filesystem.cwd = 'C:!foo'
self.assertEqual('C:!foo!bar', self.path.abspath('bar'))
self.assertEqual('C:!foo!bar', self.path.abspath('C:bar'))
self.assertEqual('C:!foo!bar', self.path.abspath('!foo!bar'))
def test_isabs_with_drive_component(self):
self.filesystem.is_windows_fs = False
self.assertFalse(self.path.isabs('C:!foo'))
self.assertTrue(self.path.isabs('!'))
self.filesystem.is_windows_fs = True
self.assertTrue(self.path.isabs('C:!foo'))
self.assertTrue(self.path.isabs('!'))
def test_relpath(self):
path_foo = '!path!to!foo'
path_bar = '!path!to!bar'
path_other = '!some!where!else'
self.assertRaises(ValueError, self.path.relpath, None)
self.assertRaises(ValueError, self.path.relpath, '')
self.assertEqual('path!to!foo', self.path.relpath(path_foo))
self.assertEqual('..!foo',
self.path.relpath(path_foo, path_bar))
self.assertEqual('..!..!..%s' % path_other,
self.path.relpath(path_other, path_bar))
self.assertEqual('.',
self.path.relpath(path_bar, path_bar))
def test_realpath_vs_abspath(self):
self.filesystem.is_windows_fs = False
self.filesystem.create_file('!george!washington!bridge')
self.filesystem.create_symlink('!first!president',
'!george!washington')
self.assertEqual('!first!president!bridge',
self.os.path.abspath('!first!president!bridge'))
self.assertEqual('!george!washington!bridge',
self.os.path.realpath('!first!president!bridge'))
self.os.chdir('!first!president')
self.assertEqual('!george!washington!bridge',
self.os.path.realpath('bridge'))
@unittest.skipIf(TestCase.is_windows and sys.version_info < (3, 2),
'No Windows support before 3.2')
def test_samefile(self):
file_path1 = '!foo!bar!baz'
file_path2 = '!foo!bar!boo'
self.filesystem.create_file(file_path1)
self.filesystem.create_file(file_path2)
self.assertTrue(self.path.samefile(file_path1, file_path1))
self.assertFalse(self.path.samefile(file_path1, file_path2))
self.assertTrue(
self.path.samefile(file_path1, '!foo!..!foo!bar!..!bar!baz'))
def test_exists(self):
file_path = 'foo!bar!baz'
self.filesystem.create_file(file_path)
self.assertTrue(self.path.exists(file_path))
self.assertFalse(self.path.exists('!some!other!bogus!path'))
@unittest.skipIf(TestCase.is_windows and sys.version_info < (3, 3),
'Links are not supported under Windows before Python 3.3')
def test_lexists(self):
file_path = 'foo!bar!baz'
self.filesystem.create_dir('foo!bar')
self.filesystem.create_symlink(file_path, 'bogus')
self.assertTrue(self.path.lexists(file_path))
self.assertFalse(self.path.exists(file_path))
self.filesystem.create_file('foo!bar!bogus')
self.assertTrue(self.path.exists(file_path))
def test_dirname_with_drive(self):
self.filesystem.is_windows_fs = True
self.assertEqual(u'c:!foo',
self.path.dirname(u'c:!foo!bar'))
self.assertEqual(b'c:!',
self.path.dirname(b'c:!foo'))
self.assertEqual(u'!foo',
self.path.dirname(u'!foo!bar'))
self.assertEqual(b'!',
self.path.dirname(b'!foo'))
self.assertEqual(u'c:foo',
self.path.dirname(u'c:foo!bar'))
self.assertEqual(b'c:',
self.path.dirname(b'c:foo'))
self.assertEqual(u'foo',
self.path.dirname(u'foo!bar'))
def test_dirname(self):
dirname = 'foo!bar'
self.assertEqual(dirname, self.path.dirname('%s!baz' % dirname))
def test_join_strings(self):
components = [u'foo', u'bar', u'baz']
self.assertEqual(u'foo!bar!baz', self.path.join(*components))
def test_join_bytes(self):
components = [b'foo', b'bar', b'baz']
self.assertEqual(b'foo!bar!baz', self.path.join(*components))
def test_expand_user(self):
if self.is_windows:
self.assertEqual(self.path.expanduser('~'),
self.os.environ['USERPROFILE'].replace('\\', '!'))
else:
self.assertEqual(self.path.expanduser('~'),
self.os.environ['HOME'].replace('/', '!'))
@unittest.skipIf(TestCase.is_windows or TestCase.is_cygwin,
'only tested on unix systems')
def test_expand_root(self):
if sys.platform == 'darwin':
roothome = '!var!root'
else:
roothome = '!root'
self.assertEqual(self.path.expanduser('~root'), roothome)
def test_getsize_path_nonexistent(self):
file_path = 'foo!bar!baz'
self.assertRaises(os.error, self.path.getsize, file_path)
def test_getsize_file_empty(self):
file_path = 'foo!bar!baz'
self.filesystem.create_file(file_path)
self.assertEqual(0, self.path.getsize(file_path))
def test_getsize_file_non_zero_size(self):
file_path = 'foo!bar!baz'
self.filesystem.create_file(file_path, contents='1234567')
self.assertEqual(7, self.path.getsize(file_path))
def test_getsize_dir_empty(self):
# For directories, only require that the size is non-negative.
dir_path = 'foo!bar'
self.filesystem.create_dir(dir_path)
size = self.path.getsize(dir_path)
self.assertFalse(int(size) < 0,
'expected non-negative size; actual: %s' % size)
def test_getsize_dir_non_zero_size(self):
# For directories, only require that the size is non-negative.
dir_path = 'foo!bar'
self.filesystem.create_file(self.filesystem.joinpaths(dir_path, 'baz'))
size = self.path.getsize(dir_path)
self.assertFalse(int(size) < 0,
'expected non-negative size; actual: %s' % size)
def test_isdir(self):
self.filesystem.create_file('foo!bar')
self.assertTrue(self.path.isdir('foo'))
self.assertFalse(self.path.isdir('foo!bar'))
self.assertFalse(self.path.isdir('it_dont_exist'))
def test_isdir_with_cwd_change(self):
self.filesystem.create_file('!foo!bar!baz')
self.assertTrue(self.path.isdir('!foo'))
self.assertTrue(self.path.isdir('!foo!bar'))
self.assertTrue(self.path.isdir('foo'))
self.assertTrue(self.path.isdir('foo!bar'))
self.filesystem.cwd = '!foo'
self.assertTrue(self.path.isdir('!foo'))
self.assertTrue(self.path.isdir('!foo!bar'))
self.assertTrue(self.path.isdir('bar'))
def test_isfile(self):
self.filesystem.create_file('foo!bar')
self.assertFalse(self.path.isfile('foo'))
self.assertTrue(self.path.isfile('foo!bar'))
self.assertFalse(self.path.isfile('it_dont_exist'))
def test_get_mtime(self):
test_file = self.filesystem.create_file('foo!bar1.txt')
time.time.start()
self.assertEqual(10, test_file.st_mtime)
test_file.st_mtime = 24
self.assertEqual(24, self.path.getmtime('foo!bar1.txt'))
def test_get_mtime_raises_os_error(self):
self.assertFalse(self.path.exists('it_dont_exist'))
self.assert_raises_os_error(errno.ENOENT, self.path.getmtime,
'it_dont_exist')
@unittest.skipIf(TestCase.is_windows and sys.version_info < (3, 3),
'Links are not supported under Windows before Python 3.3')
def test_islink(self):
self.filesystem.create_dir('foo')
self.filesystem.create_file('foo!regular_file')
self.filesystem.create_symlink('foo!link_to_file', 'regular_file')
self.assertFalse(self.path.islink('foo'))
# An object can be both a link and a file or directory, according to the
# comments in Python/Lib/posixpath.py.
self.assertTrue(self.path.islink('foo!link_to_file'))
self.assertTrue(self.path.isfile('foo!link_to_file'))
self.assertTrue(self.path.isfile('foo!regular_file'))
self.assertFalse(self.path.islink('foo!regular_file'))
self.assertFalse(self.path.islink('it_dont_exist'))
@unittest.skipIf(TestCase.is_windows and sys.version_info < (3, 3),
'Links are not supported under Windows before Python 3.3')
def test_is_link_case_sensitive(self):
# Regression test for #306
self.filesystem.is_case_sensitive = False
self.filesystem.create_dir('foo')
self.filesystem.create_symlink('foo!bar', 'foo')
self.assertTrue(self.path.islink('foo!Bar'))
def test_ismount(self):
self.assertFalse(self.path.ismount(''))
self.assertTrue(self.path.ismount('!'))
self.assertFalse(self.path.ismount('!mount!'))
self.filesystem.add_mount_point('!mount')
self.assertTrue(self.path.ismount('!mount'))
self.assertTrue(self.path.ismount('!mount!'))
def test_ismount_with_drive_letters(self):
self.filesystem.is_windows_fs = True
self.assertTrue(self.path.ismount('!'))
self.assertTrue(self.path.ismount('c:!'))
self.assertFalse(self.path.ismount('c:'))
self.assertTrue(self.path.ismount('z:!'))
self.filesystem.add_mount_point('!mount')
self.assertTrue(self.path.ismount('!mount'))
self.assertTrue(self.path.ismount('!mount!'))
@unittest.skipIf(sys.version_info < (2, 7, 8),
'UNC path support since Python 2.7.8')
def test_ismount_with_unc_paths(self):
self.filesystem.is_windows_fs = True
self.assertTrue(self.path.ismount('!!a!'))
self.assertTrue(self.path.ismount('!!a!b'))
self.assertTrue(self.path.ismount('!!a!b!'))
self.assertFalse(self.path.ismount('!a!b!'))
self.assertFalse(self.path.ismount('!!a!b!c'))
def test_ismount_with_alternate_path_separator(self):
self.filesystem.alternative_path_separator = '!'
self.filesystem.add_mount_point('!mount')
self.assertTrue(self.path.ismount('!mount'))
self.assertTrue(self.path.ismount('!mount!'))
self.assertTrue(self.path.ismount('!mount!!'))
self.filesystem.is_windows_fs = True
self.assertTrue(self.path.ismount('Z:!'))
@unittest.skipIf(sys.version_info >= (3, 0),
'os.path.walk removed in Python 3')
def test_walk(self):
self.filesystem.create_file('!foo!bar!baz')
self.filesystem.create_file('!foo!bar!xyzzy!plugh')
visited_nodes = []
def RecordVisitedNodes(visited, dirname, fnames):
visited.extend(((dirname, fname) for fname in fnames))
self.path.walk('!foo', RecordVisitedNodes, visited_nodes)
expected = [('!foo', 'bar'),
('!foo!bar', 'baz'),
('!foo!bar', 'xyzzy'),
('!foo!bar!xyzzy', 'plugh')]
self.assertEqual(expected, sorted(visited_nodes))
@unittest.skipIf(sys.version_info >= (3, 0) or TestCase.is_windows,
'os.path.walk deprecated in Python 3, '
'cannot be properly tested in win32')
def test_walk_from_nonexistent_top_does_not_throw(self):
visited_nodes = []
def RecordVisitedNodes(visited, dirname, fnames):
visited.extend(((dirname, fname) for fname in fnames))
self.path.walk('!foo', RecordVisitedNodes, visited_nodes)
self.assertEqual([], visited_nodes)
def test_getattr_forward_to_real_os_path(self):
"""Forwards any non-faked calls to os.path."""
self.assertTrue(hasattr(self.path, 'sep'),
'Get a faked os.path function')
private_path_function = None
if (2, 7) <= sys.version_info < (3, 6):
if self.is_windows:
if sys.version_info >= (3, 0):
private_path_function = '_get_bothseps'
else:
private_path_function = '_abspath_split'
else:
private_path_function = '_joinrealpath'
if private_path_function:
self.assertTrue(hasattr(self.path, private_path_function),
'Get a real os.path function '
'not implemented in fake os.path')
self.assertFalse(hasattr(self.path, 'nonexistent'))
class PathManipulationTestBase(TestCase):
def setUp(self):
self.filesystem = fake_filesystem.FakeFilesystem(path_separator='|')
class CollapsePathPipeSeparatorTest(PathManipulationTestBase):
"""Tests CollapsePath (mimics os.path.normpath) using |
as path separator."""
def test_empty_path_becomes_dot_path(self):
self.assertEqual('.', self.filesystem.normpath(''))
def test_dot_path_unchanged(self):
self.assertEqual('.', self.filesystem.normpath('.'))
def test_slashes_are_not_collapsed(self):
"""Tests that '/' is not treated specially if the
path separator is '|'.
In particular, multiple slashes should not be collapsed.
"""
self.assertEqual('/', self.filesystem.normpath('/'))
self.assertEqual('/////', self.filesystem.normpath('/////'))
def test_root_path(self):
self.assertEqual('|', self.filesystem.normpath('|'))
def test_multiple_separators_collapsed_into_root_path(self):
self.assertEqual('|', self.filesystem.normpath('|||||'))
def test_all_dot_paths_removed_but_one(self):
self.assertEqual('.', self.filesystem.normpath('.|.|.|.'))
def test_all_dot_paths_removed_if_another_path_component_exists(self):
self.assertEqual('|', self.filesystem.normpath('|.|.|.|'))
self.assertEqual('foo|bar', self.filesystem.normpath('foo|.|.|.|bar'))
def test_ignores_up_level_references_starting_from_root(self):
self.assertEqual('|', self.filesystem.normpath('|..|..|..|'))
self.assertEqual(
'|', self.filesystem.normpath('|..|..|foo|bar|..|..|'))
self.filesystem.is_windows_fs = False # not an UNC path
self.assertEqual('|', self.filesystem.normpath('||..|.|..||'))
def test_conserves_up_level_references_starting_from_current_dir(self):
self.assertEqual(
'..|..', self.filesystem.normpath('..|foo|bar|..|..|..'))
def test_combine_dot_and_up_level_references_in_absolute_path(self):
self.assertEqual(
'|yes', self.filesystem.normpath('|||||.|..|||yes|no|..|.|||'))
def test_dots_in_path_collapses_to_last_path(self):
self.assertEqual(
'bar', self.filesystem.normpath('foo|..|bar'))
self.assertEqual(
'bar', self.filesystem.normpath('foo|..|yes|..|no|..|bar'))
class SplitPathTest(PathManipulationTestBase):
"""Tests SplitPath (which mimics os.path.split)
using | as path separator."""
def test_empty_path(self):
self.assertEqual(('', ''), self.filesystem.splitpath(''))
def test_no_separators(self):
self.assertEqual(('', 'ab'), self.filesystem.splitpath('ab'))
def test_slashes_do_not_split(self):
"""Tests that '/' is not treated specially if the
path separator is '|'."""
self.assertEqual(('', 'a/b'), self.filesystem.splitpath('a/b'))
def test_eliminate_trailing_separators_from_head(self):
self.assertEqual(('a', 'b'), self.filesystem.splitpath('a|b'))
self.assertEqual(('a', 'b'), self.filesystem.splitpath('a|||b'))
self.assertEqual(('|a', 'b'), self.filesystem.splitpath('|a||b'))
self.assertEqual(('a|b', 'c'), self.filesystem.splitpath('a|b|c'))
self.assertEqual(('|a|b', 'c'), self.filesystem.splitpath('|a|b|c'))
def test_root_separator_is_not_stripped(self):
self.assertEqual(('|', ''), self.filesystem.splitpath('|||'))
self.assertEqual(('|', 'a'), self.filesystem.splitpath('|a'))
self.assertEqual(('|', 'a'), self.filesystem.splitpath('|||a'))
def test_empty_tail_if_path_ends_in_separator(self):
self.assertEqual(('a|b', ''), self.filesystem.splitpath('a|b|'))
def test_empty_path_components_are_preserved_in_head(self):
self.assertEqual(('|a||b', 'c'), self.filesystem.splitpath('|a||b||c'))
class JoinPathTest(PathManipulationTestBase):
"""Tests JoinPath (which mimics os.path.join) using | as path separator."""
def test_one_empty_component(self):
self.assertEqual('', self.filesystem.joinpaths(''))
def test_multiple_empty_components(self):
self.assertEqual('', self.filesystem.joinpaths('', '', ''))
def test_separators_not_stripped_from_single_component(self):
self.assertEqual('||a||', self.filesystem.joinpaths('||a||'))
def test_one_separator_added_between_components(self):
self.assertEqual('a|b|c|d',
self.filesystem.joinpaths('a', 'b', 'c', 'd'))
def test_no_separator_added_for_components_ending_in_separator(self):
self.assertEqual('a|b|c', self.filesystem.joinpaths('a|', 'b|', 'c'))
self.assertEqual('a|||b|||c',
self.filesystem.joinpaths('a|||', 'b|||', 'c'))
def test_components_preceding_absolute_component_are_ignored(self):
self.assertEqual('|c|d',
self.filesystem.joinpaths('a', '|b', '|c', 'd'))
def test_one_separator_added_for_trailing_empty_components(self):
self.assertEqual('a|', self.filesystem.joinpaths('a', ''))
self.assertEqual('a|', self.filesystem.joinpaths('a', '', ''))
def test_no_separator_added_for_leading_empty_components(self):
self.assertEqual('a', self.filesystem.joinpaths('', 'a'))
def test_internal_empty_components_ignored(self):
self.assertEqual('a|b', self.filesystem.joinpaths('a', '', 'b'))
self.assertEqual('a|b|', self.filesystem.joinpaths('a|', '', 'b|'))
class PathSeparatorTest(TestCase):
def test_os_path_sep_matches_fake_filesystem_separator(self):
filesystem = fake_filesystem.FakeFilesystem(path_separator='!')
fake_os = fake_filesystem.FakeOsModule(filesystem)
self.assertEqual('!', fake_os.sep)
self.assertEqual('!', fake_os.path.sep)
class NormalizeCaseTest(TestCase):
def setUp(self):
self.filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
self.filesystem.is_case_sensitive = False
def test_normalize_case(self):
self.filesystem.create_file('/Foo/Bar')
self.assertEqual('/Foo/Bar',
self.filesystem._original_path('/foo/bar'))
self.assertEqual('/Foo/Bar',
self.filesystem._original_path('/FOO/BAR'))
def test_normalize_case_for_drive(self):
self.filesystem.is_windows_fs = True
self.filesystem.create_file('C:/Foo/Bar')
self.assertEqual('C:/Foo/Bar',
self.filesystem._original_path('c:/foo/bar'))
self.assertEqual('C:/Foo/Bar',
self.filesystem._original_path('C:/FOO/BAR'))
def test_normalize_case_for_non_existing_file(self):
self.filesystem.create_dir('/Foo/Bar')
self.assertEqual('/Foo/Bar/baz',
self.filesystem._original_path('/foo/bar/baz'))
self.assertEqual('/Foo/Bar/BAZ',
self.filesystem._original_path('/FOO/BAR/BAZ'))
@unittest.skipIf(not TestCase.is_windows,
'Regression test for Windows problem only')
def test_normalize_case_for_lazily_added_empty_file(self):
# regression test for specific issue with added empty real files
filesystem = fake_filesystem.FakeFilesystem()
real_dir_path = os.path.split(
os.path.dirname(os.path.abspath(__file__)))[0]
filesystem.add_real_directory(real_dir_path)
initPyPath = os.path.join(real_dir_path, '__init__.py')
self.assertEqual(initPyPath,
filesystem._original_path(initPyPath.upper()))
class AlternativePathSeparatorTest(TestCase):
def setUp(self):
self.filesystem = fake_filesystem.FakeFilesystem(path_separator='!')
self.filesystem.alternative_path_separator = '?'
def test_initial_value(self):
filesystem = fake_filesystem.FakeFilesystem()
if self.is_windows:
self.assertEqual('/', filesystem.alternative_path_separator)
else:
self.assertIsNone(filesystem.alternative_path_separator)
filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
self.assertIsNone(filesystem.alternative_path_separator)
def test_alt_sep(self):
fake_os = fake_filesystem.FakeOsModule(self.filesystem)
self.assertEqual('?', fake_os.altsep)
self.assertEqual('?', fake_os.path.altsep)
def test_collapse_path_with_mixed_separators(self):
self.assertEqual('!foo!bar', self.filesystem.normpath('!foo??bar'))
def test_normalize_path_with_mixed_separators(self):
path = 'foo?..?bar'
self.assertEqual('!bar', self.filesystem.absnormpath(path))
def test_exists_with_mixed_separators(self):
self.filesystem.create_file('?foo?bar?baz')
self.filesystem.create_file('!foo!bar!xyzzy!plugh')
self.assertTrue(self.filesystem.exists('!foo!bar!baz'))
self.assertTrue(self.filesystem.exists('?foo?bar?xyzzy?plugh'))
class DriveLetterSupportTest(TestCase):
def setUp(self):
self.filesystem = fake_filesystem.FakeFilesystem(path_separator='!')
self.filesystem.is_windows_fs = True
def test_initial_value(self):
filesystem = fake_filesystem.FakeFilesystem()
if self.is_windows:
self.assertTrue(filesystem.is_windows_fs)
else:
self.assertFalse(filesystem.is_windows_fs)
def test_collapse_path(self):
self.assertEqual('c:!foo!bar',
self.filesystem.normpath('c:!!foo!!bar'))
@unittest.skipIf(sys.version_info < (2, 7, 8),
'UNC path support since Python 2.7.8')
def test_collapse_unc_path(self):
self.assertEqual('!!foo!bar!baz',
self.filesystem.normpath('!!foo!bar!!baz!!'))
def test_normalize_path_str(self):
self.filesystem.cwd = u''
self.assertEqual(u'c:!foo!bar',
self.filesystem.absnormpath(u'c:!foo!!bar'))
self.filesystem.cwd = u'c:!foo'
self.assertEqual(u'c:!foo!bar', self.filesystem.absnormpath(u'bar'))
def test_normalize_path_bytes(self):
self.filesystem.cwd = b''
self.assertEqual(b'c:!foo!bar',
self.filesystem.absnormpath(b'c:!foo!!bar'))
self.filesystem.cwd = b'c:!foo'
self.assertEqual(b'c:!foo!bar', self.filesystem.absnormpath(b'bar'))
def test_split_path_str(self):
self.assertEqual((u'c:!foo', u'bar'),
self.filesystem.splitpath(u'c:!foo!bar'))
self.assertEqual((u'c:!', u'foo'),
self.filesystem.splitpath(u'c:!foo'))
self.assertEqual((u'!foo', u'bar'),
self.filesystem.splitpath(u'!foo!bar'))
self.assertEqual((u'!', u'foo'),
self.filesystem.splitpath(u'!foo'))
self.assertEqual((u'c:foo', u'bar'),
self.filesystem.splitpath(u'c:foo!bar'))
self.assertEqual((u'c:', u'foo'),
self.filesystem.splitpath(u'c:foo'))
self.assertEqual((u'foo', u'bar'),
self.filesystem.splitpath(u'foo!bar'))
def test_split_path_bytes(self):
self.assertEqual((b'c:!foo', b'bar'),
self.filesystem.splitpath(b'c:!foo!bar'))
self.assertEqual((b'c:!', b'foo'),
self.filesystem.splitpath(b'c:!foo'))
self.assertEqual((b'!foo', b'bar'),
self.filesystem.splitpath(b'!foo!bar'))
self.assertEqual((b'!', b'foo'),
self.filesystem.splitpath(b'!foo'))
self.assertEqual((b'c:foo', b'bar'),
self.filesystem.splitpath(b'c:foo!bar'))
self.assertEqual((b'c:', b'foo'),
self.filesystem.splitpath(b'c:foo'))
self.assertEqual((b'foo', b'bar'),
self.filesystem.splitpath(b'foo!bar'))
def test_characters_before_root_ignored_in_join_paths(self):
self.assertEqual('c:d', self.filesystem.joinpaths('b', 'c:', 'd'))
def test_resolve_path(self):
self.assertEqual('c:!foo!bar',
self.filesystem.resolve_path('c:!foo!bar'))
def test_get_path_components(self):
self.assertEqual(['c:', 'foo', 'bar'],
self.filesystem._path_components('c:!foo!bar'))
self.assertEqual(['c:'], self.filesystem._path_components('c:'))
def test_split_drive_str(self):
self.assertEqual((u'c:', u'!foo!bar'),
self.filesystem.splitdrive(u'c:!foo!bar'))
self.assertEqual((u'', u'!foo!bar'),
self.filesystem.splitdrive(u'!foo!bar'))
self.assertEqual((u'c:', u'foo!bar'),
self.filesystem.splitdrive(u'c:foo!bar'))
self.assertEqual((u'', u'foo!bar'),
self.filesystem.splitdrive(u'foo!bar'))
def test_split_drive_bytes(self):
self.assertEqual((b'c:', b'!foo!bar'),
self.filesystem.splitdrive(b'c:!foo!bar'))
self.assertEqual((b'', b'!foo!bar'),
self.filesystem.splitdrive(b'!foo!bar'))
@unittest.skipIf(sys.version_info < (2, 7, 8),
'UNC path support since Python 2.7.8')
def test_split_drive_with_unc_path(self):
self.assertEqual(('!!foo!bar', '!baz'),
self.filesystem.splitdrive('!!foo!bar!baz'))
self.assertEqual(('', '!!foo'), self.filesystem.splitdrive('!!foo'))
self.assertEqual(('', '!!foo!!bar'),
self.filesystem.splitdrive('!!foo!!bar'))
self.assertEqual(('!!foo!bar', '!!'),
self.filesystem.splitdrive('!!foo!bar!!'))
class DiskSpaceTest(TestCase):
def setUp(self):
self.filesystem = fake_filesystem.FakeFilesystem(path_separator='!',
total_size=100)
self.os = fake_filesystem.FakeOsModule(self.filesystem)
def test_disk_usage_on_file_creation(self):
fake_open = fake_filesystem.FakeFileOpen(self.filesystem)
total_size = 100
self.filesystem.add_mount_point('mount', total_size)
def create_too_large_file():
with fake_open('!mount!file', 'w') as dest:
dest.write('a' * (total_size + 1))
self.assertRaises((OSError, IOError), create_too_large_file)
self.assertEqual(0, self.filesystem.get_disk_usage('!mount').used)
with fake_open('!mount!file', 'w') as dest:
dest.write('a' * total_size)
self.assertEqual(total_size,
self.filesystem.get_disk_usage('!mount').used)
def test_file_system_size_after_large_file_creation(self):
filesystem = fake_filesystem.FakeFilesystem(
path_separator='!', total_size=1024 * 1024 * 1024 * 100)
filesystem.create_file('!foo!baz', st_size=1024 * 1024 * 1024 * 10)
self.assertEqual((1024 * 1024 * 1024 * 100,
1024 * 1024 * 1024 * 10,
1024 * 1024 * 1024 * 90),
filesystem.get_disk_usage())
def test_file_system_size_after_binary_file_creation(self):
self.filesystem.create_file('!foo!bar', contents=b'xyzzy')
self.assertEqual((100, 5, 95), self.filesystem.get_disk_usage())
def test_file_system_size_after_ascii_string_file_creation(self):
self.filesystem.create_file('!foo!bar', contents=u'complicated')
self.assertEqual((100, 11, 89), self.filesystem.get_disk_usage())
def test_filesystem_size_after_2byte_unicode_file_creation(self):
self.filesystem.create_file('!foo!bar', contents=u'сложно',
encoding='utf-8')
self.assertEqual((100, 12, 88), self.filesystem.get_disk_usage())
def test_filesystem_size_after_3byte_unicode_file_creation(self):
self.filesystem.create_file('!foo!bar', contents=u'複雑',
encoding='utf-8')
self.assertEqual((100, 6, 94), self.filesystem.get_disk_usage())
def test_file_system_size_after_file_deletion(self):
self.filesystem.create_file('!foo!bar', contents=b'xyzzy')
self.filesystem.create_file('!foo!baz', st_size=20)
self.filesystem.remove_object('!foo!bar')
self.assertEqual((100, 20, 80), self.filesystem.get_disk_usage())
def test_file_system_size_after_directory_removal(self):
self.filesystem.create_file('!foo!bar', st_size=10)
self.filesystem.create_file('!foo!baz', st_size=20)
self.filesystem.create_file('!foo1!bar', st_size=40)
self.filesystem.remove_object('!foo')
self.assertEqual((100, 40, 60), self.filesystem.get_disk_usage())
def test_creating_file_with_fitting_content(self):
initial_usage = self.filesystem.get_disk_usage()
try:
self.filesystem.create_file('!foo!bar', contents=b'a' * 100)
except IOError:
self.fail('File with contents fitting into disk space '
'could not be written.')
self.assertEqual(initial_usage.used + 100,
self.filesystem.get_disk_usage().used)
def test_creating_file_with_content_too_large(self):
def create_large_file():
self.filesystem.create_file('!foo!bar', contents=b'a' * 101)
initial_usage = self.filesystem.get_disk_usage()
self.assertRaises(IOError, create_large_file)
self.assertEqual(initial_usage, self.filesystem.get_disk_usage())
def test_creating_file_with_fitting_size(self):
initial_usage = self.filesystem.get_disk_usage()
try:
self.filesystem.create_file('!foo!bar', st_size=100)
except IOError:
self.fail(
'File with size fitting into disk space could not be written.')
self.assertEqual(initial_usage.used + 100,
self.filesystem.get_disk_usage().used)
def test_creating_file_with_size_too_large(self):
initial_usage = self.filesystem.get_disk_usage()
def create_large_file():
self.filesystem.create_file('!foo!bar', st_size=101)
self.assertRaises(IOError, create_large_file)
self.assertEqual(initial_usage, self.filesystem.get_disk_usage())
def test_resize_file_with_fitting_size(self):
file_object = self.filesystem.create_file('!foo!bar', st_size=50)
try:
file_object.set_large_file_size(100)
file_object.set_contents(b'a' * 100)
except IOError:
self.fail(
'Resizing file failed although disk space was sufficient.')
def test_resize_file_with_size_too_large(self):
file_object = self.filesystem.create_file('!foo!bar', st_size=50)
self.assert_raises_io_error(errno.ENOSPC,
file_object.set_large_file_size, 200)
self.assert_raises_io_error(errno.ENOSPC, file_object.set_contents,
'a' * 150)
def test_file_system_size_after_directory_rename(self):
self.filesystem.create_file('!foo!bar', st_size=20)
self.os.rename('!foo', '!baz')
self.assertEqual(20, self.filesystem.get_disk_usage().used)
def test_file_system_size_after_file_rename(self):
self.filesystem.create_file('!foo!bar', st_size=20)
self.os.rename('!foo!bar', '!foo!baz')
self.assertEqual(20, self.filesystem.get_disk_usage().used)
@unittest.skipIf(TestCase.is_windows and sys.version_info < (3, 3),
'Links are not supported under Windows before Python 3.3')
def test_that_hard_link_does_not_change_used_size(self):
file1_path = 'test_file1'
file2_path = 'test_file2'
self.filesystem.create_file(file1_path, st_size=20)
self.assertEqual(20, self.filesystem.get_disk_usage().used)
# creating a hard link shall not increase used space
self.os.link(file1_path, file2_path)
self.assertEqual(20, self.filesystem.get_disk_usage().used)
# removing a file shall not decrease used space
# if a hard link still exists
self.os.unlink(file1_path)
self.assertEqual(20, self.filesystem.get_disk_usage().used)
self.os.unlink(file2_path)
self.assertEqual(0, self.filesystem.get_disk_usage().used)
def test_that_the_size_of_correct_mount_point_is_used(self):
self.filesystem.add_mount_point('!mount_limited', total_size=50)
self.filesystem.add_mount_point('!mount_unlimited')
self.assert_raises_io_error(errno.ENOSPC,
self.filesystem.create_file,
'!mount_limited!foo', st_size=60)
self.assert_raises_io_error(errno.ENOSPC, self.filesystem.create_file,
'!bar', st_size=110)
try:
self.filesystem.create_file('!foo', st_size=60)
self.filesystem.create_file('!mount_limited!foo', st_size=40)
self.filesystem.create_file('!mount_unlimited!foo',
st_size=1000000)
except IOError:
self.fail('File with contents fitting into '
'disk space could not be written.')
def test_that_disk_usage_of_correct_mount_point_is_used(self):
self.filesystem.add_mount_point('!mount1', total_size=20)
self.filesystem.add_mount_point('!mount1!bar!mount2', total_size=50)
self.filesystem.create_file('!foo!bar', st_size=10)
self.filesystem.create_file('!mount1!foo!bar', st_size=10)
self.filesystem.create_file('!mount1!bar!mount2!foo!bar', st_size=10)
self.assertEqual(90, self.filesystem.get_disk_usage('!foo').free)
self.assertEqual(10,
self.filesystem.get_disk_usage('!mount1!foo').free)
self.assertEqual(40, self.filesystem.get_disk_usage(
'!mount1!bar!mount2').free)
def test_set_larger_disk_size(self):
self.filesystem.add_mount_point('!mount1', total_size=20)
self.assert_raises_io_error(errno.ENOSPC,
self.filesystem.create_file, '!mount1!foo',
st_size=100)
self.filesystem.set_disk_usage(total_size=200, path='!mount1')
self.filesystem.create_file('!mount1!foo', st_size=100)
self.assertEqual(100,
self.filesystem.get_disk_usage('!mount1!foo').free)
def test_set_smaller_disk_size(self):
self.filesystem.add_mount_point('!mount1', total_size=200)
self.filesystem.create_file('!mount1!foo', st_size=100)
self.assert_raises_io_error(errno.ENOSPC,
self.filesystem.set_disk_usage,
total_size=50, path='!mount1')
self.filesystem.set_disk_usage(total_size=150, path='!mount1')
self.assertEqual(50,
self.filesystem.get_disk_usage('!mount1!foo').free)
def test_disk_size_on_unlimited_disk(self):
self.filesystem.add_mount_point('!mount1')
self.filesystem.create_file('!mount1!foo', st_size=100)
self.filesystem.set_disk_usage(total_size=1000, path='!mount1')
self.assertEqual(900,
self.filesystem.get_disk_usage('!mount1!foo').free)
def test_disk_size_on_auto_mounted_drive_on_file_creation(self):
self.filesystem.is_windows_fs = True
# drive d: shall be auto-mounted and the used size adapted
self.filesystem.create_file('d:!foo!bar', st_size=100)
self.filesystem.set_disk_usage(total_size=1000, path='d:')
self.assertEqual(self.filesystem.get_disk_usage('d:!foo').free, 900)
def test_disk_size_on_auto_mounted_drive_on_directory_creation(self):
self.filesystem.is_windows_fs = True
self.filesystem.create_dir('d:!foo!bar')
self.filesystem.create_file('d:!foo!bar!baz', st_size=100)
self.filesystem.create_file('d:!foo!baz', st_size=100)
self.filesystem.set_disk_usage(total_size=1000, path='d:')
self.assertEqual(self.filesystem.get_disk_usage('d:!foo').free, 800)
@unittest.skipIf(sys.version_info < (3, 0),
'Tests byte contents in Python3')
def test_copying_preserves_byte_contents(self):
source_file = self.filesystem.create_file('foo', contents=b'somebytes')
dest_file = self.filesystem.create_file('bar')
dest_file.set_contents(source_file.contents)
self.assertEqual(dest_file.contents, source_file.contents)
class MountPointTest(TestCase):
def setUp(self):
self.filesystem = fake_filesystem.FakeFilesystem(path_separator='!',
total_size=100)
self.filesystem.add_mount_point('!foo')
self.filesystem.add_mount_point('!bar')
self.filesystem.add_mount_point('!foo!baz')
def test_that_new_mount_points_get_new_device_number(self):
self.assertEqual(1, self.filesystem.get_object('!').st_dev)
self.assertEqual(2, self.filesystem.get_object('!foo').st_dev)
self.assertEqual(3, self.filesystem.get_object('!bar').st_dev)
self.assertEqual(4, self.filesystem.get_object('!foo!baz').st_dev)
def test_that_new_directories_get_correct_device_number(self):
self.assertEqual(1, self.filesystem.create_dir('!foo1!bar').st_dev)
self.assertEqual(2, self.filesystem.create_dir('!foo!bar').st_dev)
self.assertEqual(4,
self.filesystem.create_dir('!foo!baz!foo!bar').st_dev)
def test_that_new_files_get_correct_device_number(self):
self.assertEqual(1, self.filesystem.create_file('!foo1!bar').st_dev)
self.assertEqual(2, self.filesystem.create_file('!foo!bar').st_dev)
self.assertEqual(4, self.filesystem.create_file(
'!foo!baz!foo!bar').st_dev)
def test_that_mount_point_cannot_be_added_twice(self):
self.assert_raises_os_error(errno.EEXIST,
self.filesystem.add_mount_point, '!foo')
self.assert_raises_os_error(errno.EEXIST,
self.filesystem.add_mount_point, '!foo!')
def test_that_drives_are_auto_mounted(self):
self.filesystem.is_windows_fs = True
self.filesystem.create_dir('d:!foo!bar')
self.filesystem.create_file('d:!foo!baz')
self.filesystem.create_file('z:!foo!baz')
self.assertEqual(5, self.filesystem.get_object('d:').st_dev)
self.assertEqual(5, self.filesystem.get_object('d:!foo!bar').st_dev)
self.assertEqual(5, self.filesystem.get_object('d:!foo!baz').st_dev)
self.assertEqual(6, self.filesystem.get_object('z:!foo!baz').st_dev)
def test_that_drives_are_auto_mounted_case_insensitive(self):
self.filesystem.is_windows_fs = True
self.filesystem.is_case_sensitive = False
self.filesystem.create_dir('D:!foo!bar')
self.filesystem.create_file('e:!foo!baz')
self.assertEqual(5, self.filesystem.get_object('D:').st_dev)
self.assertEqual(5, self.filesystem.get_object('d:!foo!bar').st_dev)
self.assertEqual(6, self.filesystem.get_object('e:!foo').st_dev)
self.assertEqual(6, self.filesystem.get_object('E:!Foo!Baz').st_dev)
@unittest.skipIf(sys.version_info < (2, 7, 8),
'UNC path support since Python 2.7.8')
def test_that_unc_paths_are_auto_mounted(self):
self.filesystem.is_windows_fs = True
self.filesystem.create_dir('!!foo!bar!baz')
self.filesystem.create_file('!!foo!bar!bip!bop')
self.assertEqual(5, self.filesystem.get_object('!!foo!bar').st_dev)
self.assertEqual(5, self.filesystem.get_object(
'!!foo!bar!bip!bop').st_dev)
class RealFileSystemAccessTest(TestCase):
def setUp(self):
# use the real path separator to work with the real file system
self.filesystem = fake_filesystem.FakeFilesystem()
self.fake_open = fake_filesystem.FakeFileOpen(self.filesystem)
self.pyfakefs_path = os.path.split(
os.path.dirname(os.path.abspath(__file__)))[0]
self.root_path = os.path.split(self.pyfakefs_path)[0]
def test_add_non_existing_real_file_raises(self):
nonexisting_path = os.path.join('nonexisting', 'test.txt')
self.assertRaises(OSError, self.filesystem.add_real_file,
nonexisting_path)
self.assertFalse(self.filesystem.exists(nonexisting_path))
def test_add_non_existing_real_directory_raises(self):
nonexisting_path = '/nonexisting'
self.assert_raises_io_error(errno.ENOENT,
self.filesystem.add_real_directory,
nonexisting_path)
self.assertFalse(self.filesystem.exists(nonexisting_path))
def test_existing_fake_file_raises(self):
real_file_path = __file__
self.filesystem.create_file(real_file_path)
self.assert_raises_os_error(errno.EEXIST,
self.filesystem.add_real_file,
real_file_path)
def test_existing_fake_directory_raises(self):
self.filesystem.create_dir(self.root_path)
self.assert_raises_os_error(errno.EEXIST,
self.filesystem.add_real_directory,
self.root_path)
def check_fake_file_stat(self, fake_file, real_file_path,
target_path=None):
if target_path is None or target_path == real_file_path:
self.assertTrue(self.filesystem.exists(real_file_path))
else:
self.assertFalse(self.filesystem.exists(real_file_path))
self.assertTrue(self.filesystem.exists(target_path))
real_stat = os.stat(real_file_path)
self.assertIsNone(fake_file._byte_contents)
self.assertEqual(fake_file.st_size, real_stat.st_size)
self.assertAlmostEqual(fake_file.st_ctime, real_stat.st_ctime,
places=5)
self.assertAlmostEqual(fake_file.st_atime, real_stat.st_atime,
places=5)
self.assertAlmostEqual(fake_file.st_mtime, real_stat.st_mtime,
places=5)
self.assertEqual(fake_file.st_uid, real_stat.st_uid)
self.assertEqual(fake_file.st_gid, real_stat.st_gid)
def check_read_only_file(self, fake_file, real_file_path):
with open(real_file_path, 'rb') as f:
real_contents = f.read()
self.assertEqual(fake_file.byte_contents, real_contents)
if not is_root():
self.assert_raises_io_error(
errno.EACCES, self.fake_open, real_file_path, 'w')
else:
with self.fake_open(real_file_path, 'w'):
pass
def check_writable_file(self, fake_file, real_file_path):
with open(real_file_path, 'rb') as f:
real_contents = f.read()
self.assertEqual(fake_file.byte_contents, real_contents)
with self.fake_open(real_file_path, 'wb') as f:
f.write(b'test')
with open(real_file_path, 'rb') as f:
real_contents1 = f.read()
self.assertEqual(real_contents1, real_contents)
with self.fake_open(real_file_path, 'rb') as f:
fake_contents = f.read()
self.assertEqual(fake_contents, b'test')
def test_add_existing_real_file_read_only(self):
real_file_path = os.path.abspath(__file__)
fake_file = self.filesystem.add_real_file(real_file_path)
self.check_fake_file_stat(fake_file, real_file_path)
self.assertEqual(fake_file.st_mode & 0o333, 0)
self.check_read_only_file(fake_file, real_file_path)
def test_add_existing_real_file_read_write(self):
real_file_path = os.path.realpath(__file__)
fake_file = self.filesystem.add_real_file(real_file_path,
read_only=False)
self.check_fake_file_stat(fake_file, real_file_path)
self.assertEqual(fake_file.st_mode, os.stat(real_file_path).st_mode)
self.check_writable_file(fake_file, real_file_path)
def test_add_real_file_to_existing_path(self):
real_file_path = os.path.abspath(__file__)
self.filesystem.create_file('/foo/bar')
self.assert_raises_os_error(
errno.EEXIST, self.filesystem.add_real_file,
real_file_path, target_path='/foo/bar')
def test_add_real_file_to_non_existing_path(self):
real_file_path = os.path.abspath(__file__)
fake_file = self.filesystem.add_real_file(real_file_path,
target_path='/foo/bar')
self.check_fake_file_stat(fake_file, real_file_path,
target_path='/foo/bar')
def test_write_to_real_file(self):
# regression test for #470
real_file_path = os.path.abspath(__file__)
self.filesystem.add_real_file(real_file_path, read_only=False)
with self.fake_open(real_file_path, 'w') as f:
f.write('foo')
with self.fake_open(real_file_path, 'rb') as f:
self.assertEqual(b'foo', f.read())
def test_add_existing_real_directory_read_only(self):
self.filesystem.add_real_directory(self.pyfakefs_path)
self.assertTrue(self.filesystem.exists(self.pyfakefs_path))
self.assertTrue(self.filesystem.exists(
os.path.join(self.pyfakefs_path, 'fake_filesystem.py')))
self.assertTrue(self.filesystem.exists(
os.path.join(self.pyfakefs_path, 'fake_pathlib.py')))
file_path = os.path.join(self.pyfakefs_path,
'fake_filesystem_shutil.py')
fake_file = self.filesystem.resolve(file_path)
self.check_fake_file_stat(fake_file, file_path)
self.check_read_only_file(fake_file, file_path)
def test_add_existing_real_directory_tree(self):
self.filesystem.add_real_directory(self.root_path)
self.assertTrue(
self.filesystem.exists(
os.path.join(self.root_path, 'pyfakefs', 'tests',
'fake_filesystem_test.py')))
self.assertTrue(
self.filesystem.exists(
os.path.join(self.root_path, 'pyfakefs',
'fake_filesystem.py')))
self.assertTrue(
self.filesystem.exists(
os.path.join(self.root_path, 'pyfakefs', '__init__.py')))
def test_add_existing_real_directory_tree_to_existing_path(self):
self.filesystem.create_dir('/foo/bar')
self.assert_raises_os_error(errno.EEXIST,
self.filesystem.add_real_directory,
self.root_path,
target_path='/foo/bar')
def test_add_existing_real_directory_tree_to_other_path(self):
self.filesystem.add_real_directory(self.root_path,
target_path='/foo/bar')
self.assertFalse(
self.filesystem.exists(
os.path.join(self.pyfakefs_path, 'tests',
'fake_filesystem_test.py')))
self.assertTrue(
self.filesystem.exists(
os.path.join('foo', 'bar', 'pyfakefs', 'tests',
'fake_filesystem_test.py')))
self.assertFalse(
self.filesystem.exists(
os.path.join(self.root_path, 'pyfakefs',
'fake_filesystem.py')))
self.assertTrue(
self.filesystem.exists(
os.path.join('foo', 'bar', 'pyfakefs', '__init__.py')))
def test_get_object_from_lazily_added_real_directory(self):
self.filesystem.is_case_sensitive = True
self.filesystem.add_real_directory(self.root_path)
self.assertTrue(self.filesystem.get_object(
os.path.join(self.root_path, 'pyfakefs', 'fake_filesystem.py')))
self.assertTrue(
self.filesystem.get_object(
os.path.join(self.root_path, 'pyfakefs', '__init__.py')))
def test_add_existing_real_directory_lazily(self):
disk_size = 1024 * 1024 * 1024
real_dir_path = os.path.join(self.root_path, 'pyfakefs')
self.filesystem.set_disk_usage(disk_size, real_dir_path)
self.filesystem.add_real_directory(real_dir_path)
# the directory contents have not been read, so the disk usage
# has not changed
self.assertEqual(disk_size,
self.filesystem.get_disk_usage(real_dir_path).free)
# checking for existence shall read the directory contents
self.assertTrue(
self.filesystem.get_object(
os.path.join(real_dir_path, 'fake_filesystem.py')))
# so now the free disk space shall have decreased
self.assertGreater(disk_size,
self.filesystem.get_disk_usage(real_dir_path).free)
def test_add_existing_real_directory_not_lazily(self):
disk_size = 1024 * 1024 * 1024
self.filesystem.set_disk_usage(disk_size, self.pyfakefs_path)
self.filesystem.add_real_directory(self.pyfakefs_path, lazy_read=False)
# the directory has been read, so the file sizes have
# been subtracted from the free space
self.assertGreater(disk_size, self.filesystem.get_disk_usage(
self.pyfakefs_path).free)
def test_add_existing_real_directory_read_write(self):
self.filesystem.add_real_directory(self.pyfakefs_path, read_only=False)
self.assertTrue(self.filesystem.exists(self.pyfakefs_path))
self.assertTrue(self.filesystem.exists(
os.path.join(self.pyfakefs_path, 'fake_filesystem.py')))
self.assertTrue(self.filesystem.exists(
os.path.join(self.pyfakefs_path, 'fake_pathlib.py')))
file_path = os.path.join(self.pyfakefs_path, 'pytest_plugin.py')
fake_file = self.filesystem.resolve(file_path)
self.check_fake_file_stat(fake_file, file_path)
self.check_writable_file(fake_file, file_path)
def test_add_existing_real_paths_read_only(self):
real_file_path = os.path.realpath(__file__)
fixture_path = os.path.join(self.pyfakefs_path, 'tests', 'fixtures')
self.filesystem.add_real_paths([real_file_path, fixture_path])
fake_file = self.filesystem.resolve(real_file_path)
self.check_fake_file_stat(fake_file, real_file_path)
self.check_read_only_file(fake_file, real_file_path)
real_file_path = os.path.join(fixture_path,
'module_with_attributes.py')
fake_file = self.filesystem.resolve(real_file_path)
self.check_fake_file_stat(fake_file, real_file_path)
self.check_read_only_file(fake_file, real_file_path)
def test_add_existing_real_paths_read_write(self):
real_file_path = os.path.realpath(__file__)
fixture_path = os.path.join(self.pyfakefs_path, 'tests', 'fixtures')
self.filesystem.add_real_paths([real_file_path, fixture_path],
read_only=False)
fake_file = self.filesystem.resolve(real_file_path)
self.check_fake_file_stat(fake_file, real_file_path)
self.check_writable_file(fake_file, real_file_path)
real_file_path = os.path.join(fixture_path,
'module_with_attributes.py')
fake_file = self.filesystem.resolve(real_file_path)
self.check_fake_file_stat(fake_file, real_file_path)
self.check_writable_file(fake_file, real_file_path)
class FileSideEffectTests(TestCase):
def side_effect(self):
test_case = self
test_case.side_effect_called = False
def __side_effect(file_object):
test_case.side_effect_called = True
test_case.side_effect_file_object_content = file_object.contents
return __side_effect
def setUp(self):
# use the real path separator to work with the real file system
self.filesystem = fake_filesystem.FakeFilesystem()
self.filesystem.create_file('/a/b/file_one',
side_effect=self.side_effect())
def test_side_effect_called(self):
fake_open = fake_filesystem.FakeFileOpen(self.filesystem)
self.side_effect_called = False
with fake_open('/a/b/file_one', 'w') as handle:
handle.write('foo')
self.assertTrue(self.side_effect_called)
def test_side_effect_file_object(self):
fake_open = fake_filesystem.FakeFileOpen(self.filesystem)
self.side_effect_called = False
with fake_open('/a/b/file_one', 'w') as handle:
handle.write('foo')
self.assertEqual(self.side_effect_file_object_content, 'foo')
if __name__ == '__main__':
unittest.main()
|
py | b40b8c379ca004991b388c014bbfa669cdded8d3 | import pytest
from stp_core.loop.eventually import eventually, slowFactor
from stp_core.common.log import getlogger
from stp_core.loop.looper import Looper
from plenum.server.node import Node
from plenum.test import waits
from plenum.test.delayers import delayerMsgTuple
from plenum.test.helper import sendMessageAndCheckDelivery, addNodeBack, assertExp
from plenum.test.msgs import randomMsg, TestMsg
from plenum.test.test_node import TestNodeSet, checkNodesConnected, \
ensureElectionsDone, prepareNodeSet
logger = getlogger()
@pytest.mark.skipif('sys.platform == "win32"', reason='SOV-457')
def testTestNodeDelay(tdir_for_func, tconf_for_func):
nodeNames = {"testA", "testB"}
with TestNodeSet(tconf_for_func, names=nodeNames, tmpdir=tdir_for_func) as nodes:
nodeA = nodes.getNode("testA")
nodeB = nodes.getNode("testB")
with Looper(nodes) as looper:
looper.run(checkNodesConnected(nodes))
# send one message, without delay
looper.run(sendMessageAndCheckDelivery(nodes, nodeA, nodeB))
# set delay, then send another message
# and find that it doesn't arrive
delay = 5 * waits.expectedNodeToNodeMessageDeliveryTime()
nodeB.nodeIbStasher.delay(
delayerMsgTuple(delay, TestMsg, nodeA.name)
)
with pytest.raises(AssertionError):
looper.run(sendMessageAndCheckDelivery(nodes, nodeA, nodeB))
# but then find that it arrives after the delay
# duration has passed
timeout = waits.expectedNodeToNodeMessageDeliveryTime() + delay
looper.run(sendMessageAndCheckDelivery(nodes, nodeA, nodeB,
customTimeout=timeout))
# reset the delay, and find another message comes quickly
nodeB.nodeIbStasher.reset_delays_and_process_delayeds()
looper.run(sendMessageAndCheckDelivery(nodes, nodeA, nodeB))
@pytest.mark.skip('Nodes use round robin primary selection')
def testSelfNominationDelay(tdir_for_func):
nodeNames = ["testA", "testB", "testC", "testD"]
with TestNodeSet(names=nodeNames, tmpdir=tdir_for_func) as nodeSet:
with Looper(nodeSet) as looper:
prepareNodeSet(looper, nodeSet)
delay = 30
# Add node A
nodeA = addNodeBack(nodeSet, looper, nodeNames[0])
nodeA.delaySelfNomination(delay)
nodesBCD = []
for name in nodeNames[1:]:
# nodesBCD.append(nodeSet.addNode(name, i+1, AutoMode.never))
nodesBCD.append(addNodeBack(nodeSet, looper, name))
# Ensuring that NodeA is started before any other node to demonstrate
# that it is delaying self nomination
timeout = waits.expectedNodeStartUpTimeout()
looper.run(
eventually(lambda: assertExp(nodeA.isReady()), retryWait=1,
timeout=timeout))
ensureElectionsDone(looper=looper,
nodes=nodeSet,
retryWait=1)
# node A should not have any primary replica
timeout = waits.expectedNodeStartUpTimeout()
looper.run(
eventually(lambda: assertExp(not nodeA.hasPrimary),
retryWait=1,
timeout=timeout))
# Make sure that after at the most 30 seconds, nodeA's
# `startElection` is called
looper.run(eventually(lambda: assertExp(
len(nodeA.spylog.getAll(
Node.decidePrimaries.__name__)) > 0),
retryWait=1, timeout=delay))
|
py | b40b8d6dc3487d3b7bf8f235b95e800e79f8d778 | # -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2012-2018 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
from openquake.hazardlib.gsim.bradley_2013 import Bradley2013, Bradley2013Volc
from openquake.hazardlib.tests.gsim.utils import BaseGSIMTestCase
class Bradley2013TestCase(BaseGSIMTestCase):
GSIM_CLASS = Bradley2013
# Tests developed using MATLAB code from Brendon Bradley
# available at https://dl.dropboxusercontent.com/u/35408783/webpage/ ....
# Software/GroundMotions/Bradley2010Gmpe.zip and described at
# https://sites.google.com/site/brendonabradley/software/ ....
# canterbury-earthquakes-data
# Downloaded 26 March 2014.
def test_mean_strike_slip(self):
self.check('BRADLEY13/Bradley2013_MEAN_SS.csv',
max_discrep_percentage=0.1)
def test_mean_reverse(self):
self.check('BRADLEY13/Bradley2013_MEAN_RV.csv',
max_discrep_percentage=0.1)
def test_mean_normal(self):
self.check('BRADLEY13/Bradley2013_MEAN_NM.csv',
max_discrep_percentage=0.1)
def test_inter_event_stddev_strike_slip(self):
self.check('BRADLEY13/Bradley2013_INTER_EVENT_STDDEV_SS.csv',
max_discrep_percentage=0.1)
def test_inter_event_stddev_reverse(self):
self.check('BRADLEY13/Bradley2013_INTER_EVENT_STDDEV_RV.csv',
max_discrep_percentage=0.1)
def test_inter_event_stddev_normal(self):
self.check('BRADLEY13/Bradley2013_INTER_EVENT_STDDEV_NM.csv',
max_discrep_percentage=0.1)
def test_intra_event_stddev_strike_slip(self):
self.check('BRADLEY13/Bradley2013_INTRA_EVENT_STDDEV_SS.csv',
max_discrep_percentage=0.1)
def test_intra_event_stddev_reverse(self):
self.check('BRADLEY13/Bradley2013_INTRA_EVENT_STDDEV_RV.csv',
max_discrep_percentage=0.1)
def test_intra_event_stddev_normal(self):
self.check('BRADLEY13/Bradley2013_INTRA_EVENT_STDDEV_NM.csv',
max_discrep_percentage=0.1)
def test_total_stddev_strike_slip(self):
self.check('BRADLEY13/Bradley2013_TOTAL_STDDEV_SS.csv',
max_discrep_percentage=0.1)
def test_total_stddev_reverse(self):
self.check('BRADLEY13/Bradley2013_TOTAL_STDDEV_RV.csv',
max_discrep_percentage=0.1)
def test_total_stddev_normal(self):
self.check('BRADLEY13/Bradley2013_TOTAL_STDDEV_NM.csv',
max_discrep_percentage=0.1)
class Bradley2013VolcTestCase(BaseGSIMTestCase):
GSIM_CLASS = Bradley2013Volc
# Tests developed using MATLAB code from Brendon Bradley
# available at https://dl.dropboxusercontent.com/u/35408783/webpage/ ....
# Software/GroundMotions/Bradley2010Gmpe.zip and described at
# https://sites.google.com/site/brendonabradley/software/ ....
# canterbury-earthquakes-data
# Downloaded 26 March 2014.
def test_mean(self):
self.check('BRADLEY13/Bradley2013Volc_MEAN.csv',
max_discrep_percentage=0.1)
def test_total_stddev(self):
self.check('BRADLEY13/Bradley2013Volc_TOTAL_STDDEV.csv',
max_discrep_percentage=0.1)
def test_intra_event_stddev(self):
self.check('BRADLEY13/Bradley2013Volc_INTRA_EVENT_STDDEV.csv',
max_discrep_percentage=0.1)
def test_inter_event_stddev(self):
self.check('BRADLEY13/Bradley2013Volc_INTER_EVENT_STDDEV.csv',
max_discrep_percentage=0.1)
|
py | b40b8db3323bacb48e6d79023fc7456f4ac5ecbf | # Copyright (C) 2008 Tristan Seligmann <[email protected]>
# Copyright (C) 2009 Canonical Ltd
# Copyright (C) 2009 Duncan McGreggor <[email protected]>
# Copyright (C) 2012 New Dream Network (DreamHost)
# Licenced under the txaws licence available at /LICENSE in the txaws source.
"""
Client wrapper for Amazon's Simple Storage Service.
API stability: unstable.
Various API-incompatible changes are planned in order to expose missing
functionality in this wrapper.
"""
import datetime
import hashlib
import mimetypes
import warnings
from dateutil.parser import parse as parseTime
from hashlib import sha256
from incremental import Version
from io import BytesIO
from operator import itemgetter
from twisted.internet import task
from twisted.python.deprecate import deprecatedModuleAttribute
from twisted.web.client import FileBodyProducer
from twisted.web.http import datetimeToString
from twisted.web.http_headers import Headers
from urllib.parse import urlencode, unquote
from txaws import _auth_v4
from txaws.client.base import (
_URLContext, BaseClient, BaseQuery, error_wrapper,
RequestDetails, query,
)
from txaws.s3.acls import AccessControlPolicy
from txaws.s3.exception import S3Error
from txaws.s3.model import (
Bucket, BucketItem, BucketListing, ItemOwner, LifecycleConfiguration,
LifecycleConfigurationRule, NotificationConfiguration, RequestPayment,
VersioningConfiguration, WebsiteConfiguration, MultipartInitiationResponse,
MultipartCompletionResponse)
from txaws.s3.tweaks import to_str, to_bytes
from txaws.service import AWSServiceEndpoint, REGION_US_EAST_1, S3_ENDPOINT
from txaws.util import XML
# aliases
_t = to_str
_b = to_bytes
def _to_dict(headers):
return {k: vs[0] for (k, vs) in headers.getAllRawHeaders()}
def s3_error_wrapper(error):
error_wrapper(error, S3Error)
class S3Client(BaseClient):
"""A client for S3."""
def __init__(self, creds=None, endpoint=None, query_factory=None,
receiver_factory=None, agent=None, utcnow=None,
cooperator=None):
if query_factory is None:
query_factory = query
self.agent = agent
self.utcnow = utcnow
if cooperator is None:
cooperator = task
self._cooperator = cooperator
super(S3Client, self).__init__(creds, endpoint, query_factory,
receiver_factory=receiver_factory)
def _submit(self, query):
d = query.submit(self.agent, self.receiver_factory, self.utcnow)
d.addErrback(s3_error_wrapper)
return d
def _query_factory(self, details, **kw):
return self.query_factory(credentials=self.creds, details=details, **kw)
def _details(self, **kw):
body = kw.pop("body", None)
body_producer = kw.pop("body_producer", None)
amz_headers = kw.pop("amz_headers", {})
# It makes no sense to specify both. That makes it ambiguous
# what data should make up the request body.
if body is not None and body_producer is not None:
raise ValueError("data and body_producer are mutually exclusive")
# If the body was specified as a string, we can compute a hash
# of it and sign the hash along with the rest. That protects
# against replay attacks with different content.
#
# If the body was specified as a producer, we can't really do
# this. :( The producer may generate large amounts of data
# which we can't hold in memory and it may not be replayable.
# AWS requires the signature in the header so there's no way
# to both hash/sign and avoid buffering everything in memory.
#
# The saving grace is that we'll only issue requests over TLS
# after verifying the AWS certificate and requests with a date
# (included in the signature) more than 15 minutes in the past
# are rejected. :/
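# Illustrative sketch (editor's addition, not part of the original code):
# the two mutually exclusive ways a caller can hand request data to
# _details(); `ctx` and `open_file` are assumed names for the example.
#
#     details = self._details(method=b"PUT", url_context=ctx, body=b"payload")
#     details = self._details(method=b"PUT", url_context=ctx,
#                             body_producer=FileBodyProducer(open_file))
#
# Only the first form folds a payload hash into the request signature;
# the second leaves content_sha256 as None, i.e. the payload stays unsigned.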
if body is not None:
content_sha256 = _t(sha256(body).hexdigest(), encoding="ascii")
body_producer = FileBodyProducer(BytesIO(body), cooperator=self._cooperator)
elif body_producer is None:
# Just as important is to include the empty content hash
# for all no-body requests.
content_sha256 = _t(sha256(b"").hexdigest(), encoding="ascii")
else:
# Tell AWS we're not trying to sign the payload.
content_sha256 = None
return RequestDetails(
region=_b(REGION_US_EAST_1),
service=b"s3",
body_producer=body_producer,
amz_headers=amz_headers,
content_sha256=content_sha256,
**kw
)
def _url_context(self, *a, **kw):
return s3_url_context(self.endpoint, *a, **kw)
def _headers(self, content_type):
if content_type is None:
return Headers()
return Headers({"content-type": [content_type]})
def list_buckets(self):
"""
List all buckets.
Returns a list of all the buckets owned by the authenticated sender of
the request.
"""
details = self._details(
method=b"GET",
url_context=self._url_context(),
)
query = self._query_factory(details)
d = self._submit(query)
d.addCallback(self._parse_list_buckets)
return d
def _parse_list_buckets(self, response):
"""
Parse XML bucket list response.
"""
status, xml_bytes = response
root = XML(xml_bytes)
buckets = []
for bucket_data in root.find("Buckets"):
name = bucket_data.findtext("Name")
date_text = bucket_data.findtext("CreationDate")
date_time = parseTime(date_text)
bucket = Bucket(name, date_time)
buckets.append(bucket)
return buckets
def create_bucket(self, bucket):
"""
Create a new bucket.
"""
details = self._details(
method=b"PUT",
url_context=self._url_context(bucket=bucket),
)
query = self._query_factory(details)
return self._submit(query)
def delete_bucket(self, bucket):
"""
Delete a bucket.
The bucket must be empty before it can be deleted.
"""
details = self._details(
method=b"DELETE",
url_context=self._url_context(bucket=bucket),
)
query = self._query_factory(details)
return self._submit(query)
def get_bucket(self, bucket, marker=None, max_keys=None, prefix=None):
"""
Get a list of all the objects in a bucket.
@param bucket: The name of the bucket from which to retrieve objects.
@type bucket: L{str}
@param marker: If given, indicate a position in the overall
results where the results of this call should begin. The
first result is the first object that sorts greater than
this marker.
@type marker: L{bytes} or L{NoneType}
@param max_keys: If given, the maximum number of objects to
return.
@type max_keys: L{int} or L{NoneType}
@param prefix: If given, indicate that only objects with keys
beginning with this value should be returned.
@type prefix: L{bytes} or L{NoneType}
@return: A L{Deferred} that fires with a L{BucketListing}
describing the result.
@see: U{http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html}
"""
args = []
if marker is not None:
args.append(("marker", marker))
if max_keys is not None:
args.append(("max-keys", "%d" % (max_keys,)))
if prefix is not None:
args.append(("prefix", prefix))
if args:
object_name = "?" + urlencode(args)
else:
object_name = None
details = self._details(
method=b"GET",
url_context=self._url_context(bucket=bucket, object_name=object_name),
)
d = self._submit(self._query_factory(details))
d.addCallback(self._parse_get_bucket)
return d
def _parse_get_bucket(self, response):
status, xml_bytes = response
root = XML(xml_bytes)
name = root.findtext("Name")
prefix = root.findtext("Prefix")
marker = root.findtext("Marker")
max_keys = root.findtext("MaxKeys")
is_truncated = root.findtext("IsTruncated")
contents = []
for content_data in root.findall("Contents"):
key = content_data.findtext("Key")
date_text = content_data.findtext("LastModified")
modification_date = parseTime(date_text)
etag = content_data.findtext("ETag")
size = content_data.findtext("Size")
storage_class = content_data.findtext("StorageClass")
owner_id = content_data.findtext("Owner/ID")
owner_display_name = content_data.findtext("Owner/DisplayName")
owner = ItemOwner(owner_id, owner_display_name)
content_item = BucketItem(key, modification_date, etag, size,
storage_class, owner)
contents.append(content_item)
common_prefixes = []
for prefix_data in root.findall("CommonPrefixes"):
common_prefixes.append(prefix_data.text)
return BucketListing(name, prefix, marker, max_keys, is_truncated,
contents, common_prefixes)
def get_bucket_location(self, bucket):
"""
Get the location (region) of a bucket.
@param bucket: The name of the bucket.
@return: A C{Deferred} that will fire with the bucket's region.
"""
details = self._details(
method=b"GET",
url_context=self._url_context(bucket=bucket, object_name="?location"),
)
d = self._submit(self._query_factory(details))
d.addCallback(self._parse_bucket_location)
return d
def _parse_bucket_location(self, response):
"""Parse a C{LocationConstraint} XML document."""
status, xml_bytes = response
root = XML(xml_bytes)
return root.text or ""
def get_bucket_lifecycle(self, bucket):
"""
Get the lifecycle configuration of a bucket.
@param bucket: The name of the bucket.
@return: A C{Deferred} that will fire with the bucket's lifecycle
configuration.
"""
details = self._details(
method=b"GET",
url_context=self._url_context(bucket=bucket, object_name="?lifecycle"),
)
d = self._submit(self._query_factory(details))
d.addCallback(self._parse_lifecycle_config)
return d
def _parse_lifecycle_config(self, response):
"""Parse a C{LifecycleConfiguration} XML document."""
status, xml_bytes = response
root = XML(xml_bytes)
rules = []
for content_data in root.findall("Rule"):
id = content_data.findtext("ID")
prefix = content_data.findtext("Prefix")
status = content_data.findtext("Status")
expiration = int(content_data.findtext("Expiration/Days"))
rules.append(
LifecycleConfigurationRule(id, prefix, status, expiration))
return LifecycleConfiguration(rules)
def get_bucket_website_config(self, bucket):
"""
Get the website configuration of a bucket.
@param bucket: The name of the bucket.
@return: A C{Deferred} that will fire with the bucket's website
configuration.
"""
details = self._details(
method=b"GET",
url_context=self._url_context(bucket=bucket, object_name='?website'),
)
d = self._submit(self._query_factory(details))
d.addCallback(self._parse_website_config)
return d
def _parse_website_config(self, response):
"""Parse a C{WebsiteConfiguration} XML document."""
status, xml_bytes = response
root = XML(xml_bytes)
index_suffix = root.findtext("IndexDocument/Suffix")
error_key = root.findtext("ErrorDocument/Key")
return WebsiteConfiguration(index_suffix, error_key)
def get_bucket_notification_config(self, bucket):
"""
Get the notification configuration of a bucket.
@param bucket: The name of the bucket.
@return: A C{Deferred} that will fire with the bucket's notification
configuration.
"""
details = self._details(
method=b"GET",
url_context=self._url_context(bucket=bucket, object_name="?notification"),
)
d = self._submit(self._query_factory(details))
d.addCallback(self._parse_notification_config)
return d
def _parse_notification_config(self, response):
"""Parse a C{NotificationConfiguration} XML document."""
status, xml_bytes = response
root = XML(xml_bytes)
topic = root.findtext("TopicConfiguration/Topic")
event = root.findtext("TopicConfiguration/Event")
return NotificationConfiguration(topic, event)
def get_bucket_versioning_config(self, bucket):
"""
Get the versioning configuration of a bucket.
@param bucket: The name of the bucket.
@return: A C{Deferred} that will fire with the bucket's versioning
configuration.
"""
details = self._details(
method=b"GET",
url_context=self._url_context(bucket=bucket, object_name="?versioning"),
)
d = self._submit(self._query_factory(details))
d.addCallback(self._parse_versioning_config)
return d
def _parse_versioning_config(self, response):
"""Parse a C{VersioningConfiguration} XML document."""
status, xml_bytes = response
root = XML(xml_bytes)
mfa_delete = root.findtext("MfaDelete")
status = root.findtext("Status")
return VersioningConfiguration(mfa_delete=mfa_delete, status=status)
def get_bucket_acl(self, bucket):
"""
Get the access control policy for a bucket.
"""
details = self._details(
method=b"GET",
url_context=self._url_context(bucket=bucket, object_name="?acl"),
)
d = self._submit(self._query_factory(details))
d.addCallback(self._parse_acl)
return d
def put_bucket_acl(self, bucket, access_control_policy):
"""
Set access control policy on a bucket.
"""
data = access_control_policy.to_xml()
details = self._details(
method=b"PUT",
url_context=self._url_context(bucket=bucket, object_name=b"?acl"),
body=data,
)
d = self._submit(self._query_factory(details))
d.addCallback(self._parse_acl)
return d
def _parse_acl(self, response):
"""
Parse an C{AccessControlPolicy} XML document and convert it into an
L{AccessControlPolicy} instance.
"""
status, xml_bytes = response
return AccessControlPolicy.from_xml(xml_bytes)
def put_object(self, bucket, object_name, data=None, content_type=None,
metadata={}, amz_headers={}, body_producer=None):
"""
Put an object in a bucket.
An existing object with the same name will be replaced.
@param bucket: The name of the bucket.
@param object_name: The name of the object.
@type object_name: L{str}
@param data: The data to write.
@param content_type: The type of data being written.
@param metadata: A C{dict} used to build C{x-amz-meta-*} headers.
@param amz_headers: A C{dict} used to build C{x-amz-*} headers.
@return: A C{Deferred} that will fire with the result of request.
"""
details = self._details(
method=b"PUT",
url_context=self._url_context(bucket=_b(bucket), object_name=_b(object_name)),
headers=self._headers(content_type),
metadata=metadata,
amz_headers=amz_headers,
body=data,
body_producer=body_producer,
)
d = self._submit(self._query_factory(details))
d.addCallback(itemgetter(1))
return d
def copy_object(self, source_bucket, source_object_name, dest_bucket=None,
dest_object_name=None, metadata={}, amz_headers={}):
"""
Copy an object stored in S3 from a source bucket to a destination
bucket.
@param source_bucket: The S3 bucket to copy the object from.
@param source_object_name: The name of the object to copy.
@param dest_bucket: Optionally, the S3 bucket to copy the object to.
Defaults to C{source_bucket}.
@param dest_object_name: Optionally, the name of the new object.
Defaults to C{source_object_name}.
@param metadata: A C{dict} used to build C{x-amz-meta-*} headers.
@param amz_headers: A C{dict} used to build C{x-amz-*} headers.
@return: A C{Deferred} that will fire with the result of request.
"""
dest_bucket = dest_bucket or source_bucket
dest_object_name = dest_object_name or source_object_name
amz_headers["copy-source"] = "/%s/%s" % (source_bucket,
source_object_name)
details = self._details(
method=b"PUT",
url_context=self._url_context(
bucket=dest_bucket, object_name=dest_object_name,
),
metadata=metadata,
amz_headers=amz_headers,
)
d = self._submit(self._query_factory(details))
return d
def get_object(self, bucket, object_name):
"""
Get an object from a bucket.
"""
details = self._details(
method=b"GET",
url_context=self._url_context(bucket=_b(bucket), object_name=_b(object_name)),
)
d = self._submit(self._query_factory(details))
d.addCallback(itemgetter(1))
return d
def head_object(self, bucket, object_name):
"""
Retrieve object metadata only.
"""
details = self._details(
method=b"HEAD",
url_context=self._url_context(bucket=bucket, object_name=object_name),
)
d = self._submit(self._query_factory(details))
d.addCallback(lambda response: _to_dict(response[0].responseHeaders))
return d
def delete_object(self, bucket, object_name):
"""
Delete an object from a bucket.
Once deleted, there is no method to restore or undelete an object.
"""
details = self._details(
method=b"DELETE",
url_context=self._url_context(bucket=bucket, object_name=object_name),
)
d = self._submit(self._query_factory(details))
return d
def put_object_acl(self, bucket, object_name, access_control_policy):
"""
Set access control policy on an object.
"""
data = access_control_policy.to_xml()
details = self._details(
method=b"PUT",
url_context=self._url_context(
bucket=bucket, object_name='%s?acl' % (object_name,),
),
body=data,
)
query = self._query_factory(details)
d = self._submit(query)
d.addCallback(self._parse_acl)
return d
def get_object_acl(self, bucket, object_name):
"""
Get the access control policy for an object.
"""
details = self._details(
method=b"GET",
url_context=self._url_context(bucket=bucket, object_name='%s?acl' % (object_name,)),
)
d = self._submit(self._query_factory(details))
d.addCallback(self._parse_acl)
return d
def put_request_payment(self, bucket, payer):
"""
Set request payment configuration on bucket to payer.
@param bucket: The name of the bucket.
@param payer: The name of the payer.
@return: A C{Deferred} that will fire with the result of the request.
"""
data = RequestPayment(payer).to_xml()
details = self._details(
method=b"PUT",
url_context=self._url_context(bucket=bucket, object_name="?requestPayment"),
body=data,
)
d = self._submit(self._query_factory(details))
return d
def get_request_payment(self, bucket):
"""
Get the request payment configuration on a bucket.
@param bucket: The name of the bucket.
@return: A C{Deferred} that will fire with the name of the payer.
"""
details = self._details(
method=b"GET",
url_context=self._url_context(bucket=bucket, object_name="?requestPayment"),
)
d = self._submit(self._query_factory(details))
d.addCallback(self._parse_get_request_payment)
return d
def _parse_get_request_payment(self, response):
"""
Parse a C{RequestPaymentConfiguration} XML document and extract the
payer.
"""
status, xml_bytes = response
return RequestPayment.from_xml(xml_bytes).payer
def init_multipart_upload(self, bucket, object_name, content_type=None,
amz_headers={}, metadata={}):
"""
Initiate a multipart upload to a bucket.
@param bucket: The name of the bucket
@param object_name: The object name
@param content_type: The Content-Type for the object
@param metadata: C{dict} containing additional metadata
@param amz_headers: A C{dict} used to build C{x-amz-*} headers.
@return: C{str} upload_id
"""
objectname_plus = '%s?uploads' % object_name
details = self._details(
method=b"POST",
url_context=self._url_context(bucket=bucket, object_name=objectname_plus),
headers=self._headers(content_type),
metadata=metadata,
amz_headers=amz_headers,
)
d = self._submit(self._query_factory(details))
d.addCallback(
lambda response: MultipartInitiationResponse.from_xml(response[1])
)
return d
def upload_part(self, bucket, object_name, upload_id, part_number,
data=None, content_type=None, metadata={},
body_producer=None):
"""
Upload a part of data corresponding to a multipart upload.
@param bucket: The bucket name
@param object_name: The object name
@param upload_id: The multipart upload id
@param part_number: The part number
@param data: Data (optional, requires body_producer if not specified)
@param content_type: The Content-Type
@param metadata: Additional metadata
@param body_producer: an C{IBodyProducer} (optional, requires data if
not specified)
@return: the C{Deferred} from underlying query.submit() call
"""
parms = 'partNumber=%s&uploadId=%s' % (str(part_number), upload_id)
objectname_plus = '%s?%s' % (object_name, parms)
details = self._details(
method=b"PUT",
url_context=self._url_context(bucket=bucket, object_name=objectname_plus),
headers=self._headers(content_type),
metadata=metadata,
body=data,
)
d = self._submit(self._query_factory(details))
d.addCallback(lambda response: _to_dict(response[0].responseHeaders))
return d
def complete_multipart_upload(self, bucket, object_name, upload_id,
parts_list, content_type=None, metadata={}):
"""
Complete a multipart upload.
N.B. This can possibly be a slow operation.
@param bucket: The bucket name
@param object_name: The object name
@param upload_id: The multipart upload id
@param parts_list: A List of all the parts
(2-tuples of part sequence number and etag)
@param content_type: The Content-Type of the object
@param metadata: C{dict} containing additional metadata
@return: a C{Deferred} that fires after request is complete
"""
data = self._build_complete_multipart_upload_xml(parts_list)
objectname_plus = '%s?uploadId=%s' % (object_name, upload_id)
details = self._details(
method=b"POST",
url_context=self._url_context(bucket=bucket, object_name=objectname_plus),
headers=self._headers(content_type),
metadata=metadata,
body=data,
)
d = self._submit(self._query_factory(details))
# TODO - handle error responses
d.addCallback(
lambda response: MultipartCompletionResponse.from_xml(response[1])
)
return d
def _build_complete_multipart_upload_xml(self, parts_list):
xml = []
parts_list.sort(key=lambda p: int(p[0]))
xml.append('<CompleteMultipartUpload>')
for pt in parts_list:
xml.append('<Part>')
xml.append('<PartNumber>%s</PartNumber>' % pt[0])
xml.append('<ETag>%s</ETag>' % pt[1])
xml.append('</Part>')
xml.append('</CompleteMultipartUpload>')
return '\n'.join(xml)
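# Illustrative multipart-upload flow using the methods above (the `client`
# instance, bucket and object names are assumed, not from the original
# source):
#
# d = client.init_multipart_upload("my-bucket", "big-object")
# # the Deferred fires with a MultipartInitiationResponse carrying the
# # upload id; each part is then sent with upload_part(), its ETag response
# # header collected as a (part_number, etag) pair, and the pairs finally
# # passed to complete_multipart_upload() to assemble the object.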
class Query(BaseQuery):
"""A query for submission to the S3 service."""
def __init__(self, bucket=None, object_name=None, data="",
content_type=None, metadata={}, amz_headers={},
body_producer=None, *args, **kwargs):
super(Query, self).__init__(*args, **kwargs)
# data might be None or "", alas.
if data and body_producer is not None:
raise ValueError("data and body_producer are mutually exclusive.")
self.bucket = bucket
self.object_name = object_name
self.data = data
self.body_producer = body_producer
self.content_type = content_type
self.metadata = metadata
self.amz_headers = amz_headers
self._date = datetimeToString()
if not self.endpoint or not self.endpoint.host:
self.endpoint = AWSServiceEndpoint(S3_ENDPOINT)
self.endpoint.set_method(self.action)
@property
def date(self):
"""
Return the date and emit a deprecation warning.
"""
warnings.warn("txaws.s3.client.Query.date is a deprecated attribute",
DeprecationWarning,
stacklevel=2)
return self._date
@date.setter
def date(self, value):
"""
Set the date.
@param value: The new date for this L{Query}.
@type value: L{str}
"""
self._date = value
def set_content_type(self):
"""
Set the content type based on the file extension used in the object
name.
"""
if self.object_name and not self.content_type:
# XXX nothing is currently done with the encoding... we may
# need to in the future
self.content_type, encoding = mimetypes.guess_type(
self.object_name, strict=False)
def get_headers(self, instant):
"""
Build the list of headers needed in order to perform S3 operations.
"""
headers = {'x-amz-date': _auth_v4.makeAMZDate(instant)}
if self.body_producer is None:
data = self.data
if data is None:
data = b""
headers["x-amz-content-sha256"] = hashlib.sha256(data).hexdigest()
else:
data = None
headers["x-amz-content-sha256"] = b"UNSIGNED-PAYLOAD"
for key, value in self.metadata.items():
headers["x-amz-meta-" + key] = value
for key, value in self.amz_headers.items():
headers["x-amz-" + key] = value
# Before we check if the content type is set, let's see if we can set
# it by guessing the mimetype.
self.set_content_type()
if self.content_type is not None:
headers["Content-Type"] = self.content_type
if self.creds is not None:
headers["Authorization"] = self.sign(
headers,
data,
s3_url_context(self.endpoint, self.bucket, self.object_name),
instant,
method=self.action)
return headers
def sign(self, headers, data, url_context, instant, method,
region=REGION_US_EAST_1):
"""Sign this query using its built in credentials."""
headers["host"] = url_context.get_encoded_host()
if data is None:
request = _auth_v4._CanonicalRequest.from_request_components(
method=method,
url=url_context.get_encoded_path(),
headers=headers,
headers_to_sign=('host', 'x-amz-date'),
payload_hash=None,
)
else:
request = _auth_v4._CanonicalRequest.from_request_components_and_payload(
method=method,
url=url_context.get_encoded_path(),
headers=headers,
headers_to_sign=('host', 'x-amz-date'),
payload=data,
)
return _auth_v4._make_authorization_header(
region=region,
service="s3",
canonical_request=request,
credentials=self.creds,
instant=instant)
def submit(self, url_context=None, utcnow=datetime.datetime.utcnow):
"""Submit this query.
@return: A deferred from get_page
"""
if not url_context:
url_context = s3_url_context(
self.endpoint, self.bucket, self.object_name)
d = self.get_page(
url_context.get_encoded_url(),
method=self.action,
postdata=self.data or b"",
headers=self.get_headers(utcnow()),
)
return d.addErrback(s3_error_wrapper)
def s3_url_context(service_endpoint, bucket=None, object_name=None):
"""
Create a URL based on the given service endpoint and suitable for
the given bucket or object.
@param service_endpoint: The service endpoint on which to base the
resulting URL.
@type service_endpoint: L{AWSServiceEndpoint}
@param bucket: If given, the name of a bucket to reference.
@type bucket: L{str}
@param object_name: If given, the name of an object or object
subresource to reference.
@type object_name: L{str}
"""
# Define our own query parser which can handle the consequences of
# `?acl` and such (subresources). At its best, parse_qsl doesn't
# let us differentiate between these and empty values (such as
# `?acl=`).
def p(s):
results = []
args = s.split("&")
for a in args:
pieces = a.split("=")
if len(pieces) == 1:
results.append((unquote(pieces[0]),))
elif len(pieces) == 2:
results.append(tuple(map(unquote, pieces)))
else:
raise Exception("oh no")
return results
query = []
path = []
if bucket is None:
path.append("")
else:
if isinstance(bucket, bytes):
bucket = bucket.decode("utf-8")
path.append(bucket)
if object_name is None:
path.append("")
else:
if isinstance(object_name, bytes):
object_name = object_name.decode("utf-8")
if "?" in object_name:
object_name, query = object_name.split("?", 1)
query = p(query)
object_name_components = object_name.split("/")
if object_name_components[0] == "":
object_name_components.pop(0)
if object_name_components:
path.extend(object_name_components)
else:
path.append("")
return _S3URLContext(
scheme=_t(service_endpoint.scheme),
host=_t(service_endpoint.get_host()),
port=service_endpoint.port,
path=path,
query=query,
)
class _S3URLContext(_URLContext):
# Backwards compatibility layer. For deprecation. s3_url_context
# should just return an _URLContext and application code should
# interact with that interface.
def get_host(self):
return self.get_encoded_host()
def get_path(self):
return self.get_encoded_path()
def get_url(self):
return self.get_encoded_url()
# Backwards compatibility layer. For deprecation.
def URLContext(service_endpoint, bucket=None, object_name=None):
args = (service_endpoint,)
for s in (bucket, object_name):
if s is not None:
args += (s.decode("utf-8"),)
return s3_url_context(*args)
deprecatedModuleAttribute(
Version("txAWS", 0, 3, 0),
"See txaws.s3.client.query",
__name__,
"Query",
)
deprecatedModuleAttribute(
Version("txAWS", 0, 3, 0),
"See txaws.s3.client.s3_url_context",
__name__,
"URLContext",
)
|
py | b40b8ddc9a06752d98caca627d8122b5f5a4ca15 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
__all__ = ['Project']
class Project(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
gitrepo: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
project_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
An object that represents a machine learning project.
API Version: 2017-05-01-preview.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account_name: The name of the machine learning team account.
:param pulumi.Input[str] description: The description of this project.
:param pulumi.Input[str] friendly_name: The friendly name for this project.
:param pulumi.Input[str] gitrepo: The reference to git repo for this project.
:param pulumi.Input[str] location: The location of the resource. This cannot be changed after the resource is created.
:param pulumi.Input[str] project_name: The name of the machine learning project under a team account workspace.
:param pulumi.Input[str] resource_group_name: The name of the resource group to which the machine learning team account belongs.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: The tags of the resource.
:param pulumi.Input[str] workspace_name: The name of the machine learning team account workspace.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if account_name is None and not opts.urn:
raise TypeError("Missing required property 'account_name'")
__props__['account_name'] = account_name
__props__['description'] = description
if friendly_name is None and not opts.urn:
raise TypeError("Missing required property 'friendly_name'")
__props__['friendly_name'] = friendly_name
__props__['gitrepo'] = gitrepo
__props__['location'] = location
__props__['project_name'] = project_name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['tags'] = tags
if workspace_name is None and not opts.urn:
raise TypeError("Missing required property 'workspace_name'")
__props__['workspace_name'] = workspace_name
__props__['account_id'] = None
__props__['creation_date'] = None
__props__['name'] = None
__props__['project_id'] = None
__props__['provisioning_state'] = None
__props__['type'] = None
__props__['workspace_id'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:machinelearningexperimentation/v20170501preview:Project")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Project, __self__).__init__(
'azure-nextgen:machinelearningexperimentation:Project',
resource_name,
__props__,
opts)
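# Illustrative usage sketch (resource and argument values are assumed, not
# taken from the original source):
#
# project = Project("example-project",
# account_name="my-team-account",
# friendly_name="My Project",
# resource_group_name="my-resource-group",
# workspace_name="my-workspace")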
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Project':
"""
Get an existing Project resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return Project(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="accountId")
def account_id(self) -> pulumi.Output[str]:
"""
The immutable id of the team account which contains this project.
"""
return pulumi.get(self, "account_id")
@property
@pulumi.getter(name="creationDate")
def creation_date(self) -> pulumi.Output[str]:
"""
The creation date of the project in ISO8601 format.
"""
return pulumi.get(self, "creation_date")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
The description of this project.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="friendlyName")
def friendly_name(self) -> pulumi.Output[str]:
"""
The friendly name for this project.
"""
return pulumi.get(self, "friendly_name")
@property
@pulumi.getter
def gitrepo(self) -> pulumi.Output[Optional[str]]:
"""
The reference to git repo for this project.
"""
return pulumi.get(self, "gitrepo")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
The location of the resource. This cannot be changed after the resource is created.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="projectId")
def project_id(self) -> pulumi.Output[str]:
"""
The immutable id of this project.
"""
return pulumi.get(self, "project_id")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The current deployment state of project resource. The provisioningState is to indicate states for resource provisioning.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
The tags of the resource.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="workspaceId")
def workspace_id(self) -> pulumi.Output[str]:
"""
The immutable id of the workspace which contains this project.
"""
return pulumi.get(self, "workspace_id")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
py | b40b8e1063f78c6ce2f273230300c21c0f360e7b | from shortcodes import parser
from django import template
register = template.Library()
def shortcodes_replace(value, request=None):
"""
A filter for parsing a string in the format ``[shortcode keyword=value]``
using the shortcodes parser method.
"""
return parser.parse(value, request)
register.filter('shortcodes', shortcodes_replace)
def shortcodes_remove(value, request=None):
"""
A filter for removing shortcodes and the content inside them.
"""
return parser.remove(value, request)
register.filter('removeshortcodes', shortcodes_remove)
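# Hypothetical template usage (the template variable and shortcode below
# are assumed for illustration, not from the original source): after
# loading this template library, a body containing "[video id=123]" can be
# expanded or stripped with:
#
# {{ page.body|shortcodes:request }}
# {{ page.body|removeshortcodes }}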
|
py | b40b90882b754b3675ab7f48a986a4fa55424899 | # -*- coding: utf-8 -*-
#
# This file is part of REANA.
# Copyright (C) 2018, 2019, 2020, 2021, 2022 CERN.
#
# REANA is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Version information for pytest-REANA.
This file is imported by ``pytest_reana.__init__``
and parsed by ``setup.py``.
"""
from __future__ import absolute_import, print_function
__version__ = "0.9.0a5"
|
py | b40b90e71cdec69e70d100eff00fc007b0d31c0f | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import pathlib
from typing import Any, Callable, Dict, Optional, Sequence, Tuple, Union
from torch import nn
from flash.core.data.data_source import DefaultDataKeys, DefaultDataSources
from flash.core.data.process import Preprocess
from flash.core.data.transforms import ApplyToKeys
from flash.core.utilities.imports import _TORCHVISION_AVAILABLE
from flash.image.classification import ImageClassificationData
from flash.image.data import ImageNumpyDataSource, ImagePathsDataSource, ImageTensorDataSource
from flash.image.style_transfer.utils import raise_not_supported
if _TORCHVISION_AVAILABLE:
from torchvision import transforms as T
__all__ = ["StyleTransferPreprocess", "StyleTransferData"]
def _apply_to_input(default_transforms_fn, keys: Union[Sequence[DefaultDataKeys],
DefaultDataKeys]) -> Callable[..., Dict[str, ApplyToKeys]]:
@functools.wraps(default_transforms_fn)
def wrapper(*args: Any, **kwargs: Any) -> Optional[Dict[str, ApplyToKeys]]:
default_transforms = default_transforms_fn(*args, **kwargs)
if not default_transforms:
return default_transforms
return {hook: ApplyToKeys(keys, transform) for hook, transform in default_transforms.items()}
return wrapper
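# Illustrative note (example values assumed, not from the original source):
# applying _apply_to_input with keys=DefaultDataKeys.INPUT turns an entry
# such as {"to_tensor_transform": T.ToTensor()} into
# {"to_tensor_transform": ApplyToKeys(DefaultDataKeys.INPUT, T.ToTensor())},
# so each transform is applied only to the sample's input key.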
class StyleTransferPreprocess(Preprocess):
def __init__(
self,
train_transform: Optional[Union[Dict[str, Callable]]] = None,
val_transform: Optional[Union[Dict[str, Callable]]] = None,
test_transform: Optional[Union[Dict[str, Callable]]] = None,
predict_transform: Optional[Union[Dict[str, Callable]]] = None,
image_size: int = 256,
):
if val_transform:
raise_not_supported("validation")
if test_transform:
raise_not_supported("test")
if isinstance(image_size, int):
image_size = (image_size, image_size)
self.image_size = image_size
super().__init__(
train_transform=train_transform,
val_transform=val_transform,
test_transform=test_transform,
predict_transform=predict_transform,
data_sources={
DefaultDataSources.FILES: ImagePathsDataSource(),
DefaultDataSources.FOLDERS: ImagePathsDataSource(),
DefaultDataSources.NUMPY: ImageNumpyDataSource(),
DefaultDataSources.TENSORS: ImageTensorDataSource(),
},
default_data_source=DefaultDataSources.FILES,
)
def get_state_dict(self) -> Dict[str, Any]:
return {**self.transforms, "image_size": self.image_size}
@classmethod
def load_state_dict(cls, state_dict: Dict[str, Any], strict: bool = False):
return cls(**state_dict)
@functools.partial(_apply_to_input, keys=DefaultDataKeys.INPUT)
def default_transforms(self) -> Optional[Dict[str, Callable]]:
if self.training:
return dict(
to_tensor_transform=T.ToTensor(),
per_sample_transform_on_device=nn.Sequential(
T.Resize(self.image_size),
T.CenterCrop(self.image_size),
),
)
elif self.predicting:
return dict(
pre_tensor_transform=T.Resize(self.image_size),
to_tensor_transform=T.ToTensor(),
)
# Style transfer doesn't support a validation or test phase, so we return nothing here
return None
class StyleTransferData(ImageClassificationData):
preprocess_cls = StyleTransferPreprocess
@classmethod
def from_folders(
cls,
train_folder: Optional[Union[str, pathlib.Path]] = None,
predict_folder: Optional[Union[str, pathlib.Path]] = None,
train_transform: Optional[Union[str, Dict]] = None,
predict_transform: Optional[Union[str, Dict]] = None,
preprocess: Optional[Preprocess] = None,
**kwargs: Any,
) -> "StyleTransferData":
if any(param in kwargs for param in ("val_folder", "val_transform")):
raise_not_supported("validation")
if any(param in kwargs for param in ("test_folder", "test_transform")):
raise_not_supported("test")
preprocess = preprocess or cls.preprocess_cls(
train_transform=train_transform,
predict_transform=predict_transform,
)
return cls.from_data_source(
DefaultDataSources.FOLDERS,
train_data=train_folder,
predict_data=predict_folder,
preprocess=preprocess,
**kwargs,
)
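# A minimal usage sketch (folder paths and batch size are assumed for
# illustration, not from the original source):
#
# datamodule = StyleTransferData.from_folders(
# train_folder="data/train",
# predict_folder="data/predict",
# batch_size=4,
# )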
|
py | b40b9101100bf81d86898b391891377b7e271ba9 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2018 all rights reserved
#
"""
Instantiate the postgres component
"""
import pyre.db
class Weather(pyre.db.table, id="weather"):
city = pyre.db.str()
city.doc = "the city name"
state = pyre.db.str(maxlen=2)
state.doc = "the state name"
date = pyre.db.date()
date.doc = "the date of the measurement"
low = pyre.db.decimal(precision=5, scale=2)
low.doc = "the temperature low"
high = pyre.db.decimal(precision=5, scale=2)
high.doc = "the temperature high"
def test():
import journal
# journal.debug("postgres.init").active = True
# journal.debug("postgres.execute").active = True
# journal.debug("postgres.connection").active = True
journal.debug("postgres.transactions").active = True
# this is the SQL statement that looks for a table by a given name
sql = "SELECT tablename FROM pg_tables WHERE tablename='{}'".format(Weather.pyre_name)
# build a database component and connect to the database specified in the local
# configuration file
db = pyre.db.postgres(name="test").attach()
# in a transaction block
with db:
# create the table
db.createTable(Weather)
# verify it is there
assert db.execute(sql) == (('tablename',), ('weather',))
# drop the table
db.dropTable(Weather)
# verify it is not there
assert db.execute(sql) == (('tablename',),)
# and return the connection and the table
return db, Weather
# main
if __name__ == "__main__":
test()
# end of file
|
py | b40b9223c976fc4c18cc4d8758fa1356b8198e05 | """
Unit tests for the frontend code.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import shutil
import unittest
import logging
import ga4gh.server.frontend as frontend
import ga4gh.server.exceptions as exceptions
import ga4gh.server.auth as auth
import ga4gh.schemas.protocol as protocol
class TestAuth0(unittest.TestCase):
"""
Tests the basic routing and HTTP handling for the Flask app.
"""
exampleUrl = 'www.example.com'
@classmethod
def setUpClass(cls):
config = {
"DATA_SOURCE": "simulated://",
"SIMULATED_BACKEND_RANDOM_SEED": 1111,
"SIMULATED_BACKEND_NUM_CALLS": 1,
"SIMULATED_BACKEND_VARIANT_DENSITY": 1.0,
"SIMULATED_BACKEND_NUM_VARIANT_SETS": 1,
"CACHE_DIRECTORY": "/tmp/ga4gh-test"
}
frontend.reset()
frontend.configure(
baseConfig="TestAuth0Config", extraConfig=config)
cls.app = frontend.app
# silence usually unhelpful CORS log
logging.getLogger('ga4gh.frontend.cors').setLevel(logging.CRITICAL)
cls.backend = frontend.app.backend
cls.client = frontend.app.test_client()
@classmethod
def tearDownClass(cls):
shutil.rmtree('/tmp/ga4gh-test', True)
frontend.reset()
def sendPostRequest(self, path, request, extraHeaders=None):
"""
Sends the specified GA request object and returns the response.
"""
headers = {
'Content-type': 'application/json',
'Origin': self.exampleUrl,
}
if extraHeaders:
headers.update(extraHeaders)
return self.client.post(
path, headers=headers, data=protocol.toJson(request))
def sendGetRequest(self, path):
"""
Sends a get request to the specified URL and returns the response.
"""
headers = {
'Origin': self.exampleUrl,
}
return self.client.get(path, headers=headers)
def testCallback(self):
response = self.sendGetRequest("callback")
self.assertEqual(
response.status_code,
401, "Ensure that when the callback is hit without a code"
"it will return 401 but got {}".format(response.status_code))
response = self.sendGetRequest("callback?code=abc")
self.assertEqual(
response.status_code,
401, "Ensure that when the callback is hit without a code"
"it will return 401 but got {}".format(response.status_code))
def testLogin(self):
response = self.sendGetRequest("login")
self.assertEqual(
response.status_code,
200, "Ensure that when Auth0 is turned on the login page"
"returns 200 {}".format(response.status_code))
def testBadBearer(self):
"""
Tests to see if a malformed bearer token fails in expected ways
"""
response = self.client.get('/')
self.assertEqual(response.status_code, 401)
protectedPath = "datasets/search"
request = protocol.SearchDatasetsRequest()
headers = {"Authorization": ""}
response = self.sendPostRequest(protectedPath, request, headers)
self.assertEquals(response.status_code, 401, "No bearer should fail"
" with 401")
headers = {"Authorization": "Bearer"}
response = self.sendPostRequest(protectedPath, request, headers)
self.assertEquals(response.status_code, 401, "")
def testProtected(self):
protectedPath = "datasets/search"
request = protocol.SearchDatasetsRequest()
response = self.sendPostRequest(protectedPath, request)
self.assertEquals(
response.status_code,
401, "If Auth0 is enabled this endpoint "
"is not accessible without auth.")
def testDecodeExceptions(self):
"""
Unit tests the header parsing functions.
"""
with self.assertRaises(exceptions.NotAuthorizedException):
auth._has_header('')
with self.assertRaises(exceptions.NotAuthorizedException):
auth._has_bearer('empty')
with self.assertRaises(exceptions.NotAuthorizedException):
auth._has_token('Bearer')
with self.assertRaises(exceptions.NotAuthorizedException):
auth._has_token('Bearer123')
with self.assertRaises(exceptions.NotAuthorizedException):
auth._well_formed('Bearer 123 456')
with self.assertRaises(exceptions.NotAuthorizedException):
client_id = self.app.config.get("AUTH0_CLIENT_ID")
client_secret = self.app.config.get("AUTH0_CLIENT_SECRET")
token, payload = auth._decode_header(
'Bearer 123', client_id, client_secret)
def testRenderLogin(self):
"""
Tests that the login template renders without failing
"""
return auth.render_login(
app=self.app,
scopes=self.app.config.get('AUTH0_SCOPES'),
redirect_uri=self.app.config.get('AUTH0_CALLBACK_URL'),
domain=self.app.config.get('AUTH0_HOST'),
client_id=self.app.config.get('AUTH0_CLIENT_ID'))
def testAuthorizeEmail(self):
"""
Tests that the email is set to authorized in the cache.
"""
email = "[email protected]"
auth.authorize_email(email, self.app.cache)
entry = self.app.cache.get(email)
self.assertTrue(entry['authorized'])
self.assertTrue(auth.is_authorized(self.app.cache, email))
def testIsActive(self):
"""
Tests that is active throws an exception when the token is not found.
"""
token = "123"
email = "[email protected]"
with self.assertRaises(exceptions.NotAuthenticatedException):
auth.is_active(self.app.cache, token)
self.app.cache.set("123", {"email": email})
# Once the token has been set in the cache, is_active should return the cached entry for it
self.assertEquals(
email, auth.is_active(self.app.cache, token)['email'])
def testCallbackMaker(self):
"""
Tests that the callback maker returns a function and runs without
failure.
"""
callback = auth.callback_maker(
cache=self.app.cache,
domain=self.app.config.get('AUTH0_HOST'),
client_id=self.app.config.get('AUTH0_CLIENT_ID'),
client_secret=self.app.config.get('AUTH0_CLIENT_SECRET'),
redirect_uri=self.app.config.get('AUTH0_CALLBACK_URL'))
self.assertTrue(callable(callback))
|
py | b40b9303160748decaf866d7fe52c0a101a57667 |
import codecs
import json
import select
import socket
import sys
import time
import urllib.parse
# Slim HTTP client written directly on top of the UNIX socket API.
# Therefore it can be used with both UNIX and TCP sockets.
#
# Its intended use is limited to rocker (the restclient API should not be
# considered stable).
#
# Right now the idea is to create a new Request instance for each request.
#
# The best way to instantiate the client is using a with statement. That way
# all resources will be released properly. E.g.:
#
# with Request("unix:///var/run/docker.sock") as req:
# response = client.doGet('/version').send()
# # do something with the response
#
# send() will return a Response object which can then be used to act accordingly
class Request:
# Request constructor
#
# You'll have to provide either a UNIX socket path or a HTTP/HTTPS server
# URL. For example:
#
# unix:///var/run/docker.sock
# http://dockerHost:1234/
# https://dockerHost:1234/
#
# Note that HTTP and HTTPS aren't implemented yet (feel free to provide a
# patch/merge request).
def __init__(self, url):
url = urllib.parse.urlsplit(url)
self._headers = {}
self._headerKeys = {}
self._chunked = False
self._headersSent = False
self._method = None
self._url = None
self._reqBodyPos = 0
self.setHeader("User-agent", "rocker v0.1") # TODO use the real rocker version
try:
if url.scheme == 'unix':
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(url.path)
elif url.scheme in ['http', 'https']:
raise Exception("Not yet implemented: {0}".format(url))
# The following two lines are unreachable placeholders for a future TCP
# implementation; note that socket.create_connection() is a module-level
# function, so a real implementation would call
# socket.create_connection((url.hostname, url.port)) instead:
# sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# sock.create_connection()
else:
raise Exception("Unsupported schema: {0}".format(url.schema))
except PermissionError as e:
raise SocketError("Can't access '{0}'".format(url), e)
except FileNotFoundError as e:
raise SocketError("Socket not found: '{0}'".format(url), e)
# sock.setblocking(0)
self._sock = ChunkReader(BufferedReader(sock))
# 'with' statement implementation
# simply returns self
def __enter__(self):
return self
# 'with' statement implementation
# calls close() when the calling code exits the 'with' block
def __exit__(self, type, value, traceback):
self.close()
# Sends the HTTP request headers
#
# This method makes sure the headers will be sent only once
def _sendHeaders(self):
if self._headersSent:
return
self._sock.send("{0} {1} HTTP/1.1\r\n".format(self._method, self._url).encode('ascii'))
for key, value in self._headers.items():
# for now I'll only allow ASCII headers (file a bug if that's not enough)
self._sock.send("{0}: {1}\r\n".format(key, value).encode('ascii'))
self._sock.send(b'\r\n')
self._headersSent = True
# Closes the underlying socket
def close(self):
self._sock.close()
def doDelete(self, url):
self._method = "DELETE"
self._url = url
return self
# Specifies the url for this GET request
def doGet(self, url):
self._method = "GET"
self._url = url
return self
# Specifies the url for this POST request
def doPost(self, url):
self._method = "POST"
self._url = url
return self
# Tells Request to use chunked mode
#
# You need to call this method before using write().
# But in chunked mode send() won't accept any request body data.
#
# Will fail if the headers have already been sent.
def enableChunkedMode(self):
self.setHeader("Transfer-encoding", "chunked")
self._chunked = True
# Set a request header
#
# Header names are case insensitive (so 'Content-type' will overwrite 'Content-Type', etc.)
#
# This method will fail if the headers have been sent already
def setHeader(self, key, value):
if self._headersSent:
raise Exception("Headers already sent!")
if key.lower() in self._headerKeys:
# overwrite header
del self._headers[self._headerKeys[key]]
self._headers[key] = value
self._headerKeys[key.lower()] = key
# Finalizes the request and returns a Response object
#
# This method will send the headers if that hasn't happened yet,
# send data if not in chunked mode and then return a Response
# object using the underlying socket
def send(self, data=None):
if data != None:
if self._chunked:
raise Exception("data can't be set when in chunked mode")
if type(data) == dict:
data = bytes(json.dumps(data), 'utf8')
self.setHeader("Content-type", "application/json")
self.setHeader("Content-length", str(len(data)))
elif self._chunked:
# send final chunk
self._sock.send(b'0\r\n\r\n')
self._sendHeaders()
if data != None:
self._sock.send(data)
return Response(self._sock)
# Returns the number of bytes already written in the request body
#
# With this method you can use Request as `fileobj` parameter for `tarfile`
def tell(self):
return self._reqBodyPos
# Write request body data in chunked mode
def write(self, data):
if not self._chunked:
raise Exception("Request.write() only works in chunked mode!")
# make sure we can actually write data
select.select([], [self._sock], [])
self._sendHeaders()
self._sock.send("{0:x}\r\n".format(len(data)).encode('ascii'))
self._sock.send(data)
self._sock.send(b"\r\n")
self._reqBodyPos += len(data)
# Represents a HTTP response
#
# Response objects are created by Request.send().
#
# They will parse the response headers, try to figure out content type and charset
# and give you access to the response body in various forms
class Response:
# Response constructor (should only be called by Request.send()
def __init__(self, sock):
self._sock = ChunkReader(BufferedReader(sock))
self._headers = None
self._headerKeys = {}
self._status = None
self._statusMsg = None
self._parseHeaders()
self.__parseContentType()
if self.isChunked():
self._sock.enableChunkedMode()
# 'in' operator.
# This method will return true if a response header with the given name exists
# (case insensitive).
# Use it like follows:
#
# if 'Content-Type' in restClient:
# contentType = restClient.getHeader('Content-type')
def __contains__(self, key):
if self._headerKeys == None:
raise Exception("Headers haven't been read yet!")
else:
return key.lower() in self._headerKeys
# Internal method to figure out the response data type and character set
def __parseContentType(self):
# will be something like:
# - text/plain; charset=utf-8
# - application/json
# if no charset is specified, this method will assume ascii data
# (it's better to raise an exception and be able to fix bugs as they come
# than to decode the data in a wrong way)
#
# JSON however uses a default charset of utf8
if 'Content-Type' not in self:
if self._status == 204: # no content
self._contentType = None
self._charset = None
return
else:
raise Exception("Missing Content-Type header in Docker response!")
header = self.getHeader('Content-Type')
cTypeParts = header.split(';')
cType = cTypeParts[0].strip().lower()
charset = 'ascii'
if len(cTypeParts) > 2:
raise ValueError("Malformed content-type header: {0}".format(header))
if len(cTypeParts) == 2:
charsetParts = cTypeParts[1].split('=')
if len(charsetParts) != 2 or charsetParts[0].lower().strip() != 'charset':
raise ValueError("Malformed charset declaration: {0}".format(cTypeParts[1]))
charset = charsetParts[1].strip().lower()
elif cType == 'application/json': # implicitly: and len(cTypeParts) < 2
charset = 'utf-8'
self._contentType = cType
self._charset = charset
# Parses the response headers (and returns them)
#
# The header data will be stored in self._headers, so subsequent calls
# to __readHeaders() will simply return the cached data.
def _parseHeaders(self):
rc = {}
if self._headers != None:
return self._headers
while True:
line = self._sock.readLine().strip()
if len(line) == 0:
break
else:
if self._status == None:
# first line contains the HTTP status (sth like: 'HTTP/1.1 200 Ok')
firstSpace = line.find(b' ')
secondSpace = line.find(b' ', firstSpace+1)
if firstSpace < 0 or secondSpace < 0:
raise Exception("Malformed response status: {0}".format(line))
self._status = int(line[firstSpace+1:secondSpace])
self._statusMsg = line[secondSpace+1:]
else:
colonPos = line.find(b':')
if colonPos < 0:
raise Exception("Malformed response header line: {0}".format(line))
key = str(line[:colonPos].strip(), 'ascii')
value = str(line[colonPos+1:].strip(), 'utf-8')
rc[key] = value
self._headers = rc
# fill _headerKeys (which allows case-insensitive header lookup)
self._headerKeys = {}
for key in rc.keys():
self._headerKeys[key.lower()] = key
if self._status not in [200, 201, 204]:
# read data
data = None
if 'content-length' in self:
dataLen = int(self.getHeader('content-length'))
data = self._sock.recv(dataLen)
else:
data = self._headers
raise HttpResponseError(self._statusMsg, self._status, data)
return rc
# Get a response header (key is case insensitive)
#
# Raises a KeyError if the header wasn't found, so use the `in` operator before calling
# this method.
def getHeader(self, key):
key = key.lower()
if self._headers == None:
raise Exception("Headers haven't been read yet!")
elif key not in self._headerKeys:
raise KeyError("Header not found: {0}".format(key))
return self._headers[self._headerKeys[key]]
# Returns a json decoded response object.
#
# if the response was chunked, this method only reads the first chunk (might change if it turns out to be necessary)
# If it wasn't, readAll() will be used.
def getObject(self):
rc = None
if self.isChunked():
rc = self.readChunk()
else:
rc = self.readAll()
return json.loads(rc)
# Returns True if the server indicated the use of chunked transfer encoding
# (by setting the respective header)
#
# If this method returns True, you need to use readChunk(); read() and readLine() will raise
# an exception. If it's false, readChunk() throws an exception while the other two will work.
def isChunked(self):
if 'Transfer-Encoding' in self:
if self.getHeader('Transfer-Encoding').lower().strip() == 'chunked':
return True
return False
# Read data from the underlying socket
#
# If blocking is set to False (default) count will be the maximum number of bytes to read.
# If it's true, read() will read exactly count bytes (which means that it might block indefinitely
# if you expect more data than you'll get).
#
# Note: count is in bytes, not characters.
def read(self, count, blocking=False):
if not blocking:
return str(self._sock.recv(count), self._charset)
else:
return str(self._sock.readExactly(count), self._charset)
# Reads exactly `content-length` response bytes and decodes them using the detected encoding.
#
# This method will only work if the content-length header was specified by the remote server
# (which won't be the case for chunked responses)
def readAll(self):
if self.isChunked():
raise Exception("readAll() can't be used in chunked mode!")
count = int(self.getHeader('Content-length'))
rc = self._sock.readExactly(count)
return str(rc, self._charset)
# Reads the next response chunk from the underlying socket.
#
# This method will only return full chunks and might block to wait for
# all data to be received.
#
# However, if there's no data available at all, it will return an empty
# result immediately.
def readChunk(self):
rc = self._sock.readChunk()
if rc != None:
rc = str(rc, self._charset)
return rc
# Reads the next line from the underlying socket
def readLine(self):
return str(self._sock.readLine(), self._charset)
# Wraps around the socket to provide readline() and unrecv()
class BufferedReader:
# source is a file-like object
def __init__(self, source):
self._source = source
self._buffer = None
def close(self):
self._source.close()
def enableChunkedMode(self):
self._source.enableChunkedMode()
def fileno(self):
return self._source.fileno()
# Buffered read command. Reads at most <length> bytes from the socket.
# If no bytes are currently available, an empty result will be returned.
# This method will not block indefinitely (it waits at most about two seconds for new data).
#
# This method maintains a readahead buffer (you can 'undo' reads by calling
# the unrecv() method)
def recv(self, length):
rc = bytes()
if self._buffer != None:
rc = self._buffer
if len(rc) > length:
self._buffer = rc[length:]
rc = rc[:length]
length = 0
else:
length -= len(rc)
self._buffer = None
else:
if self.wait(2):
rc += self._source.recv(length)
return rc
# Reads exactly length bytes from the socket
#
# May block indefinitely
def readExactly(self, length):
rc = []
while length > 0:
self.wait()
data = self.recv(length)
length -= len(data)
rc.append(data)
return b''.join(rc)
# Reads and returns one line of data from the socket.
#
# This method invokes recv() until a newline is found and then calls unrecv()
# to push the extra data onto the readahead buffer.
#
# It will block until a whole line has been read
def readLine(self):
buff = []
while True:
data = self.recv(128)
nlPos = data.find(b'\n')
if nlPos >= 0:
# we've found a newline, unrecv everything after it
self.unrecv(data[nlPos+1:])
buff.append(data[:nlPos])
break
else:
buff.append(data)
buff = b''.join(buff)
# strip \r (windows newline)
if buff.endswith(b'\r'):
buff = buff[:-1]
return buff
def send(self, data):
self._source.send(data)
# Push data onto the readahead buffer (which is checked by read())
def unrecv(self, data):
if self._buffer != None:
# append it to the buffer
self._buffer += data
else:
self._buffer = data
# Wait for data to be available on the socket (or the timeout to elapse)
#
# timeout is a float value representing the maximum time to wait in seconds
#
# Returns True if there's data to be read, False on timeout
#
# This method uses select.select() internally but uses its own timing code.
# For a timeout of 0 wait() will return immediately.
def wait(self, timeout=2):
if self._buffer != None and len(self._buffer) > 0:
return True
inputs,_,_ = select.select([self._source.fileno()], [], [], timeout)
return len(inputs) > 0
# HTTP chunked response implementation
class ChunkReader:
def __init__(self, source):
self._source = source
self._chunked = False
def close(self):
self._source.close()
def enableChunkedMode(self):
self._chunked = True
def fileno(self):
return self._source.fileno()
def recv(self, maxLen):
if not self._chunked:
# normal un-chunked mode
return self._source.recv(maxLen)
else:
raise IOError("recv() not allowed in chunked mode!")
# reads a whole chunk of data from the server.
# If an empty chunk is returned (EOT), this method returns None
def readChunk(self):
if not self._chunked:
raise IOError("readChunk() can only be used in chunked mode!")
# read chunk len (format: '0123abc\r\n' - 0123abc being the hexadecimal length of the next chunk)
length = self._source.readLine()
length = int(length, 16)
# read the actual data
rc = self._source.readExactly(length)
# hit the end of a chunk. read \r\n
chunkEnd = self._source.readExactly(2)
if chunkEnd != b'\r\n':
raise Exception("Got invalid chunk end mark: {0} (expected {1})".format(codecs.encode(chunkEnd, 'hex'), codecs.encode(b'\r\n', 'hex')))
# We'll return None instead of an empty string
if rc == b'':
rc = None # indicates EOT
return rc
def readExactly(self, length):
if not self._chunked:
# normal mode => simply pass call to BufferedReader
return self._source.readExactly(length)
else:
raise IOError("readExactly() not allowed in chunked mode!")
def readLine(self):
if not self._chunked:
# normal mode => simply pass call to BufferedReader
return self._source.readLine()
else:
raise IOError("readLine() not allowed in chunked mode!")
def send(self, data):
self._source.send(data)
def wait(self, timeout=2):
return self._source.wait(timeout)
# Will be raised if the REST server responds with a code other than 200 (Ok)
class HttpResponseError(Exception):
def __init__(self, message, code, data):
super(Exception, self).__init__(message)
self._code = code
self._data = data
# Get the HTTP response code
def getCode(self):
return self._code
def getData(self):
return self._data
# Indicates some sort of error while connecting to the remote service
class SocketError(Exception):
def __init__(self, message, cause=None):
self.message = message
self.cause = cause
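# A chunked-upload sketch complementing the GET example in the module
# header (socket path, URL and payload are assumed for illustration):
#
# with Request("unix:///var/run/docker.sock") as req:
# req.doPost("/build")
# req.enableChunkedMode()
# req.write(b"first part of the request body")
# req.write(b"second part")
# response = req.send()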
|
py | b40b932f6572013d89a2eae2730aae5018431586 | """
djnydus.shards.models
~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2012 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
from django.db.models.base import ModelBase, Model
from django.db.models.options import Options
from .manager import PartitionManager
from .options import ShardInfo, ShardOptions, DEFAULT_NAMES
class PartitionBase(ModelBase):
def __new__(cls, name, bases, attrs):
if 'Meta' not in attrs:
attrs['Meta'] = type('Meta', (object,), {
'abstract': True,
})
else:
attrs['Meta'].abstract = True
if 'objects' not in attrs:
attrs['objects'] = PartitionManager()
attrs['Meta'].managed = True
new_cls = super(PartitionBase, cls).__new__(cls, name, bases, attrs)
attr_shardopts = attrs.pop('Shards', None)
if not attr_shardopts:
shardopts = getattr(new_cls, 'Shards', None)
else:
shardopts = attr_shardopts
base_shardopts = getattr(new_cls, '_shards', None)
shards = []
new_cls.add_to_class('_shards', ShardInfo(shardopts, nodes=shards))
if base_shardopts:
for k in DEFAULT_NAMES:
if not hasattr(new_cls._shards, k):
setattr(new_cls._shards, k, getattr(base_shardopts, k, None))
return new_cls
def add_to_class(cls, name, value):
if isinstance(value, Options):
value = ShardOptions(value)
return super(PartitionBase, cls).add_to_class(name, value)
class PartitionModel(Model):
__metaclass__ = PartitionBase
class Meta:
abstract = True
|
py | b40b9347ab3e680cfbaad92b544099a9942d9850 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import re
class AtWikiStripper(object):
# Comment: `// comment`
COMMENT = re.compile(r'^//')
# Inline annotation: `&color(#999999){text}`, `&nicovideo(url)`
INLINE_ANN = re.compile(r'&[a-z_]+\(([^()]*?)\)({([^{}]+?)})?'), 3
# Inline links: `[[page]]`, `[[alias>URL]]`
INLINE_LINK = re.compile(r'\[\[(.+?)((>|>>)(.+?))?\]\]'), 1
# Inline italic: `'''text'''`
INLINE_ITALIC = re.compile(r'\'\'\'(.+?)\'\'\''), 1
# Inline bold: `''text''`
INLINE_BOLD = re.compile(r'\'\'(.+?)\'\''), 1
# Inline del: `%%text%%`
INLINE_DEL = re.compile(r'%%(.+?)%%'), 1
# Line annotation: `#right(){text}`, `#comment()`, `#region`
LINE_ANN = re.compile(r'^#[a-z_]+(\(([^()]*?)\)({([^{}]+?)})?)?\s*$'), 4
# Line horizontal line: `----`
LINE_HR = re.compile(r'^----\s*()$'), 1
# Line item list and heading: `+foo`, `-foo`, `*foo`
LINE_ITEMLIST = re.compile(r'^(\*+|\++|-+)(.+)$'), 2
# Line quote: `>text`
LINE_QUOTE = re.compile(r'^>+(.+)$'), 1
# Line formatted: ` text`
LINE_PRE = re.compile(r'^ (.+)$'), 1
# Block annotation: `#exk(){{{` ... `}}}`
BLOCK_BEGIN_ANN = re.compile(r'^#[a-z_]+\(([^{}()]*?)\)({+)\s*$')
BLOCK_END_ANN = re.compile(r'^(}+)\s*$')
def __init__(self, source):
self._source = source
def _inline_strip(self, line, pattern, group):
while True:
prev = line
# Note: prior to Python 3.5, use of backreference of nonmatching group
# in replacement string raises exception.
line = pattern.sub(lambda m: m.group(group), line)
if prev == line: return line
def _line_process(self, buf, line, pattern, group):
prev = line
line = pattern.sub(lambda m: m.group(group), line)
if prev == line: return False
buf.append(line)
return True
def text(self):
ret = []
lines = self._source.splitlines()
block_level = 0
for line in lines:
if self.COMMENT.match(line): continue
line = self._inline_strip(line, *self.INLINE_ANN)
line = self._inline_strip(line, *self.INLINE_LINK)
line = self._inline_strip(line, *self.INLINE_ITALIC)
line = self._inline_strip(line, *self.INLINE_BOLD)
line = self._inline_strip(line, *self.INLINE_DEL)
if self._line_process(ret, line, *self.LINE_ANN): continue
if self._line_process(ret, line, *self.LINE_HR): continue
if self._line_process(ret, line, *self.LINE_ITEMLIST): continue
if self._line_process(ret, line, *self.LINE_QUOTE): continue
if self._line_process(ret, line, *self.LINE_PRE): continue
if block_level == 0:
m = self.BLOCK_BEGIN_ANN.match(line)
if m:
block_level = len(m.group(2))
continue
else:
m = self.BLOCK_END_ANN.match(line)
if m and len(m.group(1)) == block_level:
block_level = 0
continue
ret.append(line)
return '\n'.join(ret)
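# Minimal usage sketch (added for illustration, not part of the original
# module): feed raw AtWiki source to the stripper and read back plain text.
if __name__ == '__main__':
    sample = ("*Heading\n"
              "''bold'' text with a link to [[alias>http://example.com]]\n"
              "//this comment line is dropped")
    # Prints the heading and the second line with inline markup removed.
    print(AtWikiStripper(sample).text())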
|
py | b40b9361a6f0b7428bbcff9fc075d7db1e54c1c3 | # coding: utf-8
"""
NiFi Rest Api
The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service.
OpenAPI spec version: 1.11.1-SNAPSHOT
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class VersionControlInformationEntity(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'version_control_information': 'VersionControlInformationDTO',
'process_group_revision': 'RevisionDTO',
'disconnected_node_acknowledged': 'bool'
}
attribute_map = {
'version_control_information': 'versionControlInformation',
'process_group_revision': 'processGroupRevision',
'disconnected_node_acknowledged': 'disconnectedNodeAcknowledged'
}
def __init__(self, version_control_information=None, process_group_revision=None, disconnected_node_acknowledged=None):
"""
VersionControlInformationEntity - a model defined in Swagger
"""
self._version_control_information = None
self._process_group_revision = None
self._disconnected_node_acknowledged = None
if version_control_information is not None:
self.version_control_information = version_control_information
if process_group_revision is not None:
self.process_group_revision = process_group_revision
if disconnected_node_acknowledged is not None:
self.disconnected_node_acknowledged = disconnected_node_acknowledged
@property
def version_control_information(self):
"""
Gets the version_control_information of this VersionControlInformationEntity.
The Version Control information
:return: The version_control_information of this VersionControlInformationEntity.
:rtype: VersionControlInformationDTO
"""
return self._version_control_information
@version_control_information.setter
def version_control_information(self, version_control_information):
"""
Sets the version_control_information of this VersionControlInformationEntity.
The Version Control information
:param version_control_information: The version_control_information of this VersionControlInformationEntity.
:type: VersionControlInformationDTO
"""
self._version_control_information = version_control_information
@property
def process_group_revision(self):
"""
Gets the process_group_revision of this VersionControlInformationEntity.
The Revision for the Process Group
:return: The process_group_revision of this VersionControlInformationEntity.
:rtype: RevisionDTO
"""
return self._process_group_revision
@process_group_revision.setter
def process_group_revision(self, process_group_revision):
"""
Sets the process_group_revision of this VersionControlInformationEntity.
The Revision for the Process Group
:param process_group_revision: The process_group_revision of this VersionControlInformationEntity.
:type: RevisionDTO
"""
self._process_group_revision = process_group_revision
@property
def disconnected_node_acknowledged(self):
"""
Gets the disconnected_node_acknowledged of this VersionControlInformationEntity.
Acknowledges that this node is disconnected to allow for mutable requests to proceed.
:return: The disconnected_node_acknowledged of this VersionControlInformationEntity.
:rtype: bool
"""
return self._disconnected_node_acknowledged
@disconnected_node_acknowledged.setter
def disconnected_node_acknowledged(self, disconnected_node_acknowledged):
"""
Sets the disconnected_node_acknowledged of this VersionControlInformationEntity.
Acknowledges that this node is disconnected to allow for mutable requests to proceed.
:param disconnected_node_acknowledged: The disconnected_node_acknowledged of this VersionControlInformationEntity.
:type: bool
"""
self._disconnected_node_acknowledged = disconnected_node_acknowledged
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, VersionControlInformationEntity):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
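# Usage sketch (illustrative, not generated by swagger-codegen): the entity is
# a plain data holder, so it can be built with keyword arguments and serialized
# through to_dict() / to_str().
if __name__ == '__main__':
    entity = VersionControlInformationEntity(disconnected_node_acknowledged=False)
    print(entity.to_dict())
    print(entity == VersionControlInformationEntity(disconnected_node_acknowledged=False))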
|
py | b40b93987f1303c5df032df9b162e60319a76c71 | """
Django settings for pythonzoo project.
Generated by 'django-admin startproject' using Django 2.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_eu9#$b&hmycq$wqmse-+1k4wf0m7r7x&)2zc4j&9)$mg3s58f'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'zoo.apps.ZooConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'pythonzoo.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'pythonzoo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
|
py | b40b94f62e3be79763e3a732eb99d576d10c5484 | # Generated by Django 3.2.4 on 2021-06-07 17:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('perfil', '0002_perfil_acessos'),
]
operations = [
migrations.AlterField(
model_name='perfil',
name='acessos',
field=models.PositiveIntegerField(blank=True, default=1, null=True),
),
]
|
py | b40b95f6956bd40739833c622ee154192320a936 | from django.db import models
from django.contrib.auth.models import User
class Tag(models.Model):
name = models.CharField(max_length=255)
def __str__(self):
return self.name
class Instrument(models.Model):
user = models.ForeignKey(
User,
on_delete=models.CASCADE
)
name = models.CharField(max_length=255)
tags = models.ManyToManyField(to="Tag")
def __str__(self):
return self.name + " " + str(self.user)
class TimeSeriesDatum(models.Model):
instrument = models.ForeignKey( # one-to-many
to='Instrument',
on_delete=models.CASCADE
)
value = models.FloatField()
time = models.DateTimeField()
def __str__(self):
return str(self.instrument) + " is " + str(self.value) + " at " + str(self.time)
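# Example ORM usage (an illustrative sketch only; it assumes a configured
# Django project with these models installed and migrations applied):
#
#   from django.utils import timezone
#
#   user = User.objects.create_user(username="alice")
#   piano = Instrument.objects.create(user=user, name="piano")
#   piano.tags.add(Tag.objects.create(name="keys"))
#   TimeSeriesDatum.objects.create(instrument=piano, value=42.0, time=timezone.now())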
|
py | b40b961489776e4bbfbedb8276981bc18dd65c68 | #!/usr/bin/env python
"""
Copyright 2012 GroupDocs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class SignatureSignatureResult:
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually."""
def __init__(self):
self.swaggerTypes = {
'signature': 'SignatureSignatureInfo'
}
self.signature = None # SignatureSignatureInfo
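# Usage sketch (illustrative, not generated code): the result object is a bare
# container; the GroupDocs client fills `signature` with a
# SignatureSignatureInfo instance after deserializing the API response.
if __name__ == '__main__':
    result = SignatureSignatureResult()
    print(result.swaggerTypes)  # {'signature': 'SignatureSignatureInfo'}
    print(result.signature)     # None until populated by the API client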
|
py | b40b965fa1ec91bd8e0b40ae84ad9dad6253741b | # -*- coding: utf-8 -*-
import unittest
from ctfbox.thirdparty import phpserialize
class PhpSerializeTestCase(unittest.TestCase):
def test_dumps_int(self):
self.assertEqual(phpserialize.dumps(5), b'i:5;')
def test_dumps_float(self):
self.assertEqual(phpserialize.dumps(5.6), b'd:5.6;')
def test_dumps_str(self):
self.assertEqual(phpserialize.dumps('Hello world'),
b's:11:"Hello world";')
def test_dumps_unicode(self):
self.assertEqual(phpserialize.dumps('Björk Guðmundsdóttir'),
b's:23:"Bj\xc3\xb6rk Gu\xc3\xb0mundsd\xc3\xb3ttir";')
def test_dumps_binary(self):
self.assertEqual(phpserialize.dumps(b'\001\002\003'),
b's:3:"\x01\x02\x03";')
def test_dumps_list(self):
self.assertEqual(phpserialize.dumps([7, 8, 9]),
b'a:3:{i:0;i:7;i:1;i:8;i:2;i:9;}')
def test_dumps_tuple(self):
self.assertEqual(phpserialize.dumps((7, 8, 9)),
b'a:3:{i:0;i:7;i:1;i:8;i:2;i:9;}')
def test_dumps_dict(self):
self.assertEqual(phpserialize.dumps({'a': 1, 'b': 2, 'c': 3}),
b'a:3:{s:1:"a";i:1;s:1:"b";i:2;s:1:"c";i:3;}')
def test_loads_dict(self):
self.assertEqual(phpserialize.loads(b'a:3:{s:1:"a";i:1;s:1:"c";i:3;s:1:"b";i:2;}',
decode_strings=True), {'a': 1, 'b': 2, 'c': 3})
def test_loads_unicode(self):
self.assertEqual(phpserialize.loads(b's:23:"Bj\xc3\xb6rk Gu\xc3\xb0mundsd\xc3\xb3ttir";',
decode_strings=True), b'Bj\xc3\xb6rk Gu\xc3\xb0mundsd\xc3\xb3ttir'.decode('utf-8'))
def test_loads_binary(self):
self.assertEqual(phpserialize.loads(b's:3:"\001\002\003";', decode_strings=False),
b'\001\002\003')
def test_dumps_and_loads_dict(self):
self.assertEqual(phpserialize.loads(phpserialize.dumps({'a': 1, 'b': 2, 'c': 3}),
decode_strings=True), {'a': 1, 'b': 2, 'c': 3})
def test_list_roundtrips(self):
x = phpserialize.loads(phpserialize.dumps(list(range(2))))
self.assertEqual(x, {0: 0, 1: 1})
y = phpserialize.dict_to_list(x)
self.assertEqual(y, [0, 1])
def test_tuple_roundtrips(self):
x = phpserialize.loads(phpserialize.dumps(list(range(2))))
self.assertEqual(x, {0: 0, 1: 1})
y = phpserialize.dict_to_tuple(x)
self.assertEqual(y, (0, 1))
def test_fileio_support_with_chaining_and_all(self):
f = phpserialize.BytesIO()
phpserialize.dump([1, 2], f)
phpserialize.dump(42, f)
f = phpserialize.BytesIO(f.getvalue())
self.assertEqual(phpserialize.load(f), {0: 1, 1: 2})
self.assertEqual(phpserialize.load(f), 42)
def test_object_hook(self):
class User(object):
def __init__(self, username):
self.username = username
def load_object_hook(name, d):
return {'WP_User': User}[name](**d)
def dump_object_hook(obj):
if isinstance(obj, User):
return phpserialize.phpobject('WP_User', {'username': obj.username})
raise LookupError('unknown object')
user = User('test')
x = phpserialize.dumps(user, object_hook=dump_object_hook)
y = phpserialize.loads(x, object_hook=load_object_hook,
decode_strings=True)
self.assert_(b'WP_User' in x)
self.assertEqual(type(y), type(user))
self.assertEqual(y.username, user.username)
def test_basic_object_hook(self):
data = b'O:7:"WP_User":1:{s:8:"username";s:5:"admin";}'
user = phpserialize.loads(data, object_hook=phpserialize.phpobject,
decode_strings=True)
self.assertEqual(user.username, 'admin')
self.assertEqual(user.__name__, 'WP_User')
if __name__ == '__main__':
unittest.main()
|
py | b40b9668a5f1dc437a442c7e09b69b9ac439579c | from __future__ import division, print_function, absolute_import
from nnmnkwii.datasets import FileSourceDataset, PaddedFileSourceDataset
from nnmnkwii.datasets import MemoryCacheFramewiseDataset
from nnmnkwii.datasets import FileDataSource
from nnmnkwii.util import example_file_data_sources_for_acoustic_model
from nnmnkwii.util import example_file_data_sources_for_duration_model
import numpy as np
from nose.tools import raises
from nose.plugins.attrib import attr
from os.path import join, dirname
DATA_DIR = join(dirname(__file__), "data")
def _get_small_datasets(padded=False, duration=False, padded_length=1000):
if duration:
X, Y = example_file_data_sources_for_duration_model()
else:
X, Y = example_file_data_sources_for_acoustic_model()
if padded:
X = PaddedFileSourceDataset(X, padded_length=padded_length)
Y = PaddedFileSourceDataset(Y, padded_length=padded_length)
else:
X = FileSourceDataset(X)
Y = FileSourceDataset(Y)
return X, Y
def test_empty_dataset():
class EmptyDataSource(FileDataSource):
def collect_files(self):
return []
def collect_features(self, path):
pass
X = FileSourceDataset(EmptyDataSource())
def __test_outof_range(X):
print(X[0])
# Should raise IndexError
yield raises(IndexError)(__test_outof_range), X
def test_invalid_dataset():
class WrongNumberOfArgsDataSource(FileDataSource):
def collect_files(self):
return ["dummy.txt"]
def collect_features(self, path, this_is_not_needed):
pass
class WrongNumberOfCollectedFilesDataSource(FileDataSource):
def collect_files(self):
return ["dummy.txt"] * 1, ["dummy.txt"] * 2
def collect_features(self, path):
pass
def __test_wrong_num_args():
X = FileSourceDataset(WrongNumberOfArgsDataSource())
X[0]
def __test_wrong_num_collected_files():
X = FileSourceDataset(WrongNumberOfCollectedFilesDataSource())
X[0]
yield raises(TypeError)(__test_wrong_num_args)
yield raises(RuntimeError)(__test_wrong_num_collected_files)
@attr("pickle")
def test_asarray_tqdm():
# verbose=1 triggers tqdm progress report
for padded in [True, False]:
X, _ = _get_small_datasets(padded=padded, duration=True)
X.asarray(verbose=1)
@attr("pickle")
def test_asarray():
X, Y = _get_small_datasets(padded=False, duration=True)
lengths = [len(x) for x in X]
X, Y = _get_small_datasets(
padded=True, duration=True, padded_length=np.max(lengths))
X_array = np.asarray(X)
assert X_array.ndim == 3
assert np.allclose(X_array, X.asarray())
# Explicitly give padded length to actual max time length
X, Y = _get_small_datasets(padded=False, duration=True)
assert np.allclose(X_array, X.asarray(padded_length=np.max(lengths)))
# Make sure that auto-guessing padded_length should get same result as
# explicitly given max time length
assert np.allclose(X_array, X.asarray(padded_length=None))
# Force triggering re-allocations
assert np.allclose(X_array, X.asarray(
padded_length=None, padded_length_guess=1))
def __test_very_small_padded_length():
X, Y = _get_small_datasets(padded=False, duration=True)
X.asarray(padded_length=1)
# Should raise `num frames exceeded`
yield raises(RuntimeError)(__test_very_small_padded_length)
@attr("pickle")
def test_duration_sources():
X, Y = _get_small_datasets(padded=False, duration=True)
for idx, (x, y) in enumerate(zip(X, Y)):
print(idx, x.shape, y.shape)
@attr("pickle")
def test_slice():
X, _ = _get_small_datasets(padded=False)
x = X[:2]
assert isinstance(x, list)
assert len(x) == 2
X, _ = _get_small_datasets(padded=True)
x = X[:2]
assert isinstance(x, np.ndarray)
assert len(x.shape) == 3 and x.shape[0] == 2
@attr("pickle")
def test_variable_length_sequence_wise_iteration():
X, Y = _get_small_datasets(padded=False)
for idx, (x, y) in enumerate(zip(X, Y)):
print(idx, x.shape, y.shape)
@attr("pickle")
def test_fixed_length_sequence_wise_iteration():
X, Y = _get_small_datasets(padded=True)
Tx = X[0].shape[0]
Ty = Y[0].shape[0]
assert Tx == Ty
for idx, (x, y) in enumerate(zip(X, Y)):
print(idx, x.shape, y.shape)
assert x.shape[0] == Tx
assert y.shape[0] == Ty
@attr("pickle")
def test_frame_wise_iteration():
X, Y = _get_small_datasets(padded=False)
lengths = np.array([len(x) for x in X], dtype=np.int)
num_utterances = len(lengths)
# With sufficient cache size
X = MemoryCacheFramewiseDataset(X, lengths, cache_size=len(X))
Y = MemoryCacheFramewiseDataset(Y, lengths, cache_size=len(Y))
assert np.sum(lengths) == len(X)
assert len(X) == len(Y)
Dx = X[0].shape[-1]
Dy = Y[0].shape[-1]
for idx, (x, y) in enumerate(zip(X, Y)):
assert x.shape[-1] == Dx
assert y.shape[-1] == Dy
assert len(X.cached_utterances) == num_utterances
assert len(Y.cached_utterances) == num_utterances
# Should support slice indexing
for idx, (x, y) in enumerate(zip(X[:2], Y[:2])):
pass
@attr("pickle")
def test_sequence_wise_torch_data_loader():
import torch
from torch.utils import data as data_utils
X, Y = _get_small_datasets(padded=False)
class TorchDataset(data_utils.Dataset):
def __init__(self, X, Y):
self.X = X
self.Y = Y
def __getitem__(self, idx):
return torch.from_numpy(self.X[idx]), torch.from_numpy(self.Y[idx])
def __len__(self):
return len(self.X)
def __test(X, Y, batch_size):
dataset = TorchDataset(X, Y)
loader = data_utils.DataLoader(
dataset, batch_size=batch_size, num_workers=1, shuffle=True)
for idx, (x, y) in enumerate(loader):
assert len(x.shape) == len(y.shape)
assert len(x.shape) == 3
print(idx, x.shape, y.shape)
# Test with batch_size = 1
yield __test, X, Y, 1
# Since we have variable length frames, batch size larger than 1 causes
# a runtime error.
yield raises(RuntimeError)(__test), X, Y, 2
# For a padded dataset, which can be represented by (N, T^max, D), batch size
# can be any number.
X, Y = _get_small_datasets(padded=True)
yield __test, X, Y, 1
yield __test, X, Y, 2
@attr("pickle")
def test_frame_wise_torch_data_loader():
import torch
from torch.utils import data as data_utils
X, Y = _get_small_datasets(padded=False)
# Since torch's Dataset (and Chainer's, and maybe others) assumes the dataset
# has a fixed length, i.e., implements the `__len__` method, we need to know
# the number of frames for each utterance.
# Sum of the number of frames is the dataset size for frame-wise iteration.
lengths = np.array([len(x) for x in X], dtype=np.int)
# For the above reason, we need to explicitly give the number of frames.
X = MemoryCacheFramewiseDataset(X, lengths, cache_size=len(X))
Y = MemoryCacheFramewiseDataset(Y, lengths, cache_size=len(Y))
class TorchDataset(data_utils.Dataset):
def __init__(self, X, Y):
self.X = X
self.Y = Y
def __getitem__(self, idx):
return torch.from_numpy(self.X[idx]), torch.from_numpy(self.Y[idx])
def __len__(self):
return len(self.X)
def __test(X, Y, batch_size):
dataset = TorchDataset(X, Y)
loader = data_utils.DataLoader(
dataset, batch_size=batch_size, num_workers=1, shuffle=True)
for idx, (x, y) in enumerate(loader):
assert len(x.shape) == 2
assert len(y.shape) == 2
yield __test, X, Y, 128
yield __test, X, Y, 256
|
py | b40b975c2214bc98082e56de5654560ffcb7efea | import os
import sys
import pprint
# for running the script directly from command line
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)),
'..',))
from rescene.main import content_hash
path = "D:\srrdb.com_2011-10-27"
path = sys.argv[1]
# # cd1 before the RARs and CD1 before the SFVs
# bad = os.path.join(path, "007.A.View.To.A.Kill.1985.iNTERNAL.DVDRip.XviD-iNCiTE.srr")
# # Unicode comments in the SFV
# bad2 = os.path.join(path, "13.Going.On.30.DVDRiP.XviD-BRUTUS.srr")
# pprint.pprint(rescene.info(bad))
# pprint.pprint(rescene.info(bad2))
# print(rescene.hash_srr(bad))
print(len(os.listdir(path)))
for srr in os.listdir(path):
try:
release = srr[:-4]
srr_file = os.path.join(path, srr)
srr_hash = content_hash(srr_file)
print(srr_hash + ";" + release)
except KeyboardInterrupt:
sys.exit()
except BaseException as err:
print(err)
# 3.On.Stage.Rest.Of.Pinkpop.2011.DUTCH.WS.PDTV.XviD-iFH
# (<type 'exceptions.EnvironmentError'>, EnvironmentError('Invalid RAR block length (20) at offset 0x1c528',), <traceback object at 0x032E5AA8>)
|
py | b40b97e079f9c20eab612b4c254a5a0532c6198e | from datetime import datetime
from shared import debugException
from database import SessionLoader, DummySession, Subscribers, dSIPLeases, Gateways
def cleanupLeases():
db = DummySession()
try:
db = SessionLoader()
Leases = db.query(dSIPLeases).filter(datetime.now() >= dSIPLeases.expiration).all()
for Lease in Leases:
# Remove the entry in the Subscribers table
db.query(Subscribers).filter(Subscribers.id == Lease.sid).delete(synchronize_session=False)
# Remove the entry in the Gateway table
db.query(Gateways).filter(Gateways.gwid == Lease.gwid).delete(synchronize_session=False)
# Remove the entry in the Lease table
db.delete(Lease)
db.commit()
except Exception as ex:
debugException(ex)
db.rollback()
db.flush()
finally:
db.close()
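# Scheduling sketch (illustrative only; the interval and helper name are
# assumptions, not part of this module): cleanupLeases() opens and closes its
# own session, so it can be driven by any periodic scheduler, e.g.:
#
#   import threading
#
#   def scheduleLeaseCleanup(interval=60):
#       cleanupLeases()
#       threading.Timer(interval, scheduleLeaseCleanup, args=(interval,)).start()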
|
py | b40b98988a527d3253d06e08bcee1b64596ab44e | """Tests for certbot.cli."""
from __future__ import print_function
import argparse
import functools
import itertools
import os
import shutil
import traceback
import tempfile
import unittest
import mock
import six
from six.moves import reload_module # pylint: disable=import-error
from acme import jose
from certbot import account
from certbot import cli
from certbot import configuration
from certbot import constants
from certbot import crypto_util
from certbot import errors
from certbot import util
from certbot import main
from certbot import renewal
from certbot import storage
from certbot.plugins import disco
from certbot.plugins import manual
from certbot.tests import storage_test
from certbot.tests import test_util
CERT = test_util.vector_path('cert.pem')
CSR = test_util.vector_path('csr.der')
KEY = test_util.vector_path('rsa256_key.pem')
class CLITest(unittest.TestCase): # pylint: disable=too-many-public-methods
"""Tests for different commands."""
def setUp(self):
self.tmp_dir = tempfile.mkdtemp()
self.config_dir = os.path.join(self.tmp_dir, 'config')
self.work_dir = os.path.join(self.tmp_dir, 'work')
self.logs_dir = os.path.join(self.tmp_dir, 'logs')
self.standard_args = ['--config-dir', self.config_dir,
'--work-dir', self.work_dir,
'--logs-dir', self.logs_dir, '--text']
def tearDown(self):
shutil.rmtree(self.tmp_dir)
# Reset globals in cli
# pylint: disable=protected-access
cli._parser = cli.set_by_cli.detector = None
def _call(self, args, stdout=None):
"Run the cli with output streams and actual client mocked out"
with mock.patch('certbot.main.client') as client:
ret, stdout, stderr = self._call_no_clientmock(args, stdout)
return ret, stdout, stderr, client
def _call_no_clientmock(self, args, stdout=None):
"Run the client with output streams mocked out"
args = self.standard_args + args
toy_stdout = stdout if stdout else six.StringIO()
with mock.patch('certbot.main.sys.stdout', new=toy_stdout):
with mock.patch('certbot.main.sys.stderr') as stderr:
ret = main.main(args[:]) # NOTE: parser can alter its args!
return ret, toy_stdout, stderr
def test_no_flags(self):
with mock.patch('certbot.main.run') as mock_run:
self._call([])
self.assertEqual(1, mock_run.call_count)
def _help_output(self, args):
"Run a command, and return the ouput string for scrutiny"
output = six.StringIO()
self.assertRaises(SystemExit, self._call, args, output)
out = output.getvalue()
return out
def test_help(self):
self.assertRaises(SystemExit, self._call, ['--help'])
self.assertRaises(SystemExit, self._call, ['--help', 'all'])
plugins = disco.PluginsRegistry.find_all()
out = self._help_output(['--help', 'all'])
self.assertTrue("--configurator" in out)
self.assertTrue("how a cert is deployed" in out)
self.assertTrue("--manual-test-mode" in out)
out = self._help_output(['-h', 'nginx'])
if "nginx" in plugins:
# may be false while building distributions without plugins
self.assertTrue("--nginx-ctl" in out)
self.assertTrue("--manual-test-mode" not in out)
self.assertTrue("--checkpoints" not in out)
out = self._help_output(['-h'])
self.assertTrue("letsencrypt-auto" not in out) # test cli.cli_command
if "nginx" in plugins:
self.assertTrue("Use the Nginx plugin" in out)
else:
self.assertTrue("(nginx support is experimental" in out)
out = self._help_output(['--help', 'plugins'])
self.assertTrue("--manual-test-mode" not in out)
self.assertTrue("--prepare" in out)
self.assertTrue("Plugin options" in out)
out = self._help_output(['--help', 'install'])
self.assertTrue("--cert-path" in out)
self.assertTrue("--key-path" in out)
out = self._help_output(['--help', 'revoke'])
self.assertTrue("--cert-path" in out)
self.assertTrue("--key-path" in out)
out = self._help_output(['-h', 'config_changes'])
self.assertTrue("--cert-path" not in out)
self.assertTrue("--key-path" not in out)
out = self._help_output(['-h'])
self.assertTrue(cli.usage_strings(plugins)[0] in out)
def _cli_missing_flag(self, args, message):
"Ensure that a particular error raises a missing cli flag error containing message"
exc = None
try:
with mock.patch('certbot.main.sys.stderr'):
main.main(self.standard_args + args[:]) # NOTE: parser can alter its args!
except errors.MissingCommandlineFlag as exc:
self.assertTrue(message in str(exc))
self.assertTrue(exc is not None)
def test_noninteractive(self):
args = ['-n', 'certonly']
self._cli_missing_flag(args, "specify a plugin")
args.extend(['--standalone', '-d', 'eg.is'])
self._cli_missing_flag(args, "register before running")
with mock.patch('certbot.main._auth_from_domains'):
with mock.patch('certbot.main.client.acme_from_config_key'):
args.extend(['--email', '[email protected]'])
self._cli_missing_flag(args, "--agree-tos")
@mock.patch('certbot.main.renew')
def test_gui(self, renew):
args = ['renew', '--dialog']
# --text conflicts with --dialog
self.standard_args.remove('--text')
self._call(args)
self.assertFalse(renew.call_args[0][0].noninteractive_mode)
@mock.patch('certbot.main.client.acme_client.Client')
@mock.patch('certbot.main._determine_account')
@mock.patch('certbot.main.client.Client.obtain_and_enroll_certificate')
@mock.patch('certbot.main._auth_from_domains')
def test_user_agent(self, afd, _obt, det, _client):
# Normally the client is totally mocked out, but here we need more
# arguments to automate it...
args = ["--standalone", "certonly", "-m", "[email protected]",
"-d", "example.com", '--agree-tos'] + self.standard_args
det.return_value = mock.MagicMock(), None
afd.return_value = mock.MagicMock(), "newcert"
with mock.patch('certbot.main.client.acme_client.ClientNetwork') as acme_net:
self._call_no_clientmock(args)
os_ver = util.get_os_info_ua()
ua = acme_net.call_args[1]["user_agent"]
self.assertTrue(os_ver in ua)
import platform
plat = platform.platform()
if "linux" in plat.lower():
self.assertTrue(util.get_os_info_ua() in ua)
with mock.patch('certbot.main.client.acme_client.ClientNetwork') as acme_net:
ua = "bandersnatch"
args += ["--user-agent", ua]
self._call_no_clientmock(args)
acme_net.assert_called_once_with(mock.ANY, verify_ssl=True, user_agent=ua)
def test_install_abspath(self):
cert = 'cert'
key = 'key'
chain = 'chain'
fullchain = 'fullchain'
with mock.patch('certbot.main.install') as mock_install:
self._call(['install', '--cert-path', cert, '--key-path', 'key',
'--chain-path', 'chain',
'--fullchain-path', 'fullchain'])
args = mock_install.call_args[0][0]
self.assertEqual(args.cert_path, os.path.abspath(cert))
self.assertEqual(args.key_path, os.path.abspath(key))
self.assertEqual(args.chain_path, os.path.abspath(chain))
self.assertEqual(args.fullchain_path, os.path.abspath(fullchain))
@mock.patch('certbot.main.plug_sel.record_chosen_plugins')
@mock.patch('certbot.main.plug_sel.pick_installer')
def test_installer_selection(self, mock_pick_installer, _rec):
self._call(['install', '--domains', 'foo.bar', '--cert-path', 'cert',
'--key-path', 'key', '--chain-path', 'chain'])
self.assertEqual(mock_pick_installer.call_count, 1)
@mock.patch('certbot.util.exe_exists')
def test_configurator_selection(self, mock_exe_exists):
mock_exe_exists.return_value = True
real_plugins = disco.PluginsRegistry.find_all()
args = ['--apache', '--authenticator', 'standalone']
# This needed two calls to find_all(), which we're avoiding for now
# because of possible side effects:
# https://github.com/letsencrypt/letsencrypt/commit/51ed2b681f87b1eb29088dd48718a54f401e4855
#with mock.patch('certbot.cli.plugins_testable') as plugins:
# plugins.return_value = {"apache": True, "nginx": True}
# ret, _, _, _ = self._call(args)
# self.assertTrue("Too many flags setting" in ret)
args = ["install", "--nginx", "--cert-path", "/tmp/blah", "--key-path", "/tmp/blah",
"--nginx-server-root", "/nonexistent/thing", "-d",
"example.com", "--debug"]
if "nginx" in real_plugins:
# Sending nginx a non-existent conf dir will simulate misconfiguration
# (we can only do that if certbot-nginx is actually present)
ret, _, _, _ = self._call(args)
self.assertTrue("The nginx plugin is not working" in ret)
self.assertTrue("MisconfigurationError" in ret)
self._cli_missing_flag(["--standalone"], "With the standalone plugin, you probably")
with mock.patch("certbot.main._init_le_client") as mock_init:
with mock.patch("certbot.main._auth_from_domains") as mock_afd:
mock_afd.return_value = (mock.MagicMock(), mock.MagicMock())
self._call(["certonly", "--manual", "-d", "foo.bar"])
unused_config, auth, unused_installer = mock_init.call_args[0]
self.assertTrue(isinstance(auth, manual.Authenticator))
with mock.patch('certbot.main.obtain_cert') as mock_certonly:
self._call(["auth", "--standalone"])
self.assertEqual(1, mock_certonly.call_count)
def test_rollback(self):
_, _, _, client = self._call(['rollback'])
self.assertEqual(1, client.rollback.call_count)
_, _, _, client = self._call(['rollback', '--checkpoints', '123'])
client.rollback.assert_called_once_with(
mock.ANY, 123, mock.ANY, mock.ANY)
def test_config_changes(self):
_, _, _, client = self._call(['config_changes'])
self.assertEqual(1, client.view_config_changes.call_count)
def test_plugins(self):
flags = ['--init', '--prepare', '--authenticators', '--installers']
for args in itertools.chain(
*(itertools.combinations(flags, r)
for r in xrange(len(flags)))):
self._call(['plugins'] + list(args))
@mock.patch('certbot.main.plugins_disco')
@mock.patch('certbot.main.cli.HelpfulArgumentParser.determine_help_topics')
def test_plugins_no_args(self, _det, mock_disco):
ifaces = []
plugins = mock_disco.PluginsRegistry.find_all()
_, stdout, _, _ = self._call(['plugins'])
plugins.visible.assert_called_once_with()
plugins.visible().ifaces.assert_called_once_with(ifaces)
filtered = plugins.visible().ifaces()
self.assertEqual(stdout.getvalue().strip(), str(filtered))
@mock.patch('certbot.main.plugins_disco')
@mock.patch('certbot.main.cli.HelpfulArgumentParser.determine_help_topics')
def test_plugins_init(self, _det, mock_disco):
ifaces = []
plugins = mock_disco.PluginsRegistry.find_all()
_, stdout, _, _ = self._call(['plugins', '--init'])
plugins.visible.assert_called_once_with()
plugins.visible().ifaces.assert_called_once_with(ifaces)
filtered = plugins.visible().ifaces()
self.assertEqual(filtered.init.call_count, 1)
filtered.verify.assert_called_once_with(ifaces)
verified = filtered.verify()
self.assertEqual(stdout.getvalue().strip(), str(verified))
@mock.patch('certbot.main.plugins_disco')
@mock.patch('certbot.main.cli.HelpfulArgumentParser.determine_help_topics')
def test_plugins_prepare(self, _det, mock_disco):
ifaces = []
plugins = mock_disco.PluginsRegistry.find_all()
_, stdout, _, _ = self._call(['plugins', '--init', '--prepare'])
plugins.visible.assert_called_once_with()
plugins.visible().ifaces.assert_called_once_with(ifaces)
filtered = plugins.visible().ifaces()
self.assertEqual(filtered.init.call_count, 1)
filtered.verify.assert_called_once_with(ifaces)
verified = filtered.verify()
verified.prepare.assert_called_once_with()
verified.available.assert_called_once_with()
available = verified.available()
self.assertEqual(stdout.getvalue().strip(), str(available))
def test_certonly_abspath(self):
cert = 'cert'
key = 'key'
chain = 'chain'
fullchain = 'fullchain'
with mock.patch('certbot.main.obtain_cert') as mock_obtaincert:
self._call(['certonly', '--cert-path', cert, '--key-path', 'key',
'--chain-path', 'chain',
'--fullchain-path', 'fullchain'])
config, unused_plugins = mock_obtaincert.call_args[0]
self.assertEqual(config.cert_path, os.path.abspath(cert))
self.assertEqual(config.key_path, os.path.abspath(key))
self.assertEqual(config.chain_path, os.path.abspath(chain))
self.assertEqual(config.fullchain_path, os.path.abspath(fullchain))
def test_certonly_bad_args(self):
try:
self._call(['-a', 'bad_auth', 'certonly'])
assert False, "Exception should have been raised"
except errors.PluginSelectionError as e:
self.assertTrue('The requested bad_auth plugin does not appear' in e.message)
def test_check_config_sanity_domain(self):
# Punycode
self.assertRaises(errors.ConfigurationError,
self._call,
['-d', 'this.is.xn--ls8h.tld'])
# FQDN
self.assertRaises(errors.ConfigurationError,
self._call,
['-d', 'comma,gotwrong.tld'])
# FQDN 2
self.assertRaises(errors.ConfigurationError,
self._call,
['-d', 'illegal.character=.tld'])
# Wildcard
self.assertRaises(errors.ConfigurationError,
self._call,
['-d', '*.wildcard.tld'])
# Bare IP address (this is actually a different error message now)
self.assertRaises(errors.ConfigurationError,
self._call,
['-d', '204.11.231.35'])
def test_csr_with_besteffort(self):
self.assertRaises(
errors.Error, self._call,
'certonly --csr {0} --allow-subset-of-names'.format(CSR).split())
def test_run_with_csr(self):
# This is an error because you can only use --csr with certonly
try:
self._call(['--csr', CSR])
except errors.Error as e:
assert "Please try the certonly" in repr(e)
return
assert False, "Expected supplying --csr to fail with default verb"
def test_csr_with_no_domains(self):
self.assertRaises(
errors.Error, self._call,
'certonly --csr {0}'.format(
test_util.vector_path('csr-nonames.pem')).split())
def test_csr_with_inconsistent_domains(self):
self.assertRaises(
errors.Error, self._call,
'certonly -d example.org --csr {0}'.format(CSR).split())
def _get_argument_parser(self):
plugins = disco.PluginsRegistry.find_all()
return functools.partial(cli.prepare_and_parse_args, plugins)
def test_parse_domains(self):
parse = self._get_argument_parser()
short_args = ['-d', 'example.com']
namespace = parse(short_args)
self.assertEqual(namespace.domains, ['example.com'])
short_args = ['-d', 'trailing.period.com.']
namespace = parse(short_args)
self.assertEqual(namespace.domains, ['trailing.period.com'])
short_args = ['-d', 'example.com,another.net,third.org,example.com']
namespace = parse(short_args)
self.assertEqual(namespace.domains, ['example.com', 'another.net',
'third.org'])
long_args = ['--domains', 'example.com']
namespace = parse(long_args)
self.assertEqual(namespace.domains, ['example.com'])
long_args = ['--domains', 'trailing.period.com.']
namespace = parse(long_args)
self.assertEqual(namespace.domains, ['trailing.period.com'])
long_args = ['--domains', 'example.com,another.net,example.com']
namespace = parse(long_args)
self.assertEqual(namespace.domains, ['example.com', 'another.net'])
def test_server_flag(self):
parse = self._get_argument_parser()
namespace = parse('--server example.com'.split())
self.assertEqual(namespace.server, 'example.com')
def _check_server_conflict_message(self, parser_args, conflicting_args):
parse = self._get_argument_parser()
try:
parse(parser_args)
self.fail( # pragma: no cover
"The following flags didn't conflict with "
'--server: {0}'.format(', '.join(conflicting_args)))
except errors.Error as error:
self.assertTrue('--server' in error.message)
for arg in conflicting_args:
self.assertTrue(arg in error.message)
def test_must_staple_flag(self):
parse = self._get_argument_parser()
short_args = ['--must-staple']
namespace = parse(short_args)
self.assertTrue(namespace.must_staple)
self.assertTrue(namespace.staple)
def test_staging_flag(self):
parse = self._get_argument_parser()
short_args = ['--staging']
namespace = parse(short_args)
self.assertTrue(namespace.staging)
self.assertEqual(namespace.server, constants.STAGING_URI)
short_args += '--server example.com'.split()
self._check_server_conflict_message(short_args, '--staging')
def _assert_dry_run_flag_worked(self, namespace, existing_account):
self.assertTrue(namespace.dry_run)
self.assertTrue(namespace.break_my_certs)
self.assertTrue(namespace.staging)
self.assertEqual(namespace.server, constants.STAGING_URI)
if existing_account:
self.assertTrue(namespace.tos)
self.assertTrue(namespace.register_unsafely_without_email)
else:
self.assertFalse(namespace.tos)
self.assertFalse(namespace.register_unsafely_without_email)
def test_dry_run_flag(self):
parse = self._get_argument_parser()
config_dir = tempfile.mkdtemp()
short_args = '--dry-run --config-dir {0}'.format(config_dir).split()
self.assertRaises(errors.Error, parse, short_args)
self._assert_dry_run_flag_worked(
parse(short_args + ['auth']), False)
self._assert_dry_run_flag_worked(
parse(short_args + ['certonly']), False)
self._assert_dry_run_flag_worked(
parse(short_args + ['renew']), False)
account_dir = os.path.join(config_dir, constants.ACCOUNTS_DIR)
os.mkdir(account_dir)
os.mkdir(os.path.join(account_dir, 'fake_account_dir'))
self._assert_dry_run_flag_worked(parse(short_args + ['auth']), True)
self._assert_dry_run_flag_worked(parse(short_args + ['renew']), True)
short_args += ['certonly']
self._assert_dry_run_flag_worked(parse(short_args), True)
short_args += '--server example.com'.split()
conflicts = ['--dry-run']
self._check_server_conflict_message(short_args, '--dry-run')
short_args += ['--staging']
conflicts += ['--staging']
self._check_server_conflict_message(short_args, conflicts)
def _certonly_new_request_common(self, mock_client, args=None):
with mock.patch('certbot.main._treat_as_renewal') as mock_renewal:
mock_renewal.return_value = ("newcert", None)
with mock.patch('certbot.main._init_le_client') as mock_init:
mock_init.return_value = mock_client
if args is None:
args = []
args += '-d foo.bar -a standalone certonly'.split()
self._call(args)
@mock.patch('certbot.main.zope.component.getUtility')
def test_certonly_dry_run_new_request_success(self, mock_get_utility):
mock_client = mock.MagicMock()
mock_client.obtain_and_enroll_certificate.return_value = None
self._certonly_new_request_common(mock_client, ['--dry-run'])
self.assertEqual(
mock_client.obtain_and_enroll_certificate.call_count, 1)
self.assertTrue(
'dry run' in mock_get_utility().add_message.call_args[0][0])
# Asserts we don't suggest donating after a successful dry run
self.assertEqual(mock_get_utility().add_message.call_count, 1)
@mock.patch('certbot.crypto_util.notAfter')
@mock.patch('certbot.main.zope.component.getUtility')
def test_certonly_new_request_success(self, mock_get_utility, mock_notAfter):
cert_path = '/etc/letsencrypt/live/foo.bar'
date = '1970-01-01'
mock_notAfter().date.return_value = date
mock_lineage = mock.MagicMock(cert=cert_path, fullchain=cert_path)
mock_client = mock.MagicMock()
mock_client.obtain_and_enroll_certificate.return_value = mock_lineage
self._certonly_new_request_common(mock_client)
self.assertEqual(
mock_client.obtain_and_enroll_certificate.call_count, 1)
cert_msg = mock_get_utility().add_message.call_args_list[0][0][0]
self.assertTrue(cert_path in cert_msg)
self.assertTrue(date in cert_msg)
self.assertTrue(
'donate' in mock_get_utility().add_message.call_args[0][0])
def test_certonly_new_request_failure(self):
mock_client = mock.MagicMock()
mock_client.obtain_and_enroll_certificate.return_value = False
self.assertRaises(errors.Error,
self._certonly_new_request_common, mock_client)
def _test_renewal_common(self, due_for_renewal, extra_args, log_out=None,
args=None, should_renew=True, error_expected=False):
# pylint: disable=too-many-locals,too-many-arguments
cert_path = 'certbot/tests/testdata/cert.pem'
chain_path = '/etc/letsencrypt/live/foo.bar/fullchain.pem'
mock_lineage = mock.MagicMock(cert=cert_path, fullchain=chain_path)
mock_lineage.should_autorenew.return_value = due_for_renewal
mock_certr = mock.MagicMock()
mock_key = mock.MagicMock(pem='pem_key')
mock_client = mock.MagicMock()
stdout = None
mock_client.obtain_certificate.return_value = (mock_certr, 'chain',
mock_key, 'csr')
try:
with mock.patch('certbot.main._find_duplicative_certs') as mock_fdc:
mock_fdc.return_value = (mock_lineage, None)
with mock.patch('certbot.main._init_le_client') as mock_init:
mock_init.return_value = mock_client
get_utility_path = 'certbot.main.zope.component.getUtility'
with mock.patch(get_utility_path) as mock_get_utility:
with mock.patch('certbot.main.renewal.OpenSSL') as mock_ssl:
mock_latest = mock.MagicMock()
mock_latest.get_issuer.return_value = "Fake fake"
mock_ssl.crypto.load_certificate.return_value = mock_latest
with mock.patch('certbot.main.renewal.crypto_util'):
if not args:
args = ['-d', 'isnot.org', '-a', 'standalone', 'certonly']
if extra_args:
args += extra_args
try:
ret, stdout, _, _ = self._call(args)
if ret:
print("Returned", ret)
raise AssertionError(ret)
assert not error_expected, "renewal should have errored"
except: # pylint: disable=bare-except
if not error_expected:
raise AssertionError(
"Unexpected renewal error:\n" +
traceback.format_exc())
if should_renew:
mock_client.obtain_certificate.assert_called_once_with(['isnot.org'])
else:
self.assertEqual(mock_client.obtain_certificate.call_count, 0)
except:
self._dump_log()
raise
finally:
if log_out:
with open(os.path.join(self.logs_dir, "letsencrypt.log")) as lf:
self.assertTrue(log_out in lf.read())
return mock_lineage, mock_get_utility, stdout
def test_certonly_renewal(self):
lineage, get_utility, _ = self._test_renewal_common(True, [])
self.assertEqual(lineage.save_successor.call_count, 1)
lineage.update_all_links_to.assert_called_once_with(
lineage.latest_common_version())
cert_msg = get_utility().add_message.call_args_list[0][0][0]
self.assertTrue('fullchain.pem' in cert_msg)
self.assertTrue('donate' in get_utility().add_message.call_args[0][0])
def test_certonly_renewal_triggers(self):
# --dry-run should force renewal
_, get_utility, _ = self._test_renewal_common(False, ['--dry-run', '--keep'],
log_out="simulating renewal")
self.assertEqual(get_utility().add_message.call_count, 1)
self.assertTrue('dry run' in get_utility().add_message.call_args[0][0])
self._test_renewal_common(False, ['--renew-by-default', '-tvv', '--debug'],
log_out="Auto-renewal forced")
self.assertEqual(get_utility().add_message.call_count, 1)
self._test_renewal_common(False, ['-tvv', '--debug', '--keep'],
log_out="not yet due", should_renew=False)
def _dump_log(self):
with open(os.path.join(self.logs_dir, "letsencrypt.log")) as lf:
print("Logs:")
print(lf.read())
def _make_test_renewal_conf(self, testfile):
with open(test_util.vector_path(testfile)) as src:
# put the correct path for cert.pem, chain.pem etc in the renewal conf
renewal_conf = src.read().replace("MAGICDIR", test_util.vector_path())
rd = os.path.join(self.config_dir, "renewal")
if not os.path.exists(rd):
os.makedirs(rd)
rc = os.path.join(rd, "sample-renewal.conf")
with open(rc, "w") as dest:
dest.write(renewal_conf)
return rc
def test_renew_verb(self):
self._make_test_renewal_conf('sample-renewal.conf')
args = ["renew", "--dry-run", "-tvv"]
self._test_renewal_common(True, [], args=args, should_renew=True)
def test_quiet_renew(self):
self._make_test_renewal_conf('sample-renewal.conf')
args = ["renew", "--dry-run"]
_, _, stdout = self._test_renewal_common(True, [], args=args, should_renew=True)
out = stdout.getvalue()
self.assertTrue("renew" in out)
args = ["renew", "--dry-run", "-q"]
_, _, stdout = self._test_renewal_common(True, [], args=args, should_renew=True)
out = stdout.getvalue()
self.assertEqual("", out)
@mock.patch("certbot.cli.set_by_cli")
def test_ancient_webroot_renewal_conf(self, mock_set_by_cli):
mock_set_by_cli.return_value = False
rc_path = self._make_test_renewal_conf('sample-renewal-ancient.conf')
args = mock.MagicMock(account=None, email=None, webroot_path=None)
config = configuration.NamespaceConfig(args)
lineage = storage.RenewableCert(rc_path,
configuration.RenewerConfiguration(config))
renewalparams = lineage.configuration["renewalparams"]
# pylint: disable=protected-access
renewal._restore_webroot_config(config, renewalparams)
self.assertEqual(config.webroot_path, ["/var/www/"])
def test_renew_verb_empty_config(self):
rd = os.path.join(self.config_dir, 'renewal')
if not os.path.exists(rd):
os.makedirs(rd)
with open(os.path.join(rd, 'empty.conf'), 'w'):
pass # leave the file empty
args = ["renew", "--dry-run", "-tvv"]
self._test_renewal_common(False, [], args=args, should_renew=False, error_expected=True)
def _make_dummy_renewal_config(self):
renewer_configs_dir = os.path.join(self.config_dir, 'renewal')
os.makedirs(renewer_configs_dir)
with open(os.path.join(renewer_configs_dir, 'test.conf'), 'w') as f:
f.write("My contents don't matter")
def _test_renew_common(self, renewalparams=None, names=None,
assert_oc_called=None, **kwargs):
self._make_dummy_renewal_config()
with mock.patch('certbot.storage.RenewableCert') as mock_rc:
mock_lineage = mock.MagicMock()
mock_lineage.fullchain = "somepath/fullchain.pem"
if renewalparams is not None:
mock_lineage.configuration = {'renewalparams': renewalparams}
if names is not None:
mock_lineage.names.return_value = names
mock_rc.return_value = mock_lineage
with mock.patch('certbot.main.obtain_cert') as mock_obtain_cert:
kwargs.setdefault('args', ['renew'])
self._test_renewal_common(True, None, should_renew=False, **kwargs)
if assert_oc_called is not None:
if assert_oc_called:
self.assertTrue(mock_obtain_cert.called)
else:
self.assertFalse(mock_obtain_cert.called)
def test_renew_no_renewalparams(self):
self._test_renew_common(assert_oc_called=False, error_expected=True)
def test_renew_no_authenticator(self):
self._test_renew_common(renewalparams={}, assert_oc_called=False,
error_expected=True)
def test_renew_with_bad_int(self):
renewalparams = {'authenticator': 'webroot',
'rsa_key_size': 'over 9000'}
self._test_renew_common(renewalparams=renewalparams, error_expected=True,
assert_oc_called=False)
def test_renew_with_nonetype_http01(self):
renewalparams = {'authenticator': 'webroot',
'http01_port': 'None'}
self._test_renew_common(renewalparams=renewalparams,
assert_oc_called=True)
def test_renew_with_bad_domain(self):
renewalparams = {'authenticator': 'webroot'}
names = ['*.example.com']
self._test_renew_common(renewalparams=renewalparams, error_expected=True,
names=names, assert_oc_called=False)
def test_renew_with_configurator(self):
renewalparams = {'authenticator': 'webroot'}
self._test_renew_common(
renewalparams=renewalparams, assert_oc_called=True,
args='renew --configurator apache'.split())
def test_renew_plugin_config_restoration(self):
renewalparams = {'authenticator': 'webroot',
'webroot_path': 'None',
'webroot_imaginary_flag': '42'}
self._test_renew_common(renewalparams=renewalparams,
assert_oc_called=True)
def test_renew_with_webroot_map(self):
renewalparams = {'authenticator': 'webroot'}
self._test_renew_common(
renewalparams=renewalparams, assert_oc_called=True,
args=['renew', '--webroot-map', '{"example.com": "/tmp"}'])
def test_renew_reconstitute_error(self):
# pylint: disable=protected-access
with mock.patch('certbot.main.renewal._reconstitute') as mock_reconstitute:
mock_reconstitute.side_effect = Exception
self._test_renew_common(assert_oc_called=False, error_expected=True)
def test_renew_obtain_cert_error(self):
self._make_dummy_renewal_config()
with mock.patch('certbot.storage.RenewableCert') as mock_rc:
mock_lineage = mock.MagicMock()
mock_lineage.fullchain = "somewhere/fullchain.pem"
mock_rc.return_value = mock_lineage
mock_lineage.configuration = {
'renewalparams': {'authenticator': 'webroot'}}
with mock.patch('certbot.main.obtain_cert') as mock_obtain_cert:
mock_obtain_cert.side_effect = Exception
self._test_renewal_common(True, None, error_expected=True,
args=['renew'], should_renew=False)
def test_renew_with_bad_cli_args(self):
self._test_renewal_common(True, None, args='renew -d example.com'.split(),
should_renew=False, error_expected=True)
self._test_renewal_common(True, None, args='renew --csr {0}'.format(CSR).split(),
should_renew=False, error_expected=True)
@mock.patch('certbot.main.zope.component.getUtility')
@mock.patch('certbot.main._treat_as_renewal')
@mock.patch('certbot.main._init_le_client')
def test_certonly_reinstall(self, mock_init, mock_renewal, mock_get_utility):
mock_renewal.return_value = ('reinstall', mock.MagicMock())
mock_init.return_value = mock_client = mock.MagicMock()
self._call(['-d', 'foo.bar', '-a', 'standalone', 'certonly'])
self.assertFalse(mock_client.obtain_certificate.called)
self.assertFalse(mock_client.obtain_and_enroll_certificate.called)
self.assertEqual(mock_get_utility().add_message.call_count, 0)
#self.assertTrue('donate' not in mock_get_utility().add_message.call_args[0][0])
def _test_certonly_csr_common(self, extra_args=None):
certr = 'certr'
chain = 'chain'
mock_client = mock.MagicMock()
mock_client.obtain_certificate_from_csr.return_value = (certr, chain)
cert_path = '/etc/letsencrypt/live/example.com/cert.pem'
mock_client.save_certificate.return_value = cert_path, None, None
with mock.patch('certbot.main._init_le_client') as mock_init:
mock_init.return_value = mock_client
get_utility_path = 'certbot.main.zope.component.getUtility'
with mock.patch(get_utility_path) as mock_get_utility:
chain_path = '/etc/letsencrypt/live/example.com/chain.pem'
full_path = '/etc/letsencrypt/live/example.com/fullchain.pem'
args = ('-a standalone certonly --csr {0} --cert-path {1} '
'--chain-path {2} --fullchain-path {3}').format(
CSR, cert_path, chain_path, full_path).split()
if extra_args:
args += extra_args
with mock.patch('certbot.main.crypto_util'):
self._call(args)
if '--dry-run' in args:
self.assertFalse(mock_client.save_certificate.called)
else:
mock_client.save_certificate.assert_called_once_with(
certr, chain, cert_path, chain_path, full_path)
return mock_get_utility
def test_certonly_csr(self):
mock_get_utility = self._test_certonly_csr_common()
cert_msg = mock_get_utility().add_message.call_args_list[0][0][0]
self.assertTrue('cert.pem' in cert_msg)
self.assertTrue(
'donate' in mock_get_utility().add_message.call_args[0][0])
def test_certonly_csr_dry_run(self):
mock_get_utility = self._test_certonly_csr_common(['--dry-run'])
self.assertEqual(mock_get_utility().add_message.call_count, 1)
self.assertTrue(
'dry run' in mock_get_utility().add_message.call_args[0][0])
@mock.patch('certbot.main.client.acme_client')
def test_revoke_with_key(self, mock_acme_client):
server = 'foo.bar'
self._call_no_clientmock(['--cert-path', CERT, '--key-path', KEY,
'--server', server, 'revoke'])
with open(KEY) as f:
mock_acme_client.Client.assert_called_once_with(
server, key=jose.JWK.load(f.read()), net=mock.ANY)
with open(CERT) as f:
cert = crypto_util.pyopenssl_load_certificate(f.read())[0]
mock_revoke = mock_acme_client.Client().revoke
mock_revoke.assert_called_once_with(jose.ComparableX509(cert))
@mock.patch('certbot.main._determine_account')
def test_revoke_without_key(self, mock_determine_account):
mock_determine_account.return_value = (mock.MagicMock(), None)
_, _, _, client = self._call(['--cert-path', CERT, 'revoke'])
with open(CERT) as f:
cert = crypto_util.pyopenssl_load_certificate(f.read())[0]
mock_revoke = client.acme_from_config_key().revoke
mock_revoke.assert_called_once_with(jose.ComparableX509(cert))
@mock.patch('certbot.main.sys')
def test_handle_exception(self, mock_sys):
# pylint: disable=protected-access
from acme import messages
config = mock.MagicMock()
mock_open = mock.mock_open()
with mock.patch('certbot.main.open', mock_open, create=True):
exception = Exception('detail')
config.verbose_count = 1
main._handle_exception(
Exception, exc_value=exception, trace=None, config=None)
mock_open().write.assert_called_once_with(''.join(
traceback.format_exception_only(Exception, exception)))
error_msg = mock_sys.exit.call_args_list[0][0][0]
self.assertTrue('unexpected error' in error_msg)
with mock.patch('certbot.main.open', mock_open, create=True):
mock_open.side_effect = [KeyboardInterrupt]
error = errors.Error('detail')
main._handle_exception(
errors.Error, exc_value=error, trace=None, config=None)
# assert_any_call used because sys.exit doesn't exit in cli.py
mock_sys.exit.assert_any_call(''.join(
traceback.format_exception_only(errors.Error, error)))
exception = messages.Error(detail='alpha', typ='urn:acme:error:triffid',
title='beta')
config = mock.MagicMock(debug=False, verbose_count=-3)
main._handle_exception(
messages.Error, exc_value=exception, trace=None, config=config)
error_msg = mock_sys.exit.call_args_list[-1][0][0]
self.assertTrue('unexpected error' in error_msg)
self.assertTrue('acme:error' not in error_msg)
self.assertTrue('alpha' in error_msg)
self.assertTrue('beta' in error_msg)
config = mock.MagicMock(debug=False, verbose_count=1)
main._handle_exception(
messages.Error, exc_value=exception, trace=None, config=config)
error_msg = mock_sys.exit.call_args_list[-1][0][0]
self.assertTrue('unexpected error' in error_msg)
self.assertTrue('acme:error' in error_msg)
self.assertTrue('alpha' in error_msg)
interrupt = KeyboardInterrupt('detail')
main._handle_exception(
KeyboardInterrupt, exc_value=interrupt, trace=None, config=None)
mock_sys.exit.assert_called_with(''.join(
traceback.format_exception_only(KeyboardInterrupt, interrupt)))
def test_read_file(self):
rel_test_path = os.path.relpath(os.path.join(self.tmp_dir, 'foo'))
self.assertRaises(
argparse.ArgumentTypeError, cli.read_file, rel_test_path)
test_contents = 'bar\n'
with open(rel_test_path, 'w') as f:
f.write(test_contents)
path, contents = cli.read_file(rel_test_path)
self.assertEqual(path, os.path.abspath(path))
self.assertEqual(contents, test_contents)
def test_agree_dev_preview_config(self):
with mock.patch('certbot.main.run') as mocked_run:
self._call(['-c', test_util.vector_path('cli.ini')])
self.assertTrue(mocked_run.called)
def test_register(self):
with mock.patch('certbot.main.client') as mocked_client:
acc = mock.MagicMock()
acc.id = "imaginary_account"
mocked_client.register.return_value = (acc, "worked")
self._call_no_clientmock(["register", "--email", "[email protected]"])
# TODO: It would be more correct to explicitly check that
# _determine_account() gets called in the above case,
# but coverage statistics should also show that it did.
with mock.patch('certbot.main.account') as mocked_account:
mocked_storage = mock.MagicMock()
mocked_account.AccountFileStorage.return_value = mocked_storage
mocked_storage.find_all.return_value = ["an account"]
x = self._call_no_clientmock(["register", "--email", "[email protected]"])
self.assertTrue("There is an existing account" in x[0])
def test_update_registration_no_existing_accounts(self):
# with mock.patch('certbot.main.client') as mocked_client:
with mock.patch('certbot.main.account') as mocked_account:
mocked_storage = mock.MagicMock()
mocked_account.AccountFileStorage.return_value = mocked_storage
mocked_storage.find_all.return_value = []
x = self._call_no_clientmock(
["register", "--update-registration", "--email",
"[email protected]"])
self.assertTrue("Could not find an existing account" in x[0])
def test_update_registration_unsafely(self):
# This test will become obsolete when register --update-registration
# supports removing an e-mail address from the account
with mock.patch('certbot.main.account') as mocked_account:
mocked_storage = mock.MagicMock()
mocked_account.AccountFileStorage.return_value = mocked_storage
mocked_storage.find_all.return_value = ["an account"]
x = self._call_no_clientmock(
"register --update-registration "
"--register-unsafely-without-email".split())
self.assertTrue("--register-unsafely-without-email" in x[0])
@mock.patch('certbot.main.display_ops.get_email')
@mock.patch('certbot.main.zope.component.getUtility')
def test_update_registration_with_email(self, mock_utility, mock_email):
email = "[email protected]"
mock_email.return_value = email
with mock.patch('certbot.main.client') as mocked_client:
with mock.patch('certbot.main.account') as mocked_account:
with mock.patch('certbot.main._determine_account') as mocked_det:
with mock.patch('certbot.main.client') as mocked_client:
mocked_storage = mock.MagicMock()
mocked_account.AccountFileStorage.return_value = mocked_storage
mocked_storage.find_all.return_value = ["an account"]
mocked_det.return_value = (mock.MagicMock(), "foo")
acme_client = mock.MagicMock()
mocked_client.Client.return_value = acme_client
x = self._call_no_clientmock(
["register", "--update-registration"])
# When registration change succeeds, the return value
# of register() is None
self.assertTrue(x[0] is None)
                        # and we supposedly did update the registration on
                        # the server
self.assertTrue(
acme_client.acme.update_registration.called)
# and we saved the updated registration on disk
self.assertTrue(mocked_storage.save_regr.called)
self.assertTrue(
email in mock_utility().add_message.call_args[0][0])
def test_conflicting_args(self):
args = ['renew', '--dialog', '--text']
self.assertRaises(errors.Error, self._call, args)
class DetermineAccountTest(unittest.TestCase):
"""Tests for certbot.cli._determine_account."""
def setUp(self):
self.args = mock.MagicMock(account=None, email=None,
register_unsafely_without_email=False)
self.config = configuration.NamespaceConfig(self.args)
self.accs = [mock.MagicMock(id='x'), mock.MagicMock(id='y')]
self.account_storage = account.AccountMemoryStorage()
def _call(self):
# pylint: disable=protected-access
from certbot.main import _determine_account
with mock.patch('certbot.main.account.AccountFileStorage') as mock_storage:
mock_storage.return_value = self.account_storage
return _determine_account(self.config)
def test_args_account_set(self):
self.account_storage.save(self.accs[1])
self.config.account = self.accs[1].id
self.assertEqual((self.accs[1], None), self._call())
self.assertEqual(self.accs[1].id, self.config.account)
self.assertTrue(self.config.email is None)
def test_single_account(self):
self.account_storage.save(self.accs[0])
self.assertEqual((self.accs[0], None), self._call())
self.assertEqual(self.accs[0].id, self.config.account)
self.assertTrue(self.config.email is None)
@mock.patch('certbot.client.display_ops.choose_account')
def test_multiple_accounts(self, mock_choose_accounts):
for acc in self.accs:
self.account_storage.save(acc)
mock_choose_accounts.return_value = self.accs[1]
self.assertEqual((self.accs[1], None), self._call())
self.assertEqual(
set(mock_choose_accounts.call_args[0][0]), set(self.accs))
self.assertEqual(self.accs[1].id, self.config.account)
self.assertTrue(self.config.email is None)
@mock.patch('certbot.client.display_ops.get_email')
def test_no_accounts_no_email(self, mock_get_email):
mock_get_email.return_value = '[email protected]'
with mock.patch('certbot.main.client') as client:
client.register.return_value = (
self.accs[0], mock.sentinel.acme)
self.assertEqual((self.accs[0], mock.sentinel.acme), self._call())
client.register.assert_called_once_with(
self.config, self.account_storage, tos_cb=mock.ANY)
self.assertEqual(self.accs[0].id, self.config.account)
self.assertEqual('[email protected]', self.config.email)
def test_no_accounts_email(self):
self.config.email = 'other email'
with mock.patch('certbot.main.client') as client:
client.register.return_value = (self.accs[1], mock.sentinel.acme)
self._call()
self.assertEqual(self.accs[1].id, self.config.account)
self.assertEqual('other email', self.config.email)
class DuplicativeCertsTest(storage_test.BaseRenewableCertTest):
"""Test to avoid duplicate lineages."""
def setUp(self):
super(DuplicativeCertsTest, self).setUp()
self.config.write()
self._write_out_ex_kinds()
def tearDown(self):
shutil.rmtree(self.tempdir)
@mock.patch('certbot.util.make_or_verify_dir')
def test_find_duplicative_names(self, unused_makedir):
from certbot.main import _find_duplicative_certs
test_cert = test_util.load_vector('cert-san.pem')
with open(self.test_rc.cert, 'w') as f:
f.write(test_cert)
# No overlap at all
result = _find_duplicative_certs(
self.cli_config, ['wow.net', 'hooray.org'])
self.assertEqual(result, (None, None))
# Totally identical
result = _find_duplicative_certs(
self.cli_config, ['example.com', 'www.example.com'])
self.assertTrue(result[0].configfile.filename.endswith('example.org.conf'))
self.assertEqual(result[1], None)
# Superset
result = _find_duplicative_certs(
self.cli_config, ['example.com', 'www.example.com', 'something.new'])
self.assertEqual(result[0], None)
self.assertTrue(result[1].configfile.filename.endswith('example.org.conf'))
# Partial overlap doesn't count
result = _find_duplicative_certs(
self.cli_config, ['example.com', 'something.new'])
self.assertEqual(result, (None, None))
class DefaultTest(unittest.TestCase):
"""Tests for certbot.cli._Default."""
def setUp(self):
# pylint: disable=protected-access
self.default1 = cli._Default()
self.default2 = cli._Default()
def test_boolean(self):
self.assertFalse(self.default1)
self.assertFalse(self.default2)
def test_equality(self):
self.assertEqual(self.default1, self.default2)
def test_hash(self):
self.assertEqual(hash(self.default1), hash(self.default2))
class SetByCliTest(unittest.TestCase):
"""Tests for certbot.set_by_cli and related functions."""
def setUp(self):
reload_module(cli)
def test_webroot_map(self):
args = '-w /var/www/html -d example.com'.split()
verb = 'renew'
self.assertTrue(_call_set_by_cli('webroot_map', args, verb))
def test_report_config_interaction_str(self):
cli.report_config_interaction('manual_public_ip_logging_ok',
'manual_test_mode')
cli.report_config_interaction('manual_test_mode', 'manual')
self._test_report_config_interaction_common()
def test_report_config_interaction_iterable(self):
cli.report_config_interaction(('manual_public_ip_logging_ok',),
('manual_test_mode',))
cli.report_config_interaction(('manual_test_mode',), ('manual',))
self._test_report_config_interaction_common()
def _test_report_config_interaction_common(self):
"""Tests implied interaction between manual flags.
--manual implies --manual-test-mode which implies
--manual-public-ip-logging-ok. These interactions don't actually
exist in the client, but are used here for testing purposes.
"""
args = ['--manual']
verb = 'renew'
for v in ('manual', 'manual_test_mode', 'manual_public_ip_logging_ok'):
self.assertTrue(_call_set_by_cli(v, args, verb))
cli.set_by_cli.detector = None
args = ['--manual-test-mode']
for v in ('manual_test_mode', 'manual_public_ip_logging_ok'):
self.assertTrue(_call_set_by_cli(v, args, verb))
self.assertFalse(_call_set_by_cli('manual', args, verb))
def _call_set_by_cli(var, args, verb):
with mock.patch('certbot.cli.helpful_parser') as mock_parser:
mock_parser.args = args
mock_parser.verb = verb
return cli.set_by_cli(var)
if __name__ == '__main__':
unittest.main() # pragma: no cover
|
py | b40b98b9c136e02d642ea34116d3ba8523fa560e | import numpy as np
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasRegressor
from sklearn.model_selection import train_test_split
from keras import metrics
from keras.callbacks import EarlyStopping
from keras.optimizers import Adam
from keras.initializers import glorot_uniform
from sklearn.preprocessing import normalize, MinMaxScaler
import keras.backend as K
import matplotlib.pyplot as plt
import time
class ModelDataset:
def __init__(self, input_size, momenta, labels):
'''
:param input_size: the flattened input dim for the model
e.g. 3 jets has input_dim of (3-1)*4=8
:param momenta: input momenta in NJET format (i.e. [num points, num jets, 4])
:param labels: labels
'''
self.input_size = input_size
self.momenta = momenta
self.labels = labels
def standardise(self, data):
'''standardise data
:param data: an array over which to standardise (this array may be a variable column)
'''
array = np.array(data)
mean= np.mean(array)
std = np.std(array)
standard = (array-mean)/(std)
return mean, std, standard
def root_mean_squared_error(self, y_true, y_pred):
        'custom loss function RMSE'
return K.sqrt(K.mean(K.square(y_pred - y_true)))
def process_training_data(self, random_state=42, **kwargs):
'''
        training data must be standardised and split for training and validation
**kwargs can take on:
:param moms: the PS points in format [no_PS_points, points, 4]
:param labs: ground truth labels of squared matrix elements
'''
moms = kwargs.get('moms', self.momenta)
labs = kwargs.get('labs', self.labels)
momenta = np.array(moms)[:,3:,:] #pick out all but one jet
labels = np.array(labs)
x_standard = momenta.reshape(-1,4).copy() #shape for standardising each momentum element
self.x_mean = np.zeros(4)
self.x_std = np.zeros(4)
self.x_mean[0],self.x_std[0],x_standard[:,0] = self.standardise(momenta.reshape(-1,4)[:,0])
self.x_mean[1],self.x_std[1],x_standard[:,1] = self.standardise(momenta.reshape(-1,4)[:,1])
self.x_mean[2],self.x_std[2],x_standard[:,2] = self.standardise(momenta.reshape(-1,4)[:,2])
self.x_mean[3],self.x_std[3],x_standard[:,3] = self.standardise(momenta.reshape(-1,4)[:,3])
x_standard = x_standard.reshape(-1,self.input_size) #shape for passing into network
self.y_mean, self.y_std, y_standard = self.standardise(labels)
        X_train, X_test, y_train, y_test = train_test_split(x_standard, y_standard, test_size=0.2, random_state=random_state)
return X_train, X_test, y_train, y_test, self.x_mean, self.x_std, self.y_mean, self.y_std
def baseline_model(self, layers, lr=0.001):
'define and compile model'
# create model
# at some point can use new Keras tuning feature for optimising this model
model = Sequential()
model.add(Dense(layers[0], input_dim=(self.input_size), activation='tanh', kernel_initializer = glorot_uniform(seed=1337)))
model.add(Dense(layers[1], activation='tanh', kernel_initializer = glorot_uniform(seed = 1337+123)))
model.add(Dense(layers[2], activation='tanh', kernel_initializer = glorot_uniform(seed = 1337+345)))
model.add(Dense(1, kernel_initializer = glorot_uniform(seed = 1337-545)))
# Compile model
model.compile(optimizer = Adam(lr=lr, beta_1=0.9, beta_2=0.999, amsgrad=False), loss = 'mean_squared_error')
return model
def fit(self, layers=[32,16,8], epochs=10000, lr=0.001, **kwargs):
'''
fit model
        :param layers: an array of length 3 providing the number of hidden nodes in the three layers
'''
random_state = kwargs.get('random_state', 42)
        if len(layers) != 3:
            raise Exception('the number of layers to be defined is 3, you have defined {} layers'.format(len(layers)))
X_train, X_test, y_train, y_test,_,_,_,_ = self.process_training_data(random_state = random_state)
print (X_train.shape)
self.model = self.baseline_model(layers=layers, lr=lr)
ES = EarlyStopping(monitor='val_loss', min_delta=0, patience=100, verbose=0, restore_best_weights=True)
self.model.fit(X_train, y_train, epochs=epochs, validation_data=(X_test, y_test),callbacks=[ES], batch_size=512, shuffle=False)
return self.model, self.x_mean, self.x_std, self.y_mean, self.y_std
def standardise_test(self, data, mean, std):
array = np.array(data)
standard = (array-mean)/(std)
return standard
def process_testing_data(self, moms, **kwargs):
'''
**kwargs can take on:
:param x_mean, x_std, y_mean, y_std: mean and std of x and y values if not (properly) provided by class e.g. if using a pretrained model with known mean and std
'''
labs = kwargs.get('labs', None)
momenta = np.array(moms)[:,3:,:] #pick out all but one jet
y_mean = kwargs.get('y_mean', self.y_mean)
print_y = kwargs.get('print_y', True)
if print_y == True:
print ('Using y_mean of {} instead of {}'.format(y_mean, self.y_mean))
y_std = kwargs.get('y_std', self.y_std)
x_mean = kwargs.get('x_mean', self.x_mean)
x_std = kwargs.get('x_std', self.x_std)
if labs is not None:
labels = np.array(labs)
x_standard = momenta.reshape(-1,4).copy() #shape for standardising each momentum element
x_standard[:,0] = self.standardise_test(momenta.reshape(-1,4)[:,0],x_mean[0],x_std[0])
x_standard[:,1] = self.standardise_test(momenta.reshape(-1,4)[:,1],x_mean[1],x_std[1])
x_standard[:,2] = self.standardise_test(momenta.reshape(-1,4)[:,2],x_mean[2],x_std[2])
x_standard[:,3] = self.standardise_test(momenta.reshape(-1,4)[:,3],x_mean[3],x_std[3])
x_standard = x_standard.reshape(-1,self.input_size) #shape for passing into network
if labs is not None:
y_standard = self.standardise_test(labels,y_mean,y_std)
return x_standard, y_standard
else:
return x_standard
def destandardise(self, data, mean, std):
'destandardise array for inference and comparison'
array = np.array(data)
return (array*std) + mean
def destandardise_data(self, y_pred, x_pred=None, **kwargs):
'''
destandardise any standardised data
:param y_pred: squared matrix element values
:param x_pred: optional parameter of momenta values to be destandardised
**kwargs can take on:
:param x_mean, x_std, y_mean, y_std: mean and std of x and y values if not (properly) provided by class e.g. if using a pretrained model with known mean and std
note: when initialising the class with the data used to train a pretrained model, the standardised data will be the same as used in training if the dataset is loaded and passed correctly as the mean and std is independent of the data splitting
'''
y_mean = kwargs.get('y_mean', self.y_mean)
y_std = kwargs.get('y_std', self.y_std)
x_mean = kwargs.get('x_mean', self.x_mean)
x_std = kwargs.get('x_std', self.x_std)
y_destandard = self.destandardise(y_pred,y_mean,y_std)
if x_pred is not None:
x_pred = x_pred.reshape(-1,4)
x_destandard = x_pred.copy()
x_destandard[:,0] = self.destandardise(x_pred[:,0],x_mean[0],x_std[0])
x_destandard[:,1] = self.destandardise(x_pred[:,1],x_mean[1],x_std[1])
x_destandard[:,2] = self.destandardise(x_pred[:,2],x_mean[2],x_std[2])
x_destandard[:,3] = self.destandardise(x_pred[:,3],x_mean[3],x_std[3])
            x_destandard = x_destandard.reshape(-1, self.input_size // 4, 4)
return x_destandard, y_destandard
else:
return y_destandard
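# ---------------------------------------------------------------------------
# Hypothetical usage sketch (added for illustration; not part of the original
# module). The `momenta` and `labels` arrays below are random placeholders
# standing in for NJET-format phase-space points and their squared matrix
# elements; with 5 momenta per point, the slicing in the class keeps 2 of
# them, giving input_size = 2 * 4 = 8.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    momenta = np.random.rand(1000, 5, 4)   # 1000 points, 5 momenta, 4-vectors
    labels = np.random.rand(1000)          # matching squared matrix elements
    dataset = ModelDataset(input_size=8, momenta=momenta, labels=labels)
    model, x_mean, x_std, y_mean, y_std = dataset.fit(layers=[16, 8, 4], epochs=10)
    x_test = dataset.process_testing_data(moms=momenta[:10])
    y_pred = dataset.destandardise_data(model.predict(x_test).flatten())
    print(y_pred[:5])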
|
py | b40b98f7c65f5cd0c5f34f44e577b23d816ddb01 | #!/usr/bin/python
''' MAUDE pipeline for downloading, joining and loading into elasticsearch
'''
import collections
import csv
import glob
import logging
import os
import re
import sys
from os.path import basename, dirname, join
from urllib.request import urlopen
import arrow
import luigi
from bs4 import BeautifulSoup
from openfda import common, parallel, index_util
from openfda import download_util
from openfda.common import newest_file_timestamp
from openfda.device_harmonization.pipeline import (Harmonized2OpenFDA,
DeviceAnnotateMapper)
from openfda.tasks import AlwaysRunTask, DependencyTriggeredTask
# Exceed default field_size limit, need to set to sys.maxsize
csv.field_size_limit(sys.maxsize)
RUN_DIR = dirname(dirname(os.path.abspath(__file__)))
BASE_DIR = './data/'
RAW_DIR = join(BASE_DIR, 'maude/raw/events')
# See https://github.com/FDA/openfda/issues/27
# Files for resolving device problem codes
DEVICE_PROBLEM_CODES_FILE = join(BASE_DIR, 'maude/extracted/events/deviceproblemcodes.txt')
PATIENT_PROBLEM_CODES_FILE = join(BASE_DIR, 'maude/extracted/events/patientproblemdata.txt')
# Use to ensure a standard naming of level db outputs is achieved across tasks.
DATE_FMT = 'YYYY-MM-DD'
CATEGORIES = ['foidevproblem', 'patientproblemcode', 'mdrfoi', 'patient', 'foidev', 'foitext', 'device']
IGNORE_FILES = ['deviceproblemcodes', 'patientproblemdata', 'add', 'change']
DEVICE_DOWNLOAD_PAGE = ('https://www.fda.gov/medical-devices/'
'mandatory-reporting-requirements-manufacturers-importers-and-device-user-facilities/'
'manufacturer-and-user-facility-device-experience-database-maude')
enum_file = join(RUN_DIR, 'maude/data/enums.csv')
enum_csv = csv.DictReader(open(enum_file))
ENUM = collections.defaultdict(lambda: collections.defaultdict(dict))
for row in enum_csv:
key = row['key_name']
code = row['code']
desc = row['code_desc']
ENUM[key][code] = desc
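# Illustrative note (added): ENUM is a nested mapping of column name -> raw
# code -> human readable description. With a hypothetical enums.csv row of
# key_name=adverse_event_flag, code=Y, code_desc=Yes, the lookup
# ENUM['adverse_event_flag']['Y'] would return 'Yes'.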
# patient and text records are missing header rows
FILE_HEADERS = {
'foidevproblem': [
'mdr_report_key',
'problem_code'],
'patientproblemcode': [
'mdr_report_key',
'patient_sequence_number',
'problem_code',
'date_added',
'date_changed'],
'patient': [
'mdr_report_key',
'patient_sequence_number',
'date_received',
'sequence_number_treatment',
'sequence_number_outcome'],
'foitext': [
'mdr_report_key',
'mdr_text_key',
'text_type_code',
'patient_sequence_number',
'date_report',
'text'],
'foidev': [
'mdr_report_key',
'device_event_key',
'implant_flag',
'date_removed_flag',
'device_sequence_number',
'date_received',
'brand_name',
'generic_name',
'manufacturer_d_name',
'manufacturer_d_address_1',
'manufacturer_d_address_2',
'manufacturer_d_city',
'manufacturer_d_state',
'manufacturer_d_zip_code',
'manufacturer_d_zip_code_ext',
'manufacturer_d_country',
'manufacturer_d_postal_code',
'expiration_date_of_device',
'model_number',
'catalog_number',
'lot_number',
'other_id_number',
'device_operator',
'device_availability',
'date_returned_to_manufacturer',
'device_report_product_code',
'device_age_text',
'device_evaluated_by_manufacturer',
'baseline_brand_name',
'baseline_generic_name',
'baseline_model_number',
'baseline_catalog_number',
'baseline_other_id_number',
'baseline_device_family',
'baseline_shelf_life_contained',
'baseline_shelf_life_in_months',
'baseline_pma_flag',
'baseline_pma_number',
'baseline_510_k__flag',
'baseline_510_k__number',
'baseline_preamendment_flag',
'baseline_transitional_flag',
'baseline_510_k__exempt_flag',
'baseline_date_first_marketed',
'baseline_date_ceased_marketing'
],
'device': [
'mdr_report_key',
'device_event_key',
'implant_flag',
'date_removed_flag',
'device_sequence_number',
'date_received',
'brand_name',
'generic_name',
'manufacturer_d_name',
'manufacturer_d_address_1',
'manufacturer_d_address_2',
'manufacturer_d_city',
'manufacturer_d_state',
'manufacturer_d_zip_code',
'manufacturer_d_zip_code_ext',
'manufacturer_d_country',
'manufacturer_d_postal_code',
'device_operator',
'expiration_date_of_device',
'model_number',
'catalog_number',
'lot_number',
'other_id_number',
'device_availability',
'date_returned_to_manufacturer',
'device_report_product_code',
'device_age_text',
'device_evaluated_by_manufacturer',
'combination_product_flag'
],
'mdrfoi': [
'mdr_report_key',
'event_key',
'report_number',
'report_source_code',
'manufacturer_link_flag',
'number_devices_in_event',
'number_patients_in_event',
'date_received',
'adverse_event_flag',
'product_problem_flag',
'date_report',
'date_of_event',
'reprocessed_and_reused_flag',
'reporter_occupation_code',
'health_professional',
'initial_report_to_fda',
'date_facility_aware',
'report_date',
'report_to_fda',
'date_report_to_fda',
'event_location',
'date_report_to_manufacturer',
'manufacturer_contact_t_name',
'manufacturer_contact_f_name',
'manufacturer_contact_l_name',
'manufacturer_contact_address_1',
'manufacturer_contact_address_2',
'manufacturer_contact_city',
'manufacturer_contact_state',
'manufacturer_contact_zip_code',
'manufacturer_contact_zip_ext',
'manufacturer_contact_country',
'manufacturer_contact_postal_code',
'manufacturer_contact_area_code',
'manufacturer_contact_exchange',
'manufacturer_contact_phone_number',
'manufacturer_contact_extension',
'manufacturer_contact_pcountry',
'manufacturer_contact_pcity',
'manufacturer_contact_plocal',
'manufacturer_g1_name',
'manufacturer_g1_address_1',
'manufacturer_g1_address_2',
'manufacturer_g1_city',
'manufacturer_g1_state',
'manufacturer_g1_zip_code',
'manufacturer_g1_zip_code_ext',
'manufacturer_g1_country',
'manufacturer_g1_postal_code',
'date_manufacturer_received',
'device_date_of_manufacturer',
'single_use_flag',
'remedial_action',
'previous_use_code',
'removal_correction_number',
'event_type',
'distributor_name',
'distributor_address_1',
'distributor_address_2',
'distributor_city',
'distributor_state',
'distributor_zip_code',
'distributor_zip_code_ext',
'report_to_manufacturer',
'manufacturer_name',
'manufacturer_address_1',
'manufacturer_address_2',
'manufacturer_city',
'manufacturer_state',
'manufacturer_zip_code',
'manufacturer_zip_code_ext',
'manufacturer_country',
'manufacturer_postal_code',
'type_of_report',
'source_type',
'date_added',
'date_changed',
'reporter_country_code',
'pma_pmn_number',
'exemption_number',
'summary_report_flag'
]
}
DATE_KEYS = [
'date_received',
'baseline_date_first_marketed',
'date_returned_to_manufacturer',
'date_report_to_fda',
'baseline_date_ceased_marketing',
'date_report_to_manufacturer',
'expiration_date_of_device',
'device_date_of_manufacturer',
'date_facility_aware',
'report_date',
'date_report',
'date_manufacturer_received',
'date_of_event',
'date_added',
'date_changed'
]
# split these keys in an array on ';'
SPLIT_KEYS = ['sequence_number_treatment', 'sequence_number_outcome']
# multiple submits are separated by ',' need to split these keys on ','
MULTI_SUBMIT = ['source_type', 'remedial_action', 'type_of_report']
# These keys have malformed integers in them: left-padded with a space and decimal point added.
MALFORMED_KEYS = ['mdr_report_key', 'device_event_key', 'device_sequence_number', 'patient_sequence_number']
def _fix_date(input_date):
''' Converts input dates for known formats to a standard format that is
Elasticsearch friendly.
Returns the input_date if it is not a known format
'''
supported_formats = [
'DD-MMM-YY',
'YYYY/MM/DD HH:mm:ss.SSS',
'MM/DD/YYYY',
'YYYYMMDD',
'YYYY/MM/DD'
]
# arrow needs 3 char months to be sentence case: e.g. Dec not DEC
  formatted_date = input_date.title()
  try:
    date = arrow.get(formatted_date, supported_formats)
    return date.format('YYYYMMDD')
  except:
    if input_date:
      logging.info('unparseable date: %s with input %s', input_date, formatted_date)
return None
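# Illustrative behaviour of _fix_date (added comment; values are hypothetical):
#   _fix_date('05-DEC-16')  -> '20161205'  ('.title()' gives 'Dec', which arrow needs)
#   _fix_date('2016/12/05') -> '20161205'
#   _fix_date('not a date') -> None (and the failure is logged)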
def _split(key, value, sep):
''' Helper function that splits a string into an array, swaps the encoded
values for more descriptive ones and then generates an _exact field
'''
value = value.split(sep)
if key in ENUM:
value = [ENUM[key].get(val, val) for val in value]
return key, value
class DownloadDeviceEvents(luigi.Task):
def requires(self):
return []
def output(self):
return luigi.LocalTarget(RAW_DIR)
def run(self):
zip_urls = []
soup = BeautifulSoup(urlopen(DEVICE_DOWNLOAD_PAGE).read(), "lxml")
for a in soup.find_all(href=re.compile('.*.zip')):
zip_urls.append(a['href'])
if not zip_urls:
logging.fatal('No MAUDE Zip Files Found At %s' % DEVICE_DOWNLOAD_PAGE)
for zip_url in zip_urls:
filename = zip_url.split('/')[-1]
common.download(zip_url, join(self.output().path, filename))
class ExtractAndCleanDownloadsMaude(luigi.Task):
''' Unzip each of the download files and remove all the non-UTF8 characters.
Unzip -p streams the data directly to iconv which then writes to disk.
'''
def requires(self):
return [DownloadDeviceEvents()]
def output(self):
return luigi.LocalTarget(join(BASE_DIR, 'maude/extracted'))
def run(self):
output_dir = self.output().path
common.shell_cmd('mkdir -p %s', output_dir)
for i in range(len(self.input())):
input_dir = self.input()[i].path
download_util.extract_and_clean(input_dir,
'ISO-8859-1//TRANSLIT',
'UTF-8',
'txt')
# This task no longer works properly. Needs refactoring
class PreprocessFilesToFixIssues(AlwaysRunTask):
''' The pipe-separated MAUDE files come with issues: no escaping of special characters.
Many foitext files contain pipe characters that are part of field values and thus are
breaking the layout due to not being escaped properly. In other cases new line characters appear
unescaped and break single lines into multi-line chunks. This task attempts to deal with these
issues via regular expression search & replace.
'''
def requires(self):
return ExtractAndCleanDownloadsMaude()
def output(self):
return luigi.LocalTarget(join(BASE_DIR, 'maude/extracted'))
def _run(self):
for filename in glob.glob(self.input().path + '/*/*foi*.txt') + glob.glob(self.input().path + '/*/device*.txt'):
logging.info('Pre-processing %s', filename)
filtered = filename + '.filtered'
out = open(filtered, 'w')
line_num = 0
bad_lines = 0
with open(filename, 'rU') as fp:
for line in fp:
line = line.strip()
if line_num < 1:
# First line is usually the header
out.write(line)
else:
if len(line.strip()) > 0:
if re.search(r'^\d{2,}(\.\d)?\|', line):
# Properly formatted line. Append it and move on.
out.write('\n'+line)
else:
# Bad line, most likely due to an unescaped carriage return. Tuck it onto the previous line
out.write(' ' + line)
bad_lines += 1
line_num += 1
logging.info('Issues found & fixed: %s', bad_lines)
out.close()
os.remove(filename)
os.rename(filtered, filename)
class CSV2JSONMapper(parallel.Mapper):
def __init__(self, device_problem_codes_ref, patient_problem_codes_ref):
parallel.Mapper.__init__(self)
self.device_problem_codes_ref = device_problem_codes_ref
self.patient_problem_codes_ref = patient_problem_codes_ref
def map_shard(self, map_input, map_output):
self.filename = map_input.filename
return parallel.Mapper.map_shard(self, map_input, map_output)
@staticmethod
def cleaner(k, v):
if k is None:
return None
if k in DATE_KEYS:
new_date = _fix_date(v)
return (k, new_date) if new_date else None
if k in SPLIT_KEYS:
return _split(k, v, ';')
if k in MULTI_SUBMIT:
return _split(k, v, ',')
# The DEVICE files have mdr_report_key padded with a space and in decimal format with a ".0" at the end.
if k in MALFORMED_KEYS:
v = v.strip().replace('.0', '')
if k in ENUM:
if v in ENUM[k]:
if isinstance(v, list):
v = [ENUM[k].get(val, val) for val in v]
else:
v = ENUM[k][v]
return (k, v)
# We are seeing a large number of foitext rows not following the column definition and thus
# getting rejected by the reducer. The root cause is the fact that the last column (FOI_TEXT) contains
# text that includes one or more "pipe" | characters that have not been properly escaped
# in the file and thus are throwing column count off.
# We are dealing with that by merely concatenating the extra text columns into a single
# string and stripping out the bogus columns at the end.
def handle_oversized_foitext(self, value):
no_columns = len(FILE_HEADERS['foitext'])
combined_text = '|'.join([t for t in value[no_columns - 1:]])
value[no_columns - 1] = combined_text[:-1] if combined_text.endswith("|") else combined_text
return value[0:no_columns]
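  # Illustrative example (added): with the six foitext columns, a row parsed as
  # ['1', '2', 'D', '1', '20160101', 'text with a ', ' pipe', ''] is collapsed
  # back into ['1', '2', 'D', '1', '20160101', 'text with a | pipe'].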
def map(self, key, value, output):
if len(value) < 1:
return
mdr_key = self.cleaner('mdr_report_key', value[0])[1]
# Some of the files have headers, we will apply our own, so row that starts
# with this value is safe to skip
if 'MDR_REPORT_KEY' in mdr_key:
return
file_type = [s for s in CATEGORIES if s in self.filename][0]
if file_type == 'foitext' and len(value) > len(FILE_HEADERS[file_type]):
value = self.handle_oversized_foitext(value)
# We send all data anomalies to a reducer for each file type.
# These non-conforming data are written to a reject file for review.
    # This file type has variable lengths over time, so it needs its own check
if file_type == 'foidev':
if len(value) not in [28, 45]:
logging.info('Does not conform to foidev structure. Skipping: %s, %s',
mdr_key, '#' * 5)
output.add(file_type, '%s: missing fields' % mdr_key + ':' + '|'.join(value))
return
elif file_type == 'mdrfoi':
if len(value) not in [77, 81]:
logging.info('Does not conform to mdrfoi structure. Skipping: %s, %s',
mdr_key, '#' * 5)
output.add(file_type, '%s: missing fields' % mdr_key + ':' + '|'.join(value))
return
elif len(value) != len(FILE_HEADERS[file_type]):
logging.info('Does not conform to %s structure. Skipping: %s, %s',
file_type, mdr_key, '#' * 5)
output.add(file_type, '%s: missing fields' % mdr_key + ':' + '|'.join(value))
return
    try:
      int(mdr_key)
    except ValueError:
      logging.info('%s is not a number', mdr_key)
      output.add(file_type, '%s: NaN' % mdr_key + ':' + '|'.join(value))
      return
# If it makes it this far, it is a good record
new_value = dict(list(zip(FILE_HEADERS[file_type], value)))
new_value = common.transform_dict(new_value, self.cleaner)
# https://github.com/FDA/openfda/issues/27
# We need to see if device problem code is available for this report in the
# foidevproblem.txt file, resolve it to a problem description, and add it to the
# master record.
if file_type == 'foidevproblem':
product_problem = self.device_problem_codes_ref.get(new_value['problem_code'])
new_value['product_problem'] = product_problem
# Same applies to patient problem codes.
if file_type == 'patientproblemcode':
patient_problem = self.patient_problem_codes_ref.get(new_value['problem_code'])
new_value['patient_problem'] = patient_problem
output.add(mdr_key, (file_type, new_value))
class CSV2JSONJoinReducer(parallel.Reducer):
# File type to nested key name mapping
join_map = {
'device': 'device',
'foidev': 'device',
'foitext': 'mdr_text',
'patient': 'patient'
}
def _join(self, key, values):
val = parallel.pivot_values(values)
final = {
'device': [],
'mdr_text': [],
'patient': []
}
if not val.get('mdrfoi', []):
# logging.info('MDR REPORT %s: Missing mdrfoi record, Skipping join', key)
return
for i, main_report in enumerate(val.get('mdrfoi', [])):
final.update(main_report)
try:
int(final.get('mdr_report_key', None))
except TypeError:
logging.info('%s', '*' * 2400)
return
for source_file, target_key in self.join_map.items():
for row in val.get(source_file, []):
row.pop('mdr_report_key', 0) # No need to keep join key on nested data
final[target_key].append(row)
# Now tuck the device and patient problem codes onto the final record
if val.get('foidevproblem', []):
final['product_problems'] = list(map(lambda x: x['product_problem'], val['foidevproblem']))
for patient in final['patient']:
for patient_problem in val.get('patientproblemcode', []):
if patient['patient_sequence_number'] == patient_problem['patient_sequence_number']:
patient['patient_problems'] = [patient_problem['patient_problem']] if patient.get(
'patient_problems') is None else patient['patient_problems'] + [patient_problem['patient_problem']]
return final
def reduce(self, key, values, output):
# Write out the rejected records
if key in CATEGORIES:
with open(join(BASE_DIR, 'maude', key + '-rejects.txt'), 'a') as rejects:
for row in values:
rejects.write(row + '\n')
else:
output.put(key, self._join(key, values))
class CSV2JSON(luigi.Task):
''' Task that loads different CSV files, depending upon what the value of
`loader_task`.
`init`: process all files except those listed in IGNORE_FILES.
`add`: process all files with the word `add` in the filename.
`changes`: process all files with the word `change` in the filename.
'''
run_date = luigi.Parameter()
loader_task = luigi.Parameter()
def requires(self):
return PreprocessFilesToFixIssues()
def output(self):
file_name = '-'.join([self.loader_task, self.run_date, 'json.db'])
return luigi.LocalTarget(join(BASE_DIR, 'maude', file_name))
def run(self):
files = glob.glob(self.input().path + '/*/*.txt')
device_problems = glob.glob(self.input().path + '/*/foidevproblem*.txt')
patient_problems = glob.glob(self.input().path + '/*/patientproblemcode*.txt')
if self.loader_task == 'init':
input_files = [f for f in files if not any(i for i in IGNORE_FILES if i in f)]
else:
input_files = [f for f in files if self.loader_task in f] + device_problems + patient_problems
# Load and cache device problem codes.
device_problem_codes_ref = {}
reader = csv.reader(open(DEVICE_PROBLEM_CODES_FILE), quoting=csv.QUOTE_NONE, delimiter='|')
for idx, line in enumerate(reader):
if len(line) > 1:
device_problem_codes_ref[line[0]] = line[1].strip()
# Load and cache patient problem codes.
patient_problem_codes_ref = {}
reader = csv.reader(open(PATIENT_PROBLEM_CODES_FILE), quoting=csv.QUOTE_NONE, delimiter='|')
for idx, line in enumerate(reader):
if len(line) > 1:
patient_problem_codes_ref[line[0]] = line[1].strip()
parallel.mapreduce(
parallel.Collection.from_glob(
input_files, parallel.CSVSplitLineInput(quoting=csv.QUOTE_NONE, delimiter='|')),
mapper=CSV2JSONMapper(device_problem_codes_ref=device_problem_codes_ref,
patient_problem_codes_ref=patient_problem_codes_ref
),
reducer=CSV2JSONJoinReducer(),
output_prefix=self.output().path
)
class MergeUpdatesMapper(parallel.Mapper):
def map_shard(self, map_input, map_output):
self.filename = map_input.filename
self.table = basename(dirname(dirname(self.filename)))
return parallel.Mapper.map_shard(self, map_input, map_output)
def map(self, key, value, output):
source_type = None
if 'init' in self.filename:
source_type = 'init'
if 'add' in self.filename:
source_type = 'add'
if 'change' in self.filename:
source_type = 'change'
assert source_type, 'Unable to continue for source type %s' % self.filename
output.add(key, (source_type, value))
class MergeUpdatesReducer(parallel.Reducer):
''' This step resolves conflicting data for the same key, which is the result
of merging the init, add and change pipeline outputs.
Reducer that takes in an array of tuples:
[(source, value), (source, value), ...]
One and only one is selected by the reducer.
'''
def reduce(self, key, values, output):
def _safe_get(value):
if isinstance(value, list):
if len(value) > 0:
value = value[0]
else:
return None
return value
    # If there is only one value, then we use it. If there are many, then
    # choose the right one in the order: changed, added, or existing.
# Remember, we are merging the additions and updates with last weeks run,
# which is where the existing come from. All of this is due to the fact
# that a record can exist in all three places, which is not ideal but is
# reality.
if len(values) == 1:
value = _safe_get(values[0][1])
if value:
output.put(key, value)
elif len(values) > 1:
pivoted = parallel.pivot_values(values)
change = _safe_get(pivoted.get('change', []))
add = _safe_get(pivoted.get('add', []))
init = _safe_get(pivoted.get('init', []))
if change:
output.put(key, change)
elif add:
output.put(key, add)
else:
output.put(key, init)
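  # Illustrative example (added): for values like
  #   [('init', [record_v0]), ('change', [record_v2])]
  # the 'change' version wins and record_v2 is what gets written out.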
class MergeUpdates(luigi.Task):
''' Task that takes all three loader_task (init, add, and change), streams
them into a reducer and picks one to write to the weekly output.
Please note that the `init` process attempts to use last weeks init file
as input. If it does not exist, it will make it first.
'''
run_date = luigi.Parameter()
def requires(self):
previous_run_date = arrow.get(self.run_date).shift(weeks=-1).format(DATE_FMT)
return [
CSV2JSON(loader_task='init', run_date=previous_run_date),
CSV2JSON(loader_task='add', run_date=self.run_date),
CSV2JSON(loader_task='change', run_date=self.run_date)
]
def output(self):
file_name = '-'.join(['init', self.run_date, 'json.db'])
return luigi.LocalTarget(join(BASE_DIR, 'maude', file_name))
def run(self):
db_list = [s.path for s in self.input()]
parallel.mapreduce(
parallel.Collection.from_sharded_list(db_list),
mapper=MergeUpdatesMapper(),
reducer=MergeUpdatesReducer(),
output_prefix=self.output().path)
class MaudeAnnotationMapper(DeviceAnnotateMapper):
def filter(self, data, lookup=None):
product_code = data['device_report_product_code']
harmonized = self.harmonized_db.get(product_code, None)
if harmonized:
# Taking a very conservative approach to annotation to start. Only
# including the classification data and a matching registration.
if '510k' in harmonized:
del harmonized['510k']
if 'device_pma' in harmonized:
del harmonized['device_pma']
registration = list(harmonized['registration'])
new_reg = [d for d in registration if d.get('registration_number') == lookup]
harmonized['registration'] = new_reg
return harmonized
return None
def harmonize(self, data):
result = dict(data)
report_number = data['report_number']
if not report_number:
return result
registration_number = report_number.split('-')[0]
if not registration_number:
return result
devices = []
for row in result.get('device', []):
d = dict(row)
harmonized = self.filter(row, lookup=registration_number)
if harmonized:
d['openfda'] = self.flatten(harmonized)
else:
d['openfda'] = {}
devices.append(d)
result['device'] = devices
return result
class AnnotateReport(DependencyTriggeredTask):
run_date = luigi.Parameter()
def requires(self):
return [Harmonized2OpenFDA(), MergeUpdates(run_date=self.run_date)]
def output(self):
return luigi.LocalTarget(join(BASE_DIR, 'maude', 'annotate.db'))
def run(self):
harmonized_db = parallel.ShardedDB.open(self.input()[0].path).as_dict()
db_list = [s.path for s in self.input()[1:]]
parallel.mapreduce(
parallel.Collection.from_sharded_list(db_list),
mapper=MaudeAnnotationMapper(harmonized_db=harmonized_db),
reducer=parallel.IdentityReducer(),
output_prefix=self.output().path)
class LoadJSONByRunDate(index_util.LoadJSONBase):
run_date = luigi.Parameter()
index_name = 'deviceevent'
type_name = 'maude'
mapping_file = 'schemas/maude_mapping.json'
optimize_index = False
docid_key='mdr_report_key'
use_checksum = True
last_update_date = lambda _: newest_file_timestamp(RAW_DIR)
def _data(self):
return AnnotateReport(run_date=self.run_date)
class LoadJSON(luigi.WrapperTask):
run_date = arrow.utcnow().ceil('weeks').format(DATE_FMT)
def requires(self):
return LoadJSONByRunDate(run_date=self.run_date)
if __name__ == '__main__':
luigi.run()
|
py | b40b999990d2dea7edceb97cabd1a52c11119b6b | # -*- coding: utf-8 -*-
'''
The networking module for Windows based systems
'''
# Import python libs
import logging
import socket
import time
# Import salt libs
import salt.utils
import salt.utils.network
import salt.utils.validate.net
from salt.exceptions import (
CommandExecutionError,
SaltInvocationError
)
# Set up logging
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'ip'
def __virtual__():
'''
Confine this module to Windows systems
'''
if salt.utils.is_windows():
return __virtualname__
return False
def _interface_configs():
'''
Return all interface configs
'''
cmd = 'netsh interface ip show config'
lines = __salt__['cmd.run'](cmd).splitlines()
iface = ''
ip = 0
dns_flag = None
wins_flag = None
ret = {}
for line in lines:
if dns_flag:
try:
socket.inet_aton(line.strip())
ret[iface][dns_flag].append(line.strip())
dns_flag = None
continue
except socket.error as exc:
dns_flag = None
if wins_flag:
try:
socket.inet_aton(line.strip())
ret[iface][wins_flag].append(line.strip())
wins_flag = None
continue
except socket.error as exc:
wins_flag = None
if not line:
iface = ''
continue
if 'Configuration for interface' in line:
_, iface = line.rstrip('"').split('"', 1) # get iface name
ret[iface] = {}
ip = 0
continue
try:
key, val = line.split(':', 1)
except ValueError as exc:
log.debug('Could not split line. Error was {0}.'.format(exc))
continue
if 'DNS Servers' in line:
dns_flag = key.strip()
ret[iface][key.strip()] = [val.strip()]
continue
if 'WINS Servers' in line:
wins_flag = key.strip()
ret[iface][key.strip()] = [val.strip()]
continue
if 'IP Address' in key:
if 'ip_addrs' not in ret[iface]:
ret[iface]['ip_addrs'] = []
ret[iface]['ip_addrs'].append(dict([(key.strip(), val.strip())]))
continue
if 'Subnet Prefix' in key:
subnet, _, netmask = val.strip().split(' ', 2)
ret[iface]['ip_addrs'][ip]['Subnet'] = subnet.strip()
ret[iface]['ip_addrs'][ip]['Netmask'] = netmask.lstrip().rstrip(')')
ip = ip + 1
continue
else:
ret[iface][key.strip()] = val.strip()
return ret
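# Rough illustration (added comment) of the structure _interface_configs()
# returns; the exact keys depend on the local `netsh` output, so treat this
# as an approximation:
# {'Local Area Connection': {
#      'DHCP enabled': 'Yes',
#      'ip_addrs': [{'IP Address': '10.1.2.3',
#                    'Subnet': '10.1.2.0/24',
#                    'Netmask': '255.255.255.0'}],
#      'Statically Configured DNS Servers': ['10.1.2.10']}}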
def raw_interface_configs():
'''
Return raw configs for all interfaces
CLI Example:
.. code-block:: bash
salt -G 'os_family:Windows' ip.raw_interface_configs
'''
cmd = 'netsh interface ip show config'
return __salt__['cmd.run'](cmd)
def get_all_interfaces():
'''
Return configs for all interfaces
CLI Example:
.. code-block:: bash
salt -G 'os_family:Windows' ip.get_all_interfaces
'''
return _interface_configs()
def get_interface(iface):
'''
Return the configuration of a network interface
CLI Example:
.. code-block:: bash
salt -G 'os_family:Windows' ip.get_interface 'Local Area Connection'
'''
return _interface_configs().get(iface, {})
def is_enabled(iface):
'''
Returns ``True`` if interface is enabled, otherwise ``False``
CLI Example:
.. code-block:: bash
salt -G 'os_family:Windows' ip.is_enabled 'Local Area Connection #2'
'''
cmd = 'netsh interface show interface name="{0}"'.format(iface)
iface_found = False
for line in __salt__['cmd.run'](cmd).splitlines():
if 'Connect state:' in line:
iface_found = True
return line.split()[-1] == 'Connected'
if not iface_found:
        raise CommandExecutionError('Interface {0!r} not found'.format(iface))
return False
def is_disabled(iface):
'''
Returns ``True`` if interface is disabled, otherwise ``False``
CLI Example:
.. code-block:: bash
salt -G 'os_family:Windows' ip.is_disabled 'Local Area Connection #2'
'''
return not is_enabled(iface)
def enable(iface):
'''
Enable an interface
CLI Example:
.. code-block:: bash
salt -G 'os_family:Windows' ip.enable 'Local Area Connection #2'
'''
if is_enabled(iface):
return True
__salt__['cmd.run'](
'netsh interface set interface "{0}" admin=ENABLED'.format(iface)
)
return is_enabled(iface)
def disable(iface):
'''
Disable an interface
CLI Example:
.. code-block:: bash
salt -G 'os_family:Windows' ip.disable 'Local Area Connection #2'
'''
if is_disabled(iface):
return True
__salt__['cmd.run'](
'netsh interface set interface "{0}" admin=DISABLED'.format(iface)
)
return is_disabled(iface)
def get_subnet_length(mask):
'''
Convenience function to convert the netmask to the CIDR subnet length
CLI Example:
.. code-block:: bash
salt -G 'os_family:Windows' ip.get_subnet_length 255.255.255.0
'''
if not salt.utils.validate.net.netmask(mask):
raise SaltInvocationError('{0!r} is not a valid netmask'.format(mask))
return salt.utils.network.get_net_size(mask)
def set_static_ip(iface, addr, gateway=None, append=False):
'''
Set static IP configuration on a Windows NIC
iface
The name of the interface to manage
addr
IP address with subnet length (ex. ``10.1.2.3/24``). The
:mod:`ip.get_subnet_length <salt.modules.win_ip.get_subnet_length>`
function can be used to calculate the subnet length from a netmask.
gateway : None
If specified, the default gateway will be set to this value.
append : False
If ``True``, this IP address will be added to the interface. Default is
``False``, which overrides any existing configuration for the interface
and sets ``addr`` as the only address on the interface.
CLI Example:
.. code-block:: bash
salt -G 'os_family:Windows' ip.set_static_ip 'Local Area Connection' 10.1.2.3/24 gateway=10.1.2.1
salt -G 'os_family:Windows' ip.set_static_ip 'Local Area Connection' 10.1.2.4/24 append=True
'''
def _find_addr(iface, addr, timeout=1):
ip, cidr = addr.rsplit('/', 1)
netmask = salt.utils.network.cidr_to_ipv4_netmask(cidr)
for idx in xrange(timeout):
for addrinfo in get_interface(iface).get('ip_addrs', []):
if addrinfo['IP Address'] == ip \
and addrinfo['Netmask'] == netmask:
return addrinfo
time.sleep(1)
return {}
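    # Note (added comment): _find_addr above polls the interface configuration
    # once per second for up to `timeout` seconds and returns the matching
    # address entry from get_interface(), or an empty dict if it never appears.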
if not salt.utils.validate.net.ipv4_addr(addr):
raise SaltInvocationError('Invalid address {0!r}'.format(addr))
    if gateway and not salt.utils.validate.net.ipv4_addr(gateway):
raise SaltInvocationError(
'Invalid default gateway {0!r}'.format(gateway)
)
if '/' not in addr:
addr += '/32'
if append and _find_addr(iface, addr):
raise CommandExecutionError(
'Address {0!r} already exists on interface '
'{1!r}'.format(addr, iface)
)
# Do not use raw string formatting (ex. {1!r}) for interface name, as the
# windows command shell does not like single quotes.
cmd = (
'netsh interface ip {0} address name="{1}" {2} '
'address={3}{4}'.format(
'add' if append else 'set',
iface,
'' if append else 'source=static',
addr,
' gateway={0}'.format(gateway) if gateway else '',
)
)
result = __salt__['cmd.run_all'](cmd)
if result['retcode'] != 0:
raise CommandExecutionError(
'Unable to set IP address: {0}'.format(result['stderr'])
)
new_addr = _find_addr(iface, addr, timeout=10)
if not new_addr:
return {}
ret = {'Address Info': new_addr}
if gateway:
ret['Default Gateway'] = gateway
return ret
def set_dhcp_ip(iface):
'''
Set Windows NIC to get IP from DHCP
CLI Example:
.. code-block:: bash
salt -G 'os_family:Windows' ip.set_dhcp_ip 'Local Area Connection'
'''
cmd = 'netsh interface ip set address "{0}" dhcp'.format(iface)
__salt__['cmd.run'](cmd)
return {'Interface': iface, 'DHCP enabled': 'Yes'}
def set_static_dns(iface, *addrs):
'''
Set static DNS configuration on a Windows NIC
CLI Example:
.. code-block:: bash
salt -G 'os_family:Windows' ip.set_static_dns 'Local Area Connection' '192.168.1.1'
salt -G 'os_family:Windows' ip.set_static_dns 'Local Area Connection' '192.168.1.252' '192.168.1.253'
'''
addr_index = 1
for addr in addrs:
if addr_index == 1:
cmd = 'netsh int ip set dns "{0}" static {1} primary'.format(
iface,
addrs[0],
)
__salt__['cmd.run'](cmd)
addr_index = addr_index + 1
else:
cmd = 'netsh interface ip add dns name="{0}" addr="{1}" index={2}'
__salt__['cmd.run'](cmd.format(iface, addr, addr_index))
addr_index = addr_index + 1
return {'Interface': iface, 'DNS Server': addrs}
def set_dhcp_dns(iface):
'''
Set DNS source to DHCP on Windows
CLI Example:
.. code-block:: bash
salt -G 'os_family:Windows' ip.set_dhcp_dns 'Local Area Connection'
'''
cmd = 'netsh interface ip set dns "{0}" dhcp'.format(iface)
__salt__['cmd.run'](cmd)
return {'Interface': iface, 'DNS Server': 'DHCP'}
def set_dhcp_all(iface):
'''
Set both IP Address and DNS to DHCP
CLI Example:
    .. code-block:: bash
salt -G 'os_family:Windows' ip.set_dhcp_all 'Local Area Connection'
'''
set_dhcp_ip(iface)
set_dhcp_dns(iface)
return {'Interface': iface, 'DNS Server': 'DHCP', 'DHCP enabled': 'Yes'}
def get_default_gateway():
'''
    Get the default gateway of the system
CLI Example:
.. code-block:: bash
salt -G 'os_family:Windows' ip.get_default_gateway
'''
try:
return next(iter(
x.split()[-1] for x in __salt__['cmd.run'](
'netsh interface ip show config'
).splitlines()
if 'Default Gateway:' in x
))
except StopIteration:
raise CommandExecutionError('Unable to find default gateway')
|
py | b40b9bb1b9ec36dfa5929d1ebcd6d5522abc8b3b | """
Cosmology calculator.
Cosmology calculator based originally on http://www.kempner.net/cosmic.php
and featuring time and redshift conversion functions from Enzo..
"""
from __future__ import print_function
#-----------------------------------------------------------------------------
# Copyright (c) 2013-2014, yt Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import functools
import numpy as np
from yt.units import dimensions
from yt.units.unit_registry import \
UnitRegistry
from yt.units.yt_array import \
YTArray, \
YTQuantity
from yt.utilities.physical_constants import \
gravitational_constant_cgs as G, \
speed_of_light_cgs
class Cosmology(object):
r"""
Create a cosmology calculator to compute cosmological distances and times.
For an explanation of the various cosmological measures, see, for example
Hogg (1999, http://xxx.lanl.gov/abs/astro-ph/9905116).
Parameters
----------
hubble_constant : float
The Hubble parameter at redshift zero in units of 100 km/s/Mpc.
Default: 0.71.
omega_matter : the fraction of the energy density of the Universe in
matter at redshift zero.
Default: 0.27.
omega_lambda : the fraction of the energy density of the Universe in
a cosmological constant.
Default: 0.73.
omega_curvature : the fraction of the energy density of the Universe in
curvature.
Default: 0.0.
Examples
--------
>>> from yt.utilities.cosmology import Cosmology
>>> co = Cosmology()
>>> print co.hubble_time(0.0).in_units("Gyr")
"""
def __init__(self, hubble_constant = 0.71,
omega_matter = 0.27,
omega_lambda = 0.73,
omega_curvature = 0.0,
unit_registry = None):
self.omega_matter = omega_matter
self.omega_lambda = omega_lambda
self.omega_curvature = omega_curvature
if unit_registry is None:
unit_registry = UnitRegistry()
unit_registry.modify("h", hubble_constant)
for my_unit in ["m", "pc", "AU", "au"]:
new_unit = "%scm" % my_unit
# technically not true, but distances here are actually comoving
unit_registry.add(new_unit, unit_registry.lut[my_unit][0],
dimensions.length, "\\rm{%s}/(1+z)" % my_unit)
self.unit_registry = unit_registry
self.hubble_constant = self.quan(hubble_constant, "100*km/s/Mpc")
def hubble_distance(self):
r"""
The distance corresponding to c / h, where c is the speed of light
and h is the Hubble parameter in units of 1 / time.
"""
return self.quan((speed_of_light_cgs / self.hubble_constant)).in_cgs()
def comoving_radial_distance(self, z_i, z_f):
r"""
        The comoving distance along the line of sight to an object at redshift,
z_f, viewed at a redshift, z_i.
Parameters
----------
z_i : float
The redshift of the observer.
z_f : float
The redshift of the observed object.
Examples
--------
>>> co = Cosmology()
>>> print co.comoving_radial_distance(0., 1.).in_units("Mpccm")
"""
return (self.hubble_distance() *
trapzint(self.inverse_expansion_factor, z_i, z_f)).in_cgs()
def comoving_transverse_distance(self, z_i, z_f):
r"""
When multiplied by some angle, the distance between two objects
observed at redshift, z_f, with an angular separation given by that
angle, viewed by an observer at redshift, z_i (Hogg 1999).
Parameters
----------
z_i : float
The redshift of the observer.
z_f : float
The redshift of the observed object.
Examples
--------
>>> co = Cosmology()
>>> print co.comoving_transverse_distance(0., 1.).in_units("Mpccm")
"""
if (self.omega_curvature > 0):
return (self.hubble_distance() / np.sqrt(self.omega_curvature) *
np.sinh(np.sqrt(self.omega_curvature) *
self.comoving_radial_distance(z_i, z_f) /
self.hubble_distance())).in_cgs()
elif (self.omega_curvature < 0):
return (self.hubble_distance() /
np.sqrt(np.fabs(self.omega_curvature)) *
np.sin(np.sqrt(np.fabs(self.omega_curvature)) *
self.comoving_radial_distance(z_i, z_f) /
self.hubble_distance())).in_cgs()
else:
return self.comoving_radial_distance(z_i, z_f)
def comoving_volume(self, z_i, z_f):
r"""
"The comoving volume is the volume measure in which number densities
of non-evolving objects locked into Hubble flow are constant with
redshift." -- Hogg (1999)
Parameters
----------
z_i : float
The lower redshift of the interval.
z_f : float
The higher redshift of the interval.
Examples
--------
>>> co = Cosmology()
>>> print co.comoving_volume(0., 1.).in_units("Gpccm**3")
"""
if (self.omega_curvature > 0):
return (2 * np.pi * np.power(self.hubble_distance(), 3) /
self.omega_curvature *
(self.comoving_transverse_distance(z_i, z_f) /
self.hubble_distance() *
np.sqrt(1 + self.omega_curvature *
sqr(self.comoving_transverse_distance(z_i, z_f) /
self.hubble_distance())) -
np.sinh(np.fabs(self.omega_curvature) *
self.comoving_transverse_distance(z_i, z_f) /
self.hubble_distance()) /
np.sqrt(self.omega_curvature))).in_cgs()
elif (self.omega_curvature < 0):
return (2 * np.pi * np.power(self.hubble_distance(), 3) /
np.fabs(self.omega_curvature) *
(self.comoving_transverse_distance(z_i, z_f) /
self.hubble_distance() *
np.sqrt(1 + self.omega_curvature *
sqr(self.comoving_transverse_distance(z_i, z_f) /
self.hubble_distance())) -
np.arcsin(np.fabs(self.omega_curvature) *
self.comoving_transverse_distance(z_i, z_f) /
self.hubble_distance()) /
np.sqrt(np.fabs(self.omega_curvature)))).in_cgs()
else:
return (4 * np.pi *
np.power(self.comoving_transverse_distance(z_i, z_f), 3) /\
3).in_cgs()
def angular_diameter_distance(self, z_i, z_f):
r"""
Following Hogg (1999), the angular diameter distance is 'the ratio of
an object's physical transverse size to its angular size in radians.'
Parameters
----------
z_i : float
The redshift of the observer.
z_f : float
The redshift of the observed object.
Examples
--------
>>> co = Cosmology()
>>> print co.angular_diameter_distance(0., 1.).in_units("Mpc")
"""
return (self.comoving_transverse_distance(0, z_f) / (1 + z_f) -
self.comoving_transverse_distance(0, z_i) / (1 + z_i)).in_cgs()
def angular_scale(self, z_i, z_f):
r"""
The proper transverse distance between two points at redshift z_f
observed at redshift z_i per unit of angular separation.
Parameters
----------
z_i : float
The redshift of the observer.
z_f : float
The redshift of the observed object.
Examples
--------
>>> co = Cosmology()
>>> print co.angular_scale(0., 1.).in_units("kpc / arcsec")
"""
return self.angular_diameter_distance(z_i, z_f) / \
self.quan(1, "radian")
def luminosity_distance(self, z_i, z_f):
r"""
The distance that would be inferred from the inverse-square law of
light and the measured flux and luminosity of the observed object.
Parameters
----------
z_i : float
The redshift of the observer.
z_f : float
The redshift of the observed object.
Examples
--------
>>> co = Cosmology()
>>> print co.luminosity_distance(0., 1.).in_units("Mpc")
"""
return (self.comoving_transverse_distance(0, z_f) * (1 + z_f) -
self.comoving_transverse_distance(0, z_i) * (1 + z_i)).in_cgs()
def lookback_time(self, z_i, z_f):
r"""
The difference in the age of the Universe between the redshift interval
z_i to z_f.
Parameters
----------
z_i : float
The lower redshift of the interval.
z_f : float
The higher redshift of the interval.
Examples
--------
>>> co = Cosmology()
>>> print co.lookback_time(0., 1.).in_units("Gyr")
"""
return (trapzint(self.age_integrand, z_i, z_f) / \
self.hubble_constant).in_cgs()
def hubble_time(self, z, z_inf=1e6):
r"""
The age of the Universe at a given redshift.
Parameters
----------
z : float
Redshift.
z_inf : float
The upper bound of the integral of the age integrand.
Default: 1e6.
Examples
--------
>>> co = Cosmology()
>>> print co.hubble_time(0.).in_units("Gyr")
See Also
--------
t_from_z
"""
return (trapzint(self.age_integrand, z, z_inf) /
self.hubble_constant).in_cgs()
def critical_density(self, z):
r"""
The density required for closure of the Universe at a given
redshift in the proper frame.
Parameters
----------
z : float
Redshift.
Examples
--------
>>> co = Cosmology()
>>> print co.critical_density(0.).in_units("g/cm**3")
>>> print co.critical_density(0).in_units("Msun/Mpc**3")
"""
return (3.0 / 8.0 / np.pi *
self.hubble_constant**2 / G *
((1 + z)**3.0 * self.omega_matter +
self.omega_lambda)).in_cgs()
def hubble_parameter(self, z):
r"""
The value of the Hubble parameter at a given redshift.
Parameters
----------
z: float
Redshift.
Examples
--------
>>> co = Cosmology()
>>> print co.hubble_parameter(1.0).in_units("km/s/Mpc")
"""
return self.hubble_constant * self.expansion_factor(z)
def age_integrand(self, z):
return (1 / (z + 1) / self.expansion_factor(z))
def expansion_factor(self, z):
r"""
The ratio between the Hubble parameter at a given redshift and
redshift zero.
This is also the primary function integrated to calculate the
cosmological distances.
"""
return np.sqrt(self.omega_matter * ((1 + z)**3.0) +
self.omega_curvature * ((1 + z)**2.0) +
self.omega_lambda)
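    # Rough sanity check (added comment): with the default parameters
    # (omega_matter=0.27, omega_lambda=0.73, flat), expansion_factor(1.0) is
    # sqrt(0.27 * 8 + 0.73) ~= 1.70, i.e. H(z=1) is about 1.7 times H0.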
def inverse_expansion_factor(self, z):
return 1 / self.expansion_factor(z)
def path_length_function(self, z):
return ((1 + z)**2) * self.inverse_expansion_factor(z)
def path_length(self, z_i, z_f):
return trapzint(self.path_length_function, z_i, z_f)
def z_from_t(self, my_time):
"""
Compute the redshift from time after the big bang. This is based on
Enzo's CosmologyComputeExpansionFactor.C, but altered to use physical
units.
Parameters
----------
my_time : float
Age of the Universe in seconds.
Examples
--------
>>> co = Cosmology()
>>> print co.z_from_t(4.e17)
"""
omega_curvature = 1.0 - self.omega_matter - self.omega_lambda
OMEGA_TOLERANCE = 1e-5
ETA_TOLERANCE = 1.0e-10
# Convert the time to Time * H0.
if not isinstance(my_time, YTArray):
my_time = self.quan(my_time, "s")
t0 = (my_time.in_units("s") *
self.hubble_constant.in_units("1/s")).to_ndarray()
# 1) For a flat universe with omega_matter = 1, it's easy.
if ((np.fabs(self.omega_matter-1) < OMEGA_TOLERANCE) and
(self.omega_lambda < OMEGA_TOLERANCE)):
a = np.power(my_time/self.initial_time, 2.0/3.0)
# 2) For omega_matter < 1 and omega_lambda == 0 see
# Peebles 1993, eq. 13-3, 13-10.
# Actually, this is a little tricky since we must solve an equation
        # of the form eta - np.sinh(eta) + x = 0.
if ((self.omega_matter < 1) and
(self.omega_lambda < OMEGA_TOLERANCE)):
            x = 2*t0*np.power(1.0 - self.omega_matter, 1.5) / \
                self.omega_matter
# Compute eta in a three step process, first from a third-order
# Taylor expansion of the formula above, then use that in a fifth-order
# approximation. Then finally, iterate on the formula itself, solving for
# eta. This works well because parts 1 & 2 are an excellent approximation
# when x is small and part 3 converges quickly when x is large.
eta = np.power(6*x, 1.0/3.0) # part 1
eta = np.power(120*x/(20+eta*eta), 1.0/3.0) # part 2
for i in range(40): # part 3
eta_old = eta
eta = np.arcsinh(eta + x)
if (np.fabs(eta-eta_old) < ETA_TOLERANCE):
break
if (i == 39):
print("No convergence after %d iterations." % i)
# Now use eta to compute the expansion factor (eq. 13-10, part 2).
a = self.omega_matter/(2.0*(1.0 - self.omega_matter))*\
(np.cosh(eta) - 1.0)
# 3) For omega_matter > 1 and omega_lambda == 0, use sin/cos.
# Easy, but skip it for now.
if ((self.omega_matter > 1) and
(self.omega_lambda < OMEGA_TOLERANCE)):
print("Never implemented in Enzo, not implemented here.")
return 0
# 4) For flat universe, with non-zero omega_lambda, see eq. 13-20.
if ((np.fabs(omega_curvature) < OMEGA_TOLERANCE) and
(self.omega_lambda > OMEGA_TOLERANCE)):
a = np.power(self.omega_matter /
(1 - self.omega_matter), 1.0/3.0) * \
np.power(np.sinh(1.5 * np.sqrt(1.0 - self.omega_matter)*\
t0), 2.0/3.0)
redshift = (1.0/a) - 1.0
return redshift
def t_from_z(self, z):
"""
Compute the age of the Universe from redshift. This is based on Enzo's
CosmologyComputeTimeFromRedshift.C, but altered to use physical units.
Similar to hubble_time, but using an analytical function.
Parameters
----------
z : float
Redshift.
Examples
--------
>>> co = Cosmology()
        >>> print(co.t_from_z(0.).in_units("Gyr"))
See Also
--------
hubble_time
"""
omega_curvature = 1.0 - self.omega_matter - self.omega_lambda
# 1) For a flat universe with omega_matter = 1, things are easy.
if ((self.omega_matter == 1.0) and (self.omega_lambda == 0.0)):
t0 = 2.0/3.0/np.power(1+z, 1.5)
# 2) For omega_matter < 1 and omega_lambda == 0 see
# Peebles 1993, eq. 13-3, 13-10.
if ((self.omega_matter < 1) and (self.omega_lambda == 0)):
eta = np.arccosh(1 +
2*(1-self.omega_matter)/self.omega_matter/(1+z))
t0 = self.omega_matter/ \
(2*np.power(1.0-self.omega_matter, 1.5))*\
(np.sinh(eta) - eta)
# 3) For omega_matter > 1 and omega_lambda == 0, use sin/cos.
if ((self.omega_matter > 1) and (self.omega_lambda == 0)):
eta = np.arccos(1 - 2*(1-self.omega_matter)/self.omega_matter/(1+z))
t0 = self.omega_matter/(2*np.power(1.0-self.omega_matter, 1.5))*\
(eta - np.sin(eta))
# 4) For flat universe, with non-zero omega_lambda, see eq. 13-20.
if ((np.fabs(omega_curvature) < 1.0e-3) and (self.omega_lambda != 0)):
t0 = 2.0/3.0/np.sqrt(1-self.omega_matter)*\
np.arcsinh(np.sqrt((1-self.omega_matter)/self.omega_matter)/ \
np.power(1+z, 1.5))
# Now convert from Time * H0 to time.
my_time = t0 / self.hubble_constant
return my_time.in_cgs()
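    # Unit-aware array/quantity factories bound to this cosmology's unit
    # registry, constructed lazily on first access.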
_arr = None
@property
def arr(self):
if self._arr is not None:
return self._arr
self._arr = functools.partial(YTArray, registry = self.unit_registry)
return self._arr
_quan = None
@property
def quan(self):
if self._quan is not None:
return self._quan
self._quan = functools.partial(YTQuantity,
registry = self.unit_registry)
return self._quan
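# Trapezoidal integration of f(z) over [a, b] on a grid spaced logarithmically
# in (1 + z), which concentrates sample points at low redshift.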
def trapzint(f, a, b, bins=10000):
zbins = np.logspace(np.log10(a + 1), np.log10(b + 1), bins) - 1
return np.trapz(f(zbins[:-1]), x=zbins[:-1], dx=np.diff(zbins))
|
py | b40b9bd9005e32e724a21bd083e3b5add6c81407 | #
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test FHIR STU3 parsing/printing functionality."""
import os
from typing import TypeVar, Type
from google.protobuf import message
from absl.testing import absltest
from absl.testing import parameterized
from proto.google.fhir.proto.stu3 import datatypes_pb2
from proto.google.fhir.proto.stu3 import resources_pb2
from google.fhir.json_format import json_format_test
from google.fhir.stu3 import json_format
from google.fhir.testing import testdata_utils
from google.fhir.utils import proto_utils
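# Locations of the test goldens and the bundled FHIR STU3 specification package.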
_BIGQUERY_PATH = os.path.join('testdata', 'stu3', 'bigquery')
_EXAMPLES_PATH = os.path.join('testdata', 'stu3', 'examples')
_FHIR_SPEC_PATH = os.path.join('spec', 'hl7.fhir.core', '3.0.1', 'package')
_VALIDATION_PATH = os.path.join('testdata', 'stu3', 'validation')
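# Generic type variable bound to protobuf Message (covers the FHIR protos above).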
_T = TypeVar('_T', bound=message.Message)
class JsonFormatTest(json_format_test.JsonFormatTest):
"""Unit tests for functionality in json_format.py."""
@parameterized.named_parameters(
('_withAccountExample', 'Account-example'),
('_withAccountEwg', 'Account-ewg'),
)
def testJsonFormat_forValidAccount_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.Account)
@parameterized.named_parameters(
('_withActivityDefinitionReferralPrimaryCareMentalHealth',
'ActivityDefinition-referralPrimaryCareMentalHealth'),
('_withActivityDefinitionCitalopramPrescription',
'ActivityDefinition-citalopramPrescription'),
('_withActivityDefinitionReferralPrimaryCareMentalHealthInitial',
'ActivityDefinition-referralPrimaryCareMentalHealth-initial'),
('_withActivityDefinitionHeartValveReplacement',
'ActivityDefinition-heart-valve-replacement'),
('_withActivityDefinitionBloodTubesSupply',
'ActivityDefinition-blood-tubes-supply'),
)
def testJsonFormat_forValidActivityDefinition_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(
file_name, resources_pb2.ActivityDefinition)
@parameterized.named_parameters(
('_withAdverseEventExample', 'AdverseEvent-example'),)
def testJsonFormat_forValidAdverseEvent_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.AdverseEvent)
@parameterized.named_parameters(
('_withAllergyIntoleranceExample', 'AllergyIntolerance-example'),)
def testJsonFormat_forValidAllergyIntolerance_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(
file_name, resources_pb2.AllergyIntolerance)
@parameterized.named_parameters(
('_withAppointmentExample', 'Appointment-example'),
('_withAppointment2docs', 'Appointment-2docs'),
('_withAppointmentExampleReq', 'Appointment-examplereq'),
)
def testJsonFormat_forValidAppointment_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.Appointment)
@parameterized.named_parameters(
('_withAppointmentResponseExample', 'AppointmentResponse-example'),
('_withAppointmentResponseExampleResp',
'AppointmentResponse-exampleresp'),
)
def testJsonFormat_forValidAppointmentResponse_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(
file_name, resources_pb2.AppointmentResponse)
@parameterized.named_parameters(
('_withAuditEventExample', 'AuditEvent-example'),
('_withAuditEventExampleDisclosure', 'AuditEvent-example-disclosure'),
('_withAuditEventExampleLogin', 'AuditEvent-example-login'),
('_withAuditEventExampleLogout', 'AuditEvent-example-logout'),
('_withAuditEventExampleMedia', 'AuditEvent-example-media'),
('_withAuditEventExamplePixQuery', 'AuditEvent-example-pixQuery'),
('_withAuditEventExampleSearch', 'AuditEvent-example-search'),
('_withAuditEventExampleRest', 'AuditEvent-example-rest'),
)
def testJsonFormat_forValidAuditEvent_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.AuditEvent)
@parameterized.named_parameters(
('_withBasicReferral', 'Basic-referral'),
('_withBasicClassModel', 'Basic-classModel'),
('_withBasicBasicExampleNarrative', 'Basic-basic-example-narrative'),
)
def testJsonFormat_forValidBasic_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.Basic)
@parameterized.named_parameters(
('_withBodySiteFetus', 'BodySite-fetus'),
('_withBodySiteSkinPatch', 'BodySite-skin-patch'),
('_withBodySiteTumor', 'BodySite-tumor'),
)
def testJsonFormat_forValidBodySite_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.BodySite)
@parameterized.named_parameters(
('_withBundleBundleExample', 'Bundle-bundle-example'),
('_withBundle72ac849352ac41bd8d5d7258c289b5ea',
'Bundle-72ac8493-52ac-41bd-8d5d-7258c289b5ea'),
('_withBundleHla1', 'Bundle-hla-1'),
('_withBundleFather', 'Bundle-father'),
('_withBundleB0a5e427783c44adb87e2E3efe3369b6f',
'Bundle-b0a5e4277-83c4-4adb-87e2-e3efe3369b6f'),
('_withBundle3ad0687eF477468cAfd5Fcc2bf897819',
'Bundle-3ad0687e-f477-468c-afd5-fcc2bf897819'),
('_withPatientExamplesCypressTemplate',
'patient-examples-cypress-template'),
('_withBundleB248b1b216864b94993637d7a5f94b51',
'Bundle-b248b1b2-1686-4b94-9936-37d7a5f94b51'),
('_withBundle3ad0687eF477468cAfd5Fcc2bf897809',
'Bundle-3ad0687e-f477-468c-afd5-fcc2bf897809'),
('_withBundle3ad0687eF477468cAfd5Fcc2bf897808',
'Bundle-3ad0687e-f477-468c-afd5-fcc2bf897808'),
('_withBundleUssgFht', 'Bundle-ussg-fht'),
('_withBundleXds', 'Bundle-xds'),
)
def testJsonFormat_forValidBundle_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.Bundle)
@parameterized.named_parameters(
('_withCapabilityStatementExample', 'CapabilityStatement-example'),
('_withCapabilityStatementPhr', 'CapabilityStatement-phr'),
)
def testJsonFormat_forValidCapabilityStatement_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(
file_name, resources_pb2.CapabilityStatement)
@parameterized.named_parameters(
('_withCarePlanExample', 'CarePlan-example'),
('_withCarePlanF001', 'CarePlan-f001'),
('_withCarePlanF002', 'CarePlan-f002'),
('_withCarePlanF003', 'CarePlan-f003'),
('_withCarePlanF201', 'CarePlan-f201'),
('_withCarePlanF202', 'CarePlan-f202'),
('_withCarePlanF203', 'CarePlan-f203'),
('_withCarePlanGpvisit', 'CarePlan-gpvisit'),
('_withCarePlanIntegrate', 'CarePlan-integrate'),
('_withCarePlanObesityNarrative', 'CarePlan-obesity-narrative'),
('_withCarePlanPreg', 'CarePlan-preg'),
)
def testJsonFormat_forValidCarePlan_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.CarePlan)
@parameterized.named_parameters(
('_withCareTeamExample', 'CareTeam-example'),)
def testJsonFormat_forValidCareTeam_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.CareTeam)
@parameterized.named_parameters(
('_withChargeItemExample', 'ChargeItem-example'),)
def testJsonFormat_forValidChargeItem_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.ChargeItem)
@parameterized.named_parameters(
('_withClaim100150', 'Claim-100150'),
('_withClaim960150', 'Claim-960150'),
('_withClaim960151', 'Claim-960151'),
('_withClaim100151', 'Claim-100151'),
('_withClaim100156', 'Claim-100156'),
('_withClaim100152', 'Claim-100152'),
('_withClaim100155', 'Claim-100155'),
('_withClaim100154', 'Claim-100154'),
('_withClaim100153', 'Claim-100153'),
('_withClaim760150', 'Claim-760150'),
('_withClaim760152', 'Claim-760152'),
('_withClaim760151', 'Claim-760151'),
('_withClaim860150', 'Claim-860150'),
('_withClaim660150', 'Claim-660150'),
('_withClaim660151', 'Claim-660151'),
('_withClaim660152', 'Claim-660152'),
)
def testJsonFormat_forValidClaim_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.Claim)
@parameterized.named_parameters(
('_withClaimResponseR3500', 'ClaimResponse-R3500'),)
def testJsonFormat_forValidClaimResponse_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.ClaimResponse)
@parameterized.named_parameters(
('_withClinicalImpressionExample', 'ClinicalImpression-example'),)
def testJsonFormat_forValidClinicalImpression_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(
file_name, resources_pb2.ClinicalImpression)
@parameterized.named_parameters(
('_withCodeSystemExample', 'CodeSystem-example'),
('_withCodeSystemListExampleCodes', 'CodeSystem-list-example-codes'),
)
def testJsonFormat_forValidCodeSystem_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.CodeSystem)
@parameterized.named_parameters(
('_withCommunicationExample', 'Communication-example'),
('_withCommunicationFmAttachment', 'Communication-fm-attachment'),
('_withCommunicationFmSolicited', 'Communication-fm-solicited'),
)
def testJsonFormat_forValidCommunication_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.Communication)
@parameterized.named_parameters(
('_withCommunicationRequestExample', 'CommunicationRequest-example'),
('_withCommunicationRequestFmSolicit', 'CommunicationRequest-fm-solicit'),
)
def testJsonFormat_forValidCommunicationRequest_succeeds(
self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(
file_name, resources_pb2.CommunicationRequest)
@parameterized.named_parameters(
('_withCompartmentDefinitionExample', 'CompartmentDefinition-example'),)
def testJsonFormat_forValidCompartmentDefinition_succeeds(
self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(
file_name, resources_pb2.CompartmentDefinition)
@parameterized.named_parameters(
('_withCompositionExample', 'Composition-example'),)
def testJsonFormat_forValidComposition_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.Composition)
@parameterized.named_parameters(
('_withConceptmapExample', 'conceptmap-example'),
('_withConceptmapExample2', 'conceptmap-example-2'),
('_withConceptmapExampleSpecimenType',
'conceptmap-example-specimen-type'),
)
def testJsonFormat_forValidConceptMap_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.ConceptMap)
@parameterized.named_parameters(
('_withConditionExample', 'Condition-example'),
('_withConditionExample2', 'Condition-example2'),
('_withConditionF001', 'Condition-f001'),
('_withConditionF002', 'Condition-f002'),
('_withConditionF003', 'Condition-f003'),
('_withConditionF201', 'Condition-f201'),
('_withConditionF202', 'Condition-f202'),
('_withConditionF203', 'Condition-f203'),
('_withConditionF204', 'Condition-f204'),
('_withConditionF205', 'Condition-f205'),
('_withConditionFamilyHistory', 'Condition-family-history'),
('_withConditionStroke', 'Condition-stroke'),
)
def testJsonFormat_forValidCondition_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.Condition)
@parameterized.named_parameters(
('_withConsentConsentExampleBasic', 'Consent-consent-example-basic'),
('_withConsentConsentExampleEmergency',
'Consent-consent-example-Emergency'),
('_withConsentConsentExampleGrantor', 'Consent-consent-example-grantor'),
('_withConsentConsentExampleNotAuthor',
'Consent-consent-example-notAuthor'),
('_withConsentConsentExampleNotOrg', 'Consent-consent-example-notOrg'),
('_withConsentConsentExampleNotThem', 'Consent-consent-example-notThem'),
('_withConsentConsentExampleNotThis', 'Consent-consent-example-notThis'),
('_withConsentConsentExampleNotTime', 'Consent-consent-example-notTime'),
('_withConsentConsentExampleOut', 'Consent-consent-example-Out'),
('_withConsentConsentExamplePkb', 'Consent-consent-example-pkb'),
('_withConsentConsentExampleSignature',
'Consent-consent-example-signature'),
('_withConsentConsentExampleSmartonfhir',
'Consent-consent-example-smartonfhir'),
)
def testJsonFormat_forValidConsent_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.Consent)
@parameterized.named_parameters(
('_withAllergyIntoleranceExample', 'AllergyIntolerance-example',
resources_pb2.AllergyIntolerance, 'allergy_intolerance'),
('_withCapabilityStatementExample', 'CapabilityStatement-example',
resources_pb2.CapabilityStatement, 'capability_statement'),
('_withImmunizationExample', 'Immunization-example',
resources_pb2.Immunization, 'immunization'),
('_withMedicationMed0305', 'Medication-med0305', resources_pb2.Medication,
'medication'),
('_withObservationF004', 'Observation-f004', resources_pb2.Observation,
'observation'),
('_withPatientExample', 'patient-example', resources_pb2.Patient,
'patient'),
('_withPractitionerF003', 'Practitioner-f003', resources_pb2.Practitioner,
'practitioner'),
('_withProcedureAmbulation', 'Procedure-ambulation',
resources_pb2.Procedure, 'procedure'),
('_withTaskExample4', 'Task-example4', resources_pb2.Task, 'task'),
)
def testJsonFormat_forValidContainedResource_succeeds(
self, file_name: str, proto_cls: Type[message.Message],
contained_field: str):
"""Checks equality of print-parse 'round-trip' for a contained resource."""
proto_path = os.path.join(_EXAMPLES_PATH, file_name + '.prototxt')
golden_proto = testdata_utils.read_protos(proto_path, proto_cls)[0]
# Construct the contained resource to validate
contained = resources_pb2.ContainedResource()
proto_utils.set_value_at_field(contained, contained_field, golden_proto)
# Validate printing and then parsing the print output against the golden
contained_json_str = json_format.print_fhir_to_json_string(contained)
parsed_contained = json_format.json_fhir_string_to_proto(
contained_json_str,
resources_pb2.ContainedResource,
validate=True,
default_timezone='Australia/Sydney')
self.assertEqual(contained, parsed_contained)
@parameterized.named_parameters(
('_withContractC123', 'Contract-C-123'),
('_withContractC2121', 'Contract-C-2121'),
('_withContractPcdExampleNotAuthor', 'Contract-pcd-example-notAuthor'),
('_withContractPcdExampleNotLabs', 'Contract-pcd-example-notLabs'),
('_withContractPcdExampleNotOrg', 'Contract-pcd-example-notOrg'),
('_withContractPcdExampleNotThem', 'Contract-pcd-example-notThem'),
('_withContractPcdExampleNotThis', 'Contract-pcd-example-notThis'),
)
def testJsonFormat_forValidContract_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.Contract)
@parameterized.named_parameters(
('_withCoverage9876B1', 'Coverage-9876B1'),
('_withCoverage7546D', 'Coverage-7546D'),
('_withCoverage7547E', 'Coverage-7547E'),
('_withCoverageSP1234', 'Coverage-SP1234'),
)
def testJsonFormat_forValidCoverage_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.Coverage)
@parameterized.named_parameters(
('_withDataElementGender', 'DataElement-gender'),
('_withDataElementProthrombin', 'DataElement-prothrombin'),
)
def testJsonFormat_forValidDataElement_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.DataElement)
@parameterized.named_parameters(
('_withDetectedIssueDdi', 'DetectedIssue-ddi'),
('_withDetectedIssueAllergy', 'DetectedIssue-allergy'),
('_withDetectedIssueDuplicate', 'DetectedIssue-duplicate'),
('_withDetectedIssueLab', 'DetectedIssue-lab'),
)
def testJsonFormat_forValidDetectedIssue_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.DetectedIssue)
@parameterized.named_parameters(
('_withDeviceExample', 'Device-example'),
('_withDeviceF001', 'Device-f001'),
('_withDeviceIhePcd', 'Device-ihe-pcd'),
('_withDeviceExamplePacemaker', 'Device-example-pacemaker'),
('_withDeviceSoftware', 'Device-software'),
('_withDeviceExampleUdi1', 'Device-example-udi1'),
('_withDeviceExampleUdi2', 'Device-example-udi2'),
('_withDeviceExampleUdi3', 'Device-example-udi3'),
('_withDeviceExampleUdi4', 'Device-example-udi4'),
)
def testJsonFormat_forValidDevice_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.Device)
@parameterized.named_parameters(
('_withDeviceComponentExample', 'DeviceComponent-example'),
('_withDeviceComponentExampleProdspec',
'DeviceComponent-example-prodspec'),
)
def testJsonFormat_forValidDeviceComponent_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(
file_name, resources_pb2.DeviceComponent)
@parameterized.named_parameters(
('_withDeviceMetricExample', 'DeviceMetric-example'),)
def testJsonFormat_forValidDeviceMetric_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.DeviceMetric)
@parameterized.named_parameters(
('_withDeviceRequestExample', 'DeviceRequest-example'),
('_withDeviceRequestInsulinPump', 'DeviceRequest-insulinpump'),
)
def testJsonFormat_forValidDeviceRequest_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.DeviceRequest)
@parameterized.named_parameters(
('_withDeviceUseStatementExample', 'DeviceUseStatement-example'),)
def testJsonFormat_forValidDeviceUseStatement_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(
file_name, resources_pb2.DeviceUseStatement)
@parameterized.named_parameters(
('_withDiagnosticReport101', 'DiagnosticReport-101'),
('_withDiagnosticReport102', 'DiagnosticReport-102'),
('_withDiagnosticReportF001', 'DiagnosticReport-f001'),
('_withDiagnosticReportF201', 'DiagnosticReport-f201'),
('_withDiagnosticReportF202', 'DiagnosticReport-f202'),
('_withDiagnosticReportGhp', 'DiagnosticReport-ghp'),
('_withDiagnosticReportGingivalMass', 'DiagnosticReport-gingival-mass'),
('_withDiagnosticReportLipids', 'DiagnosticReport-lipids'),
('_withDiagnosticReportPap', 'DiagnosticReport-pap'),
('_withDiagnosticReportExamplePgx', 'DiagnosticReport-example-pgx'),
('_withDiagnosticReportUltrasound', 'DiagnosticReport-ultrasound'),
('_withDiagnosticReportDg2', 'DiagnosticReport-dg2'),
)
def testJsonFormat_forValidDiagnosticReport_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(
file_name, resources_pb2.DiagnosticReport)
@parameterized.named_parameters(
('_withDocumentManifestExample', 'DocumentManifest-example'),)
def testJsonFormat_forValidDocumentManifest_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(
file_name, resources_pb2.DocumentManifest)
@parameterized.named_parameters(
('_withDocumentReferenceExample', 'DocumentReference-example'),)
def testJsonFormat_forValidDocumentReference_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(
file_name, resources_pb2.DocumentReference)
@parameterized.named_parameters(
('_withEligibilityRequest52345', 'EligibilityRequest-52345'),
('_withEligibilityRequest52346', 'EligibilityRequest-52346'),
)
def testJsonFormat_forValidEligibilityRequest_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(
file_name, resources_pb2.EligibilityRequest)
@parameterized.named_parameters(
('_withEligibilityResponseE2500', 'EligibilityResponse-E2500'),
('_withEligibilityResponseE2501', 'EligibilityResponse-E2501'),
('_withEligibilityResponseE2502', 'EligibilityResponse-E2502'),
('_withEligibilityResponseE2503', 'EligibilityResponse-E2503'),
)
def testJsonFormat_forValidEligibilityResponse_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(
file_name, resources_pb2.EligibilityResponse)
@parameterized.named_parameters(
('_withParametersEmptyResource', 'Parameters-empty-resource',
resources_pb2.Parameters),)
def testJsonFormat_forValidEmptyNestedResource_succeeds(
self, file_name: str, proto_cls: Type[message.Message]):
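    """Checks equality of print-parse 'round-trip' for an empty nested resource."""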
self.assert_parse_and_print_examples_equals_golden(file_name, proto_cls)
@parameterized.named_parameters(
('_withEncounterExample', 'Encounter-example'),
('_withEncounterEmerg', 'Encounter-emerg'),
('_withEncounterF001', 'Encounter-f001'),
('_withEncounterF002', 'Encounter-f002'),
('_withEncounterF003', 'Encounter-f003'),
('_withEncounterF201', 'Encounter-f201'),
('_withEncounterF202', 'Encounter-f202'),
('_withEncounterF203', 'Encounter-f203'),
('_withEncounterHome', 'Encounter-home'),
('_withEncounterXcda', 'Encounter-xcda'),
)
def testJsonFormat_forValidEncounter_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.Encounter)
@parameterized.named_parameters(
('_withEndpointExample', 'Endpoint-example'),
('_withEndpointExampleIid', 'Endpoint-example-iid'),
('_withEndpointExampleWadors', 'Endpoint-example-wadors'),
)
def testJsonFormat_forValidEndpoint_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.Endpoint)
@parameterized.named_parameters(
('_withEnrollmentRequest22345', 'EnrollmentRequest-22345'),)
def testJsonFormat_forValidEnrollmentRequest_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(
file_name, resources_pb2.EnrollmentRequest)
@parameterized.named_parameters(
('_withEnrollmentResponseEr2500', 'EnrollmentResponse-ER2500'),)
def testJsonFormat_forValidEnrollmentResponse_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(
file_name, resources_pb2.EnrollmentResponse)
@parameterized.named_parameters(
('_withEpisodeOfCareExample', 'EpisodeOfCare-example'),)
def testJsonFormat_forValidEpisodeOfCare_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.EpisodeOfCare)
@parameterized.named_parameters(
('_withExpansionProfileExample', 'ExpansionProfile-example'),)
def testJsonFormat_forValidExpansionProfile_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(
file_name, resources_pb2.ExpansionProfile)
@parameterized.named_parameters(
('_withExplanationOfBenefitEb3500', 'ExplanationOfBenefit-EB3500'),)
def testJsonFormat_forValidExplanationOfBenefit_succeeds(
self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(
file_name, resources_pb2.ExplanationOfBenefit)
@parameterized.named_parameters(
('_withFamilyMemberHistoryFather', 'FamilyMemberHistory-father'),
('_withFamilyMemberHistoryMother', 'FamilyMemberHistory-mother'),
)
def testJsonFormat_forValidFamilyMemberHistory_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(
file_name, resources_pb2.FamilyMemberHistory)
@parameterized.named_parameters(
('_withFlagExample', 'Flag-example'),
('_withFlagExampleEncounter', 'Flag-example-encounter'),
)
def testJsonFormat_forValidFlag_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.Flag)
@parameterized.named_parameters(
('_withGoalExample', 'Goal-example'),
('_withGoalStopSmoking', 'Goal-stop-smoking'),
)
def testJsonFormat_forValidGoal_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.Goal)
@parameterized.named_parameters(
('_withGraphDefinitionExample', 'GraphDefinition-example'),)
def testJsonFormat_forValidGraphDefinition_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(
file_name, resources_pb2.GraphDefinition)
@parameterized.named_parameters(
('_withGroup101', 'Group-101'),
('_withGroup102', 'Group-102'),
)
def testJsonFormat_forValidGroup_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.Group)
@parameterized.named_parameters(
('_withGuidanceResponseExample', 'GuidanceResponse-example'),)
def testJsonFormat_forValidGuidanceResponse_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(
file_name, resources_pb2.GuidanceResponse)
@parameterized.named_parameters(
('_withHealthcareServiceExample', 'HealthcareService-example'),)
def testJsonFormat_forValidHealthcareService_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(
file_name, resources_pb2.HealthcareService)
@parameterized.named_parameters(
('_withImagingManifestExample', 'ImagingManifest-example'),)
def testJsonFormat_forValidImagingManifest_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(
file_name, resources_pb2.ImagingManifest)
@parameterized.named_parameters(
('_withImagingStudyExample', 'ImagingStudy-example'),
('_withImagingStudyExampleXr', 'ImagingStudy-example-xr'),
)
def testJsonFormat_forValidImagingStudy_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.ImagingStudy)
@parameterized.named_parameters(
('_withImmunizationExample', 'Immunization-example'),
('_withImmunizationHistorical', 'Immunization-historical'),
('_withImmunizationNotGiven', 'Immunization-notGiven'),
)
def testJsonFormat_forValidImmunization_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.Immunization)
@parameterized.named_parameters(
('_withImmunizationRecommendationExample',
'ImmunizationRecommendation-example'),
('_withImmunizationRecommendationTargetDiseaseExample',
'immunizationrecommendation-target-disease-example'),
)
def testJsonFormat_forValidImmunizationRecommendation_succeeds(
self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(
file_name, resources_pb2.ImmunizationRecommendation)
@parameterized.named_parameters(
('_withImplementationGuideExample', 'ImplementationGuide-example'),)
def testJsonFormat_forValidImplementationGuide_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(
file_name, resources_pb2.ImplementationGuide)
@parameterized.named_parameters(
('_withLibraryLibraryCms146Example', 'Library-library-cms146-example'),
('_withLibraryCompositionExample', 'Library-composition-example'),
('_withLibraryExample', 'Library-example'),
('_withLibraryLibraryFhirHelpersPredecessor',
'Library-library-fhir-helpers-predecessor'),
)
def testJsonFormat_forValidLibrary_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.Library)
@parameterized.named_parameters(
('_withLinkageExample', 'Linkage-example'),)
def testJsonFormat_forValidLinkage_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.Linkage)
@parameterized.named_parameters(
('_withListExample', 'List-example'),
('_withListCurrentAllergies', 'List-current-allergies'),
('_withListExampleDoubleCousinRelationship',
'List-example-double-cousin-relationship'),
('_withListExampleEmpty', 'List-example-empty'),
('_withListF201', 'List-f201'),
('_withListGenetic', 'List-genetic'),
('_withListPrognosis', 'List-prognosis'),
('_withListMedList', 'List-med-list'),
('_withListExampleSimpleEmpty', 'List-example-simple-empty'),
)
def testJsonFormat_forValidList_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.List)
@parameterized.named_parameters(
('_withLocation1', 'Location-1'),
('_withLocationAmb', 'Location-amb'),
('_withLocationHl7', 'Location-hl7'),
('_withLocationPh', 'Location-ph'),
('_withLocation2', 'Location-2'),
('_withLocationUkp', 'Location-ukp'),
)
def testJsonFormat_forValidLocation_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.Location)
@parameterized.named_parameters(
('_withMeasureMeasureCms146Example', 'Measure-measure-cms146-example'),
('_withMeasureComponentAExample', 'Measure-component-a-example'),
('_withMeasureComponentBExample', 'Measure-component-b-example'),
('_withMeasureCompositeExample', 'Measure-composite-example'),
('_withMeasureMeasurePredecessorExample',
'Measure-measure-predecessor-example'),
)
def testJsonFormat_forValidMeasure_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.Measure)
@parameterized.named_parameters(
('_withMeasureReportMeasureReportCms146Cat1Example',
'MeasureReport-measurereport-cms146-cat1-example'),
('_withMeasureReportMeasurereportCms146Cat2Example',
'MeasureReport-measurereport-cms146-cat2-example'),
('_withMeasureReportMeasurereportCms146Cat3Example',
'MeasureReport-measurereport-cms146-cat3-example'),
)
def testJsonFormat_forValidMeasureReport_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.MeasureReport)
@parameterized.named_parameters(
('_withMediaExample', 'Media-example'),
('_withMedia1_2_840_11361907579238403408700_3_0_14_19970327150033',
'Media-1.2.840.11361907579238403408700.3.0.14.19970327150033'),
('_withMediaSound', 'Media-sound'),
('_withMediaXray', 'Media-xray'),
)
def testJsonFormat_forValidMedia_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.Media)
@parameterized.named_parameters(
('_withMedicationMed0301', 'Medication-med0301'),
('_withMedicationMed0302', 'Medication-med0302'),
('_withMedicationMed0303', 'Medication-med0303'),
('_withMedicationMed0304', 'Medication-med0304'),
('_withMedicationMed0305', 'Medication-med0305'),
('_withMedicationMed0306', 'Medication-med0306'),
('_withMedicationMed0307', 'Medication-med0307'),
('_withMedicationMed0308', 'Medication-med0308'),
('_withMedicationMed0309', 'Medication-med0309'),
('_withMedicationMed0310', 'Medication-med0310'),
('_withMedicationMed0311', 'Medication-med0311'),
('_withMedicationMed0312', 'Medication-med0312'),
('_withMedicationMed0313', 'Medication-med0313'),
('_withMedicationMed0314', 'Medication-med0314'),
('_withMedicationMed0315', 'Medication-med0315'),
('_withMedicationMed0316', 'Medication-med0316'),
('_withMedicationMed0317', 'Medication-med0317'),
('_withMedicationMed0318', 'Medication-med0318'),
('_withMedicationMed0319', 'Medication-med0319'),
('_withMedicationMed0320', 'Medication-med0320'),
('_withMedicationMed0321', 'Medication-med0321'),
('_withMedicationMedicationExample1', 'Medication-medicationexample1'),
('_withMedicationMedExample015', 'Medication-medexample015'),
)
def testJsonFormat_forValidMedication_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.Medication)
@parameterized.named_parameters(
('_withMedicationAdministrationMedadminExample03',
'MedicationAdministration-medadminexample03'),)
def testJsonFormat_forValidMedicationAdministration_succeeds(
self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(
file_name, resources_pb2.MedicationAdministration)
@parameterized.named_parameters(
('_withMedicationDispenseMeddisp008', 'MedicationDispense-meddisp008'),)
def testJsonFormat_forValidMedicationDispense_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(
file_name, resources_pb2.MedicationDispense)
@parameterized.named_parameters(
('_withMedicationRequestMedrx0311', 'MedicationRequest-medrx0311'),
('_withMedicationRequestMedrx002', 'MedicationRequest-medrx002'),
)
def testJsonFormat_forValidMedicationRequest_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(
file_name, resources_pb2.MedicationRequest)
@parameterized.named_parameters(
('_withMedicationStatementExample001', 'MedicationStatement-example001'),
('_withMedicationStatementExample002', 'MedicationStatement-example002'),
('_withMedicationStatementExample003', 'MedicationStatement-example003'),
('_withMedicationStatementExample004', 'MedicationStatement-example004'),
('_withMedicationStatementExample005', 'MedicationStatement-example005'),
('_withMedicationStatementExample006', 'MedicationStatement-example006'),
('_withMedicationStatementExample007', 'MedicationStatement-example007'),
)
def testJsonFormat_forValidMedicationStatement_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(
file_name, resources_pb2.MedicationStatement)
@parameterized.named_parameters(
('_withMessageDefinitionExample', 'MessageDefinition-example'),)
def testJsonFormat_forValidMessageDefinition_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(
file_name, resources_pb2.MessageDefinition)
@parameterized.named_parameters(
('_withMessageHeader1cbdfb97585948a48301D54eab818d68',
'MessageHeader-1cbdfb97-5859-48a4-8301-d54eab818d68'),)
def testJsonFormat_forValidMessageHeader_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.MessageHeader)
@parameterized.named_parameters(
('_withNamingSystemExample', 'NamingSystem-example'),
('_withNamingSystemExampleId', 'NamingSystem-example-id'),
('_withNamingSystemExampleReplaced', 'NamingSystem-example-replaced'),
)
def testJsonFormat_forValidNamingSystem_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.NamingSystem)
@parameterized.named_parameters(
('_withNutritionOrderCardiacDiet', 'NutritionOrder-cardiacdiet'),
('_withNutritionOrderDiabeticDiet', 'NutritionOrder-diabeticdiet'),
('_withNutritionOrderDiabeticSupplement',
'NutritionOrder-diabeticsupplement'),
('_withNutritionOrderEnergySupplement',
'NutritionOrder-energysupplement'),
('_withNutritionOrderEnteralbolus', 'NutritionOrder-enteralbolus'),
('_withNutritionOrderEnteralContinuous',
'NutritionOrder-enteralcontinuous'),
('_withNutritionOrderFiberRestrictedDiet',
'NutritionOrder-fiberrestricteddiet'),
('_withNutritionOrderInfantenteral', 'NutritionOrder-infantenteral'),
('_withNutritionOrderProteinSupplement',
'NutritionOrder-proteinsupplement'),
('_withNutritionOrderPureedDiet', 'NutritionOrder-pureeddiet'),
('_withNutritionOrderPureeddietSimple',
'NutritionOrder-pureeddiet-simple'),
('_withNutritionOrderRenalDiet', 'NutritionOrder-renaldiet'),
('_withNutritionOrderTextureModified', 'NutritionOrder-texturemodified'),
)
def testJsonFormat_forValidNutritionOrder_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.NutritionOrder)
@parameterized.named_parameters(
('_withObservationExample', 'Observation-example'),
('_withObservation10MinuteApgarScore',
'Observation-10minute-apgar-score'),
('_withObservation1MinuteApgarScore', 'Observation-1minute-apgar-score'),
('_withObservation20MinuteApgarScore',
'Observation-20minute-apgar-score'),
('_withObservation2MinuteApgarScore', 'Observation-2minute-apgar-score'),
('_withObservation5MinuteApgarScore', 'Observation-5minute-apgar-score'),
('_withObservationBloodPressure', 'Observation-blood-pressure'),
('_withObservationBloodPressureCancel',
'Observation-blood-pressure-cancel'),
('_withObservationBloodPressureDar', 'Observation-blood-pressure-dar'),
('_withObservationBmd', 'Observation-bmd'),
('_withObservationBmi', 'Observation-bmi'),
('_withObservationBodyHeight', 'Observation-body-height'),
('_withObservationBodyLength', 'Observation-body-length'),
('_withObservationBodyTemperature', 'Observation-body-temperature'),
('_withObservationDateLastmp', 'Observation-date-lastmp'),
('_withObservationExampleDiplotype1', 'Observation-example-diplotype1'),
('_withObservationEyeColor', 'Observation-eye-color'),
('_withObservationF001', 'Observation-f001'),
('_withObservationF002', 'Observation-f002'),
('_withObservationF003', 'Observation-f003'),
('_withObservationF004', 'Observation-f004'),
('_withObservationF005', 'Observation-f005'),
('_withObservationF202', 'Observation-f202'),
('_withObservationF203', 'Observation-f203'),
('_withObservationF204', 'Observation-f204'),
('_withObservationF205', 'Observation-f205'),
('_withObservationF206', 'Observation-f206'),
('_withObservationExampleGenetics1', 'Observation-example-genetics-1'),
('_withObservationExampleGenetics2', 'Observation-example-genetics-2'),
('_withObservationExampleGenetics3', 'Observation-example-genetics-3'),
('_withObservationExampleGenetics4', 'Observation-example-genetics-4'),
('_withObservationExampleGenetics5', 'Observation-example-genetics-5'),
('_withObservationGlasgow', 'Observation-glasgow'),
('_withObservationGcsQa', 'Observation-gcs-qa'),
('_withObservationExampleHaplotype1', 'Observation-example-haplotype1'),
('_withObservationExampleHaplotype2', 'Observation-example-haplotype2'),
('_withObservationHeadCircumference', 'Observation-head-circumference'),
('_withObservationHeartRate', 'Observation-heart-rate'),
('_withObservationMbp', 'Observation-mbp'),
('_withObservationExamplePhenotype', 'Observation-example-phenotype'),
('_withObservationRespiratoryRate', 'Observation-respiratory-rate'),
('_withObservationEkg', 'Observation-ekg'),
('_withObservationSatO2', 'Observation-satO2'),
('_withObservationExampleTpmtDiplotype',
'Observation-example-TPMT-diplotype'),
('_withObservationExampleTpmtHaplotypeOne',
'Observation-example-TPMT-haplotype-one'),
('_withObservationExampleTpmtHaplotypeTwo',
'Observation-example-TPMT-haplotype-two'),
('_withObservationUnsat', 'Observation-unsat'),
('_withObservationVitalsPanel', 'Observation-vitals-panel'),
)
def testJsonFormat_forValidObservation_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.Observation)
@parameterized.named_parameters(
('_withOperationDefinitionExample', 'OperationDefinition-example'),)
def testJsonFormat_forValidOperationDefinition_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(
file_name, resources_pb2.OperationDefinition)
@parameterized.named_parameters(
('_withOperationOutcome101', 'OperationOutcome-101'),
('_withOperationOutcomeAllok', 'OperationOutcome-allok'),
('_withOperationOutcomeBreakTheGlass',
'OperationOutcome-break-the-glass'),
('_withOperationOutcomeException', 'OperationOutcome-exception'),
('_withOperationOutcomeSearchFail', 'OperationOutcome-searchfail'),
('_withOperationOutcomeValidationFail',
'OperationOutcome-validationfail'),
)
def testJsonFormat_forValidOperationOutcome_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(
file_name, resources_pb2.OperationOutcome)
@parameterized.named_parameters(
('_withOrganizationHl7', 'Organization-hl7'),
('_withOrganizationF001', 'Organization-f001'),
('_withOrganizationF002', 'Organization-f002'),
('_withOrganizationF003', 'Organization-f003'),
('_withOrganizationF201', 'Organization-f201'),
('_withOrganizationF203', 'Organization-f203'),
('_withOrganization1', 'Organization-1'),
('_withOrganization2_16_840_1_113883_19_5',
'Organization-2.16.840.1.113883.19.5'),
('_withOrganization2', 'Organization-2'),
('_withOrganization1832473e2fe0452dAbe93cdb9879522f',
'Organization-1832473e-2fe0-452d-abe9-3cdb9879522f'),
('_withOrganizationMmanu', 'Organization-mmanu'),
)
def testJsonFormat_forValidOrganization_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.Organization)
@parameterized.named_parameters(
('_withParametersExample', 'Parameters-example'),)
def testJsonFormat_forValidParameters_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.Parameters)
@parameterized.named_parameters(
('_withPatientExample', 'patient-example'),
('_withPatientExampleA', 'patient-example-a'),
('_withPatientExampleAnimal', 'patient-example-animal'),
('_withPatientExampleB', 'patient-example-b'),
('_withPatientExampleC', 'patient-example-c'),
('_withPatientExampleChinese', 'patient-example-chinese'),
('_withPatientExampleD', 'patient-example-d'),
('_withPatientExampleDicom', 'patient-example-dicom'),
('_withPatientExampleF001Pieter', 'patient-example-f001-pieter'),
('_withPatientExampleF201Roel', 'patient-example-f201-roel'),
('_withPatientExampleIhePcd', 'patient-example-ihe-pcd'),
('_withPatientExampleProband', 'patient-example-proband'),
('_withPatientExampleXcda', 'patient-example-xcda'),
('_withPatientExampleXds', 'patient-example-xds'),
('_withPatientGeneticsExample1', 'patient-genetics-example1'),
('_withPatientGlossyExample', 'patient-glossy-example'),
)
def testJsonFormat_forValidPatient_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.Patient)
@parameterized.named_parameters(
('_withPaymentNotice77654', 'PaymentNotice-77654'),)
def testJsonFormat_forValidPaymentNotice_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.PaymentNotice)
@parameterized.named_parameters(
('_withPaymentReconciliationER2500', 'PaymentReconciliation-ER2500'),)
def testJsonFormat_forValidPaymentReconciliation_succeeds(
self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(
file_name, resources_pb2.PaymentReconciliation)
@parameterized.named_parameters(
('_withPersonExample', 'Person-example'),
('_withPersonF002', 'Person-f002'),
)
def testJsonFormat_forValidPerson_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.Person)
@parameterized.named_parameters(
('_withPlanDefinitionLowSuicideRiskOrderSet',
'PlanDefinition-low-suicide-risk-order-set'),
('_withPlanDefinitionKdn5', 'PlanDefinition-KDN5'),
('_withPlanDefinitionOptionsExample', 'PlanDefinition-options-example'),
('_withPlanDefinitionZikaVirusInterventionInitial',
'PlanDefinition-zika-virus-intervention-initial'),
('_withPlanDefinitionProtocolExample', 'PlanDefinition-protocol-example'),
)
def testJsonFormat_forValidPlanDefinition_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.PlanDefinition)
@parameterized.named_parameters(
('_withPractitionerExample', 'Practitioner-example'),
('_withPractitionerF001', 'Practitioner-f001'),
('_withPractitionerF002', 'Practitioner-f002'),
('_withPractitionerF003', 'Practitioner-f003'),
('_withPractitionerF004', 'Practitioner-f004'),
('_withPractitionerF005', 'Practitioner-f005'),
('_withPractitionerF006', 'Practitioner-f006'),
('_withPractitionerF007', 'Practitioner-f007'),
('_withPractitionerF201', 'Practitioner-f201'),
('_withPractitionerF202', 'Practitioner-f202'),
('_withPractitionerF203', 'Practitioner-f203'),
('_withPractitionerF204', 'Practitioner-f204'),
('_withPractitionerXcda1', 'Practitioner-xcda1'),
('_withPractitionerXcdaAuthor', 'Practitioner-xcda-author'),
)
def testJsonFormat_forValidPractitioner_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.Practitioner)
@parameterized.named_parameters(
('_withPractitionerRoleExample', 'PractitionerRole-example'),)
def testJsonFormat_forValidPractitionerRole_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(
file_name, resources_pb2.PractitionerRole)
@parameterized.named_parameters(
('_withBase64Binary', 'base64_binary', datatypes_pb2.Base64Binary),
('_withBoolean', 'boolean', datatypes_pb2.Boolean),
('_withCode', 'code', datatypes_pb2.Code),
('_withDate', 'date', datatypes_pb2.Date),
('_withDateTime', 'date_time', datatypes_pb2.DateTime),
('_withDecimal', 'decimal', datatypes_pb2.Decimal),
('_withId', 'id', datatypes_pb2.Id),
('_withInstant', 'instant', datatypes_pb2.Instant),
('_withInteger', 'integer', datatypes_pb2.Integer),
('_withMarkdown', 'markdown', datatypes_pb2.Markdown),
('_withOid', 'oid', datatypes_pb2.Oid),
('_withPositiveInt', 'positive_int', datatypes_pb2.PositiveInt),
('_withString', 'string', datatypes_pb2.String),
('_withTime', 'time', datatypes_pb2.Time),
('_withUnsignedInt', 'unsigned_int', datatypes_pb2.UnsignedInt),
('_withUri', 'uri', datatypes_pb2.Uri),
('_withXhtml', 'xhtml', datatypes_pb2.Xhtml),
)
def testJsonFormat_forValidPrimitive_succeeds(
self, file_name: str, primitive_cls: Type[message.Message]):
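    """Checks primitive datatype parsing and printing against validation goldens."""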
json_path = os.path.join(_VALIDATION_PATH, file_name + '.valid.ndjson')
proto_path = os.path.join(_VALIDATION_PATH, file_name + '.valid.prototxt')
self.assert_parse_equals_golden(
json_path,
proto_path,
primitive_cls,
parse_f=json_format.json_fhir_string_to_proto,
json_delimiter='\n',
proto_delimiter='\n---\n',
validate=True,
default_timezone='Australia/Sydney')
self.assert_print_equals_golden(
json_path,
proto_path,
primitive_cls,
print_f=json_format.pretty_print_fhir_to_json_string,
json_delimiter='\n',
proto_delimiter='\n---\n')
@parameterized.named_parameters(
('_withProcedureExample', 'Procedure-example'),
('_withProcedureAmbulation', 'Procedure-ambulation'),
('_withProcedureAppendectomyNarrative',
'Procedure-appendectomy-narrative'),
('_withProcedureBiopsy', 'Procedure-biopsy'),
('_withProcedureColonBiopsy', 'Procedure-colon-biopsy'),
('_withProcedureColonoscopy', 'Procedure-colonoscopy'),
('_withProcedureEducation', 'Procedure-education'),
('_withProcedureF001', 'Procedure-f001'),
('_withProcedureF002', 'Procedure-f002'),
('_withProcedureF003', 'Procedure-f003'),
('_withProcedureF004', 'Procedure-f004'),
('_withProcedureF201', 'Procedure-f201'),
('_withProcedureExampleImplant', 'Procedure-example-implant'),
('_withProcedureOb', 'Procedure-ob'),
('_withProcedurePhysicalTherapy', 'Procedure-physical-therapy'),
)
def testJsonFormat_forValidProcedure_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.Procedure)
@parameterized.named_parameters(
('_withProcedureRequestExample', 'ProcedureRequest-example'),
('_withProcedureRequestPhysiotherapy', 'ProcedureRequest-physiotherapy'),
('_withProcedureRequestDoNotTurn', 'ProcedureRequest-do-not-turn'),
('_withProcedureRequestBenchpress', 'ProcedureRequest-benchpress'),
('_withProcedureRequestAmbulation', 'ProcedureRequest-ambulation'),
('_withProcedureRequestAppendectomyNarrative',
'ProcedureRequest-appendectomy-narrative'),
('_withProcedureRequestColonoscopy', 'ProcedureRequest-colonoscopy'),
('_withProcedureRequestColonBiopsy', 'ProcedureRequest-colon-biopsy'),
('_withProcedureRequestDi', 'ProcedureRequest-di'),
('_withProcedureRequestEducation', 'ProcedureRequest-education'),
('_withProcedureRequestFt4', 'ProcedureRequest-ft4'),
('_withProcedureRequestExampleImplant',
'ProcedureRequest-example-implant'),
('_withProcedureRequestLipid', 'ProcedureRequest-lipid'),
('_withProcedureRequestOb', 'ProcedureRequest-ob'),
('_withProcedureRequestExamplePgx', 'ProcedureRequest-example-pgx'),
('_withProcedureRequestPhysicalTherapy',
'ProcedureRequest-physical-therapy'),
('_withProcedureRequestSubrequest', 'ProcedureRequest-subrequest'),
('_withProcedureRequestOgExample1', 'ProcedureRequest-og-example1'),
)
def testJsonFormat_forValidProcedureRequest_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(
file_name, resources_pb2.ProcedureRequest)
@parameterized.named_parameters(
('_withProcessRequest1110', 'ProcessRequest-1110'),
('_withProcessRequest1115', 'ProcessRequest-1115'),
('_withProcessRequest1113', 'ProcessRequest-1113'),
('_withProcessRequest1112', 'ProcessRequest-1112'),
('_withProcessRequest1114', 'ProcessRequest-1114'),
('_withProcessRequest1111', 'ProcessRequest-1111'),
('_withProcessRequest44654', 'ProcessRequest-44654'),
('_withProcessRequest87654', 'ProcessRequest-87654'),
('_withProcessRequest87655', 'ProcessRequest-87655'),
)
def testJsonFormat_forValidProcessRequest_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.ProcessRequest)
@parameterized.named_parameters(
('_withProcessResponseSR2500', 'ProcessResponse-SR2500'),
('_withProcessResponseSR2349', 'ProcessResponse-SR2349'),
('_withProcessResponseSR2499', 'ProcessResponse-SR2499'),
)
def testJsonFormat_forValidProcessResponse_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(
file_name, resources_pb2.ProcessResponse)
@parameterized.named_parameters(
('_withProvenanceExample', 'Provenance-example'),
('_withProvenanceExampleBiocomputeObject',
'Provenance-example-biocompute-object'),
('_withProvenanceExampleCwl', 'Provenance-example-cwl'),
('_withProvenanceSignature', 'Provenance-signature'),
)
def testJsonFormat_forValidProvenance_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.Provenance)
@parameterized.named_parameters(
('_withQuestionnaire3141', 'Questionnaire-3141'),
('_withQuestionnaireBb', 'Questionnaire-bb'),
('_withQuestionnaireF201', 'Questionnaire-f201'),
('_withQuestionnaireGcs', 'Questionnaire-gcs'),
)
def testJsonFormat_forValidQuestionnaire_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.Questionnaire)
@parameterized.named_parameters(
('_withQuestionnaireResponse3141', 'QuestionnaireResponse-3141'),
('_withQuestionnaireResponseBb', 'QuestionnaireResponse-bb'),
('_withQuestionnaireResponseF201', 'QuestionnaireResponse-f201'),
('_withQuestionnaireResponseGcs', 'QuestionnaireResponse-gcs'),
('_withQuestionnaireResponseUssgFhtAnswers',
'QuestionnaireResponse-ussg-fht-answers'),
)
def testJsonFormat_forValidQuestionnaireResponse_succeeds(
self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(
file_name, resources_pb2.QuestionnaireResponse)
@parameterized.named_parameters(
('_withReferralRequestExample', 'ReferralRequest-example'),)
def testJsonFormat_forValidReferralRequest_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(
file_name, resources_pb2.ReferralRequest)
@parameterized.named_parameters(
('_withRelatedPersonBenedicte', 'RelatedPerson-benedicte'),
('_withRelatedPersonF001', 'RelatedPerson-f001'),
('_withRelatedPersonF002', 'RelatedPerson-f002'),
('_withRelatedPersonPeter', 'RelatedPerson-peter'),
)
def testJsonFormat_forValidRelatedPerson_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.RelatedPerson)
@parameterized.named_parameters(
('_withRequestGroupExample', 'RequestGroup-example'),
('_withRequestGroupKdn5Example', 'RequestGroup-kdn5-example'),
)
def testJsonFormat_forValidRequestGroup_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.RequestGroup)
@parameterized.named_parameters(
('_withResearchStudyExample', 'ResearchStudy-example'),)
def testJsonFormat_forValidResearchStudy_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.ResearchStudy)
@parameterized.named_parameters(
('_withResearchSubjectExample', 'ResearchSubject-example'),)
def testJsonFormat_forValidResearchSubject_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(
file_name, resources_pb2.ResearchSubject)
@parameterized.named_parameters(
('_withRiskAssessmentGenetic', 'RiskAssessment-genetic'),
('_withRiskAssessmentCardiac', 'RiskAssessment-cardiac'),
('_withRiskAssessmentPopulation', 'RiskAssessment-population'),
('_withRiskAssessmentPrognosis', 'RiskAssessment-prognosis'),
)
def testJsonFormat_forValidRiskAssessment_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.RiskAssessment)
@parameterized.named_parameters(
('_withScheduleExample', 'Schedule-example'),
('_withScheduleExampleloc1', 'Schedule-exampleloc1'),
('_withScheduleExampleloc2', 'Schedule-exampleloc2'),
)
def testJsonFormat_forValidSchedule_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.Schedule)
@parameterized.named_parameters(
('_withSearchParameterExample', 'SearchParameter-example'),
('_withSearchParameterExampleExtension',
'SearchParameter-example-extension'),
('_withSearchParameterExampleReference',
'SearchParameter-example-reference'),
)
def testJsonFormat_forValidSearchParameter_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(
file_name, resources_pb2.SearchParameter)
@parameterized.named_parameters(
('_withSequenceCoord0Base', 'Sequence-coord-0-base'),
('_withSequenceCoord1Base', 'Sequence-coord-1-base'),
('_withSequenceExample', 'Sequence-example'),
('_withSequenceFdaExample', 'Sequence-fda-example'),
('_withSequenceFdaVcfComparison', 'Sequence-fda-vcf-comparison'),
('_withSequenceFdaVcfevalComparison', 'Sequence-fda-vcfeval-comparison'),
('_withSequenceExamplePgx1', 'Sequence-example-pgx-1'),
('_withSequenceExamplePgx2', 'Sequence-example-pgx-2'),
('_withSequenceExampleTPMTOne', 'Sequence-example-TPMT-one'),
('_withSequenceExampleTPMTTwo', 'Sequence-example-TPMT-two'),
('_withSequenceGraphicExample1', 'Sequence-graphic-example-1'),
('_withSequenceGraphicExample2', 'Sequence-graphic-example-2'),
('_withSequenceGraphicExample3', 'Sequence-graphic-example-3'),
('_withSequenceGraphicExample4', 'Sequence-graphic-example-4'),
('_withSequenceGraphicExample5', 'Sequence-graphic-example-5'),
)
def testJsonFormat_forValidSequence_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.Sequence)
@parameterized.named_parameters(
('_withServiceDefinitionExample', 'ServiceDefinition-example'),)
def testJsonFormat_forValidServiceDefinition_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(
file_name, resources_pb2.ServiceDefinition)
@parameterized.named_parameters(
('_withSlotExample', 'Slot-example'),
('_withSlot1', 'Slot-1'),
('_withSlot2', 'Slot-2'),
('_withSlot3', 'Slot-3'),
)
def testJsonFormat_forValidSlot_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.Slot)
@parameterized.named_parameters(
('_withSpecimen101', 'Specimen-101'),
('_withSpecimenIsolate', 'Specimen-isolate'),
('_withSpecimenSst', 'Specimen-sst'),
('_withSpecimenVmaUrine', 'Specimen-vma-urine'),
)
def testJsonFormat_forValidSpecimen_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.Specimen)
@parameterized.named_parameters(
('_withStructureDefinitionExample', 'StructureDefinition-example'),)
def testJsonFormat_forValidStructureDefinition_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(
file_name, resources_pb2.StructureDefinition)
@parameterized.named_parameters(
('_withStructureMapExample', 'StructureMap-example'),)
def testJsonFormat_forValidStructureMap_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.StructureMap)
@parameterized.named_parameters(
('_withSubscriptionExample', 'Subscription-example'),
('_withSubscriptionExampleError', 'Subscription-example-error'),
)
def testJsonFormat_forValidSubscription_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.Subscription)
@parameterized.named_parameters(
('_withSubstanceExample', 'Substance-example'),
('_withSubstanceF205', 'Substance-f205'),
('_withSubstanceF201', 'Substance-f201'),
('_withSubstanceF202', 'Substance-f202'),
('_withSubstanceF203', 'Substance-f203'),
('_withSubstanceF204', 'Substance-f204'),
)
def testJsonFormat_forValidSubstance_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.Substance)
@parameterized.named_parameters(
('_withSupplyDeliverySimpleDelivery', 'SupplyDelivery-simpledelivery'),
('_withSupplyDeliveryPumpDelivery', 'SupplyDelivery-pumpdelivery'),
)
def testJsonFormat_forValidSupplyDelivery_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.SupplyDelivery)
@parameterized.named_parameters(
('_withSupplyRequestSimpleOrder', 'SupplyRequest-simpleorder'),)
def testJsonFormat_forValidSupplyRequest_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.SupplyRequest)
@parameterized.named_parameters(
('_withTaskExample1', 'Task-example1'),
('_withTaskExample2', 'Task-example2'),
('_withTaskExample3', 'Task-example3'),
('_withTaskExample4', 'Task-example4'),
('_withTaskExample5', 'Task-example5'),
('_withTaskExample6', 'Task-example6'),
)
def testJsonFormat_forValidTask_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.Task)
@parameterized.named_parameters(
('_withTestReportTestReportExample', 'TestReport-testreport-example'),)
def testJsonFormat_forValidTestReport_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.TestReport)
@parameterized.named_parameters(
('_withTestScriptTestScriptExample', 'TestScript-testscript-example'),
('_withTestScriptTestScriptExampleHistory',
'TestScript-testscript-example-history'),
('_withTestScriptTestScriptExampleMultisystem',
'TestScript-testscript-example-multisystem'),
('_withTestScriptTestScriptExampleReadtest',
'TestScript-testscript-example-readtest'),
('_withTestScriptTestScriptExampleRule',
'TestScript-testscript-example-rule'),
('_withTestScriptTestScriptExampleSearch',
'TestScript-testscript-example-search'),
('_withTestScriptTestScriptExampleUpdate',
'TestScript-testscript-example-update'),
)
def testJsonFormat_forValidTestScript_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.TestScript)
@parameterized.named_parameters(
('_withValueSetExampleExpansion', 'ValueSet-example-expansion'),
('_withValueSetExampleExtensional', 'ValueSet-example-extensional'),
('_withValueSetExampleIntensional', 'ValueSet-example-intensional'),
)
def testJsonFormat_forValidValueSet_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.ValueSet)
@parameterized.named_parameters(
('_withVisionPrescription33123', 'VisionPrescription-33123'),
('_withVisionPrescription33124', 'VisionPrescription-33124'),
)
def testJsonFormat_forValidVisionPrescription_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(
file_name, resources_pb2.VisionPrescription)
@parameterized.named_parameters(
('_withCompositionExample', 'Composition-example',
resources_pb2.Composition),
      ('_withEncounterHome', 'Encounter-home', resources_pb2.Encounter),
('_withObservationExampleGenetics1', 'Observation-example-genetics-1',
resources_pb2.Observation),
('_withPatientExample', 'patient-example', resources_pb2.Patient),
)
def testPrintForAnalytics_forValidResource_succeeds(
self, file_name: str, proto_cls: Type[message.Message]):
json_path = os.path.join(_BIGQUERY_PATH, file_name + '.json')
proto_path = os.path.join(_EXAMPLES_PATH, file_name + '.prototxt')
# Assert print for analytics (standard and "pretty")
self.assert_print_equals_golden(
json_path,
proto_path,
proto_cls,
print_f=json_format.print_fhir_to_json_string_for_analytics)
self.assert_print_equals_golden(
json_path,
proto_path,
proto_cls,
print_f=json_format.pretty_print_fhir_to_json_string_for_analytics)
def assert_parse_and_print_examples_equals_golden(
self, file_name: str, proto_cls: Type[message.Message]):
"""Convenience method for performing assertions on FHIR STU3 examples."""
json_path = os.path.join(_EXAMPLES_PATH, file_name + '.json')
proto_path = os.path.join(_EXAMPLES_PATH, file_name + '.prototxt')
self.assert_parse_and_print_equals_golden(json_path, proto_path, proto_cls)
def assert_parse_and_print_spec_equals_golden(
self, file_name: str, proto_cls: Type[message.Message]):
"""Convenience method for performing assertions on the FHIR STU3 spec."""
json_path = os.path.join(_FHIR_SPEC_PATH, file_name + '.json')
proto_path = os.path.join(_EXAMPLES_PATH, file_name + '.prototxt')
self.assert_parse_and_print_equals_golden(json_path, proto_path, proto_cls)
def assert_parse_and_print_equals_golden(self, json_path: str,
proto_path: str,
proto_cls: Type[message.Message]):
"""Convenience method for performing assertions against goldens."""
# Assert parse
self.assert_parse_equals_golden(
json_path,
proto_path,
proto_cls,
parse_f=json_format.json_fhir_string_to_proto,
validate=True,
default_timezone='Australia/Sydney')
# Assert print (standard and "pretty")
self.assert_print_equals_golden(
json_path,
proto_path,
proto_cls,
print_f=json_format.print_fhir_to_json_string)
self.assert_print_equals_golden(
json_path,
proto_path,
proto_cls,
print_f=json_format.pretty_print_fhir_to_json_string)
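  # A sketch of the pattern used throughout this test class (the resource name
  # below is a placeholder, not a real test case in this suite): each golden test
  # pairs a spec JSON file with a prototxt golden and asserts both parse and
  # print round-trips via the helpers above.
  #
  #   @parameterized.named_parameters(
  #       ('_withSomeResourceExample', 'SomeResource-example'),)
  #   def testJsonFormat_forValidSomeResource_succeeds(self, file_name: str):
  #     self.assert_parse_and_print_spec_equals_golden(file_name,
  #                                                    resources_pb2.SomeResource)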
if __name__ == '__main__':
absltest.main()
|
py | b40b9caf44e65c4399a635378b1d237530cb79ab | # -*- coding: utf-8 -*-
from django.apps import AppConfig
class TrainingAppConfig(AppConfig):
name = "app.training"
    verbose_name = 'Учебные курсы'  # "Training courses"
def ready(self):
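        # Importing the signals module here registers its receivers once the app
        # registry is ready (the standard Django pattern for side-effect imports).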
import app.training.signals
|
py | b40b9cfc347036f230d729e21c799f4fac12d371 | import roslib
roslib.load_manifest('lia_avi_writer')
import rospy
from lia_services.srv import RecordingCmd
def test_recording_cmd(cmd):
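    """Send a RecordingCmd request to the 'recording_cmd' service and print the reply.

    The 'dummy_file.avi' name and the 5.0 argument below are placeholder test values
    (their exact meaning is defined by the RecordingCmd service, not documented here);
    `cmd` is the command string passed in from the command line.
    """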
#rospy.wait_for_service('recording_cmd')
    recording_cmd_proxy = rospy.ServiceProxy('recording_cmd', RecordingCmd)
    try:
        response = recording_cmd_proxy(cmd, 'dummy_file.avi', 5.0)
        print(response)
    except rospy.ServiceException as e:
        print('Service call failed: %s' % (e,))
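# Example invocation, assuming the 'recording_cmd' service is running (the script
# filename and command vocabulary are guesses): python test_recording_cmd.py start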
# ----------------------------------------------------------------------------
if __name__ == '__main__':
import sys
cmd = sys.argv[1]
test_recording_cmd(cmd)
|
py | b40b9d0204c9e0cfc087d852b1a8050fa4c2c20f | """MultiMatch sequential association of DIASources into DIAObjects.
"""
import numpy as np
from collections import defaultdict
import pandas as pd
import lsst.afw.table as afwTable
import lsst.afw.detection as afwDet
import lsst.pex.config as pexConfig
import lsst.pipe.base as pipeBase
import lsst.geom as geom
import lsst.afw.image as afwImage
from .parquetTable import ParquetTable
__all__ = ["MultiMatchAssociationConfig", "MultiMatchAssociationTask"]
def scaleFlux(flux, flux_err, calib, new_calib):
"""Scale flux and error to new zeropoint
"""
mag = calib.instFluxToMagnitude(flux, flux_err)
flux = new_calib.magnitudeToInstFlux(mag.value)
flux_err = flux*0.4*np.log(10)*mag.error
return flux, flux_err
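# An illustrative helper (not used by the Task below): scaleFlux goes through afw
# PhotoCalib objects, but the underlying rescaling is just mag = zp - 2.5*log10(flux),
# so moving a flux from zeropoint zp_old to zp_new multiplies it (and its error)
# by 10**(0.4*(zp_new - zp_old)). The zeropoint arguments are illustrative inputs.
def _scale_flux_by_zeropoints(flux, flux_err, zp_old, zp_new):
    """Rescale an instrumental flux and its error from zeropoint zp_old to zp_new."""
    factor = 10.0 ** (0.4 * (zp_new - zp_old))
    return flux * factor, flux_err * factor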
class MultiMatchAssociationConfig(pexConfig.Config):
"""Configuration parameters for the MultiMatchAssociationTask
"""
tolerance = pexConfig.Field(
dtype=float,
doc='maximum distance to match sources together in arcsec',
default=0.5
)
fluxType = pexConfig.Field(
dtype=str,
doc='Keep track of the average flux of this type',
default='base_PsfFlux_instFlux',
)
filters = pexConfig.ListField(
dtype=str,
doc='Which filters will be averaged over',
default=['u', 'g', 'r', 'i', 'z', 'y']
)
commonZp = pexConfig.Field(
dtype=float,
doc='Put all fluxes on common zeropoint',
default=27
)
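# A hedged usage sketch (the values are illustrative, not defaults): these are
# ordinary pex_config Fields, so a caller could configure the task along the lines of
#
#   config = MultiMatchAssociationConfig()
#   config.tolerance = 1.0            # match radius in arcsec
#   config.filters = ['g', 'r', 'i']
#   assoc = MultiMatchAssociationTask(config=config)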
class MultiMatchAssociationTask(pipeBase.Task):
"""Construct DIAObjects from a list of DIASources
"""
ConfigClass = MultiMatchAssociationConfig
_DefaultName = "MultiMatch_association"
def __init__(self, **kwargs):
pipeBase.Task.__init__(self, **kwargs)
self.multi_matches = None
self.calib = afwImage.makePhotoCalibFromCalibZeroPoint(10**(0.4*self.config.commonZp))
self.calibDict = defaultdict(dict)
def addCatalog(self, src, filter, visit, ccd, calib, footprints):
"""Add objects from a catalog to the existing MultiMatch
        @param[in] src         A SourceCatalog of objects to be added.
        @param[in] filter      The filter of the catalog
        @param[in] visit       The visit number
        @param[in] ccd         The ccd number
        @param[in] calib       The photometric calibration of the input catalog
        @param[in] footprints  A list of footprints that have been transformed to the
                   WCS of the coadd patch.
"""
if self.multi_matches is None:
            # The data id for multiMatch does not take strings, so we convert the
            # filter name to its integer index in config.filters
self.multi_matches = afwTable.MultiMatch(src.schema, {'visit': np.int32, 'ccd': np.int32,
'filter': np.int32},
radius=geom.Angle(self.config.tolerance/3600.,
geom.degrees))
for s, foot in zip(src, footprints):
s.setFootprint(foot)
self.multi_matches.add(src, {'visit': visit, 'ccd': ccd,
'filter': self.config.filters.index(filter)})
self.calibDict[visit][ccd] = calib
def initialize(self, schema, idFactory):
pass
def finalize(self, idFactory):
"""Finalize construction of the catalog.
Create a SourceCatalog from the MultiMatch object and compute the corresponding
merged footprint.
@param[in] idFactory Used to generate ids.
@return SourceCatalog of DIAObjects
"""
if self.multi_matches is None:
return None
schema = afwTable.SourceTable.makeMinimalSchema()
nobsKey = schema.addField("nobs", type=np.int32, doc='Number of times observed')
keys = {}
for filter in self.config.filters:
flux = self.config.fluxType
keys[f'{flux}_Mean_{filter}'] = schema.addField(f"{flux}_Mean_{filter}", type=float,
doc=f'Mean {flux} in filter {filter}')
keys[f'{flux}_MeanErr_{filter}'] = schema.addField(f"{flux}_MeanErr_{filter}", type=float,
doc=f'MeanErr {flux} in filter {filter}')
keys[f'{flux}_Sigma_{filter}'] = schema.addField(f"{flux}_Sigma_{filter}", type=float,
doc=f'Sigma {flux} in filter {filter}')
            keys[f'{flux}_Ndata_{filter}'] = schema.addField(f"{flux}_Ndata_{filter}", type=np.int32,
doc=f'Number of observations in filter {filter}')
keys[f'{flux}_Chi2_{filter}'] = schema.addField(f"{flux}_Chi2_{filter}", type=float,
doc=f'Chi2 of {flux} for {filter}')
raKey = schema['coord_ra'].asKey()
decKey = schema['coord_dec'].asKey()
table = afwTable.SourceTable.make(schema, idFactory)
cat = afwTable.SourceCatalog(table)
results = self.multi_matches.finish(removeAmbiguous=False)
allMatches = afwTable.GroupView.build(results)
raKey = allMatches.schema.find("coord_ra").key
decKey = allMatches.schema.find("coord_dec").key
ave_ra = allMatches.aggregate(np.mean, field=raKey)
ave_dec = allMatches.aggregate(np.mean, field=decKey)
# Merge the footprints from the same object together and accumulate
# information
object_ids = np.unique(results['object'])
footprints = []
all_fluxes = []
all_flux_errs = []
num_nobs = []
self.diaSrcIds = []
self.diaObjectIds = []
for id in object_ids:
mask = results['object'] == id
num_nobs.append(np.sum(mask))
footprint = None
src_ids = []
fluxes = defaultdict(list)
flux_errs = defaultdict(list)
for rec in results[mask]:
if footprint is None:
footprint = rec.getFootprint()
else:
footprint = afwDet.mergeFootprints(footprint, rec.getFootprint())
src_ids.append(rec.get('id'))
flux = rec.get(self.config.fluxType)
                if not np.isfinite(flux):
continue
filter = self.config.filters[rec.get('filter')]
calib = self.calibDict[rec.get('visit')][rec.get('ccd')]
flux_err = rec.get(self.config.fluxType + "Err")
new_val, new_val_err = scaleFlux(flux, flux_err, calib, self.calib)
fluxes[filter].append(new_val)
flux_errs[filter].append(new_val_err)
self.diaSrcIds.append(src_ids)
footprints.append(footprint)
all_fluxes.append(fluxes)
all_flux_errs.append(flux_errs)
for i in range(len(ave_ra)):
rec = cat.addNew()
self.diaObjectIds.append(rec.get('id'))
rec.setFootprint(footprints[i])
rec.set(raKey, ave_ra[i]*geom.radians)
rec.set(decKey, ave_dec[i]*geom.radians)
rec.set(nobsKey, num_nobs[i])
for filter in self.config.filters:
fluxes = np.array(all_fluxes[i][filter])
if len(fluxes) == 0:
continue
flux_errs = np.array(all_flux_errs[i][filter])
flux = self.config.fluxType
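                # Per-filter light-curve summary statistics: sample mean, unbiased
                # standard deviation (ddof=1), standard error of the mean, and a
                # chi-square of the residuals about the mean (a constant-flux
                # consistency statistic).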
rec.set(keys[f'{flux}_Mean_{filter}'], np.mean(fluxes))
rec.set(keys[f'{flux}_Sigma_{filter}'], np.std(fluxes, ddof=1))
rec.set(keys[f'{flux}_Ndata_{filter}'], len(fluxes))
rec.set(keys[f'{flux}_MeanErr_{filter}'],
rec.get(f'{flux}_Sigma_{filter}')/np.sqrt(len(fluxes)))
residuals = fluxes - rec.get(keys[f'{flux}_Mean_{filter}'])
rec.set(keys[f'{flux}_Chi2_{filter}'], np.sum((residuals/flux_errs)**2))
return cat
def getObjectIds(self):
"""Get a list of id's corresponding to the objects in this catalog
@return pandas DataFrame of matching diaObject ids to diaSrc ids
"""
data = {}
data['diaObjectId'] = self.diaObjectIds
data['diaSrcIds'] = self.diaSrcIds
df = pd.DataFrame(data)
table = ParquetTable(dataFrame=df)
return table
|
py | b40b9d2cac170fdaa89450abb71a69ea8bb7fc18 | import sys
from typing import Optional

import click
import requests
URL_BLOCKED_IPS = 'https://api.reserve-rbl.ru/api/v2/ips/json'
def get_ips_only_list(url: str) -> Optional[list]:
res = requests.get(url=url)
if res.status_code == 200:
return res.json()
return None
def validate_ipaddr(ip: str, ipadds_list: list) -> bool:
    """Return True if the given IP address appears in the blocked-IP list."""
    return ip in ipadds_list
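# Quick sanity check (hypothetical values, not part of the original script):
#   validate_ipaddr('10.0.0.1', ['10.0.0.1', '192.0.2.5'])  -> True
#   validate_ipaddr('8.8.8.8', ['10.0.0.1'])                -> False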
@click.command()
@click.option('--ipaddr', required=True, help='IP address to check against the RKN (Roskomnadzor) block list')
def check_ipaddr(ipaddr: str):
ips_list = get_ips_only_list(URL_BLOCKED_IPS)
if ips_list:
if validate_ipaddr(ip=ipaddr, ipadds_list=ips_list):
sys.stdout.write(str({
'status': 'BLOCKED',
'ipaddr': ipaddr,
}))
else:
sys.stdout.write(str({
'status': 'FREE',
'ipaddr': ipaddr,
}))
@click.group()
def cli():
pass
cli.add_command(check_ipaddr)
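# Example invocation (the script filename is a guess; the command name derives from
# the function name, so it is 'check-ipaddr' or 'check_ipaddr' depending on the
# installed click version):
#   python check_rkn_ip.py check-ipaddr --ipaddr 8.8.8.8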
if __name__ == '__main__':
cli()
|