ext (stringclasses, 9 values) | sha (stringlengths, 40) | content (stringlengths, 3 to 1.04M)
---|---|---|
py | 1a4ed0db3cf2dda589475008254ecc5f2a723327 | from __future__ import unicode_literals
from django.apps import AppConfig
class DjangoRestHipchatConfig(AppConfig):
name = 'django_rest_hipchat'
|
py | 1a4ed13b6233dec3e63ea6f86db16fc882a84ef6 | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# import django
# sys.path.insert(0, os.path.abspath('..'))
# os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
# django.setup()
# -- Project information -----------------------------------------------------
project = "tweetme"
copyright = """2020, DEGNON Tobi"""
author = "DEGNON Tobi"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "alabaster"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
|
py | 1a4ed1de9f0af44b8f43128aaec987710baebac5 | import sys
import warnings
if not sys.warnoptions:
warnings.simplefilter('ignore')
import numpy as np
from fuzzywuzzy import fuzz
import json
import tensorflow as tf
from collections import Counter
from ._utils._utils import load_graph, check_file
from .num2word import to_cardinal
from .texts._text_functions import (
normalizer_textcleaning,
stemmer_str_idx,
pad_sentence_batch,
)
from .texts._tatabahasa import (
rules_normalizer,
consonants,
vowels,
sounds,
GO,
PAD,
EOS,
UNK,
)
from .spell import _return_possible, _edit_normalizer, _return_known
from .topic_influencer import is_location
from ._utils._paths import MALAY_TEXT, PATH_NORMALIZER, S3_PATH_NORMALIZER
class _DEEP_NORMALIZER:
def __init__(self, x, logits, sess, dicts):
self._sess = sess
self._x = x
self._logits = logits
self._dicts = dicts
self._dicts['rev_dictionary_to'] = {
int(k): v for k, v in self._dicts['rev_dictionary_to'].items()
}
def normalize(self, string):
"""
Normalize a string.
Parameters
----------
string : str
Returns
-------
string: normalized string
"""
assert isinstance(string, str), 'input must be a string'
token_strings = normalizer_textcleaning(string).split()
idx = stemmer_str_idx(token_strings, self._dicts['dictionary_from'])
predicted = self._sess.run(
self._logits, feed_dict = {self._x: pad_sentence_batch(idx, PAD)[0]}
)
results = []
for word in predicted:
results.append(
''.join(
[
self._dicts['rev_dictionary_to'][c]
for c in word
if c not in [GO, PAD, EOS, UNK]
]
)
)
return ' '.join(results)
class _SPELL_NORMALIZE:
def __init__(self, corpus):
self.corpus = Counter(corpus)
def normalize(self, string, debug = True):
"""
Normalize a string
Parameters
----------
string : str
debug : bool, optional (default=True)
If true, it will print character similarity distances.
Returns
-------
string: normalized string
"""
assert isinstance(string, str), 'input must be a string'
result = []
for word in normalizer_textcleaning(string).split():
if word.istitle():
result.append(word)
continue
if word[0] == 'x' and len(word) > 1:
result_string = 'tak '
word = word[1:]
else:
result_string = ''
if word[-2:] == 'la':
end_result_string = ' lah'
word = word[:-2]
elif word[-3:] == 'lah':
end_result_string = ' lah'
word = word[:-3]
else:
end_result_string = ''
if word in sounds:
result.append(result_string + sounds[word] + end_result_string)
continue
if word in rules_normalizer:
result.append(
result_string + rules_normalizer[word] + end_result_string
)
continue
if word in self.corpus:
result.append(result_string + word + end_result_string)
continue
candidates = (
_return_known([word], self.corpus)
or _return_known(_edit_normalizer(word), self.corpus)
or _return_possible(word, self.corpus, _edit_normalizer)
or [word]
)
candidates = list(candidates)
candidates = [
(candidate, is_location(candidate))
for candidate in list(candidates)
]
if debug:
print([(k, fuzz.ratio(string, k[0])) for k in candidates], '\n')
strings = [fuzz.ratio(string, k[0]) for k in candidates]
descending_sort = np.argsort(strings)[::-1]
selected = None
for index in descending_sort:
if not candidates[index][1]:
selected = candidates[index][0]
break
selected = (
candidates[descending_sort[0]][0] if not selected else selected
)
result.append(result_string + selected + end_result_string)
return ' '.join(result)
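# Worked example of the affix handling above (a sketch; it assumes the token
# survives normalizer_textcleaning unchanged and that 'suka' is present in the
# corpus): for the token 'xsukala', the leading 'x' becomes the prefix 'tak ',
# the trailing 'la' becomes the suffix ' lah', and the remaining stem 'suka'
# is found in the corpus, so the token normalizes to 'tak suka lah'.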
class _FUZZY_NORMALIZE:
def __init__(self, normalized, corpus):
self.normalized = normalized
self.corpus = corpus
def normalize(self, string):
"""
Normalize a string.
Parameters
----------
string : str
Returns
-------
string: normalized string
"""
assert isinstance(string, str), 'input must be a string'
result = []
for word in normalizer_textcleaning(string).split():
if word.istitle():
result.append(word)
continue
if word[0] == 'x' and len(word) > 1:
result_string = 'tak '
word = word[1:]
else:
result_string = ''
if word[-2:] == 'la':
end_result_string = ' lah'
word = word[:-2]
elif word[-3:] == 'lah':
end_result_string = ' lah'
word = word[:-3]
else:
end_result_string = ''
if word in sounds:
result.append(result_string + sounds[word] + end_result_string)
continue
if word in rules_normalizer:
result.append(
result_string + rules_normalizer[word] + end_result_string
)
continue
if word in self.corpus:
result.append(result_string + word + end_result_string)
continue
results = []
for i in range(len(self.normalized)):
results.append(
np.mean([fuzz.ratio(word, k) for k in self.normalized[i]])
)
if len(np.where(np.array(results) > 70)[0]) < 1:
result.append(result_string + word + end_result_string)
continue
result.append(
result_string
+ self.corpus[np.argmax(results)]
+ end_result_string
)
return ' '.join(result)
def fuzzy(corpus):
"""
Train a fuzzy logic Normalizer
Parameters
----------
corpus : list of strings. Preferably the output of malaya.load_malay_dictionary()
Returns
-------
FUZZY_NORMALIZE: Trained malaya.normalizer._FUZZY_NORMALIZE class
"""
assert isinstance(corpus, list) and isinstance(
corpus[0], str
), 'input must be list of strings'
transform = []
for i in corpus:
i = i.lower()
result = []
result.append(i)
result.append(''.join(char for char in i if char not in vowels))
if i[0] in consonants and i[-1] in consonants:
result.append(i[0] + i[-1])
if i[-1] == 'a':
result.append(i[:-1] + 'e')
result.append(i + 'k')
if i[1] in vowels and i[0] in consonants:
result.append(i[0] + i[2:])
if i[-2] in vowels and i[-1] in consonants:
result.append(i[:-2] + i[-1])
result.append(i[0] + i[-1])
if i[-2:] == 'ar':
result.append(i[:-2] + 'o')
if i[:2] == 'ha':
result.append(i[1:])
transform.append(list(set(result)))
return _FUZZY_NORMALIZE(transform, corpus)
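# Illustrative usage (a sketch, not part of the original source; it assumes the
# corpus is the list returned by malaya.load_malay_dictionary(), as suggested
# in the docstring above, and the input string is hypothetical):
#
#   corpus = malaya.load_malay_dictionary()
#   normalizer = fuzzy(corpus)
#   normalizer.normalize('xsuke makan ayam')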
def spell(corpus):
"""
Train a Spelling Normalizer
Parameters
----------
corpus : list of strings. Preferably the output of malaya.load_malay_dictionary()
Returns
-------
SPELL_NORMALIZE: Trained malaya.normalizer._SPELL_NORMALIZE class
"""
assert isinstance(corpus, list) and isinstance(
corpus[0], str
), 'input must be list of strings'
return _SPELL_NORMALIZE(corpus)
def basic(string):
"""
Use basic rules-based to normalize a string.
Parameters
----------
string: str
Returns
-------
string: normalized string
"""
assert isinstance(string, str), 'input must be a string'
result = []
for word in normalizer_textcleaning(string).split():
if word.istitle():
result.append(word)
continue
if word in sounds:
result.append(sounds[word])
elif word[-1] == '2':
result.append(word[:-1])
else:
result.append(word)
return ' '.join(result)
def deep_model():
"""
Load a deep-learning model to normalize a string. This model currently performs worse than the fuzzy-based normalizer; Husein still needs to read more.
Returns
-------
DEEP_NORMALIZER: malaya.normalizer._DEEP_NORMALIZER class
"""
check_file(PATH_NORMALIZER['deep'], S3_PATH_NORMALIZER['deep'])
try:
with open(PATH_NORMALIZER['deep']['setting'], 'r') as fopen:
dic_normalizer = json.load(fopen)
g = load_graph(PATH_NORMALIZER['deep']['model'])
except Exception:
raise Exception(
"model corrupted due to some reasons, please run malaya.clear_cache('normalizer') and try again"
)
return _DEEP_NORMALIZER(
g.get_tensor_by_name('import/Placeholder:0'),
g.get_tensor_by_name('import/logits:0'),
tf.InteractiveSession(graph = g),
dic_normalizer,
)
|
py | 1a4ed26a600389e2f935507c614c690b2334b513 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import Axon
from Axon.AdaptiveCommsComponent import AdaptiveCommsComponent
from Axon.Ipc import shutdownMicroprocess, producerFinished
class undef(object):
pass
class DemuxRemuxTuple(AdaptiveCommsComponent):
"""\
#
# FIXME: derived from the PAR component.
# FIXME: This should really be a PAR component with a new policy.
# FIXME: For the moment we'll leave it like this to see how this plays out.
#
PAR(inputpolicy=None, outputpolicy=None, *components) -> new PAR component
Activates all the components contained inside in parallel (Hence the name - from Occam).
Inputs to inboxes can be controlled by passing in a policy. The default
policy is this::
messages to "control" are forwarded to all children
if a control is a shutdownMicroprocess, shutdown
when all children exit, exit.
messages to "inbox" are forwarded to all components by default.
See the module docs on writing a policy function.
Outputs from all outboxes are sent to the graphline's corresponding
outbox. At present supported outboxes replicated are: "outbox", and
"signal".
For more complex wiring/policies you probably ought to use a Graphline
component.
Keyword arguments:
- policy -- policy function regarding input mapping.
- components -- list of components to be activated.
"""
Inboxes = {"inbox":"",
"control":""}
Outboxes = {"outbox":"",
"signal":"",
"_co": "For passing data to subcomponents based on a policy (unusued at present)",
"_cs": "For signaling to subcomponents shutdown",
}
policy = None
def __init__(self, *components, **argv):
"""x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""
super(DemuxRemuxTuple,self).__init__(**argv)
self.components = list(components)
def main(self):
"""Main loop."""
link_to_component_control = {}
noControlPassthru=True
noSignalPassthru=True
subcomponent_inboxes = {}
subcomponent_controlboxes = {}
i = 0
for c in self.components:
subcomponent_inboxes[i] = self.addInbox("_subinbox_")
subcomponent_controlboxes[i] = self.addInbox("_subcontrol_")
self.link( (c, "outbox"), (self, subcomponent_inboxes[i]) )
self.link( (c, "signal"), (self, subcomponent_controlboxes[i]))
i += 1
c.activate()
self.addChildren(*self.components)
yield 1
shutdown = False
shutdownMessage = None
box_values = dict( (x,undef) for x in subcomponent_inboxes)
while not shutdown:
# If all the children exit, then exit
if self.childrenDone():
shutdown = True
break
# If we reach here there may be data in an inbox.
# May, because a child terminating wakes us up as well.
if self.policy is None:
# Default policy: discard all messages sent to the main inbox
for msg in self.Inbox("inbox"):
i = 0
for c in self.components:
L = self.link( (self, "_co"), (c, "inbox"))
self.send( msg[i], "_co")
self.unlink(thelinkage=L)
i += 1
# Default policy, pass on all control messages to all sub components
# Shutdown the PAR component if the message is a shutdownMicroprocess message
for msg in self.Inbox("control"):
for c in self.components:
L = self.link( (self, "_cs"), (c, "control"))
self.send( msg, "_cs")
self.unlink(thelinkage=L)
if isinstance(msg, shutdownMicroprocess) or (msg==shutdownMicroprocess):
shutdown = True
shutdownMessage = msg
for boxkey in box_values:
if box_values[boxkey] is undef:
if self.dataReady(subcomponent_inboxes[boxkey]):
message = self.recv(subcomponent_inboxes[boxkey])
box_values[boxkey] = message
if len([x for x in box_values if box_values[x] is undef]) == 0:
self.send( tuple([ box_values[x] for x in box_values ]), "outbox")
box_values = dict( (x,undef) for x in subcomponent_inboxes)
for component_name in subcomponent_controlboxes:
if self.dataReady(subcomponent_controlboxes[component_name]):
message = self.recv(subcomponent_controlboxes[component_name])
self.send(message, "signal")
# If there's nothing to do, then sleep
while not self.anyReady() and not shutdown and not self.childrenDone():
self.pause()
yield 1
yield 1
for boxkey in box_values:
if box_values[boxkey] is undef:
if self.dataReady(subcomponent_inboxes[boxkey]):
message = self.recv(subcomponent_inboxes[boxkey])
box_values[boxkey] = message
if len([x for x in box_values if box_values[x] is undef]) == 0:
self.send( tuple([ box_values[x] for x in box_values ]), "outbox")
box_values = dict( (x,undef) for x in subcomponent_inboxes)
if shutdownMessage:
self.send(shutdownMessage, "signal")
else:
self.send(producerFinished(), "signal")
for child in self.childComponents():
self.removeChild(child) # deregisters linkages for us
def childrenDone(self):
"""Unplugs any children that have terminated, and returns true if there are no
running child components left (ie. their microproceses have finished)
"""
for child in self.childComponents():
if child._isStopped():
self.removeChild(child) # deregisters linkages for us
return 0==len(self.childComponents())
if __name__ == "__main__":
from Kamaelia.Chassis.Pipeline import Pipeline
from Kamaelia.Util.DataSource import DataSource
from Kamaelia.Util.Console import ConsoleEchoer
from Kamaelia.Util.PureTransformer import PureTransformer
Pipeline(
DataSource([
(1,"one"),
(2,"two"),
(3,"three"),
(4,"four"),
(5,"five"),
(6,"six"),
]),
DemuxRemuxTuple( # Detuple
PureTransformer(lambda x: x*x), # Process First item from tuple
PureTransformer(lambda x: x+" "+x), # Process Second item from tuple
), # Retuple
PureTransformer(lambda x: repr(x)+"\n"),
ConsoleEchoer(),
).run()
|
py | 1a4ed35bb7eab41085b23885a2410a3e81c9f86b | # -*- coding: utf-8 -*- #
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Resource definitions for cloud platform apis."""
import enum
BASE_URL = 'https://logging.googleapis.com/v2/'
DOCS_URL = 'https://cloud.google.com/logging/docs/'
class Collections(enum.Enum):
"""Collections for all supported apis."""
BILLINGACCOUNTS = (
'billingAccounts',
'billingAccounts/{billingAccountsId}',
{},
[u'billingAccountsId'],
True
)
BILLINGACCOUNTS_BUCKETS = (
'billingAccounts.buckets',
'{+name}',
{
'':
'billingAccounts/{billingAccountsId}/buckets/{bucketsId}',
},
[u'name'],
True
)
BILLINGACCOUNTS_EXCLUSIONS = (
'billingAccounts.exclusions',
'{+name}',
{
'':
'billingAccounts/{billingAccountsId}/exclusions/{exclusionsId}',
},
[u'name'],
True
)
BILLINGACCOUNTS_SINKS = (
'billingAccounts.sinks',
'{+sinkName}',
{
'':
'billingAccounts/{billingAccountsId}/sinks/{sinksId}',
},
[u'sinkName'],
True
)
BUCKETS = (
'buckets',
'{+name}',
{
'':
'{v2Id}/{v2Id1}/buckets/{bucketsId}',
},
[u'name'],
True
)
EXCLUSIONS = (
'exclusions',
'{+name}',
{
'':
'{v2Id}/{v2Id1}/exclusions/{exclusionsId}',
},
[u'name'],
True
)
FOLDERS = (
'folders',
'folders/{foldersId}',
{},
[u'foldersId'],
True
)
FOLDERS_EXCLUSIONS = (
'folders.exclusions',
'{+name}',
{
'':
'folders/{foldersId}/exclusions/{exclusionsId}',
},
[u'name'],
True
)
FOLDERS_LOCATIONS = (
'folders.locations',
'folders/{foldersId}/locations/{locationsId}',
{},
[u'foldersId', u'locationsId'],
True
)
FOLDERS_LOCATIONS_BUCKETS = (
'folders.locations.buckets',
'{+name}',
{
'':
'folders/{foldersId}/locations/{locationsId}/buckets/'
'{bucketsId}',
},
[u'name'],
True
)
FOLDERS_SINKS = (
'folders.sinks',
'{+sinkName}',
{
'':
'folders/{foldersId}/sinks/{sinksId}',
},
[u'sinkName'],
True
)
ORGANIZATIONS = (
'organizations',
'organizations/{organizationsId}',
{},
[u'organizationsId'],
True
)
ORGANIZATIONS_EXCLUSIONS = (
'organizations.exclusions',
'{+name}',
{
'':
'organizations/{organizationsId}/exclusions/{exclusionsId}',
},
[u'name'],
True
)
ORGANIZATIONS_LOCATIONS = (
'organizations.locations',
'organizations/{organizationsId}/locations/{locationsId}',
{},
[u'organizationsId', u'locationsId'],
True
)
ORGANIZATIONS_LOCATIONS_BUCKETS = (
'organizations.locations.buckets',
'{+name}',
{
'':
'organizations/{organizationsId}/locations/{locationsId}/'
'buckets/{bucketsId}',
},
[u'name'],
True
)
ORGANIZATIONS_SINKS = (
'organizations.sinks',
'{+sinkName}',
{
'':
'organizations/{organizationsId}/sinks/{sinksId}',
},
[u'sinkName'],
True
)
PROJECTS = (
'projects',
'projects/{projectsId}',
{},
[u'projectsId'],
True
)
PROJECTS_EXCLUSIONS = (
'projects.exclusions',
'{+name}',
{
'':
'projects/{projectsId}/exclusions/{exclusionsId}',
},
[u'name'],
True
)
PROJECTS_LOCATIONS = (
'projects.locations',
'projects/{projectsId}/locations/{locationsId}',
{},
[u'projectsId', u'locationsId'],
True
)
PROJECTS_LOCATIONS_BUCKETS = (
'projects.locations.buckets',
'{+name}',
{
'':
'projects/{projectsId}/locations/{locationsId}/buckets/'
'{bucketsId}',
},
[u'name'],
True
)
PROJECTS_METRICS = (
'projects.metrics',
'{+metricName}',
{
'':
'projects/{projectsId}/metrics/{metricsId}',
},
[u'metricName'],
True
)
PROJECTS_SINKS = (
'projects.sinks',
'{+sinkName}',
{
'':
'projects/{projectsId}/sinks/{sinksId}',
},
[u'sinkName'],
True
)
SINKS = (
'sinks',
'{+sinkName}',
{
'':
'{v2Id}/{v2Id1}/sinks/{sinksId}',
},
[u'sinkName'],
True
)
def __init__(self, collection_name, path, flat_paths, params,
enable_uri_parsing):
self.collection_name = collection_name
self.path = path
self.flat_paths = flat_paths
self.params = params
self.enable_uri_parsing = enable_uri_parsing
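# Because the enum members are tuples and __init__ unpacks them, the pieces of
# each collection are available as attributes, for example (illustrative):
#   Collections.PROJECTS_SINKS.path == '{+sinkName}'
#   Collections.PROJECTS_SINKS.flat_paths[''] == 'projects/{projectsId}/sinks/{sinksId}'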
|
py | 1a4ed63d855ec2c8c71c57d4fef3d76d34e5e22b | # ========================================================================
#
# Imports
#
# ========================================================================
import numpy as np
# ========================================================================
#
# Function definitions
#
# ========================================================================
# ========================================================================
def max_wave_speed(u):
"""Returns the maximum wave speed for advection"""
return 1
# ========================================================================
def riemann_upwinding(ul, ur):
"""Returns the interface flux for the advection equation (simple upwinding)"""
return ul
# ========================================================================
def interior_flux(ug):
"""Returns the interior flux for the advection equation"""
return ug
# ========================================================================
def sensing(sensors, thresholds, solution):
"""A simple sensor which just calculates the difference between the
left/right cell solutions for the advection equation.
"""
# left/right solution
ul = solution.u[0, :-solution.N_F]
ur = solution.u[0, solution.N_F:]
# Calculate the sensor
phi = np.fabs(ur - ul)
PHI = 2 * phi / ((1 + phi) * (1 + phi))
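# For example, a jump of phi = 1 gives PHI = 2*1/(2*2) = 0.5, which is the
# maximum of this rescaling; PHI tends to 0 both for smooth data (phi -> 0)
# and for very large jumps (phi -> inf).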
# Find where the sensor exceeds the threshold value
idx = np.array(np.where(PHI > thresholds[0]))
sensors[idx] = 1
sensors[idx + 1] = 1
|
py | 1a4ed729a48da3f98064a542254c4fb4a4f76dc4 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''Matching functions'''
import numpy as np
import numba
from .exceptions import ParameterError
from .utils import valid_intervals
__all__ = ['match_intervals', 'match_events']
@numba.jit(nopython=True, cache=True)
def __jaccard(int_a, int_b): # pragma: no cover
'''Jaccard similarity between two intervals
Parameters
----------
int_a, int_b : np.ndarrays, shape=(2,)
Returns
-------
Jaccard similarity between intervals
'''
ends = [int_a[1], int_b[1]]
if ends[1] < ends[0]:
ends.reverse()
starts = [int_a[0], int_b[0]]
if starts[1] < starts[0]:
starts.reverse()
intersection = ends[0] - starts[1]
if intersection < 0:
intersection = 0.
union = ends[1] - starts[0]
if union > 0:
return intersection / union
return 0.0
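# Worked example of the calculation above: for int_a = [0, 2] and int_b = [1, 3]
# the intersection is 2 - 1 = 1 and the union is 3 - 0 = 3, giving a Jaccard
# similarity of 1/3.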
@numba.jit(nopython=True, cache=True)
def __match_interval_overlaps(query, intervals_to, candidates): # pragma: no cover
'''Find the best Jaccard match from query to candidates'''
best_score = -1
best_idx = -1
for idx in candidates:
score = __jaccard(query, intervals_to[idx])
if score > best_score:
best_score, best_idx = score, idx
return best_idx
@numba.jit(nopython=True, cache=True)
def __match_intervals(intervals_from, intervals_to, strict=True): # pragma: no cover
'''Numba-accelerated interval matching algorithm.
'''
# sort index of the interval starts
start_index = np.argsort(intervals_to[:, 0])
# sort index of the interval ends
end_index = np.argsort(intervals_to[:, 1])
# and sorted values of starts
start_sorted = intervals_to[start_index, 0]
# and ends
end_sorted = intervals_to[end_index, 1]
search_ends = np.searchsorted(start_sorted, intervals_from[:, 1], side='right')
search_starts = np.searchsorted(end_sorted, intervals_from[:, 0], side='left')
output = np.empty(len(intervals_from), dtype=numba.uint32)
for i in range(len(intervals_from)):
query = intervals_from[i]
# Find the intervals that start after our query ends
after_query = search_ends[i]
# And the intervals that end after our query begins
before_query = search_starts[i]
# Candidates for overlapping have to (end after we start) and (begin before we end)
candidates = set(start_index[:after_query]) & set(end_index[before_query:])
# Proceed as before
if len(candidates) > 0:
output[i] = __match_interval_overlaps(query, intervals_to, candidates)
elif strict:
# Numba only lets us use compile-time constants in exception messages
raise ParameterError
else:
# Find the closest interval
# (start_index[after_query] - query[1]) is the distance to the next interval
# (query[0] - end_index[before_query])
dist_before = np.inf
dist_after = np.inf
if search_starts[i] > 0:
dist_before = query[0] - end_sorted[search_starts[i]-1]
if search_ends[i] + 1 < len(intervals_to):
dist_after = start_sorted[search_ends[i]+1] - query[1]
if dist_before < dist_after:
output[i] = end_index[search_starts[i]-1]
else:
output[i] = start_index[search_ends[i]+1]
return output
def match_intervals(intervals_from, intervals_to, strict=True):
'''Match one set of time intervals to another.
This can be useful for tasks such as mapping beat timings
to segments.
Each element `[a, b]` of `intervals_from` is matched to the
element `[c, d]` of `intervals_to` which maximizes the
Jaccard similarity between the intervals:
`max(0, |min(b, d) - max(a, c)|) / |max(d, b) - min(a, c)|`
In `strict=True` mode, if there is no interval with positive
intersection with `[a,b]`, an exception is thrown.
In `strict=False` mode, any interval `[a, b]` that has no
intersection with any element of `intervals_to` is instead
matched to the interval `[c, d]` which minimizes
`min(|b - c|, |a - d|)`
that is, the disjoint interval `[c, d]` with a boundary closest
to `[a, b]`.
.. note:: An element of `intervals_to` may be matched to multiple
entries of `intervals_from`.
Parameters
----------
intervals_from : np.ndarray [shape=(n, 2)]
The time range for source intervals.
The `i` th interval spans time `intervals_from[i, 0]`
to `intervals_from[i, 1]`.
`intervals_from[0, 0]` should be 0, `intervals_from[-1, 1]`
should be the track duration.
intervals_to : np.ndarray [shape=(m, 2)]
Analogous to `intervals_from`.
strict : bool
If `True`, intervals can only match if they intersect.
If `False`, disjoint intervals can match.
Returns
-------
interval_mapping : np.ndarray [shape=(n,)]
For each interval in `intervals_from`, the
corresponding interval in `intervals_to`.
See Also
--------
match_events
Raises
------
ParameterError
If either array of input intervals is not the correct shape
If `strict=True` and some element of `intervals_from` is disjoint from
every element of `intervals_to`.
Examples
--------
>>> ints_from = np.array([[3, 5], [1, 4], [4, 5]])
>>> ints_to = np.array([[0, 2], [1, 3], [4, 5], [6, 7]])
>>> librosa.util.match_intervals(ints_from, ints_to)
array([2, 1, 2], dtype=uint32)
>>> # [3, 5] => [4, 5] (ints_to[2])
>>> # [1, 4] => [1, 3] (ints_to[1])
>>> # [4, 5] => [4, 5] (ints_to[2])
The reverse matching of the above is not possible in `strict` mode
because `[6, 7]` is disjoint from all intervals in `ints_from`.
With `strict=False`, we get the following:
>>> librosa.util.match_intervals(ints_to, ints_from, strict=False)
array([1, 1, 2, 2], dtype=uint32)
>>> # [0, 2] => [1, 4] (ints_from[1])
>>> # [1, 3] => [1, 4] (ints_from[1])
>>> # [4, 5] => [4, 5] (ints_from[2])
>>> # [6, 7] => [4, 5] (ints_from[2])
'''
if len(intervals_from) == 0 or len(intervals_to) == 0:
raise ParameterError('Attempting to match empty interval list')
# Verify that the input intervals has correct shape and size
valid_intervals(intervals_from)
valid_intervals(intervals_to)
try:
return __match_intervals(intervals_from, intervals_to, strict=strict)
except ParameterError as exc:
raise ParameterError('Unable to match intervals with strict={}'.format(strict)) from exc
def match_events(events_from, events_to, left=True, right=True):
'''Match one set of events to another.
This is useful for tasks such as matching beats to the nearest
detected onsets, or frame-aligned events to the nearest zero-crossing.
.. note:: A target event may be matched to multiple source events.
Examples
--------
>>> # Sources are multiples of 7
>>> s_from = np.arange(0, 100, 7)
>>> s_from
array([ 0, 7, 14, 21, 28, 35, 42, 49, 56, 63, 70, 77, 84, 91,
98])
>>> # Targets are multiples of 10
>>> s_to = np.arange(0, 100, 10)
>>> s_to
array([ 0, 10, 20, 30, 40, 50, 60, 70, 80, 90])
>>> # Find the matching
>>> idx = librosa.util.match_events(s_from, s_to)
>>> idx
array([0, 1, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 8, 9, 9])
>>> # Print each source value to its matching target
>>> zip(s_from, s_to[idx])
[(0, 0), (7, 10), (14, 10), (21, 20), (28, 30), (35, 30),
(42, 40), (49, 50), (56, 60), (63, 60), (70, 70), (77, 80),
(84, 80), (91, 90), (98, 90)]
Parameters
----------
events_from : ndarray [shape=(n,)]
Array of events (e.g., times, sample or frame indices) to match from.
events_to : ndarray [shape=(m,)]
Array of events (e.g., times, sample or frame indices) to
match against.
left : bool
right : bool
If `False`, then matched events cannot be to the left (or right)
of source events.
Returns
-------
event_mapping : np.ndarray [shape=(n,)]
For each event in `events_from`, the corresponding event
index in `events_to`.
`event_mapping[i] == arg min |events_from[i] - events_to[:]|`
See Also
--------
match_intervals
Raises
------
ParameterError
If either array of input events is not the correct shape
'''
if len(events_from) == 0 or len(events_to) == 0:
raise ParameterError('Attempting to match empty event list')
# If we can't match left or right, then only strict equivalence
# counts as a match.
if not (left or right) and not np.all(np.in1d(events_from, events_to)):
raise ParameterError('Cannot match events with left=right=False '
'and events_from is not contained '
'in events_to')
# If we can't match to the left, then there should be at least one
# target event greater-equal to every source event
if (not left) and max(events_to) < max(events_from):
raise ParameterError('Cannot match events with left=False '
'and max(events_to) < max(events_from)')
# If we can't match to the right, then there should be at least one
# target event less-equal to every source event
if (not right) and min(events_to) > min(events_from):
raise ParameterError('Cannot match events with right=False '
'and min(events_to) > min(events_from)')
# array of matched items
output = np.empty_like(events_from, dtype=int)
return __match_events_helper(output, events_from, events_to, left, right)
@numba.jit(nopython=True, cache=True)
def __match_events_helper(output, events_from, events_to, left=True, right=True): # pragma: no cover
# mock dictionary for events
from_idx = np.argsort(events_from)
sorted_from = events_from[from_idx]
to_idx = np.argsort(events_to)
sorted_to = events_to[to_idx]
# find the matching indices
matching_indices = np.searchsorted(sorted_to, sorted_from)
# iterate over indices in matching_indices
for ind, middle_ind in enumerate(matching_indices):
left_flag = False
right_flag = False
left_ind = -1
right_ind = len(matching_indices)
left_diff = 0
right_diff = 0
mid_diff = 0
middle_ind = matching_indices[ind]
sorted_from_num = sorted_from[ind]
# Prevent oob from chosen index
if middle_ind == len(sorted_to):
middle_ind -= 1
# Permitted to look to the left
if left and middle_ind > 0:
left_ind = middle_ind - 1
left_flag = True
# Permitted to look to right
if right and middle_ind < len(sorted_to) - 1:
right_ind = middle_ind + 1
right_flag = True
mid_diff = abs(sorted_to[middle_ind] - sorted_from_num)
if left and left_flag:
left_diff = abs(sorted_to[left_ind] - sorted_from_num)
if right and right_flag:
right_diff = abs(sorted_to[right_ind] - sorted_from_num)
if left_flag and (not right and (sorted_to[middle_ind] > sorted_from_num) or
(not right_flag and left_diff < mid_diff) or
(left_diff < right_diff and left_diff < mid_diff)):
output[ind] = to_idx[left_ind]
# Check if right should be chosen
elif right_flag and (right_diff < mid_diff):
output[ind] = to_idx[right_ind]
# Selected index wins
else:
output[ind] = to_idx[middle_ind]
# Undo sorting
solutions = np.empty_like(output)
solutions[from_idx] = output
return solutions
|
py | 1a4ed79045312329d9387b4dfb0873559da4e216 | import sys
if sys.version_info < (2, 7): # noqa
import unittest2 as unittest
else:
import unittest
from okonomiyaki.errors import OkonomiyakiError
from okonomiyaki.platforms import EPDPlatform
from okonomiyaki.versions import RuntimeVersion
from ..setuptools_egg import SetuptoolsEggMetadata, parse_filename
from .common import (
PIP_SETUPTOOLS_EGG, TRAITS_SETUPTOOLS_EGG, TRAITS_SETUPTOOLS_OSX_cp38_EGG,
TRAITS_SETUPTOOLS_WIN_cp38_EGG, TRAITS_SETUPTOOLS_LINUX_cp38_EGG)
class TestParseFilename(unittest.TestCase):
def test_simple(self):
# Given
path = "nose-1.2.1-py2.6.egg"
# When
name, version, pyver, platform = parse_filename(path)
# Then
self.assertEqual(name, "nose")
self.assertEqual(version, "1.2.1")
self.assertEqual(pyver, "2.6")
self.assertIsNone(platform)
def test_simple_with_extension_osx(self):
# Given
path = "dc_analysis-1.0-py2.7-macosx-10.6-x86_64.egg"
# When
name, version, pyver, platform = parse_filename(path)
# Then
self.assertEqual(name, "dc_analysis")
self.assertEqual(version, "1.0")
self.assertEqual(pyver, "2.7")
self.assertEqual(platform, "macosx-10.6-x86_64")
def test_simple_with_extension(self):
# Given
path = "numpy-1.9.1-py2.6-win-amd64.egg"
# When
name, version, pyver, platform = parse_filename(path)
# Then
self.assertEqual(name, "numpy")
self.assertEqual(version, "1.9.1")
self.assertEqual(pyver, "2.6")
self.assertEqual(platform, "win-amd64")
def test_enthought_egg(self):
# Given
path = "nose-1.2.1-1.egg"
# When/Then
with self.assertRaises(OkonomiyakiError):
parse_filename(path)
class TestSetuptoolsEggMetadata(unittest.TestCase):
def test_simple(self):
# Given
path = PIP_SETUPTOOLS_EGG
# When
metadata = SetuptoolsEggMetadata.from_egg(path)
# Then
self.assertEqual(metadata.name, "pip")
self.assertEqual(metadata.version, "7.0.3")
self.assertEqual(metadata.python_tag, "cp34")
self.assertIsNone(metadata.abi_tag)
self.assertIsNone(metadata.platform_tag)
# When
metadata = SetuptoolsEggMetadata.from_egg(path, abi_tag=None)
# Then
self.assertEqual(metadata.name, "pip")
self.assertEqual(metadata.version, "7.0.3")
self.assertEqual(metadata.python_tag, "cp34")
self.assertIsNone(metadata.abi_tag)
self.assertIsNone(metadata.platform_tag)
# Given
platform = EPDPlatform.from_epd_string("win-32")
python_tag = "cp34"
abi_tag = "cp34m"
# When
metadata = SetuptoolsEggMetadata.from_egg(
path, platform, python_tag, abi_tag)
# Then
self.assertEqual(metadata.name, "pip")
self.assertEqual(metadata.version, "7.0.3")
self.assertEqual(metadata.python_tag, "cp34")
self.assertEqual(metadata.abi_tag, "cp34m")
self.assertEqual(metadata.platform_tag, "win32")
def test_platform_specific(self):
# Given
path = TRAITS_SETUPTOOLS_EGG
platform = EPDPlatform.from_epd_string("osx-64")
# When
metadata = SetuptoolsEggMetadata.from_egg(path, platform)
# Then
self.assertEqual(metadata.name, "traits")
self.assertEqual(metadata.version, "4.6.0.dev235")
self.assertEqual(metadata.python_tag, "cp27")
self.assertEqual(metadata.abi_tag, "cp27m")
self.assertEqual(metadata.platform_tag, "macosx_10_6_x86_64")
# When/Then
with self.assertRaises(OkonomiyakiError):
SetuptoolsEggMetadata.from_egg(path)
def test_macos_cp38_egg(self):
# Given
path = TRAITS_SETUPTOOLS_OSX_cp38_EGG
python = RuntimeVersion.from_string('3.8.10')
platform = EPDPlatform.from_epd_string("osx-64", python)
# When
metadata = SetuptoolsEggMetadata.from_egg(path, platform)
# Then
self.assertEqual(metadata.name, "traits")
self.assertEqual(metadata.version, "6.3.0.dev1702")
self.assertEqual(metadata.python_tag, "cp38")
self.assertEqual(metadata.abi_tag, "cp38")
self.assertEqual(metadata.platform_tag, "macosx_10_14_x86_64")
# When/Then
with self.assertRaises(OkonomiyakiError):
SetuptoolsEggMetadata.from_egg(path)
def test_linux_cp38_egg(self):
# Given
path = TRAITS_SETUPTOOLS_LINUX_cp38_EGG
python = RuntimeVersion.from_string('3.8.10')
platform = EPDPlatform.from_epd_string("rh7-64", python)
# When
metadata = SetuptoolsEggMetadata.from_egg(path, platform)
# Then
self.assertEqual(metadata.name, "traits")
self.assertEqual(metadata.version, "6.3.0.dev1702")
self.assertEqual(metadata.python_tag, "cp38")
self.assertEqual(metadata.abi_tag, "cp38")
self.assertEqual(metadata.platform_tag, "linux_x86_64")
# When/Then
with self.assertRaises(OkonomiyakiError):
SetuptoolsEggMetadata.from_egg(path)
def test_windows_cp38_egg(self):
# Given
path = TRAITS_SETUPTOOLS_WIN_cp38_EGG
python = RuntimeVersion.from_string('3.8.10')
platform = EPDPlatform.from_epd_string("win-64", python)
# When
metadata = SetuptoolsEggMetadata.from_egg(path, platform)
# Then
self.assertEqual(metadata.name, "traits")
self.assertEqual(metadata.version, "6.3.0.dev1702")
self.assertEqual(metadata.python_tag, "cp38")
self.assertEqual(metadata.abi_tag, "cp38")
self.assertEqual(metadata.platform_tag, "win_amd64")
# When/Then
with self.assertRaises(OkonomiyakiError):
SetuptoolsEggMetadata.from_egg(path)
|
py | 1a4ed79b379ac5fc511125dad6276a17c649c9b9 | """
The multigrid module provides a framework for solving elliptic
problems. A multigrid object is just a list of grids, from the finest
mesh down (by factors of two) to a single interior zone (each grid has
the same number of guardcells).
The main multigrid class is setup to solve a constant-coefficient
Helmholtz equation::
(alpha - beta L) phi = f
where L is the Laplacian and alpha and beta are constants. If alpha =
0 and beta = -1, then this is the Poisson equation.
We support Dirichlet or Neumann BCs, or a periodic domain.
The general usage is as follows::
a = multigrid.CellCenterMG2d(nx, ny, verbose=1, alpha=alpha, beta=beta)
this creates the multigrid object a, with a finest grid of nx by ny
zones and the default boundary condition types. alpha and beta are
the coefficients of the Helmholtz equation. Setting verbose = 1
causing debugging information to be output, so you can see the
residual errors in each of the V-cycles.
Initialization is done as::
a.init_zeros()
this initializes the solution vector with zeros (this is not necessary
if you just created the multigrid object, but it can be used to reset
the solution between runs on the same object).
Next::
a.init_RHS(np.zeros((nx, ny), dtype=np.float64))
this initializes the RHS on the finest grid to 0 (Laplace's equation).
Any RHS can be set by passing through an array of (nx, ny) values here.
Then to solve, you just do::
a.solve(rtol = 1.e-10)
where rtol is the desired tolerance (residual norm / source norm)
to access the final solution, use the get_solution method::
v = a.get_solution()
For convenience, the grid information on the solution level is available as
attributes to the class,
a.ilo, a.ihi, a.jlo, a.jhi are the indices bounding the interior
of the solution array (i.e. excluding the ghost cells).
a.x and a.y are the coordinate arrays
a.dx and a.dy are the grid spacings
"""
from __future__ import print_function
import math
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import mesh.boundary as bnd
import mesh.patch as patch
from util import msg
class CellCenterMG2d(object):
"""
The main multigrid class for cell-centered data.
We require that nx = ny be a power of 2 and dx = dy, for
simplicity
"""
def __init__(self, nx, ny, ng=1,
xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0,
xl_BC_type="dirichlet", xr_BC_type="dirichlet",
yl_BC_type="dirichlet", yr_BC_type="dirichlet",
xl_BC=None, xr_BC=None,
yl_BC=None, yr_BC=None,
alpha=0.0, beta=-1.0,
nsmooth=10, nsmooth_bottom=50,
verbose=0,
aux_field=None, aux_bc=None,
true_function=None, vis=0, vis_title=""):
"""
Create the CellCenterMG2d object. Note that this requires a
grid to be a power of 2 in size and square.
Parameters
----------
nx : int
number of cells in x-direction
ny : int
number of cells in y-direction.
xmin : float, optional
minimum physical coordinate in x-direction
xmax : float, optional
maximum physical coordinate in x-direction
ymin : float, optional
minimum physical coordinate in y-direction
ymax : float, optional
maximum physical coordinate in y-direction
xl_BC_type : {'neumann', 'dirichlet', 'periodic'}, optional
boundary condition to enforce on lower x face
xr_BC_type : {'neumann', 'dirichlet', 'periodic'}, optional
boundary condition to enforce on upper x face
yl_BC_type : {'neumann', 'dirichlet', 'periodic'}, optional
boundary condition to enforce on lower y face
yr_BC_type : {'neumann', 'dirichlet', 'periodic'}, optional
boundary condition to enforce on upper y face
xl_BC : function, optional
function (of y) to call to get -x boundary values
(homogeneous assumed otherwise)
xr_BC : function, optional
function (of y) to call to get +x boundary values
(homogeneous assumed otherwise)
yl_BC : function, optional
function (of x) to call to get -y boundary values
(homogeneous assumed otherwise)
yr_BC : function, optional
function (of x) to call to get +y boundary values
(homogeneous assumed otherwise)
alpha : float, optional
coefficient in Helmholtz equation (alpha - beta L) phi = f
beta : float, optional
coefficient in Helmholtz equation (alpha - beta L) phi = f
nsmooth : int, optional
number of smoothing iterations to be done at each intermediate
level in the V-cycle (up and down)
nsmooth_bottom : int, optional
number of smoothing iterations to be done during the bottom
solve
verbose : int, optional
increase verbosity during the solve (for verbose=1)
aux_field : list of str, optional
extra fields to define and carry at each level.
Useful for subclassing.
aux_bc : list of BC objects, optional
the boundary conditions corresponding to the aux fields
true_function : function, optional
a function (of x,y) that provides the exact solution to
the elliptic problem we are solving. This is used only
for visualization purposes
vis : int, optional
output a detailed visualization of every smoothing step
all throughout the V-cycle (if vis=1)
vis_title : string, optional
a descriptive title to write on the visualization plots
Returns
-------
out: CellCenterMG2d object
"""
if nx != ny:
raise ValueError("ERROR: multigrid currently requires nx = ny")
self.nx = nx
self.ny = ny
self.ng = ng
self.xmin = xmin
self.xmax = xmax
self.ymin = ymin
self.ymax = ymax
if (xmax-xmin) != (ymax-ymin):
raise ValueError("ERROR: multigrid currently requires a square domain")
self.alpha = alpha
self.beta = beta
self.nsmooth = nsmooth
self.nsmooth_bottom = nsmooth_bottom
self.max_cycles = 100
self.verbose = verbose
# for visualization purposes, we can set a function name that
# provides the true solution to our elliptic problem.
if true_function is not None:
self.true_function = true_function
# a small number used in computing the error, so we don't divide by 0
self.small = 1.e-16
# keep track of whether we've initialized the RHS
self.initialized_rhs = 0
# assume that self.nx = 2^(nlevels-1) and that nx = ny
# this defines nlevels such that we end exactly on a 2x2 grid
self.nlevels = int(math.log(self.nx)/math.log(2.0))
# a multigrid object will be a list of grids
self.grids = []
# create the grids. Here, self.grids[0] will be the coarsest
# grid and self.grids[nlevel-1] will be the finest grid
# we store the solution, v, the rhs, f.
# create the boundary condition object
bc = bnd.BC(xlb=xl_BC_type, xrb=xr_BC_type,
ylb=yl_BC_type, yrb=yr_BC_type)
nx_t = ny_t = 2
for i in range(self.nlevels):
# create the grid
my_grid = patch.Grid2d(nx_t, ny_t, ng=self.ng,
xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)
# add a CellCenterData2d object for this level to our list
self.grids.append(patch.CellCenterData2d(my_grid, dtype=np.float64))
# create the phi BC object -- this only applies for the finest
# level. On the coarser levels, phi represents the residual,
# which has homogeneous BCs
bc_p = bnd.BC(xlb=xl_BC_type, xrb=xr_BC_type,
ylb=yl_BC_type, yrb=yr_BC_type,
xl_func=xl_BC, xr_func=xr_BC,
yl_func=yl_BC, yr_func=yr_BC, grid=my_grid)
if i == self.nlevels-1:
self.grids[i].register_var("v", bc_p)
else:
self.grids[i].register_var("v", bc)
self.grids[i].register_var("f", bc)
self.grids[i].register_var("r", bc)
if aux_field is not None:
for f, b in zip(aux_field, aux_bc):
self.grids[i].register_var(f, b)
self.grids[i].create()
if self.verbose:
print(self.grids[i])
nx_t = nx_t*2
ny_t = ny_t*2
# provide coordinate and indexing information for the solution mesh
soln_grid = self.grids[self.nlevels-1].grid
self.ilo = soln_grid.ilo
self.ihi = soln_grid.ihi
self.jlo = soln_grid.jlo
self.jhi = soln_grid.jhi
self.x = soln_grid.x
self.dx = soln_grid.dx
self.x2d = soln_grid.x2d
self.y = soln_grid.y
self.dy = soln_grid.dy # note, dy = dx is assumed
self.y2d = soln_grid.y2d
self.soln_grid = soln_grid
# store the source norm
self.source_norm = 0.0
# after solving, keep track of the number of cycles taken, the
# relative error from the previous cycle, and the residual error
# (normalized to the source norm)
self.num_cycles = 0
self.residual_error = 1.e33
self.relative_error = 1.e33
# keep track of where we are in the V
self.current_cycle = -1
self.current_level = -1
self.up_or_down = ""
# for visualization -- what frame are we outputting?
self.vis = vis
self.vis_title = vis_title
self.frame = 0
# these draw functions are for visualization purposes and are
# not ordinarily used, except for plotting the progression of the
# solution within the V
def _draw_V(self):
""" draw the V-cycle on our optional visualization """
xdown = np.linspace(0.0, 0.5, self.nlevels)
xup = np.linspace(0.5, 1.0, self.nlevels)
ydown = np.linspace(1.0, 0.0, self.nlevels)
yup = np.linspace(0.0, 1.0, self.nlevels)
plt.plot(xdown, ydown, lw=2, color="k")
plt.plot(xup, yup, lw=2, color="k")
plt.scatter(xdown, ydown, marker="o", color="k", s=40)
plt.scatter(xup, yup, marker="o", color="k", s=40)
if self.up_or_down == "down":
plt.scatter(xdown[self.nlevels-self.current_level-1],
ydown[self.nlevels-self.current_level-1],
marker="o", color="r", zorder=100, s=38)
else:
plt.scatter(xup[self.current_level], yup[self.current_level],
marker="o", color="r", zorder=100, s=38)
plt.text(0.7, 0.1, "V-cycle %d" % (self.current_cycle))
plt.axis("off")
def _draw_solution(self):
""" plot the current solution on our optional visualization """
myg = self.grids[self.current_level].grid
v = self.grids[self.current_level].get_var("v")
cm = "viridis"
plt.imshow(np.transpose(v[myg.ilo:myg.ihi+1, myg.jlo:myg.jhi+1]),
interpolation="nearest", origin="lower",
extent=[self.xmin, self.xmax, self.ymin, self.ymax], cmap=cm)
#plt.xlabel("x")
plt.ylabel("y")
if self.current_level == self.nlevels-1:
plt.title(r"solving $L\phi = f$")
else:
plt.title(r"solving $Le = r$")
formatter = matplotlib.ticker.ScalarFormatter(useMathText=True)
cb = plt.colorbar(format=formatter, shrink=0.5)
cb.ax.yaxis.offsetText.set_fontsize("small")
cl = plt.getp(cb.ax, 'ymajorticklabels')
plt.setp(cl, fontsize="small")
def _draw_main_solution(self):
"""
plot the solution at the finest level on our optional
visualization
"""
myg = self.grids[self.nlevels-1].grid
v = self.grids[self.nlevels-1].get_var("v")
cm = "viridis"
plt.imshow(np.transpose(v[myg.ilo:myg.ihi+1, myg.jlo:myg.jhi+1]),
interpolation="nearest", origin="lower",
extent=[self.xmin, self.xmax, self.ymin, self.ymax], cmap=cm)
plt.xlabel("x")
plt.ylabel("y")
plt.title(r"current fine grid solution")
formatter = matplotlib.ticker.ScalarFormatter(useMathText=True)
cb = plt.colorbar(format=formatter, shrink=0.5)
cb.ax.yaxis.offsetText.set_fontsize("small")
cl = plt.getp(cb.ax, 'ymajorticklabels')
plt.setp(cl, fontsize="small")
def _draw_main_error(self):
"""
plot the error with respect to the true solution on our optional
visualization
"""
myg = self.grids[self.nlevels-1].grid
v = self.grids[self.nlevels-1].get_var("v")
e = v - self.true_function(myg.x2d, myg.y2d)
cmap = "viridis"
plt.imshow(np.transpose(e[myg.ilo:myg.ihi+1, myg.jlo:myg.jhi+1]),
interpolation="nearest", origin="lower",
extent=[self.xmin, self.xmax, self.ymin, self.ymax], cmap=cmap)
plt.xlabel("x")
plt.ylabel("y")
plt.title(r"current fine grid error")
formatter = matplotlib.ticker.ScalarFormatter(useMathText=True)
cb = plt.colorbar(format=formatter, shrink=0.5)
cb.ax.yaxis.offsetText.set_fontsize("small")
cl = plt.getp(cb.ax, 'ymajorticklabels')
plt.setp(cl, fontsize="small")
def grid_info(self, level, indent=0):
"""
Report simple grid information
"""
print("{}level: {}, grid: {} x {}".format(
indent*" ", level, self.grids[level].grid.nx, self.grids[level].grid.ny))
def get_solution(self, grid=None):
"""
Return the solution after doing the MG solve
If a grid object is passed in, then the solution is put on that
grid. Note: the passed-in grid must have the same dx and dy
Returns
-------
out : ndarray
"""
v = self.grids[self.nlevels-1].get_var("v")
if grid is None:
return v.copy()
else:
myg = self.soln_grid
assert grid.dx == myg.dx and grid.dy == myg.dy
sol = grid.scratch_array()
sol.v(buf=1)[:, :] = v.v(buf=1)
return sol
def get_solution_gradient(self, grid=None):
"""
Return the gradient of the solution after doing the MG solve. The
x- and y-components are returned in separate arrays.
If a grid object is passed in, then the gradient is computed on that
grid. Note: the passed-in grid must have the same dx, dy
Returns
-------
out : ndarray, ndarray
"""
myg = self.soln_grid
if grid is None:
og = self.soln_grid
else:
og = grid
assert og.dx == myg.dx and og.dy == myg.dy
v = self.grids[self.nlevels-1].get_var("v")
gx = og.scratch_array()
gy = og.scratch_array()
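# approximate the gradient at cell centers with second-order centered
# differences of the neighboring cell values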
gx.v()[:, :] = 0.5*(v.ip(1) - v.ip(-1))/myg.dx
gy.v()[:, :] = 0.5*(v.jp(1) - v.jp(-1))/myg.dy
return gx, gy
def get_solution_object(self):
"""
Return the full solution data object at the finest resolution
after doing the MG solve
Returns
-------
out : CellCenterData2d object
"""
return self.grids[self.nlevels-1]
def init_solution(self, data):
"""
Initialize the solution to the elliptic problem by passing in
a value for all defined zones
Parameters
----------
data : ndarray
An array (of the same size as the finest MG level) with the
values to initialize the solution to the elliptic problem.
"""
v = self.grids[self.nlevels-1].get_var("v")
v[:, :] = data.copy()
def init_zeros(self):
"""
Set the initial solution to zero
"""
v = self.grids[self.nlevels-1].get_var("v")
v[:, :] = 0.0
def init_RHS(self, data):
"""
Initialize the right hand side, f, of the Helmholtz equation
(alpha - beta L) phi = f
Parameters
----------
data : ndarray
An array (of the same size as the finest MG level) with the
values to initialize the solution to the elliptic problem.
"""
f = self.grids[self.nlevels-1].get_var("f")
f[:, :] = data.copy()
# store the source norm
self.source_norm = f.norm()
if self.verbose:
print("Source norm = ", self.source_norm)
self.initialized_rhs = 1
def _compute_residual(self, level):
""" compute the residual and store it in the r variable"""
v = self.grids[level].get_var("v")
f = self.grids[level].get_var("f")
r = self.grids[level].get_var("r")
myg = self.grids[level].grid
# compute the residual
# r = f - alpha phi + beta L phi
r.v()[:, :] = f.v()[:, :] - self.alpha*v.v()[:, :] + \
self.beta*((v.ip(-1) + v.ip(1) - 2*v.v())/myg.dx**2 +
(v.jp(-1) + v.jp(1) - 2*v.v())/myg.dy**2)
def smooth(self, level, nsmooth):
"""
Use red-black Gauss-Seidel iterations to smooth the solution
at a given level. This is used at each stage of the V-cycle
(up and down) in the MG solution, but it can also be called
directly to solve the elliptic problem (although it will take
many more iterations).
Parameters
----------
level : int
The level in the MG hierarchy to smooth the solution
nsmooth : int
The number of r-b Gauss-Seidel smoothing iterations to perform
"""
v = self.grids[level].get_var("v")
f = self.grids[level].get_var("f")
myg = self.grids[level].grid
self.grids[level].fill_BC("v")
xcoeff = self.beta/myg.dx**2
ycoeff = self.beta/myg.dy**2
# do red-black G-S
for i in range(nsmooth):
# do the red black updating in four decoupled groups
#
#
# | | |
# --+-------+-------+--
# | | |
# | 4 | 3 |
# | | |
# --+-------+-------+--
# | | |
# jlo | 1 | 2 |
# | | |
# --+-------+-------+--
# | ilo | |
#
# groups 1 and 3 are done together, then we need to
# fill ghost cells, and then groups 2 and 4
for n, (ix, iy) in enumerate([(0, 0), (1, 1), (1, 0), (0, 1)]):
v.ip_jp(ix, iy, s=2)[:, :] = (f.ip_jp(ix, iy, s=2) +
xcoeff*(v.ip_jp(1+ix, iy, s=2) + v.ip_jp(-1+ix, iy, s=2)) +
ycoeff*(v.ip_jp(ix, 1+iy, s=2) + v.ip_jp(ix, -1+iy, s=2))) / \
(self.alpha + 2.0*xcoeff + 2.0*ycoeff)
if n == 1 or n == 3:
self.grids[level].fill_BC("v")
if self.vis == 1:
plt.clf()
plt.subplot(221)
self._draw_solution()
plt.subplot(222)
self._draw_V()
plt.subplot(223)
self._draw_main_solution()
plt.subplot(224)
self._draw_main_error()
plt.suptitle(self.vis_title, fontsize=18)
plt.pause(0.001)
plt.draw()
plt.savefig("mg_%4.4d.png" % (self.frame))
self.frame += 1
def solve(self, rtol=1.e-11):
"""
The main driver for the multigrid solution of the Helmholtz
equation. This controls the V-cycles, smoothing at each
step of the way and uses simple smoothing at the coarsest
level to perform the bottom solve.
Parameters
----------
rtol : float
The relative tolerance (residual norm / source norm) to
solve to. Note that if the source norm is 0 (e.g. the
righthand side of our equation is 0), then we just use
the norm of the residual.
"""
# start by making sure that we've initialized the RHS
if not self.initialized_rhs:
msg.fail("ERROR: RHS not initialized")
if self.verbose:
print("source norm = ", self.source_norm)
old_phi = self.grids[self.nlevels-1].get_var("v").copy()
residual_error = 1.e33
cycle = 1
# V-cycles until we achieve the L2 norm of the residual < rtol
while residual_error > rtol and cycle <= self.max_cycles:
self.current_cycle = cycle
# zero out the solution on all but the finest grid
for level in range(self.nlevels-1):
self.grids[level].zero("v")
if self.verbose:
print("<<< beginning V-cycle (cycle {}) >>>\n".format(cycle))
# do V-cycles through the entire hierarchy
level = self.nlevels-1
self.v_cycle(level)
# compute the error with respect to the previous solution
# this is for diagnostic purposes only -- it is not used to
# determine convergence
soln = self.grids[self.nlevels-1]
diff = (soln.get_var("v") - old_phi)/(soln.get_var("v") + self.small)
relative_error = soln.grid.norm(diff)
old_phi = soln.get_var("v").copy()
# compute the residual error, relative to the source norm
self._compute_residual(self.nlevels-1)
fp = self.grids[level]
r = fp.get_var("r")
if self.source_norm != 0.0:
residual_error = r.norm()/self.source_norm
else:
residual_error = r.norm()
if self.verbose:
print("cycle {}: relative err = {}, residual err = {}\n".format(
cycle, relative_error, residual_error))
cycle += 1
self.num_cycles = cycle-1
self.relative_error = relative_error
self.residual_error = residual_error
fp.fill_BC("v")
def v_cycle(self, level):
"""
Perform a V-cycle for a single 2-level solve. This is applied
recursively to do V-cycles through the entire hierarchy.
"""
if level > 0:
self.current_level = level
self.up_or_down = "down"
# pointers to the fine and coarse data
fp = self.grids[level]
cp = self.grids[level-1]
if self.verbose:
self._compute_residual(level)
self.grid_info(level, indent=2)
print(" before G-S, residual L2: {}".format(fp.get_var("r").norm()))
# smooth on the current level
self.smooth(level, self.nsmooth)
# compute the residual
self._compute_residual(level)
if self.verbose:
print(" after G-S, residual L2: {}\n".format(fp.get_var("r").norm()))
# restrict the residual down to the RHS of the coarser level
f_coarse = cp.get_var("f")
f_coarse.v()[:, :] = fp.restrict("r").v()
# solve the coarse problem
self.v_cycle(level-1)
# ascending part
self.current_level = level
self.up_or_down = "up"
fp = self.grids[level]
cp = self.grids[level-1]
# prolong the error up from the coarse grid
e = cp.prolong("v")
# correct the solution on the current grid
v = fp.get_var("v")
v.v()[:, :] += e.v()
fp.fill_BC("v")
if self.verbose:
self._compute_residual(level)
self.grid_info(level, indent=2)
print(" before G-S, residual L2: {}".format(fp.get_var("r").norm()))
# smooth
self.smooth(level, self.nsmooth)
if self.verbose:
self._compute_residual(level)
print(" after G-S, residual L2: {}\n".format(fp.get_var("r").norm()))
else:
# bottom solve: solve the discrete coarse problem. We
# could use any number of different matrix solvers here
# (like CG), but since we are 2x2 by design at this point,
# we will just smooth
if self.verbose:
print(" bottom solve:")
self.current_level = level
bp = self.grids[level]
if self.verbose:
self.grid_info(level, indent=2)
print("")
self.smooth(level, self.nsmooth_bottom)
bp.fill_BC("v")
|
py | 1a4ed9c8aabbb538b551075d31ee680d88543e08 | #!/usr/bin/env python
# encoding: utf-8
"""
Run.py
Created by Tomas Knapen on 2010-09-15.
Copyright (c) 2010 Tomas Knapen. All rights reserved.
"""
import os, sys, datetime
from subprocess import *
#from volumesAndSurfaces import *
from Tools.Sessions import *
from Operators.BehaviorOperator import *
class Run(object):
def __init__(self, **kwargs ): #ID, condition, dataType,
"""
run takes an ID, condition, dataType, rawDataFilePath
"""
# integer that will tell the run what number it is in the session
self.indexInSession = None
self.behaviorFile = None
self.eyeLinkFile = None
self.trialList = []
for k,v in kwargs.items():
setattr(self, k, v) # here the object gets all the attributes listed in the arguments
if not hasattr(self, 'condition'):
self.condition = ''
if hasattr(self, 'rawDataFilePath'): # datetime of this run is the creation time of the raw data file
if os.path.isfile(self.rawDataFilePath) :
self.dateTime = os.path.getctime(self.rawDataFilePath)
else:
print 'rawDataFilePath %s is not a file.' % self.rawDataFilePath
elif hasattr(self, 'behaviorFile'):
# self.dateTime = os.path.getctime(self.behaviorFile)
self.dateTime = datetime.date.today()
elif hasattr(self, 'eyeFile'):
self.dateTime = os.path.getctime(self.eyeFile)
def addTrial(self, trial):
"""docstring for addTrial"""
trial.indexInRun = len(self.trialList)
self.trialList.append(trial)
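# Hedged usage sketch (added for illustration; not part of the original file):
# constructing a Run from keyword arguments and attaching a trial. The keyword
# names mirror the attributes read in __init__ above; `Trial` is assumed to be
# a class defined elsewhere in this package and is an assumption here, not
# taken from this excerpt.
#
#     run = Run(ID=1, condition='fixation', dataType='fMRI',
#               rawDataFilePath='/path/to/raw_run_1.nii.gz')
#     trial = Trial()      # hypothetical constructor
#     run.addTrial(trial)  # trial gets indexInRun = len(run.trialList) before append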
|
py | 1a4edae642e64a999cf0b57bcb9d020ba55f6942 | __all__ = ['api_provider'] |
py | 1a4edb056c0dd9361008a4f56de2045dbf75b3a8 | #===============================================================================
# Copyright 2017-2019 Intel Corporation
# All Rights Reserved.
#
# If this software was obtained under the Intel Simplified Software License,
# the following terms apply:
#
# The source code, information and material ("Material") contained herein is
# owned by Intel Corporation or its suppliers or licensors, and title to such
# Material remains with Intel Corporation or its suppliers or licensors. The
# Material contains proprietary information of Intel or its suppliers and
# licensors. The Material is protected by worldwide copyright laws and treaty
# provisions. No part of the Material may be used, copied, reproduced,
# modified, published, uploaded, posted, transmitted, distributed or disclosed
# in any way without Intel's prior express written permission. No license under
# any patent, copyright or other intellectual property rights in the Material
# is granted to or conferred upon you, either expressly, by implication,
# inducement, estoppel or otherwise. Any license under such intellectual
# property rights must be express and approved by Intel in writing.
#
# Unless otherwise agreed by Intel in writing, you may not remove or alter this
# notice or any other notice embedded in Materials by Intel or Intel's
# suppliers or licensors in any way.
#
#
# If this software was obtained under the Apache License, Version 2.0 (the
# "License"), the following terms apply:
#
# You may not use this file except in compliance with the License. You may
# obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
#
# Intel(R) Integrated Performance Primitives (Intel(R) IPP) Cryptography
#
import re
import sys
import os
import hashlib
Header = sys.argv[1] ## Intel(R) IPP Crypto dispatcher will be generated for functions in Header
OutDir = sys.argv[2] ## Output folder for generated files
cpulist = sys.argv[3] ## Actual CPU list: semicolon separated string
cpulist = cpulist.split(';')
headerID= False ## Header ID define to avoid multiple include like: #if !defined( __IPPCP_H__ )
from gen_disp_common import readNextFunction
HDR= open( Header, 'r' )
h= HDR.readlines()
HDR.close()
## keep filename only
(incdir, Header)= os.path.split(Header)
## original header name to declare external functions as internal for dispatcher
OrgH= Header
isFunctionFound = True
curLine = 0
FunName = ""
FunArg = ""
while (isFunctionFound == True):
result = readNextFunction(h, curLine, headerID)
curLine = result['curLine']
FunName = result['FunName']
FunArg = result['FunArg']
isFunctionFound = result['success']
if (isFunctionFound == True):
##################################################
## create dispatcher files: C file with inline asm
##################################################
filename = "jmp_{}_{}".format(FunName, hashlib.sha512(FunName.encode('utf-8')).hexdigest()[:8])
DISP= open( os.sep.join([OutDir, filename + ".asm"]), 'w' )
for cpu in cpulist:
DISP.write("EXTRN "+cpu+"_"+FunName+":PROC\n")
DISP.write("EXTRN ippcpJumpIndexForMergedLibs:DWORD\n")
DISP.write("EXTRN ippcpSafeInit:PROC\n\n")
DISP.write("_DATA SEGMENT\n\n")
DISP.write(" DQ in_"+FunName+"\n")
DISP.write(FunName+"_arraddr")
for cpu in cpulist:
DISP.write(" DQ "+cpu+"_"+FunName+"\n")
DISP.write("""
_DATA ENDS
_TEXT SEGMENT
in_{FunName} PROC PRIVATE
call ippcpSafeInit
ALIGN 16
{FunName} PROC PUBLIC
movsxd rax, DWORD PTR ippcpJumpIndexForMergedLibs
lea r10, {FunName}_arraddr
jmp qword ptr [r10+rax*8]
{FunName} ENDP
in_{FunName} ENDP
_TEXT ENDS
END
""".format(FunName=FunName))
DISP.close()
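## Hedged illustration (added; not emitted by this script verbatim): for a
## hypothetical function name `ippsFooBar` and a cpulist of "w7;p8", tracing the
## writes above yields roughly the following MASM file. The function and CPU
## names are assumptions for illustration only; the layout simply follows the
## write calls in the loop above.
##
##   EXTRN w7_ippsFooBar:PROC
##   EXTRN p8_ippsFooBar:PROC
##   EXTRN ippcpJumpIndexForMergedLibs:DWORD
##   EXTRN ippcpSafeInit:PROC
##
##   _DATA SEGMENT
##
##    DQ in_ippsFooBar
##   ippsFooBar_arraddr DQ w7_ippsFooBar
##    DQ p8_ippsFooBar
##
##   _DATA ENDS
##   _TEXT SEGMENT
##   in_ippsFooBar PROC PRIVATE
##    call ippcpSafeInit
##   ALIGN 16
##   ippsFooBar PROC PUBLIC
##    movsxd rax, DWORD PTR ippcpJumpIndexForMergedLibs
##    lea r10, ippsFooBar_arraddr
##    jmp qword ptr [r10+rax*8]
##   ippsFooBar ENDP
##   in_ippsFooBar ENDP
##   _TEXT ENDS
##   END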
|
py | 1a4edb646561f386ce950c81ba27f24781e0b631 | """Tests for certbot_dns_joker.dns_joker."""
import unittest
try:
import mock
except ImportError: # pragma: no cover
from unittest import mock # type: ignore
from requests.exceptions import HTTPError
import urllib.parse
import requests
import requests_mock
from certbot.compat import os
from certbot.errors import PluginError
from certbot.plugins import dns_test_common
from certbot.plugins.dns_test_common import DOMAIN
from certbot.tests import util as test_util
FAKE_USERNAME = 'fake_username'
FAKE_PASSWORD = 'fake_password'
MOCK_ENDPOINT = 'mock://endpoint'
class AuthenticatorTest(test_util.TempDirTestCase,
dns_test_common.BaseAuthenticatorTest):
def setUp(self):
super(AuthenticatorTest, self).setUp()
from certbot_dns_joker.dns_joker import Authenticator
path = os.path.join(self.tempdir, 'file.ini')
dns_test_common.write({
# 'certbot_dns_joker:dns_joker_username': FAKE_USERNAME,
# 'certbot_dns_joker:dns_joker_password': FAKE_PASSWORD,
'joker_username': FAKE_USERNAME,
'joker_password': FAKE_PASSWORD,
}, path)
self.config = mock.MagicMock(joker_credentials=path,
joker_propagation_seconds=0) # don't wait during tests
# self.auth = Authenticator(self.config, "certbot_dns_joker:dns_joker")
self.auth = Authenticator(self.config, "joker")
self.mock_client = mock.MagicMock()
# _get_joker_client | pylint: disable=protected-access
self.auth._get_joker_client = mock.MagicMock(return_value=self.mock_client)
def test_perform(self):
self.auth.perform([self.achall])
expected = [
mock.call.add_txt_record(
DOMAIN, "_acme-challenge." + DOMAIN, mock.ANY
)
]
self.assertEqual(expected, self.mock_client.mock_calls)
def test_cleanup(self):
# _attempt_cleanup | pylint: disable=protected-access
self.auth._attempt_cleanup = True
self.auth.cleanup([self.achall])
expected = [
mock.call.del_txt_record(
DOMAIN, "_acme-challenge." + DOMAIN, mock.ANY
)
]
self.assertEqual(expected, self.mock_client.mock_calls)
class JokerClientTest(unittest.TestCase):
record_name = "_acme-challenge." + DOMAIN
record_content = "bar"
record_ttl = 42
def setUp(self):
from certbot_dns_joker.dns_joker import _JokerClient
self.client = _JokerClient(FAKE_USERNAME, FAKE_PASSWORD, DOMAIN,
self.record_ttl, endpoint=MOCK_ENDPOINT)
self.adapter = requests_mock.Adapter()
self.client.session.mount('mock://', self.adapter)
def _register_response(self, response='good', subdomain=None, additional_matcher=None, **kwargs):
def add_matcher(request):
data = urllib.parse.parse_qs(request.text)
add_result = True
if additional_matcher is not None:
add_result = additional_matcher(request)
def submatch(label):
if subdomain:
print(f'checking label:{label} subdomain:{subdomain}')
return len(label) > len(subdomain) and label[-len(subdomain)-1:] == '.' + subdomain
else:
return True
# The error message is unhelpful (NoMockAddress) if this fails.
return (
("username" in data and data["username"] == [FAKE_USERNAME]) and
("password" in data and data["password"] == [FAKE_PASSWORD]) and
("zone" in data and data["zone"] == [DOMAIN]) and
("label" in data and submatch(data["label"][0])) and
add_result
)
self.adapter.register_uri(
requests_mock.ANY,
MOCK_ENDPOINT,
text=response,
status_code=200 if response == 'good' else 400,
additional_matcher=add_matcher,
**kwargs
)
def test_add_txt_record(self):
self._register_response()
self.client.add_txt_record(
DOMAIN, self.record_name, self.record_content
)
def test_add_txt_record_fail_to_authenticate(self):
self._register_response(response='badauth')
with self.assertRaises(PluginError) as context:
self.client.add_txt_record(
DOMAIN, self.record_name, self.record_content
)
def test_add_txt_record_fail_to_find_domain(self):
self._register_response(response='nohost')
with self.assertRaises(PluginError) as context:
self.client.add_txt_record(
DOMAIN, self.record_name, self.record_content
)
def test_add_txt_record_subdomain(self):
self._register_response(subdomain='sub')
self.client.add_txt_record(
'sub.' + DOMAIN, 'challenge.sub.' + DOMAIN, self.record_content
)
def test_del_txt_record(self):
self._register_response()
self.client.del_txt_record(
DOMAIN, self.record_name, self.record_content
)
if __name__ == "__main__":
unittest.main() # pragma: no cover
|
py | 1a4ede71baf968133b666ccc673058d011b2d2ff | # Ex046.2
"""Make a program that shows on the screen a countdown to the fireworks burst,
Going from 10 to 0 with a pause of 1 second between them"""
from time import sleep
import emoji
for counting in range(10, -1, -1):
print(counting)
sleep(1)
print(emoji.emojize(":boom::boom::boom::boom::boom::boom:", use_aliases=True))
print('FIREWORKS!!!')
print(emoji.emojize(":boom::boom::boom::boom::boom::boom:", use_aliases=True))
|
py | 1a4edef99c70471e6812498ce687462a8bfbe0d9 | """
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. The Intersight OpenAPI document defines the complete set of properties that are returned in the HTTP response. From that perspective, a client can expect that no additional properties are returned, unless these properties are explicitly defined in the OpenAPI document. However, when a client uses an older version of the Intersight OpenAPI document, the server may send additional properties because the software is more recent than the client. In that case, the client may receive properties that it does not know about. Some generated SDKs perform a strict validation of the HTTP response body against the OpenAPI document. # noqa: E501
The version of the OpenAPI document: 1.0.9-4950
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from intersight.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from intersight.model.display_names import DisplayNames
from intersight.model.mo_base_mo_relationship import MoBaseMoRelationship
from intersight.model.mo_mo_ref import MoMoRef
from intersight.model.mo_tag import MoTag
from intersight.model.mo_version_context import MoVersionContext
from intersight.model.organization_organization_relationship import OrganizationOrganizationRelationship
from intersight.model.policy_abstract_config_profile_relationship import PolicyAbstractConfigProfileRelationship
from intersight.model.storage_drive_group_relationship import StorageDriveGroupRelationship
from intersight.model.storage_m2_virtual_drive_config import StorageM2VirtualDriveConfig
from intersight.model.storage_r0_drive import StorageR0Drive
from intersight.model.storage_storage_policy import StorageStoragePolicy
globals()['DisplayNames'] = DisplayNames
globals()['MoBaseMoRelationship'] = MoBaseMoRelationship
globals()['MoMoRef'] = MoMoRef
globals()['MoTag'] = MoTag
globals()['MoVersionContext'] = MoVersionContext
globals()['OrganizationOrganizationRelationship'] = OrganizationOrganizationRelationship
globals()['PolicyAbstractConfigProfileRelationship'] = PolicyAbstractConfigProfileRelationship
globals()['StorageDriveGroupRelationship'] = StorageDriveGroupRelationship
globals()['StorageM2VirtualDriveConfig'] = StorageM2VirtualDriveConfig
globals()['StorageR0Drive'] = StorageR0Drive
globals()['StorageStoragePolicy'] = StorageStoragePolicy
class StorageStoragePolicyRelationship(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('class_id',): {
'MO.MOREF': "mo.MoRef",
},
('unused_disks_state',): {
'NOCHANGE': "NoChange",
'UNCONFIGUREDGOOD': "UnconfiguredGood",
'JBOD': "Jbod",
},
('object_type',): {
'AAA.AUDITRECORD': "aaa.AuditRecord",
'AAA.RETENTIONCONFIG': "aaa.RetentionConfig",
'AAA.RETENTIONPOLICY': "aaa.RetentionPolicy",
'ACCESS.POLICY': "access.Policy",
'ADAPTER.CONFIGPOLICY': "adapter.ConfigPolicy",
'ADAPTER.EXTETHINTERFACE': "adapter.ExtEthInterface",
'ADAPTER.HOSTETHINTERFACE': "adapter.HostEthInterface",
'ADAPTER.HOSTFCINTERFACE': "adapter.HostFcInterface",
'ADAPTER.HOSTISCSIINTERFACE': "adapter.HostIscsiInterface",
'ADAPTER.UNIT': "adapter.Unit",
'ADAPTER.UNITEXPANDER': "adapter.UnitExpander",
'APPLIANCE.APPSTATUS': "appliance.AppStatus",
'APPLIANCE.AUTORMAPOLICY': "appliance.AutoRmaPolicy",
'APPLIANCE.BACKUP': "appliance.Backup",
'APPLIANCE.BACKUPPOLICY': "appliance.BackupPolicy",
'APPLIANCE.CERTIFICATESETTING': "appliance.CertificateSetting",
'APPLIANCE.DATAEXPORTPOLICY': "appliance.DataExportPolicy",
'APPLIANCE.DEVICECERTIFICATE': "appliance.DeviceCertificate",
'APPLIANCE.DEVICECLAIM': "appliance.DeviceClaim",
'APPLIANCE.DEVICEUPGRADEPOLICY': "appliance.DeviceUpgradePolicy",
'APPLIANCE.DIAGSETTING': "appliance.DiagSetting",
'APPLIANCE.EXTERNALSYSLOGSETTING': "appliance.ExternalSyslogSetting",
'APPLIANCE.FILEGATEWAY': "appliance.FileGateway",
'APPLIANCE.FILESYSTEMSTATUS': "appliance.FileSystemStatus",
'APPLIANCE.GROUPSTATUS': "appliance.GroupStatus",
'APPLIANCE.IMAGEBUNDLE': "appliance.ImageBundle",
'APPLIANCE.NODEINFO': "appliance.NodeInfo",
'APPLIANCE.NODESTATUS': "appliance.NodeStatus",
'APPLIANCE.RELEASENOTE': "appliance.ReleaseNote",
'APPLIANCE.REMOTEFILEIMPORT': "appliance.RemoteFileImport",
'APPLIANCE.RESTORE': "appliance.Restore",
'APPLIANCE.SETUPINFO': "appliance.SetupInfo",
'APPLIANCE.SYSTEMINFO': "appliance.SystemInfo",
'APPLIANCE.SYSTEMSTATUS': "appliance.SystemStatus",
'APPLIANCE.UPGRADE': "appliance.Upgrade",
'APPLIANCE.UPGRADEPOLICY': "appliance.UpgradePolicy",
'ASSET.CLUSTERMEMBER': "asset.ClusterMember",
'ASSET.DEPLOYMENT': "asset.Deployment",
'ASSET.DEPLOYMENTDEVICE': "asset.DeploymentDevice",
'ASSET.DEVICECLAIM': "asset.DeviceClaim",
'ASSET.DEVICECONFIGURATION': "asset.DeviceConfiguration",
'ASSET.DEVICECONNECTORMANAGER': "asset.DeviceConnectorManager",
'ASSET.DEVICECONTRACTINFORMATION': "asset.DeviceContractInformation",
'ASSET.DEVICECONTRACTNOTIFICATION': "asset.DeviceContractNotification",
'ASSET.DEVICEREGISTRATION': "asset.DeviceRegistration",
'ASSET.SUBSCRIPTION': "asset.Subscription",
'ASSET.SUBSCRIPTIONACCOUNT': "asset.SubscriptionAccount",
'ASSET.SUBSCRIPTIONDEVICECONTRACTINFORMATION': "asset.SubscriptionDeviceContractInformation",
'ASSET.TARGET': "asset.Target",
'BIOS.BOOTDEVICE': "bios.BootDevice",
'BIOS.BOOTMODE': "bios.BootMode",
'BIOS.POLICY': "bios.Policy",
'BIOS.SYSTEMBOOTORDER': "bios.SystemBootOrder",
'BIOS.TOKENSETTINGS': "bios.TokenSettings",
'BIOS.UNIT': "bios.Unit",
'BIOS.VFSELECTMEMORYRASCONFIGURATION': "bios.VfSelectMemoryRasConfiguration",
'BOOT.CDDDEVICE': "boot.CddDevice",
'BOOT.DEVICEBOOTMODE': "boot.DeviceBootMode",
'BOOT.DEVICEBOOTSECURITY': "boot.DeviceBootSecurity",
'BOOT.HDDDEVICE': "boot.HddDevice",
'BOOT.ISCSIDEVICE': "boot.IscsiDevice",
'BOOT.NVMEDEVICE': "boot.NvmeDevice",
'BOOT.PCHSTORAGEDEVICE': "boot.PchStorageDevice",
'BOOT.PRECISIONPOLICY': "boot.PrecisionPolicy",
'BOOT.PXEDEVICE': "boot.PxeDevice",
'BOOT.SANDEVICE': "boot.SanDevice",
'BOOT.SDDEVICE': "boot.SdDevice",
'BOOT.UEFISHELLDEVICE': "boot.UefiShellDevice",
'BOOT.USBDEVICE': "boot.UsbDevice",
'BOOT.VMEDIADEVICE': "boot.VmediaDevice",
'BULK.EXPORT': "bulk.Export",
'BULK.EXPORTEDITEM': "bulk.ExportedItem",
'BULK.MOCLONER': "bulk.MoCloner",
'BULK.MOMERGER': "bulk.MoMerger",
'BULK.REQUEST': "bulk.Request",
'BULK.SUBREQUESTOBJ': "bulk.SubRequestObj",
'CAPABILITY.ADAPTERUNITDESCRIPTOR': "capability.AdapterUnitDescriptor",
'CAPABILITY.CATALOG': "capability.Catalog",
'CAPABILITY.CHASSISDESCRIPTOR': "capability.ChassisDescriptor",
'CAPABILITY.CHASSISMANUFACTURINGDEF': "capability.ChassisManufacturingDef",
'CAPABILITY.CIMCFIRMWAREDESCRIPTOR': "capability.CimcFirmwareDescriptor",
'CAPABILITY.EQUIPMENTPHYSICALDEF': "capability.EquipmentPhysicalDef",
'CAPABILITY.EQUIPMENTSLOTARRAY': "capability.EquipmentSlotArray",
'CAPABILITY.FANMODULEDESCRIPTOR': "capability.FanModuleDescriptor",
'CAPABILITY.FANMODULEMANUFACTURINGDEF': "capability.FanModuleManufacturingDef",
'CAPABILITY.IOCARDCAPABILITYDEF': "capability.IoCardCapabilityDef",
'CAPABILITY.IOCARDDESCRIPTOR': "capability.IoCardDescriptor",
'CAPABILITY.IOCARDMANUFACTURINGDEF': "capability.IoCardManufacturingDef",
'CAPABILITY.PORTGROUPAGGREGATIONDEF': "capability.PortGroupAggregationDef",
'CAPABILITY.PSUDESCRIPTOR': "capability.PsuDescriptor",
'CAPABILITY.PSUMANUFACTURINGDEF': "capability.PsuManufacturingDef",
'CAPABILITY.SERVERMODELSCAPABILITYDEF': "capability.ServerModelsCapabilityDef",
'CAPABILITY.SERVERSCHEMADESCRIPTOR': "capability.ServerSchemaDescriptor",
'CAPABILITY.SIOCMODULECAPABILITYDEF': "capability.SiocModuleCapabilityDef",
'CAPABILITY.SIOCMODULEDESCRIPTOR': "capability.SiocModuleDescriptor",
'CAPABILITY.SIOCMODULEMANUFACTURINGDEF': "capability.SiocModuleManufacturingDef",
'CAPABILITY.SWITCHCAPABILITY': "capability.SwitchCapability",
'CAPABILITY.SWITCHDESCRIPTOR': "capability.SwitchDescriptor",
'CAPABILITY.SWITCHMANUFACTURINGDEF': "capability.SwitchManufacturingDef",
'CERTIFICATEMANAGEMENT.POLICY': "certificatemanagement.Policy",
'CHASSIS.CONFIGCHANGEDETAIL': "chassis.ConfigChangeDetail",
'CHASSIS.CONFIGIMPORT': "chassis.ConfigImport",
'CHASSIS.CONFIGRESULT': "chassis.ConfigResult",
'CHASSIS.CONFIGRESULTENTRY': "chassis.ConfigResultEntry",
'CHASSIS.IOMPROFILE': "chassis.IomProfile",
'CHASSIS.PROFILE': "chassis.Profile",
'CLOUD.AWSBILLINGUNIT': "cloud.AwsBillingUnit",
'CLOUD.AWSKEYPAIR': "cloud.AwsKeyPair",
'CLOUD.AWSNETWORKINTERFACE': "cloud.AwsNetworkInterface",
'CLOUD.AWSORGANIZATIONALUNIT': "cloud.AwsOrganizationalUnit",
'CLOUD.AWSSECURITYGROUP': "cloud.AwsSecurityGroup",
'CLOUD.AWSSUBNET': "cloud.AwsSubnet",
'CLOUD.AWSVIRTUALMACHINE': "cloud.AwsVirtualMachine",
'CLOUD.AWSVOLUME': "cloud.AwsVolume",
'CLOUD.AWSVPC': "cloud.AwsVpc",
'CLOUD.COLLECTINVENTORY': "cloud.CollectInventory",
'CLOUD.REGIONS': "cloud.Regions",
'CLOUD.SKUCONTAINERTYPE': "cloud.SkuContainerType",
'CLOUD.SKUDATABASETYPE': "cloud.SkuDatabaseType",
'CLOUD.SKUINSTANCETYPE': "cloud.SkuInstanceType",
'CLOUD.SKUNETWORKTYPE': "cloud.SkuNetworkType",
'CLOUD.SKUREGIONRATECARDS': "cloud.SkuRegionRateCards",
'CLOUD.SKUVOLUMETYPE': "cloud.SkuVolumeType",
'CLOUD.TFCAGENTPOOL': "cloud.TfcAgentpool",
'CLOUD.TFCORGANIZATION': "cloud.TfcOrganization",
'CLOUD.TFCWORKSPACE': "cloud.TfcWorkspace",
'COMM.HTTPPROXYPOLICY': "comm.HttpProxyPolicy",
'COMPUTE.BIOSPOSTPOLICY': "compute.BiosPostPolicy",
'COMPUTE.BLADE': "compute.Blade",
'COMPUTE.BLADEIDENTITY': "compute.BladeIdentity",
'COMPUTE.BOARD': "compute.Board",
'COMPUTE.MAPPING': "compute.Mapping",
'COMPUTE.PHYSICALSUMMARY': "compute.PhysicalSummary",
'COMPUTE.RACKUNIT': "compute.RackUnit",
'COMPUTE.RACKUNITIDENTITY': "compute.RackUnitIdentity",
'COMPUTE.SERVERPOWERPOLICY': "compute.ServerPowerPolicy",
'COMPUTE.SERVERSETTING': "compute.ServerSetting",
'COMPUTE.VMEDIA': "compute.Vmedia",
'COND.ALARM': "cond.Alarm",
'COND.ALARMAGGREGATION': "cond.AlarmAggregation",
'COND.HCLSTATUS': "cond.HclStatus",
'COND.HCLSTATUSDETAIL': "cond.HclStatusDetail",
'COND.HCLSTATUSJOB': "cond.HclStatusJob",
'CONNECTORPACK.CONNECTORPACKUPGRADE': "connectorpack.ConnectorPackUpgrade",
'CONNECTORPACK.UPGRADEIMPACT': "connectorpack.UpgradeImpact",
'CONVERGEDINFRA.HEALTHCHECKDEFINITION': "convergedinfra.HealthCheckDefinition",
'CONVERGEDINFRA.HEALTHCHECKEXECUTION': "convergedinfra.HealthCheckExecution",
'CONVERGEDINFRA.POD': "convergedinfra.Pod",
'CRD.CUSTOMRESOURCE': "crd.CustomResource",
'DEVICECONNECTOR.POLICY': "deviceconnector.Policy",
'EQUIPMENT.CHASSIS': "equipment.Chassis",
'EQUIPMENT.CHASSISIDENTITY': "equipment.ChassisIdentity",
'EQUIPMENT.CHASSISOPERATION': "equipment.ChassisOperation",
'EQUIPMENT.DEVICESUMMARY': "equipment.DeviceSummary",
'EQUIPMENT.EXPANDERMODULE': "equipment.ExpanderModule",
'EQUIPMENT.FAN': "equipment.Fan",
'EQUIPMENT.FANCONTROL': "equipment.FanControl",
'EQUIPMENT.FANMODULE': "equipment.FanModule",
'EQUIPMENT.FEX': "equipment.Fex",
'EQUIPMENT.FEXIDENTITY': "equipment.FexIdentity",
'EQUIPMENT.FEXOPERATION': "equipment.FexOperation",
'EQUIPMENT.FRU': "equipment.Fru",
'EQUIPMENT.IDENTITYSUMMARY': "equipment.IdentitySummary",
'EQUIPMENT.IOCARD': "equipment.IoCard",
'EQUIPMENT.IOCARDOPERATION': "equipment.IoCardOperation",
'EQUIPMENT.IOEXPANDER': "equipment.IoExpander",
'EQUIPMENT.LOCATORLED': "equipment.LocatorLed",
'EQUIPMENT.PSU': "equipment.Psu",
'EQUIPMENT.PSUCONTROL': "equipment.PsuControl",
'EQUIPMENT.RACKENCLOSURE': "equipment.RackEnclosure",
'EQUIPMENT.RACKENCLOSURESLOT': "equipment.RackEnclosureSlot",
'EQUIPMENT.SHAREDIOMODULE': "equipment.SharedIoModule",
'EQUIPMENT.SWITCHCARD': "equipment.SwitchCard",
'EQUIPMENT.SYSTEMIOCONTROLLER': "equipment.SystemIoController",
'EQUIPMENT.TPM': "equipment.Tpm",
'EQUIPMENT.TRANSCEIVER': "equipment.Transceiver",
'ETHER.HOSTPORT': "ether.HostPort",
'ETHER.NETWORKPORT': "ether.NetworkPort",
'ETHER.PHYSICALPORT': "ether.PhysicalPort",
'ETHER.PORTCHANNEL': "ether.PortChannel",
'EXTERNALSITE.AUTHORIZATION': "externalsite.Authorization",
'FABRIC.APPLIANCEPCROLE': "fabric.AppliancePcRole",
'FABRIC.APPLIANCEROLE': "fabric.ApplianceRole",
'FABRIC.CONFIGCHANGEDETAIL': "fabric.ConfigChangeDetail",
'FABRIC.CONFIGRESULT': "fabric.ConfigResult",
'FABRIC.CONFIGRESULTENTRY': "fabric.ConfigResultEntry",
'FABRIC.ELEMENTIDENTITY': "fabric.ElementIdentity",
'FABRIC.ESTIMATEIMPACT': "fabric.EstimateImpact",
'FABRIC.ETHNETWORKCONTROLPOLICY': "fabric.EthNetworkControlPolicy",
'FABRIC.ETHNETWORKGROUPPOLICY': "fabric.EthNetworkGroupPolicy",
'FABRIC.ETHNETWORKPOLICY': "fabric.EthNetworkPolicy",
'FABRIC.FCNETWORKPOLICY': "fabric.FcNetworkPolicy",
'FABRIC.FCSTORAGEROLE': "fabric.FcStorageRole",
'FABRIC.FCUPLINKPCROLE': "fabric.FcUplinkPcRole",
'FABRIC.FCUPLINKROLE': "fabric.FcUplinkRole",
'FABRIC.FCOEUPLINKPCROLE': "fabric.FcoeUplinkPcRole",
'FABRIC.FCOEUPLINKROLE': "fabric.FcoeUplinkRole",
'FABRIC.FLOWCONTROLPOLICY': "fabric.FlowControlPolicy",
'FABRIC.LINKAGGREGATIONPOLICY': "fabric.LinkAggregationPolicy",
'FABRIC.LINKCONTROLPOLICY': "fabric.LinkControlPolicy",
'FABRIC.MULTICASTPOLICY': "fabric.MulticastPolicy",
'FABRIC.PCMEMBER': "fabric.PcMember",
'FABRIC.PCOPERATION': "fabric.PcOperation",
'FABRIC.PORTMODE': "fabric.PortMode",
'FABRIC.PORTOPERATION': "fabric.PortOperation",
'FABRIC.PORTPOLICY': "fabric.PortPolicy",
'FABRIC.SERVERROLE': "fabric.ServerRole",
'FABRIC.SWITCHCLUSTERPROFILE': "fabric.SwitchClusterProfile",
'FABRIC.SWITCHCONTROLPOLICY': "fabric.SwitchControlPolicy",
'FABRIC.SWITCHPROFILE': "fabric.SwitchProfile",
'FABRIC.SYSTEMQOSPOLICY': "fabric.SystemQosPolicy",
'FABRIC.UPLINKPCROLE': "fabric.UplinkPcRole",
'FABRIC.UPLINKROLE': "fabric.UplinkRole",
'FABRIC.VLAN': "fabric.Vlan",
'FABRIC.VSAN': "fabric.Vsan",
'FAULT.INSTANCE': "fault.Instance",
'FC.PHYSICALPORT': "fc.PhysicalPort",
'FC.PORTCHANNEL': "fc.PortChannel",
'FCPOOL.FCBLOCK': "fcpool.FcBlock",
'FCPOOL.LEASE': "fcpool.Lease",
'FCPOOL.POOL': "fcpool.Pool",
'FCPOOL.POOLMEMBER': "fcpool.PoolMember",
'FCPOOL.UNIVERSE': "fcpool.Universe",
'FEEDBACK.FEEDBACKPOST': "feedback.FeedbackPost",
'FIRMWARE.BIOSDESCRIPTOR': "firmware.BiosDescriptor",
'FIRMWARE.BOARDCONTROLLERDESCRIPTOR': "firmware.BoardControllerDescriptor",
'FIRMWARE.CHASSISUPGRADE': "firmware.ChassisUpgrade",
'FIRMWARE.CIMCDESCRIPTOR': "firmware.CimcDescriptor",
'FIRMWARE.DIMMDESCRIPTOR': "firmware.DimmDescriptor",
'FIRMWARE.DISTRIBUTABLE': "firmware.Distributable",
'FIRMWARE.DISTRIBUTABLEMETA': "firmware.DistributableMeta",
'FIRMWARE.DRIVEDESCRIPTOR': "firmware.DriveDescriptor",
'FIRMWARE.DRIVERDISTRIBUTABLE': "firmware.DriverDistributable",
'FIRMWARE.EULA': "firmware.Eula",
'FIRMWARE.FIRMWARESUMMARY': "firmware.FirmwareSummary",
'FIRMWARE.GPUDESCRIPTOR': "firmware.GpuDescriptor",
'FIRMWARE.HBADESCRIPTOR': "firmware.HbaDescriptor",
'FIRMWARE.IOMDESCRIPTOR': "firmware.IomDescriptor",
'FIRMWARE.MSWITCHDESCRIPTOR': "firmware.MswitchDescriptor",
'FIRMWARE.NXOSDESCRIPTOR': "firmware.NxosDescriptor",
'FIRMWARE.PCIEDESCRIPTOR': "firmware.PcieDescriptor",
'FIRMWARE.PSUDESCRIPTOR': "firmware.PsuDescriptor",
'FIRMWARE.RUNNINGFIRMWARE': "firmware.RunningFirmware",
'FIRMWARE.SASEXPANDERDESCRIPTOR': "firmware.SasExpanderDescriptor",
'FIRMWARE.SERVERCONFIGURATIONUTILITYDISTRIBUTABLE': "firmware.ServerConfigurationUtilityDistributable",
'FIRMWARE.STORAGECONTROLLERDESCRIPTOR': "firmware.StorageControllerDescriptor",
'FIRMWARE.SWITCHUPGRADE': "firmware.SwitchUpgrade",
'FIRMWARE.UNSUPPORTEDVERSIONUPGRADE': "firmware.UnsupportedVersionUpgrade",
'FIRMWARE.UPGRADE': "firmware.Upgrade",
'FIRMWARE.UPGRADEIMPACT': "firmware.UpgradeImpact",
'FIRMWARE.UPGRADEIMPACTSTATUS': "firmware.UpgradeImpactStatus",
'FIRMWARE.UPGRADESTATUS': "firmware.UpgradeStatus",
'FORECAST.CATALOG': "forecast.Catalog",
'FORECAST.DEFINITION': "forecast.Definition",
'FORECAST.INSTANCE': "forecast.Instance",
'GRAPHICS.CARD': "graphics.Card",
'GRAPHICS.CONTROLLER': "graphics.Controller",
'HCL.COMPATIBILITYSTATUS': "hcl.CompatibilityStatus",
'HCL.DRIVERIMAGE': "hcl.DriverImage",
'HCL.EXEMPTEDCATALOG': "hcl.ExemptedCatalog",
'HCL.HYPERFLEXSOFTWARECOMPATIBILITYINFO': "hcl.HyperflexSoftwareCompatibilityInfo",
'HCL.OPERATINGSYSTEM': "hcl.OperatingSystem",
'HCL.OPERATINGSYSTEMVENDOR': "hcl.OperatingSystemVendor",
'HCL.SUPPORTEDDRIVERNAME': "hcl.SupportedDriverName",
'HYPERFLEX.ALARM': "hyperflex.Alarm",
'HYPERFLEX.APPCATALOG': "hyperflex.AppCatalog",
'HYPERFLEX.AUTOSUPPORTPOLICY': "hyperflex.AutoSupportPolicy",
'HYPERFLEX.BACKUPCLUSTER': "hyperflex.BackupCluster",
'HYPERFLEX.CAPABILITYINFO': "hyperflex.CapabilityInfo",
'HYPERFLEX.CLUSTER': "hyperflex.Cluster",
'HYPERFLEX.CLUSTERBACKUPPOLICY': "hyperflex.ClusterBackupPolicy",
'HYPERFLEX.CLUSTERBACKUPPOLICYDEPLOYMENT': "hyperflex.ClusterBackupPolicyDeployment",
'HYPERFLEX.CLUSTERBACKUPPOLICYINVENTORY': "hyperflex.ClusterBackupPolicyInventory",
'HYPERFLEX.CLUSTERHEALTHCHECKEXECUTIONSNAPSHOT': "hyperflex.ClusterHealthCheckExecutionSnapshot",
'HYPERFLEX.CLUSTERNETWORKPOLICY': "hyperflex.ClusterNetworkPolicy",
'HYPERFLEX.CLUSTERPROFILE': "hyperflex.ClusterProfile",
'HYPERFLEX.CLUSTERREPLICATIONNETWORKPOLICY': "hyperflex.ClusterReplicationNetworkPolicy",
'HYPERFLEX.CLUSTERREPLICATIONNETWORKPOLICYDEPLOYMENT': "hyperflex.ClusterReplicationNetworkPolicyDeployment",
'HYPERFLEX.CLUSTERSTORAGEPOLICY': "hyperflex.ClusterStoragePolicy",
'HYPERFLEX.CONFIGRESULT': "hyperflex.ConfigResult",
'HYPERFLEX.CONFIGRESULTENTRY': "hyperflex.ConfigResultEntry",
'HYPERFLEX.DATAPROTECTIONPEER': "hyperflex.DataProtectionPeer",
'HYPERFLEX.DATASTORESTATISTIC': "hyperflex.DatastoreStatistic",
'HYPERFLEX.DEVICEPACKAGEDOWNLOADSTATE': "hyperflex.DevicePackageDownloadState",
'HYPERFLEX.DRIVE': "hyperflex.Drive",
'HYPERFLEX.EXTFCSTORAGEPOLICY': "hyperflex.ExtFcStoragePolicy",
'HYPERFLEX.EXTISCSISTORAGEPOLICY': "hyperflex.ExtIscsiStoragePolicy",
'HYPERFLEX.FEATURELIMITEXTERNAL': "hyperflex.FeatureLimitExternal",
'HYPERFLEX.FEATURELIMITINTERNAL': "hyperflex.FeatureLimitInternal",
'HYPERFLEX.HEALTH': "hyperflex.Health",
'HYPERFLEX.HEALTHCHECKDEFINITION': "hyperflex.HealthCheckDefinition",
'HYPERFLEX.HEALTHCHECKEXECUTION': "hyperflex.HealthCheckExecution",
'HYPERFLEX.HEALTHCHECKEXECUTIONSNAPSHOT': "hyperflex.HealthCheckExecutionSnapshot",
'HYPERFLEX.HEALTHCHECKPACKAGECHECKSUM': "hyperflex.HealthCheckPackageChecksum",
'HYPERFLEX.HXDPVERSION': "hyperflex.HxdpVersion",
'HYPERFLEX.LICENSE': "hyperflex.License",
'HYPERFLEX.LOCALCREDENTIALPOLICY': "hyperflex.LocalCredentialPolicy",
'HYPERFLEX.NODE': "hyperflex.Node",
'HYPERFLEX.NODECONFIGPOLICY': "hyperflex.NodeConfigPolicy",
'HYPERFLEX.NODEPROFILE': "hyperflex.NodeProfile",
'HYPERFLEX.PROTECTEDCLUSTER': "hyperflex.ProtectedCluster",
'HYPERFLEX.PROXYSETTINGPOLICY': "hyperflex.ProxySettingPolicy",
'HYPERFLEX.SERVERFIRMWAREVERSION': "hyperflex.ServerFirmwareVersion",
'HYPERFLEX.SERVERFIRMWAREVERSIONENTRY': "hyperflex.ServerFirmwareVersionEntry",
'HYPERFLEX.SERVERMODEL': "hyperflex.ServerModel",
'HYPERFLEX.SERVICEAUTHTOKEN': "hyperflex.ServiceAuthToken",
'HYPERFLEX.SOFTWAREDISTRIBUTIONCOMPONENT': "hyperflex.SoftwareDistributionComponent",
'HYPERFLEX.SOFTWAREDISTRIBUTIONENTRY': "hyperflex.SoftwareDistributionEntry",
'HYPERFLEX.SOFTWAREDISTRIBUTIONVERSION': "hyperflex.SoftwareDistributionVersion",
'HYPERFLEX.SOFTWAREVERSIONPOLICY': "hyperflex.SoftwareVersionPolicy",
'HYPERFLEX.STORAGECONTAINER': "hyperflex.StorageContainer",
'HYPERFLEX.SYSCONFIGPOLICY': "hyperflex.SysConfigPolicy",
'HYPERFLEX.UCSMCONFIGPOLICY': "hyperflex.UcsmConfigPolicy",
'HYPERFLEX.VCENTERCONFIGPOLICY': "hyperflex.VcenterConfigPolicy",
'HYPERFLEX.VMBACKUPINFO': "hyperflex.VmBackupInfo",
'HYPERFLEX.VMIMPORTOPERATION': "hyperflex.VmImportOperation",
'HYPERFLEX.VMRESTOREOPERATION': "hyperflex.VmRestoreOperation",
'HYPERFLEX.VMSNAPSHOTINFO': "hyperflex.VmSnapshotInfo",
'HYPERFLEX.VOLUME': "hyperflex.Volume",
'HYPERFLEX.WITNESSCONFIGURATION': "hyperflex.WitnessConfiguration",
'IAAS.CONNECTORPACK': "iaas.ConnectorPack",
'IAAS.DEVICESTATUS': "iaas.DeviceStatus",
'IAAS.DIAGNOSTICMESSAGES': "iaas.DiagnosticMessages",
'IAAS.LICENSEINFO': "iaas.LicenseInfo",
'IAAS.MOSTRUNTASKS': "iaas.MostRunTasks",
'IAAS.SERVICEREQUEST': "iaas.ServiceRequest",
'IAAS.UCSDINFO': "iaas.UcsdInfo",
'IAAS.UCSDMANAGEDINFRA': "iaas.UcsdManagedInfra",
'IAAS.UCSDMESSAGES': "iaas.UcsdMessages",
'IAM.ACCOUNT': "iam.Account",
'IAM.ACCOUNTEXPERIENCE': "iam.AccountExperience",
'IAM.APIKEY': "iam.ApiKey",
'IAM.APPREGISTRATION': "iam.AppRegistration",
'IAM.BANNERMESSAGE': "iam.BannerMessage",
'IAM.CERTIFICATE': "iam.Certificate",
'IAM.CERTIFICATEREQUEST': "iam.CertificateRequest",
'IAM.DOMAINGROUP': "iam.DomainGroup",
'IAM.ENDPOINTPRIVILEGE': "iam.EndPointPrivilege",
'IAM.ENDPOINTROLE': "iam.EndPointRole",
'IAM.ENDPOINTUSER': "iam.EndPointUser",
'IAM.ENDPOINTUSERPOLICY': "iam.EndPointUserPolicy",
'IAM.ENDPOINTUSERROLE': "iam.EndPointUserRole",
'IAM.IDP': "iam.Idp",
'IAM.IDPREFERENCE': "iam.IdpReference",
'IAM.IPACCESSMANAGEMENT': "iam.IpAccessManagement",
'IAM.IPADDRESS': "iam.IpAddress",
'IAM.LDAPGROUP': "iam.LdapGroup",
'IAM.LDAPPOLICY': "iam.LdapPolicy",
'IAM.LDAPPROVIDER': "iam.LdapProvider",
'IAM.LOCALUSERPASSWORD': "iam.LocalUserPassword",
'IAM.LOCALUSERPASSWORDPOLICY': "iam.LocalUserPasswordPolicy",
'IAM.OAUTHTOKEN': "iam.OAuthToken",
'IAM.PERMISSION': "iam.Permission",
'IAM.PRIVATEKEYSPEC': "iam.PrivateKeySpec",
'IAM.PRIVILEGE': "iam.Privilege",
'IAM.PRIVILEGESET': "iam.PrivilegeSet",
'IAM.QUALIFIER': "iam.Qualifier",
'IAM.RESOURCELIMITS': "iam.ResourceLimits",
'IAM.RESOURCEPERMISSION': "iam.ResourcePermission",
'IAM.RESOURCEROLES': "iam.ResourceRoles",
'IAM.ROLE': "iam.Role",
'IAM.SECURITYHOLDER': "iam.SecurityHolder",
'IAM.SERVICEPROVIDER': "iam.ServiceProvider",
'IAM.SESSION': "iam.Session",
'IAM.SESSIONLIMITS': "iam.SessionLimits",
'IAM.SYSTEM': "iam.System",
'IAM.TRUSTPOINT': "iam.TrustPoint",
'IAM.USER': "iam.User",
'IAM.USERGROUP': "iam.UserGroup",
'IAM.USERPREFERENCE': "iam.UserPreference",
'INVENTORY.DEVICEINFO': "inventory.DeviceInfo",
'INVENTORY.DNMOBINDING': "inventory.DnMoBinding",
'INVENTORY.GENERICINVENTORY': "inventory.GenericInventory",
'INVENTORY.GENERICINVENTORYHOLDER': "inventory.GenericInventoryHolder",
'INVENTORY.REQUEST': "inventory.Request",
'IPMIOVERLAN.POLICY': "ipmioverlan.Policy",
'IPPOOL.BLOCKLEASE': "ippool.BlockLease",
'IPPOOL.IPLEASE': "ippool.IpLease",
'IPPOOL.POOL': "ippool.Pool",
'IPPOOL.POOLMEMBER': "ippool.PoolMember",
'IPPOOL.SHADOWBLOCK': "ippool.ShadowBlock",
'IPPOOL.SHADOWPOOL': "ippool.ShadowPool",
'IPPOOL.UNIVERSE': "ippool.Universe",
'IQNPOOL.BLOCK': "iqnpool.Block",
'IQNPOOL.LEASE': "iqnpool.Lease",
'IQNPOOL.POOL': "iqnpool.Pool",
'IQNPOOL.POOLMEMBER': "iqnpool.PoolMember",
'IQNPOOL.UNIVERSE': "iqnpool.Universe",
'IWOTENANT.TENANTSTATUS': "iwotenant.TenantStatus",
'KUBERNETES.ACICNIAPIC': "kubernetes.AciCniApic",
'KUBERNETES.ACICNIPROFILE': "kubernetes.AciCniProfile",
'KUBERNETES.ACICNITENANTCLUSTERALLOCATION': "kubernetes.AciCniTenantClusterAllocation",
'KUBERNETES.ADDONDEFINITION': "kubernetes.AddonDefinition",
'KUBERNETES.ADDONPOLICY': "kubernetes.AddonPolicy",
'KUBERNETES.ADDONREPOSITORY': "kubernetes.AddonRepository",
'KUBERNETES.BAREMETALNODEPROFILE': "kubernetes.BaremetalNodeProfile",
'KUBERNETES.CATALOG': "kubernetes.Catalog",
'KUBERNETES.CLUSTER': "kubernetes.Cluster",
'KUBERNETES.CLUSTERADDONPROFILE': "kubernetes.ClusterAddonProfile",
'KUBERNETES.CLUSTERPROFILE': "kubernetes.ClusterProfile",
'KUBERNETES.CONFIGRESULT': "kubernetes.ConfigResult",
'KUBERNETES.CONFIGRESULTENTRY': "kubernetes.ConfigResultEntry",
'KUBERNETES.CONTAINERRUNTIMEPOLICY': "kubernetes.ContainerRuntimePolicy",
'KUBERNETES.DAEMONSET': "kubernetes.DaemonSet",
'KUBERNETES.DEPLOYMENT': "kubernetes.Deployment",
'KUBERNETES.INGRESS': "kubernetes.Ingress",
'KUBERNETES.NETWORKPOLICY': "kubernetes.NetworkPolicy",
'KUBERNETES.NODE': "kubernetes.Node",
'KUBERNETES.NODEGROUPPROFILE': "kubernetes.NodeGroupProfile",
'KUBERNETES.POD': "kubernetes.Pod",
'KUBERNETES.SERVICE': "kubernetes.Service",
'KUBERNETES.STATEFULSET': "kubernetes.StatefulSet",
'KUBERNETES.SYSCONFIGPOLICY': "kubernetes.SysConfigPolicy",
'KUBERNETES.TRUSTEDREGISTRIESPOLICY': "kubernetes.TrustedRegistriesPolicy",
'KUBERNETES.VERSION': "kubernetes.Version",
'KUBERNETES.VERSIONPOLICY': "kubernetes.VersionPolicy",
'KUBERNETES.VIRTUALMACHINEINFRACONFIGPOLICY': "kubernetes.VirtualMachineInfraConfigPolicy",
'KUBERNETES.VIRTUALMACHINEINFRASTRUCTUREPROVIDER': "kubernetes.VirtualMachineInfrastructureProvider",
'KUBERNETES.VIRTUALMACHINEINSTANCETYPE': "kubernetes.VirtualMachineInstanceType",
'KUBERNETES.VIRTUALMACHINENODEPROFILE': "kubernetes.VirtualMachineNodeProfile",
'KVM.POLICY': "kvm.Policy",
'KVM.SESSION': "kvm.Session",
'KVM.TUNNEL': "kvm.Tunnel",
'LICENSE.ACCOUNTLICENSEDATA': "license.AccountLicenseData",
'LICENSE.CUSTOMEROP': "license.CustomerOp",
'LICENSE.IKSCUSTOMEROP': "license.IksCustomerOp",
'LICENSE.IKSLICENSECOUNT': "license.IksLicenseCount",
'LICENSE.IWOCUSTOMEROP': "license.IwoCustomerOp",
'LICENSE.IWOLICENSECOUNT': "license.IwoLicenseCount",
'LICENSE.LICENSEINFO': "license.LicenseInfo",
'LICENSE.LICENSERESERVATIONOP': "license.LicenseReservationOp",
'LICENSE.SMARTLICENSETOKEN': "license.SmartlicenseToken",
'LS.SERVICEPROFILE': "ls.ServiceProfile",
'MACPOOL.IDBLOCK': "macpool.IdBlock",
'MACPOOL.LEASE': "macpool.Lease",
'MACPOOL.POOL': "macpool.Pool",
'MACPOOL.POOLMEMBER': "macpool.PoolMember",
'MACPOOL.UNIVERSE': "macpool.Universe",
'MANAGEMENT.CONTROLLER': "management.Controller",
'MANAGEMENT.ENTITY': "management.Entity",
'MANAGEMENT.INTERFACE': "management.Interface",
'MEMORY.ARRAY': "memory.Array",
'MEMORY.PERSISTENTMEMORYCONFIGRESULT': "memory.PersistentMemoryConfigResult",
'MEMORY.PERSISTENTMEMORYCONFIGURATION': "memory.PersistentMemoryConfiguration",
'MEMORY.PERSISTENTMEMORYNAMESPACE': "memory.PersistentMemoryNamespace",
'MEMORY.PERSISTENTMEMORYNAMESPACECONFIGRESULT': "memory.PersistentMemoryNamespaceConfigResult",
'MEMORY.PERSISTENTMEMORYPOLICY': "memory.PersistentMemoryPolicy",
'MEMORY.PERSISTENTMEMORYREGION': "memory.PersistentMemoryRegion",
'MEMORY.PERSISTENTMEMORYUNIT': "memory.PersistentMemoryUnit",
'MEMORY.UNIT': "memory.Unit",
'META.DEFINITION': "meta.Definition",
'NETWORK.ELEMENT': "network.Element",
'NETWORK.ELEMENTSUMMARY': "network.ElementSummary",
'NETWORK.FCZONEINFO': "network.FcZoneInfo",
'NETWORK.VLANPORTINFO': "network.VlanPortInfo",
'NETWORKCONFIG.POLICY': "networkconfig.Policy",
'NIAAPI.APICCCOPOST': "niaapi.ApicCcoPost",
'NIAAPI.APICFIELDNOTICE': "niaapi.ApicFieldNotice",
'NIAAPI.APICHWEOL': "niaapi.ApicHweol",
'NIAAPI.APICLATESTMAINTAINEDRELEASE': "niaapi.ApicLatestMaintainedRelease",
'NIAAPI.APICRELEASERECOMMEND': "niaapi.ApicReleaseRecommend",
'NIAAPI.APICSWEOL': "niaapi.ApicSweol",
'NIAAPI.DCNMCCOPOST': "niaapi.DcnmCcoPost",
'NIAAPI.DCNMFIELDNOTICE': "niaapi.DcnmFieldNotice",
'NIAAPI.DCNMHWEOL': "niaapi.DcnmHweol",
'NIAAPI.DCNMLATESTMAINTAINEDRELEASE': "niaapi.DcnmLatestMaintainedRelease",
'NIAAPI.DCNMRELEASERECOMMEND': "niaapi.DcnmReleaseRecommend",
'NIAAPI.DCNMSWEOL': "niaapi.DcnmSweol",
'NIAAPI.FILEDOWNLOADER': "niaapi.FileDownloader",
'NIAAPI.NIAMETADATA': "niaapi.NiaMetadata",
'NIAAPI.NIBFILEDOWNLOADER': "niaapi.NibFileDownloader",
'NIAAPI.NIBMETADATA': "niaapi.NibMetadata",
'NIAAPI.VERSIONREGEX': "niaapi.VersionRegex",
'NIATELEMETRY.AAALDAPPROVIDERDETAILS': "niatelemetry.AaaLdapProviderDetails",
'NIATELEMETRY.AAARADIUSPROVIDERDETAILS': "niatelemetry.AaaRadiusProviderDetails",
'NIATELEMETRY.AAATACACSPROVIDERDETAILS': "niatelemetry.AaaTacacsProviderDetails",
'NIATELEMETRY.APICAPPPLUGINDETAILS': "niatelemetry.ApicAppPluginDetails",
'NIATELEMETRY.APICCOREFILEDETAILS': "niatelemetry.ApicCoreFileDetails",
'NIATELEMETRY.APICDBGEXPRSEXPORTDEST': "niatelemetry.ApicDbgexpRsExportDest",
'NIATELEMETRY.APICDBGEXPRSTSSCHEDULER': "niatelemetry.ApicDbgexpRsTsScheduler",
'NIATELEMETRY.APICFANDETAILS': "niatelemetry.ApicFanDetails",
'NIATELEMETRY.APICFEXDETAILS': "niatelemetry.ApicFexDetails",
'NIATELEMETRY.APICFLASHDETAILS': "niatelemetry.ApicFlashDetails",
'NIATELEMETRY.APICNTPAUTH': "niatelemetry.ApicNtpAuth",
'NIATELEMETRY.APICPSUDETAILS': "niatelemetry.ApicPsuDetails",
'NIATELEMETRY.APICREALMDETAILS': "niatelemetry.ApicRealmDetails",
'NIATELEMETRY.APICSNMPCLIENTGRPDETAILS': "niatelemetry.ApicSnmpClientGrpDetails",
'NIATELEMETRY.APICSNMPCOMMUNITYACCESSDETAILS': "niatelemetry.ApicSnmpCommunityAccessDetails",
'NIATELEMETRY.APICSNMPCOMMUNITYDETAILS': "niatelemetry.ApicSnmpCommunityDetails",
'NIATELEMETRY.APICSNMPTRAPDETAILS': "niatelemetry.ApicSnmpTrapDetails",
'NIATELEMETRY.APICSNMPTRAPFWDSERVERDETAILS': "niatelemetry.ApicSnmpTrapFwdServerDetails",
'NIATELEMETRY.APICSNMPVERSIONTHREEDETAILS': "niatelemetry.ApicSnmpVersionThreeDetails",
'NIATELEMETRY.APICSYSLOGGRP': "niatelemetry.ApicSysLogGrp",
'NIATELEMETRY.APICSYSLOGSRC': "niatelemetry.ApicSysLogSrc",
'NIATELEMETRY.APICTRANSCEIVERDETAILS': "niatelemetry.ApicTransceiverDetails",
'NIATELEMETRY.APICUIPAGECOUNTS': "niatelemetry.ApicUiPageCounts",
'NIATELEMETRY.APPDETAILS': "niatelemetry.AppDetails",
'NIATELEMETRY.COMMONPOLICIES': "niatelemetry.CommonPolicies",
'NIATELEMETRY.DCNMFANDETAILS': "niatelemetry.DcnmFanDetails",
'NIATELEMETRY.DCNMFEXDETAILS': "niatelemetry.DcnmFexDetails",
'NIATELEMETRY.DCNMMODULEDETAILS': "niatelemetry.DcnmModuleDetails",
'NIATELEMETRY.DCNMPSUDETAILS': "niatelemetry.DcnmPsuDetails",
'NIATELEMETRY.DCNMTRANSCEIVERDETAILS': "niatelemetry.DcnmTransceiverDetails",
'NIATELEMETRY.EPG': "niatelemetry.Epg",
'NIATELEMETRY.FABRICMODULEDETAILS': "niatelemetry.FabricModuleDetails",
'NIATELEMETRY.FABRICPODPROFILE': "niatelemetry.FabricPodProfile",
'NIATELEMETRY.FABRICPODSS': "niatelemetry.FabricPodSs",
'NIATELEMETRY.FAULT': "niatelemetry.Fault",
'NIATELEMETRY.HTTPSACLCONTRACTDETAILS': "niatelemetry.HttpsAclContractDetails",
'NIATELEMETRY.HTTPSACLCONTRACTFILTERMAP': "niatelemetry.HttpsAclContractFilterMap",
'NIATELEMETRY.HTTPSACLEPGCONTRACTMAP': "niatelemetry.HttpsAclEpgContractMap",
'NIATELEMETRY.HTTPSACLEPGDETAILS': "niatelemetry.HttpsAclEpgDetails",
'NIATELEMETRY.HTTPSACLFILTERDETAILS': "niatelemetry.HttpsAclFilterDetails",
'NIATELEMETRY.LC': "niatelemetry.Lc",
'NIATELEMETRY.MSOCONTRACTDETAILS': "niatelemetry.MsoContractDetails",
'NIATELEMETRY.MSOEPGDETAILS': "niatelemetry.MsoEpgDetails",
'NIATELEMETRY.MSOSCHEMADETAILS': "niatelemetry.MsoSchemaDetails",
'NIATELEMETRY.MSOSITEDETAILS': "niatelemetry.MsoSiteDetails",
'NIATELEMETRY.MSOTENANTDETAILS': "niatelemetry.MsoTenantDetails",
'NIATELEMETRY.NEXUSDASHBOARDCONTROLLERDETAILS': "niatelemetry.NexusDashboardControllerDetails",
'NIATELEMETRY.NEXUSDASHBOARDDETAILS': "niatelemetry.NexusDashboardDetails",
'NIATELEMETRY.NEXUSDASHBOARDMEMORYDETAILS': "niatelemetry.NexusDashboardMemoryDetails",
'NIATELEMETRY.NEXUSDASHBOARDS': "niatelemetry.NexusDashboards",
'NIATELEMETRY.NIAFEATUREUSAGE': "niatelemetry.NiaFeatureUsage",
'NIATELEMETRY.NIAINVENTORY': "niatelemetry.NiaInventory",
'NIATELEMETRY.NIAINVENTORYDCNM': "niatelemetry.NiaInventoryDcnm",
'NIATELEMETRY.NIAINVENTORYFABRIC': "niatelemetry.NiaInventoryFabric",
'NIATELEMETRY.NIALICENSESTATE': "niatelemetry.NiaLicenseState",
'NIATELEMETRY.PASSWORDSTRENGTHCHECK': "niatelemetry.PasswordStrengthCheck",
'NIATELEMETRY.PODCOMMPOLICIES': "niatelemetry.PodCommPolicies",
'NIATELEMETRY.PODSNMPPOLICIES': "niatelemetry.PodSnmpPolicies",
'NIATELEMETRY.PODTIMESERVERPOLICIES': "niatelemetry.PodTimeServerPolicies",
'NIATELEMETRY.SITEINVENTORY': "niatelemetry.SiteInventory",
'NIATELEMETRY.SNMPSRC': "niatelemetry.SnmpSrc",
'NIATELEMETRY.SSHVERSIONTWO': "niatelemetry.SshVersionTwo",
'NIATELEMETRY.SUPERVISORMODULEDETAILS': "niatelemetry.SupervisorModuleDetails",
'NIATELEMETRY.SYSLOGREMOTEDEST': "niatelemetry.SyslogRemoteDest",
'NIATELEMETRY.SYSLOGSYSMSG': "niatelemetry.SyslogSysMsg",
'NIATELEMETRY.SYSLOGSYSMSGFACFILTER': "niatelemetry.SyslogSysMsgFacFilter",
'NIATELEMETRY.SYSTEMCONTROLLERDETAILS': "niatelemetry.SystemControllerDetails",
'NIATELEMETRY.TENANT': "niatelemetry.Tenant",
'NOTIFICATION.ACCOUNTSUBSCRIPTION': "notification.AccountSubscription",
'NTP.POLICY': "ntp.Policy",
'OAUTH.ACCESSTOKEN': "oauth.AccessToken",
'OAUTH.AUTHORIZATION': "oauth.Authorization",
'OPRS.DEPLOYMENT': "oprs.Deployment",
'OPRS.SYNCTARGETLISTMESSAGE': "oprs.SyncTargetListMessage",
'ORGANIZATION.ORGANIZATION': "organization.Organization",
'OS.BULKINSTALLINFO': "os.BulkInstallInfo",
'OS.CATALOG': "os.Catalog",
'OS.CONFIGURATIONFILE': "os.ConfigurationFile",
'OS.DISTRIBUTION': "os.Distribution",
'OS.INSTALL': "os.Install",
'OS.OSSUPPORT': "os.OsSupport",
'OS.SUPPORTEDVERSION': "os.SupportedVersion",
'OS.TEMPLATEFILE': "os.TemplateFile",
'OS.VALIDINSTALLTARGET': "os.ValidInstallTarget",
'PCI.COPROCESSORCARD': "pci.CoprocessorCard",
'PCI.DEVICE': "pci.Device",
'PCI.LINK': "pci.Link",
'PCI.SWITCH': "pci.Switch",
'PORT.GROUP': "port.Group",
'PORT.MACBINDING': "port.MacBinding",
'PORT.SUBGROUP': "port.SubGroup",
'POWER.CONTROLSTATE': "power.ControlState",
'POWER.POLICY': "power.Policy",
'PROCESSOR.UNIT': "processor.Unit",
'RACK.UNITPERSONALITY': "rack.UnitPersonality",
'RECOMMENDATION.CAPACITYRUNWAY': "recommendation.CapacityRunway",
'RECOMMENDATION.PHYSICALITEM': "recommendation.PhysicalItem",
'RECOVERY.BACKUPCONFIGPOLICY': "recovery.BackupConfigPolicy",
'RECOVERY.BACKUPPROFILE': "recovery.BackupProfile",
'RECOVERY.CONFIGRESULT': "recovery.ConfigResult",
'RECOVERY.CONFIGRESULTENTRY': "recovery.ConfigResultEntry",
'RECOVERY.ONDEMANDBACKUP': "recovery.OnDemandBackup",
'RECOVERY.RESTORE': "recovery.Restore",
'RECOVERY.SCHEDULECONFIGPOLICY': "recovery.ScheduleConfigPolicy",
'RESOURCE.GROUP': "resource.Group",
'RESOURCE.GROUPMEMBER': "resource.GroupMember",
'RESOURCE.LICENSERESOURCECOUNT': "resource.LicenseResourceCount",
'RESOURCE.MEMBERSHIP': "resource.Membership",
'RESOURCE.MEMBERSHIPHOLDER': "resource.MembershipHolder",
'RESOURCE.RESERVATION': "resource.Reservation",
'RESOURCEPOOL.LEASE': "resourcepool.Lease",
'RESOURCEPOOL.LEASERESOURCE': "resourcepool.LeaseResource",
'RESOURCEPOOL.POOL': "resourcepool.Pool",
'RESOURCEPOOL.POOLMEMBER': "resourcepool.PoolMember",
'RESOURCEPOOL.UNIVERSE': "resourcepool.Universe",
'RPROXY.REVERSEPROXY': "rproxy.ReverseProxy",
'SDCARD.POLICY': "sdcard.Policy",
'SDWAN.PROFILE': "sdwan.Profile",
'SDWAN.ROUTERNODE': "sdwan.RouterNode",
'SDWAN.ROUTERPOLICY': "sdwan.RouterPolicy",
'SDWAN.VMANAGEACCOUNTPOLICY': "sdwan.VmanageAccountPolicy",
'SEARCH.SEARCHITEM': "search.SearchItem",
'SEARCH.TAGITEM': "search.TagItem",
'SECURITY.UNIT': "security.Unit",
'SERVER.CONFIGCHANGEDETAIL': "server.ConfigChangeDetail",
'SERVER.CONFIGIMPORT': "server.ConfigImport",
'SERVER.CONFIGRESULT': "server.ConfigResult",
'SERVER.CONFIGRESULTENTRY': "server.ConfigResultEntry",
'SERVER.PROFILE': "server.Profile",
'SERVER.PROFILETEMPLATE': "server.ProfileTemplate",
'SMTP.POLICY': "smtp.Policy",
'SNMP.POLICY': "snmp.Policy",
'SOFTWARE.APPLIANCEDISTRIBUTABLE': "software.ApplianceDistributable",
'SOFTWARE.DOWNLOADHISTORY': "software.DownloadHistory",
'SOFTWARE.HCLMETA': "software.HclMeta",
'SOFTWARE.HYPERFLEXBUNDLEDISTRIBUTABLE': "software.HyperflexBundleDistributable",
'SOFTWARE.HYPERFLEXDISTRIBUTABLE': "software.HyperflexDistributable",
'SOFTWARE.RELEASEMETA': "software.ReleaseMeta",
'SOFTWARE.SOLUTIONDISTRIBUTABLE': "software.SolutionDistributable",
'SOFTWARE.UCSDBUNDLEDISTRIBUTABLE': "software.UcsdBundleDistributable",
'SOFTWARE.UCSDDISTRIBUTABLE': "software.UcsdDistributable",
'SOFTWAREREPOSITORY.AUTHORIZATION': "softwarerepository.Authorization",
'SOFTWAREREPOSITORY.CACHEDIMAGE': "softwarerepository.CachedImage",
'SOFTWAREREPOSITORY.CATALOG': "softwarerepository.Catalog",
'SOFTWAREREPOSITORY.CATEGORYMAPPER': "softwarerepository.CategoryMapper",
'SOFTWAREREPOSITORY.CATEGORYMAPPERMODEL': "softwarerepository.CategoryMapperModel",
'SOFTWAREREPOSITORY.CATEGORYSUPPORTCONSTRAINT': "softwarerepository.CategorySupportConstraint",
'SOFTWAREREPOSITORY.DOWNLOADSPEC': "softwarerepository.DownloadSpec",
'SOFTWAREREPOSITORY.OPERATINGSYSTEMFILE': "softwarerepository.OperatingSystemFile",
'SOFTWAREREPOSITORY.RELEASE': "softwarerepository.Release",
'SOL.POLICY': "sol.Policy",
'SSH.POLICY': "ssh.Policy",
'STORAGE.CONTROLLER': "storage.Controller",
'STORAGE.DISKGROUP': "storage.DiskGroup",
'STORAGE.DISKSLOT': "storage.DiskSlot",
'STORAGE.DRIVEGROUP': "storage.DriveGroup",
'STORAGE.ENCLOSURE': "storage.Enclosure",
'STORAGE.ENCLOSUREDISK': "storage.EnclosureDisk",
'STORAGE.ENCLOSUREDISKSLOTEP': "storage.EnclosureDiskSlotEp",
'STORAGE.FLEXFLASHCONTROLLER': "storage.FlexFlashController",
'STORAGE.FLEXFLASHCONTROLLERPROPS': "storage.FlexFlashControllerProps",
'STORAGE.FLEXFLASHPHYSICALDRIVE': "storage.FlexFlashPhysicalDrive",
'STORAGE.FLEXFLASHVIRTUALDRIVE': "storage.FlexFlashVirtualDrive",
'STORAGE.FLEXUTILCONTROLLER': "storage.FlexUtilController",
'STORAGE.FLEXUTILPHYSICALDRIVE': "storage.FlexUtilPhysicalDrive",
'STORAGE.FLEXUTILVIRTUALDRIVE': "storage.FlexUtilVirtualDrive",
'STORAGE.HITACHIARRAY': "storage.HitachiArray",
'STORAGE.HITACHICONTROLLER': "storage.HitachiController",
'STORAGE.HITACHIDISK': "storage.HitachiDisk",
'STORAGE.HITACHIHOST': "storage.HitachiHost",
'STORAGE.HITACHIHOSTLUN': "storage.HitachiHostLun",
'STORAGE.HITACHIPARITYGROUP': "storage.HitachiParityGroup",
'STORAGE.HITACHIPOOL': "storage.HitachiPool",
'STORAGE.HITACHIPORT': "storage.HitachiPort",
'STORAGE.HITACHIVOLUME': "storage.HitachiVolume",
'STORAGE.HYPERFLEXSTORAGECONTAINER': "storage.HyperFlexStorageContainer",
'STORAGE.HYPERFLEXVOLUME': "storage.HyperFlexVolume",
'STORAGE.ITEM': "storage.Item",
'STORAGE.NETAPPAGGREGATE': "storage.NetAppAggregate",
'STORAGE.NETAPPBASEDISK': "storage.NetAppBaseDisk",
'STORAGE.NETAPPCLUSTER': "storage.NetAppCluster",
'STORAGE.NETAPPETHERNETPORT': "storage.NetAppEthernetPort",
'STORAGE.NETAPPEXPORTPOLICY': "storage.NetAppExportPolicy",
'STORAGE.NETAPPFCINTERFACE': "storage.NetAppFcInterface",
'STORAGE.NETAPPFCPORT': "storage.NetAppFcPort",
'STORAGE.NETAPPINITIATORGROUP': "storage.NetAppInitiatorGroup",
'STORAGE.NETAPPIPINTERFACE': "storage.NetAppIpInterface",
'STORAGE.NETAPPLICENSE': "storage.NetAppLicense",
'STORAGE.NETAPPLUN': "storage.NetAppLun",
'STORAGE.NETAPPLUNMAP': "storage.NetAppLunMap",
'STORAGE.NETAPPNODE': "storage.NetAppNode",
'STORAGE.NETAPPNTPSERVER': "storage.NetAppNtpServer",
'STORAGE.NETAPPSENSOR': "storage.NetAppSensor",
'STORAGE.NETAPPSTORAGEVM': "storage.NetAppStorageVm",
'STORAGE.NETAPPVOLUME': "storage.NetAppVolume",
'STORAGE.NETAPPVOLUMESNAPSHOT': "storage.NetAppVolumeSnapshot",
'STORAGE.PHYSICALDISK': "storage.PhysicalDisk",
'STORAGE.PHYSICALDISKEXTENSION': "storage.PhysicalDiskExtension",
'STORAGE.PHYSICALDISKUSAGE': "storage.PhysicalDiskUsage",
'STORAGE.PUREARRAY': "storage.PureArray",
'STORAGE.PURECONTROLLER': "storage.PureController",
'STORAGE.PUREDISK': "storage.PureDisk",
'STORAGE.PUREHOST': "storage.PureHost",
'STORAGE.PUREHOSTGROUP': "storage.PureHostGroup",
'STORAGE.PUREHOSTLUN': "storage.PureHostLun",
'STORAGE.PUREPORT': "storage.PurePort",
'STORAGE.PUREPROTECTIONGROUP': "storage.PureProtectionGroup",
'STORAGE.PUREPROTECTIONGROUPSNAPSHOT': "storage.PureProtectionGroupSnapshot",
'STORAGE.PUREREPLICATIONSCHEDULE': "storage.PureReplicationSchedule",
'STORAGE.PURESNAPSHOTSCHEDULE': "storage.PureSnapshotSchedule",
'STORAGE.PUREVOLUME': "storage.PureVolume",
'STORAGE.PUREVOLUMESNAPSHOT': "storage.PureVolumeSnapshot",
'STORAGE.SASEXPANDER': "storage.SasExpander",
'STORAGE.SASPORT': "storage.SasPort",
'STORAGE.SPAN': "storage.Span",
'STORAGE.STORAGEPOLICY': "storage.StoragePolicy",
'STORAGE.VDMEMBEREP': "storage.VdMemberEp",
'STORAGE.VIRTUALDRIVE': "storage.VirtualDrive",
'STORAGE.VIRTUALDRIVECONTAINER': "storage.VirtualDriveContainer",
'STORAGE.VIRTUALDRIVEEXTENSION': "storage.VirtualDriveExtension",
'STORAGE.VIRTUALDRIVEIDENTITY': "storage.VirtualDriveIdentity",
'SYSLOG.POLICY': "syslog.Policy",
'TAM.ADVISORYCOUNT': "tam.AdvisoryCount",
'TAM.ADVISORYDEFINITION': "tam.AdvisoryDefinition",
'TAM.ADVISORYINFO': "tam.AdvisoryInfo",
'TAM.ADVISORYINSTANCE': "tam.AdvisoryInstance",
'TAM.SECURITYADVISORY': "tam.SecurityAdvisory",
'TASK.HITACHISCOPEDINVENTORY': "task.HitachiScopedInventory",
'TASK.HYPERFLEXSCOPEDINVENTORY': "task.HyperflexScopedInventory",
'TASK.IWESCOPEDINVENTORY': "task.IweScopedInventory",
'TASK.NETAPPSCOPEDINVENTORY': "task.NetAppScopedInventory",
'TASK.PUBLICCLOUDSCOPEDINVENTORY': "task.PublicCloudScopedInventory",
'TASK.PURESCOPEDINVENTORY': "task.PureScopedInventory",
'TASK.SERVERSCOPEDINVENTORY': "task.ServerScopedInventory",
'TECHSUPPORTMANAGEMENT.COLLECTIONCONTROLPOLICY': "techsupportmanagement.CollectionControlPolicy",
'TECHSUPPORTMANAGEMENT.DOWNLOAD': "techsupportmanagement.Download",
'TECHSUPPORTMANAGEMENT.TECHSUPPORTBUNDLE': "techsupportmanagement.TechSupportBundle",
'TECHSUPPORTMANAGEMENT.TECHSUPPORTSTATUS': "techsupportmanagement.TechSupportStatus",
'TERMINAL.AUDITLOG': "terminal.AuditLog",
'TERRAFORM.EXECUTOR': "terraform.Executor",
'THERMAL.POLICY': "thermal.Policy",
'TOP.SYSTEM': "top.System",
'UCSD.BACKUPINFO': "ucsd.BackupInfo",
'UUIDPOOL.BLOCK': "uuidpool.Block",
'UUIDPOOL.POOL': "uuidpool.Pool",
'UUIDPOOL.POOLMEMBER': "uuidpool.PoolMember",
'UUIDPOOL.UNIVERSE': "uuidpool.Universe",
'UUIDPOOL.UUIDLEASE': "uuidpool.UuidLease",
'VIRTUALIZATION.CISCOHYPERVISORMANAGER': "virtualization.CiscoHypervisorManager",
'VIRTUALIZATION.ESXICONSOLE': "virtualization.EsxiConsole",
'VIRTUALIZATION.HOST': "virtualization.Host",
'VIRTUALIZATION.IWECLUSTER': "virtualization.IweCluster",
'VIRTUALIZATION.IWEDATACENTER': "virtualization.IweDatacenter",
'VIRTUALIZATION.IWEDVUPLINK': "virtualization.IweDvUplink",
'VIRTUALIZATION.IWEDVSWITCH': "virtualization.IweDvswitch",
'VIRTUALIZATION.IWEHOST': "virtualization.IweHost",
'VIRTUALIZATION.IWEHOSTINTERFACE': "virtualization.IweHostInterface",
'VIRTUALIZATION.IWEHOSTVSWITCH': "virtualization.IweHostVswitch",
'VIRTUALIZATION.IWENETWORK': "virtualization.IweNetwork",
'VIRTUALIZATION.IWEVIRTUALDISK': "virtualization.IweVirtualDisk",
'VIRTUALIZATION.IWEVIRTUALMACHINE': "virtualization.IweVirtualMachine",
'VIRTUALIZATION.IWEVIRTUALMACHINENETWORKINTERFACE': "virtualization.IweVirtualMachineNetworkInterface",
'VIRTUALIZATION.VIRTUALDISK': "virtualization.VirtualDisk",
'VIRTUALIZATION.VIRTUALMACHINE': "virtualization.VirtualMachine",
'VIRTUALIZATION.VIRTUALNETWORK': "virtualization.VirtualNetwork",
'VIRTUALIZATION.VMWARECLUSTER': "virtualization.VmwareCluster",
'VIRTUALIZATION.VMWAREDATACENTER': "virtualization.VmwareDatacenter",
'VIRTUALIZATION.VMWAREDATASTORE': "virtualization.VmwareDatastore",
'VIRTUALIZATION.VMWAREDATASTORECLUSTER': "virtualization.VmwareDatastoreCluster",
'VIRTUALIZATION.VMWAREDISTRIBUTEDNETWORK': "virtualization.VmwareDistributedNetwork",
'VIRTUALIZATION.VMWAREDISTRIBUTEDSWITCH': "virtualization.VmwareDistributedSwitch",
'VIRTUALIZATION.VMWAREFOLDER': "virtualization.VmwareFolder",
'VIRTUALIZATION.VMWAREHOST': "virtualization.VmwareHost",
'VIRTUALIZATION.VMWAREKERNELNETWORK': "virtualization.VmwareKernelNetwork",
'VIRTUALIZATION.VMWARENETWORK': "virtualization.VmwareNetwork",
'VIRTUALIZATION.VMWAREPHYSICALNETWORKINTERFACE': "virtualization.VmwarePhysicalNetworkInterface",
'VIRTUALIZATION.VMWAREUPLINKPORT': "virtualization.VmwareUplinkPort",
'VIRTUALIZATION.VMWAREVCENTER': "virtualization.VmwareVcenter",
'VIRTUALIZATION.VMWAREVIRTUALDISK': "virtualization.VmwareVirtualDisk",
'VIRTUALIZATION.VMWAREVIRTUALMACHINE': "virtualization.VmwareVirtualMachine",
'VIRTUALIZATION.VMWAREVIRTUALMACHINESNAPSHOT': "virtualization.VmwareVirtualMachineSnapshot",
'VIRTUALIZATION.VMWAREVIRTUALNETWORKINTERFACE': "virtualization.VmwareVirtualNetworkInterface",
'VIRTUALIZATION.VMWAREVIRTUALSWITCH': "virtualization.VmwareVirtualSwitch",
'VMEDIA.POLICY': "vmedia.Policy",
'VMRC.CONSOLE': "vmrc.Console",
'VNC.CONSOLE': "vnc.Console",
'VNIC.ETHADAPTERPOLICY': "vnic.EthAdapterPolicy",
'VNIC.ETHIF': "vnic.EthIf",
'VNIC.ETHNETWORKPOLICY': "vnic.EthNetworkPolicy",
'VNIC.ETHQOSPOLICY': "vnic.EthQosPolicy",
'VNIC.FCADAPTERPOLICY': "vnic.FcAdapterPolicy",
'VNIC.FCIF': "vnic.FcIf",
'VNIC.FCNETWORKPOLICY': "vnic.FcNetworkPolicy",
'VNIC.FCQOSPOLICY': "vnic.FcQosPolicy",
'VNIC.ISCSIADAPTERPOLICY': "vnic.IscsiAdapterPolicy",
'VNIC.ISCSIBOOTPOLICY': "vnic.IscsiBootPolicy",
'VNIC.ISCSISTATICTARGETPOLICY': "vnic.IscsiStaticTargetPolicy",
'VNIC.LANCONNECTIVITYPOLICY': "vnic.LanConnectivityPolicy",
'VNIC.LCPSTATUS': "vnic.LcpStatus",
'VNIC.SANCONNECTIVITYPOLICY': "vnic.SanConnectivityPolicy",
'VNIC.SCPSTATUS': "vnic.ScpStatus",
'VRF.VRF': "vrf.Vrf",
'WORKFLOW.ANSIBLEBATCHEXECUTOR': "workflow.AnsibleBatchExecutor",
'WORKFLOW.BATCHAPIEXECUTOR': "workflow.BatchApiExecutor",
'WORKFLOW.BUILDTASKMETA': "workflow.BuildTaskMeta",
'WORKFLOW.BUILDTASKMETAOWNER': "workflow.BuildTaskMetaOwner",
'WORKFLOW.CATALOG': "workflow.Catalog",
'WORKFLOW.CUSTOMDATATYPEDEFINITION': "workflow.CustomDataTypeDefinition",
'WORKFLOW.ERRORRESPONSEHANDLER': "workflow.ErrorResponseHandler",
'WORKFLOW.PENDINGDYNAMICWORKFLOWINFO': "workflow.PendingDynamicWorkflowInfo",
'WORKFLOW.ROLLBACKWORKFLOW': "workflow.RollbackWorkflow",
'WORKFLOW.SOLUTIONACTIONDEFINITION': "workflow.SolutionActionDefinition",
'WORKFLOW.SOLUTIONACTIONINSTANCE': "workflow.SolutionActionInstance",
'WORKFLOW.SOLUTIONDEFINITION': "workflow.SolutionDefinition",
'WORKFLOW.SOLUTIONINSTANCE': "workflow.SolutionInstance",
'WORKFLOW.SOLUTIONOUTPUT': "workflow.SolutionOutput",
'WORKFLOW.SSHBATCHEXECUTOR': "workflow.SshBatchExecutor",
'WORKFLOW.TASKDEBUGLOG': "workflow.TaskDebugLog",
'WORKFLOW.TASKDEFINITION': "workflow.TaskDefinition",
'WORKFLOW.TASKINFO': "workflow.TaskInfo",
'WORKFLOW.TASKMETADATA': "workflow.TaskMetadata",
'WORKFLOW.TASKNOTIFICATION': "workflow.TaskNotification",
'WORKFLOW.TEMPLATEEVALUATION': "workflow.TemplateEvaluation",
'WORKFLOW.TEMPLATEFUNCTIONMETA': "workflow.TemplateFunctionMeta",
'WORKFLOW.WORKFLOWDEFINITION': "workflow.WorkflowDefinition",
'WORKFLOW.WORKFLOWINFO': "workflow.WorkflowInfo",
'WORKFLOW.WORKFLOWMETA': "workflow.WorkflowMeta",
'WORKFLOW.WORKFLOWMETADATA': "workflow.WorkflowMetadata",
'WORKFLOW.WORKFLOWNOTIFICATION': "workflow.WorkflowNotification",
},
}
validations = {
('description',): {
'max_length': 1024,
'regex': {
'pattern': r'^$|^[a-zA-Z0-9]+[\x00-\xFF]*$', # noqa: E501
},
},
('name',): {
'regex': {
'pattern': r'^[a-zA-Z0-9_.:-]{1,64}$', # noqa: E501
},
},
('global_hot_spares',): {
'regex': {
'pattern': r'^$|^((\d+\-\d+)|(\d+))(,((\d+\-\d+)|(\d+)))*$', # noqa: E501
},
},
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'class_id': (str,), # noqa: E501
'moid': (str,), # noqa: E501
'selector': (str,), # noqa: E501
'link': (str,), # noqa: E501
'account_moid': (str,), # noqa: E501
'create_time': (datetime,), # noqa: E501
'domain_group_moid': (str,), # noqa: E501
'mod_time': (datetime,), # noqa: E501
'owners': ([str], none_type,), # noqa: E501
'shared_scope': (str,), # noqa: E501
'tags': ([MoTag], none_type,), # noqa: E501
'version_context': (MoVersionContext,), # noqa: E501
'ancestors': ([MoBaseMoRelationship], none_type,), # noqa: E501
'parent': (MoBaseMoRelationship,), # noqa: E501
'permission_resources': ([MoBaseMoRelationship], none_type,), # noqa: E501
'display_names': (DisplayNames,), # noqa: E501
'description': (str,), # noqa: E501
'name': (str,), # noqa: E501
'global_hot_spares': (str,), # noqa: E501
'm2_virtual_drive': (StorageM2VirtualDriveConfig,), # noqa: E501
'raid0_drive': (StorageR0Drive,), # noqa: E501
'unused_disks_state': (str,), # noqa: E501
'use_jbod_for_vd_creation': (bool,), # noqa: E501
'drive_group': ([StorageDriveGroupRelationship], none_type,), # noqa: E501
'organization': (OrganizationOrganizationRelationship,), # noqa: E501
'profiles': ([PolicyAbstractConfigProfileRelationship], none_type,), # noqa: E501
'object_type': (str,), # noqa: E501
}
@cached_property
def discriminator():
lazy_import()
val = {
'mo.MoRef': MoMoRef,
'storage.StoragePolicy': StorageStoragePolicy,
}
if not val:
return None
return {'class_id': val}
attribute_map = {
'class_id': 'ClassId', # noqa: E501
'moid': 'Moid', # noqa: E501
'selector': 'Selector', # noqa: E501
'link': 'link', # noqa: E501
'account_moid': 'AccountMoid', # noqa: E501
'create_time': 'CreateTime', # noqa: E501
'domain_group_moid': 'DomainGroupMoid', # noqa: E501
'mod_time': 'ModTime', # noqa: E501
'owners': 'Owners', # noqa: E501
'shared_scope': 'SharedScope', # noqa: E501
'tags': 'Tags', # noqa: E501
'version_context': 'VersionContext', # noqa: E501
'ancestors': 'Ancestors', # noqa: E501
'parent': 'Parent', # noqa: E501
'permission_resources': 'PermissionResources', # noqa: E501
'display_names': 'DisplayNames', # noqa: E501
'description': 'Description', # noqa: E501
'name': 'Name', # noqa: E501
'global_hot_spares': 'GlobalHotSpares', # noqa: E501
'm2_virtual_drive': 'M2VirtualDrive', # noqa: E501
'raid0_drive': 'Raid0Drive', # noqa: E501
'unused_disks_state': 'UnusedDisksState', # noqa: E501
'use_jbod_for_vd_creation': 'UseJbodForVdCreation', # noqa: E501
'drive_group': 'DriveGroup', # noqa: E501
'organization': 'Organization', # noqa: E501
'profiles': 'Profiles', # noqa: E501
'object_type': 'ObjectType', # noqa: E501
}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
'_composed_instances',
'_var_name_to_model_instances',
'_additional_properties_model_instances',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""StorageStoragePolicyRelationship - a model defined in OpenAPI
Args:
Keyword Args:
class_id (str): The fully-qualified name of the instantiated, concrete type. This property is used as a discriminator to identify the type of the payload when marshaling and unmarshaling data.. defaults to "mo.MoRef", must be one of ["mo.MoRef", ] # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
moid (str): The Moid of the referenced REST resource.. [optional] # noqa: E501
selector (str): An OData $filter expression which describes the REST resource to be referenced. This field may be set instead of 'moid' by clients. 1. If 'moid' is set this field is ignored. 1. If 'selector' is set and 'moid' is empty/absent from the request, Intersight determines the Moid of the resource matching the filter expression and populates it in the MoRef that is part of the object instance being inserted/updated to fulfill the REST request. An error is returned if the filter matches zero or more than one REST resource. An example filter string is: Serial eq '3AA8B7T11'.. [optional] # noqa: E501
link (str): A URL to an instance of the 'mo.MoRef' class.. [optional] # noqa: E501
account_moid (str): The Account ID for this managed object.. [optional] # noqa: E501
create_time (datetime): The time when this managed object was created.. [optional] # noqa: E501
domain_group_moid (str): The DomainGroup ID for this managed object.. [optional] # noqa: E501
mod_time (datetime): The time when this managed object was last modified.. [optional] # noqa: E501
owners ([str], none_type): [optional] # noqa: E501
shared_scope (str): Intersight provides pre-built workflows, tasks and policies to end users through global catalogs. Objects that are made available through global catalogs are said to have a 'shared' ownership. Shared objects are either made globally available to all end users or restricted to end users based on their license entitlement. Users can use this property to differentiate the scope (global or a specific license tier) to which a shared MO belongs.. [optional] # noqa: E501
tags ([MoTag], none_type): [optional] # noqa: E501
version_context (MoVersionContext): [optional] # noqa: E501
ancestors ([MoBaseMoRelationship], none_type): An array of relationships to moBaseMo resources.. [optional] # noqa: E501
parent (MoBaseMoRelationship): [optional] # noqa: E501
permission_resources ([MoBaseMoRelationship], none_type): An array of relationships to moBaseMo resources.. [optional] # noqa: E501
display_names (DisplayNames): [optional] # noqa: E501
description (str): Description of the policy.. [optional] # noqa: E501
name (str): Name of the concrete policy.. [optional] # noqa: E501
global_hot_spares (str): A collection of disks that is to be used as hot spares, globally, for all the RAID groups. Allowed value is a number range separated by a comma or a hyphen.. [optional] # noqa: E501
m2_virtual_drive (StorageM2VirtualDriveConfig): [optional] # noqa: E501
raid0_drive (StorageR0Drive): [optional] # noqa: E501
unused_disks_state (str): State to which disks, not used in this policy, are to be moved. NoChange will not change the drive state. * `NoChange` - Drive state will not be modified by Storage Policy. * `UnconfiguredGood` - Unconfigured good state -ready to be added in a RAID group. * `Jbod` - JBOD state where the disks start showing up to Host OS.. [optional] if omitted the server will use the default value of "NoChange" # noqa: E501
use_jbod_for_vd_creation (bool): Disks in JBOD State are used to create virtual drives.. [optional] # noqa: E501
drive_group ([StorageDriveGroupRelationship], none_type): An array of relationships to storageDriveGroup resources.. [optional] # noqa: E501
organization (OrganizationOrganizationRelationship): [optional] # noqa: E501
profiles ([PolicyAbstractConfigProfileRelationship], none_type): An array of relationships to policyAbstractConfigProfile resources.. [optional] # noqa: E501
object_type (str): The fully-qualified name of the remote type referred by this relationship.. [optional] # noqa: E501
"""
class_id = kwargs.get('class_id', "mo.MoRef")
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
required_args = {
'class_id': class_id,
}
model_args = {}
model_args.update(required_args)
model_args.update(kwargs)
composed_info = validate_get_composed_info(
constant_args, model_args, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
unused_args = composed_info[3]
for var_name, var_value in required_args.items():
setattr(self, var_name, var_value)
for var_name, var_value in kwargs.items():
if var_name in unused_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
not self._additional_properties_model_instances:
# discard variable.
continue
setattr(self, var_name, var_value)
@cached_property
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
        # level we would get an error because the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
lazy_import()
return {
'anyOf': [
],
'allOf': [
],
'oneOf': [
MoMoRef,
StorageStoragePolicy,
none_type,
],
}
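# --- Hedged usage sketch (not part of the generated SDK module) ---------------
# Minimal construction of the relationship model, assuming the intersight
# package is importable so lazy_import() can resolve the referenced classes.
# The Moid below is a made-up placeholder value.
if __name__ == "__main__":
    example = StorageStoragePolicyRelationship(
        class_id="mo.MoRef",
        object_type="storage.StoragePolicy",
        moid="5f1a2b3c4d5e6f7a8b9c0d1e",  # hypothetical Moid
    )
    print(example.moid)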
|
py | 1a4edf28936c5123621e8d1a53f4a6f7022957e2 | from stix2 import MemoryStore, Filter
import json
from itertools import chain
def query_all(srcs, filters):
"""return the union of a query across multiple memorystores"""
return list(chain.from_iterable(
src.query(filters) for src in srcs
))
def get_related(srcs, src_type, rel_type, target_type, reverse=False):
"""build relationship mappings
params:
srcs: memorystores for enterprise, mobile and pre-attack, in an array
src_type: source type for the relationships, e.g "attack-pattern"
rel_type: relationship type for the relationships, e.g "uses"
target_type: target type for the relationship, e.g "intrusion-set"
reverse: build reverse mapping of target to source
"""
relationships = query_all(srcs, [
Filter('type', '=', 'relationship'),
Filter('relationship_type', '=', rel_type),
Filter('revoked', '=', False)
])
# stix_id => [ ids of objects with relationships with stix_id ]
id_to_related = {}
# build the dict
for relationship in relationships:
if (src_type in relationship.source_ref and target_type in relationship.target_ref):
if (relationship.source_ref in id_to_related and not reverse) or (relationship.target_ref in id_to_related and reverse):
if not reverse:
id_to_related[relationship.source_ref].append({
"relationship": relationship,
"id": relationship.target_ref
})
else:
id_to_related[relationship.target_ref].append({
"relationship": relationship,
"id": relationship.source_ref
})
else:
if not reverse:
id_to_related[relationship.source_ref] = [{
"relationship": relationship,
"id": relationship.target_ref
}]
else:
id_to_related[relationship.target_ref] = [{
"relationship": relationship,
"id": relationship.source_ref
}]
# all objects of target type
if not reverse:
targets = query_all(srcs, [
Filter('type', '=', target_type),
Filter('revoked', '=', False)
])
else:
targets = query_all(srcs, [
Filter('type', '=', src_type),
Filter('revoked', '=', False)
])
id_to_target = {}
# build the dict
for target in targets:
id_to_target[target.id] = target
output = {}
for stix_id in id_to_related:
value = []
for related in id_to_related[stix_id]:
if not related["id"] in id_to_target:
                continue # targeting a revoked object
value.append({
"object": json.loads(id_to_target[related["id"]].serialize()),
"relationship": json.loads(related["relationship"].serialize())
})
output[stix_id] = value
return output
# tool:group
def tools_used_by_groups(srcs):
"""returns group_id => {tool, relationship} for each tool used by the
group. srcs should be an array of memorystores for enterprise,
mobile and pre
"""
return get_related(srcs, "intrusion-set", "uses", "tool")
def groups_using_tool(srcs):
"""returns tool_id => {group, relationship} for each group using the tool.
srcs should be an array of memorystores for enterprise, mobile and pre
"""
return get_related(srcs, "intrusion-set", "uses", "tool", reverse=True)
# malware:group
def malware_used_by_groups(srcs):
"""returns group_id => {malware, relationship} for each malware used by
group. srcs should be an array of memorystores for enterprise,
mobile and pre
"""
return get_related(srcs, "intrusion-set", "uses", "malware")
def groups_using_malware(srcs):
"""returns malware_id => {group, relationship} for each group using
the malware. srcs should be an array of memorystores for enterprise,
mobile and pre
"""
return get_related(srcs, "intrusion-set", "uses", "malware", reverse=True)
# technique:group
def techniques_used_by_groups(srcs):
"""returns group_id => {technique, relationship} for each technique used
by the group. srcs should be an array of memorystores for enterprise,
mobile and pre
"""
return get_related(srcs, "intrusion-set", "uses", "attack-pattern")
def groups_using_technique(srcs):
"""returns technique_id => {group, relationship} for each group using the
technique. srcs should be an array of memorystores for enterprise,
mobile and pre
"""
return get_related(srcs, "intrusion-set", "uses", "attack-pattern", reverse=True)
# technique:malware
def techniques_used_by_malware(srcs):
"""return malware => {technique, relationship} for each technique
used by the malware. srcs should be an array of memorystores for
enterprise, mobile and pre
"""
return get_related(srcs, "malware", "uses", "attack-pattern")
def malware_using_technique(srcs):
"""return technique_id => {malware, relationship} for each malware using
the technique. srcs should be an array of memorystores for enterprise,
mobile and pre
"""
return get_related(srcs, "malware", "uses", "attack-pattern", reverse=True)
# technique:tool
def techniques_used_by_tools(srcs):
"""return tool_id => {technique, relationship} for each technique used
by the tool. srcs should be an array of memorystores for enterprise,
mobile and pre
"""
return get_related(srcs, "tool", "uses", "attack-pattern")
def tools_using_technique(srcs):
"""return technique_id => {tool, relationship} for each tool using the
technique. srcs should be an array of memorystores for enterprise,
mobile and pre
"""
return get_related(srcs, "tool", "uses", "attack-pattern", reverse=True)
# technique:mitigation
def mitigation_mitigates_techniques(srcs):
"""return mitigation_id => {technique, relationship} for each technique
mitigated by the mitigation. srcs should be an array of memorystores
for enterprise, mobile and pre
"""
return get_related(srcs, "course-of-action", "mitigates", "attack-pattern", reverse=False)
def technique_mitigated_by_mitigation(srcs):
"""return technique_id => {mitigation, relationship} for each mitigation
of the technique. srcs should be an array of memorystores for
enterprise, mobile and pre
"""
return get_related(srcs, "course-of-action", "mitigates", "attack-pattern", reverse=True)
# technique:technique
def technique_related_to_technique(srcs):
"""return technique_id => {technique, relationship} for each technique
related to the technique. srcs should be an array of memorystores for
enterprise, mobile and pre
"""
return get_related(srcs, "attack-pattern", "related-to", "attack-pattern")
# technique:subtechnique
def subtechniques_of(srcs):
""" return technique_id => {subtechnique, relationship} for each subtechnique
of the technique. srcs should be an array of memorystores for enterprise,
mobile and pre
"""
return get_related(srcs, "attack-pattern", "subtechnique-of", "attack-pattern", reverse=True)
def parent_technique_of(srcs):
""" return subtechnique_id => {technique, relationship} describing the parent technique
of the subtechnique. srcs should be an array of memorystores for enterprise,
mobile and pre
"""
return get_related(srcs, "attack-pattern", "subtechnique-of", "attack-pattern")
def load(url):
"""Load stix data from file"""
src = MemoryStore()
src.load_from_file(url)
return src
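# --- Hedged usage sketch (not part of the original script) --------------------
# The bundle paths below are assumptions; point them at local copies of the
# MITRE ATT&CK STIX bundles. Each bundle is loaded into a MemoryStore and the
# group -> technique mapping is summarised.
if __name__ == "__main__":
    srcs = [load(path) for path in (
        "enterprise-attack.json",  # assumed local file
        "mobile-attack.json",      # assumed local file
        "pre-attack.json",         # assumed local file
    )]
    group_techniques = techniques_used_by_groups(srcs)
    print(f"{len(group_techniques)} groups use at least one technique")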
|
py | 1a4edf663056aa16540bd83f1b5d03ae38ddeb8c | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-07-24 11:20
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Image',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(upload_to='')),
('image_name', models.CharField(max_length=30)),
('image_caption', models.CharField(blank=True, max_length=30)),
('comments', models.TextField(blank=True, max_length=50)),
('likes', models.IntegerField()),
],
),
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('profile_photo', models.ImageField(upload_to='profile/')),
('bio', models.TextField(blank=True, max_length=50)),
('username', models.CharField(max_length=30)),
('user', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='image',
name='profile',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='instagram.Profile'),
),
]
|
py | 1a4ee09e4a254f3cc6fcb06d5b060af299da164e | # resources/srx/addrbook_finder.py
import netaddr
#from .zone import Zone
#from .addrbook import ZoneAddrBook
class AddrBookFinderResults(object):
"""
Helper-class to hold the results of a :ZoneAddrFind.find(): invocation
"""
def __init__(self, ab, find, results):
self._ab = ab
self._find = find
self._results = results
self.sets = []
@property
def lpm(self):
"""
The longest-prefix-matching address is the last one in the results
list. This fact is a result of the :ZoneAddrFinder.find(): sorted call
"""
return self._results[-1][0]
@property
def items(self):
"""
Return a list of the matching address items and sets
"""
return self.addrs + self.sets
@property
def addrs(self):
"""
Return a list of the matching address items
"""
# return a list of names
return [x[0] for x in self._results]
@property
def matching(self):
"""
Returns the string value of the original querried address presented to
the find() method
"""
return self._find
def __repr__(self):
"""
Provides the matching value and the zone name associated with this
results
"""
return "%s(%s in %s)" % (
self.__class__.__name__, self._find, self._ab.name)
class AddrBookFinder(object):
# -------------------------------------------------------------------------
# CONSTRUCTOR
# -------------------------------------------------------------------------
def __init__(self, addr_book):
"""
addr_book
Either a ZoneAddrBook or SharedAddrBook instance
"""
self._ab = addr_book
self._index = None
def __repr__(self):
return "AddrBookFinder(%s)" % self._ab.name
def compile(self):
"""
Compile a list of netaddr objects against the catalog of address items
"""
# create a tuple of (addr-name, netaddr) for each of the items in the
# address-book
self._index = [(name, netaddr.IPNetwork(addr['ip_prefix']))
for name, addr in self._ab.addr.catalog.items()]
def find(self, addr, sets=True):
"""
Given an ip or ip_prefix locate the matching address book address
and address-set items.
"""
# if the caller hasn't explicity invoked :compile(): to create the
# netaddr objects, then do that now.
if self._index is None:
self.compile()
# convert the provided :addr: into a netaddr object and then
# to a subnet match to find address entries. the matching
# values will be sorted with longest prefix matching to be
# last in the list
ip = netaddr.IPNetwork(addr).ip
# is ip in the subnet?
in_net = lambda i: ip & i[1].netmask == i[1].network
# used to sort by prefix-length
by_pflen = lambda a, b: cmp(a[1].prefixlen, b[1].prefixlen)
r = sorted(
filter(
in_net,
self._index),
cmp=by_pflen) # find/sort
if r is None:
return None
# now that we have some matching entries, we should find which
# address-set items uses the items
results = AddrBookFinderResults(self._ab, addr, r)
if sets is True:
results.sets = self.find_sets(results)
# return the results object
return results
def find_sets(self, r):
"""
Given a :AddrBookFinderResults: object, which contains the list of
matching address items, locate the list of address-set objects that
use those items
"""
catalog = self._ab.set.catalog
in_addr = lambda i: i in v['addr_list']
sets = [k for k, v in catalog.items() if filter(in_addr, r.addrs)]
in_set = lambda i: i in v['set_list']
subsets = [k for k, v in catalog.items() if filter(in_set, sets)]
return sets + subsets
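# --- Hedged usage sketch ------------------------------------------------------
# `book` stands in for a ZoneAddrBook/SharedAddrBook instance obtained from the
# surrounding library; it is not constructed here, so the sketch is left as
# comments.
#
#   finder = AddrBookFinder(book)
#   finder.compile()                     # optional; find() compiles lazily
#   result = finder.find('192.168.1.10')
#   result.lpm                           # longest-prefix-matching address name
#   result.items                         # matching addresses plus address-sets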
|
py | 1a4ee0d905a10d6e03cb980ae0cf8547cfdc2406 | # dataset settings
dataset_type = "PascalContextDataset"
data_root = "data/VOCdevkit/VOC2010/"
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True
)
img_scale = (512, 512)
crop_size = (512, 512)
max_ratio = 8
train_pipeline = [
dict(type="LoadImageFromFile"),
dict(type="LoadAnnotations"),
dict(type="Resize", img_scale=img_scale, ratio_range=(0.5, 2.0)),
dict(type="RandomCrop", crop_size=crop_size, cat_max_ratio=0.75),
dict(type="RandomFlip", prob=0.5),
dict(type="PhotoMetricDistortion"),
dict(type="Normalize", **img_norm_cfg),
dict(type="Pad", size=crop_size, pad_val=0, seg_pad_val=255),
dict(type="DefaultFormatBundle"),
dict(type="Collect", keys=["img", "gt_semantic_seg"]),
]
val_pipeline = [
dict(type="LoadImageFromFile"),
dict(
type="MultiScaleFlipAug",
img_scale=(512 * max_ratio, 512),
flip=False,
transforms=[
dict(type="Resize", keep_ratio=True),
dict(type="RandomFlip"),
dict(type="Normalize", **img_norm_cfg),
dict(type="ImageToTensor", keys=["img"]),
dict(type="Collect", keys=["img"]),
],
),
]
test_pipeline = [
dict(type="LoadImageFromFile"),
dict(
type="MultiScaleFlipAug",
img_scale=(512 * max_ratio, 512),
flip=False,
transforms=[
dict(type="Resize", keep_ratio=True),
dict(type="RandomFlip"),
dict(type="Normalize", **img_norm_cfg),
dict(type="ImageToTensor", keys=["img"]),
dict(type="Collect", keys=["img"]),
],
),
]
data = dict(
samples_per_gpu=4,
workers_per_gpu=4,
train=dict(
type=dataset_type,
data_root=data_root,
img_dir="JPEGImages",
ann_dir="SegmentationClassContext",
split="ImageSets/SegmentationContext/train.txt",
pipeline=train_pipeline,
),
val=dict(
type=dataset_type,
data_root=data_root,
img_dir="JPEGImages",
ann_dir="SegmentationClassContext",
split="ImageSets/SegmentationContext/val.txt",
pipeline=val_pipeline,
),
test=dict(
type=dataset_type,
data_root=data_root,
img_dir="JPEGImages",
ann_dir="SegmentationClassContext",
split="ImageSets/SegmentationContext/val.txt",
pipeline=test_pipeline,
),
)
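# Hedged usage note: in MMSegmentation this dataset config is normally pulled in
# from a model config via `_base_` and launched with the repository's training
# script, e.g. `python tools/train.py <model config that inherits this file>`.
# The paths above assume PASCAL-Context has been prepared under
# data/VOCdevkit/VOC2010/ as described in the MMSegmentation dataset docs.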
|
py | 1a4ee0f701305ba18e9c737f4c236dadcf014a0c | # encoding: utf8
from __future__ import unicode_literals
from django.db import models, migrations
import autoslug.fields
import cities_light.models
class Migration(migrations.Migration):
dependencies = [
('cities_light', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='City',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name_ascii', models.CharField(db_index=True, max_length=200, blank=True)),
('slug', autoslug.fields.AutoSlugField(editable=False)),
('geoname_id', models.IntegerField(unique=True, null=True, blank=True)),
('alternate_names', models.TextField(default='', null=True, blank=True)),
('name', models.CharField(max_length=200, db_index=True)),
('display_name', models.CharField(max_length=200)),
('search_names', cities_light.models.ToSearchTextField(default='', max_length=4000, db_index=True, blank=True)),
('latitude', models.DecimalField(null=True, max_digits=8, decimal_places=5, blank=True)),
('longitude', models.DecimalField(null=True, max_digits=8, decimal_places=5, blank=True)),
('region', models.ForeignKey(to_field='id', blank=True, to='cities_light.Region', null=True)),
('country', models.ForeignKey(to='cities_light.Country', to_field='id')),
('population', models.BigIntegerField(db_index=True, null=True, blank=True)),
('feature_code', models.CharField(db_index=True, max_length=10, null=True, blank=True)),
],
options={
'ordering': ['name'],
'unique_together': set([('region', 'name'), ('region', 'slug')]),
'abstract': False,
'verbose_name_plural': 'cities',
},
bases=(models.Model,),
),
]
|
py | 1a4ee15095fdb5f5e06528bf418ea03f0cdf0e0a | from django.shortcuts import render
from .models import Story
from home.views import cart_size, get_valid_user_data
# Create your views here.
def show_story(request):
data = {
'cart_size' : cart_size(request),
'valid_user': get_valid_user_data(request),
'title': Story.objects.get(pk=1),
'section_1': Story.objects.get(pk=2),
'section_2': Story.objects.get(pk=3),
'section_3': Story.objects.get(pk=4),
'section_4': Story.objects.get(pk=5),
'section_5': Story.objects.get(pk=6),
'section_6': Story.objects.get(pk=7),
}
return render(request, "story.html", data)
|
py | 1a4ee2293ae4e03d9166ccddd4779c6fe5a3470c |
from flask import Flask,request, url_for, redirect, render_template
import pickle
import numpy as np
app = Flask(__name__)
test=pickle.load(open('test1.pkl','rb'))
@app.route('/')
def hello_world():
return render_template("t.html")
@app.route('/predict',methods=['POST','GET'])
def predict():
int_features=[int(x) for x in request.form.values()]
final=[np.array(int_features)]
print(int_features)
print(final)
prediction=test.predict(final)
if prediction == 0:
return render_template('t.html',pred="\t\t\t\t\tProbability of accident severity is : Minor")
else:
return render_template('t.html',pred="\t\t\t\t\tProbability of accident severity is : Major")
@app.route('/Map')
def map1():
return render_template("map.html")
@app.route('/Graphs')
def graph():
return render_template("graph.html")
@app.route('/Map1')
def map2():
return render_template("ur.html")
@app.route('/Map2')
def map3():
return render_template("bs.html")
@app.route('/Map3')
def map4():
return render_template("hm.html")
@app.route('/Pie')
def pie():
return render_template("pie.html")
if __name__=="__main__":
app.run()
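# Hedged usage note: running `python <this file>` starts Flask's development
# server on http://127.0.0.1:5000/ by default. The /predict route reads every
# field of the submitted form in order and feeds it to the model pickled in
# test1.pkl, so the field names and their order are assumptions of the t.html
# template rather than of this script.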
|
py | 1a4ee25fcd3f8a66d9e4a41df96bfdac2275a1aa | #############################################################
## ##
## Copyright (c) 2003-2017 by The University of Queensland ##
## Centre for Geoscience Computing ##
## http://earth.uq.edu.au/centre-geoscience-computing ##
## ##
## Primary Business: Brisbane, Queensland, Australia ##
## Licensed under the Open Software License version 3.0 ##
## http://www.apache.org/licenses/LICENSE-2.0 ##
## ##
#############################################################
"""
Defines the L{ImageFormat} class and functions for mapping a
file name extension to an associated C{ImageFormat} object.
"""
import os
import os.path
class ImageFormat(object):
"""
Class representing an image format.
"""
def __init__(self, name):
"""
Constructor.
@type name: str
@param name: Name assigned to this image format.
"""
self.name = name
def getName(self):
"""
Returns the name associated with this image format.
@rtype: str
"""
return self.name
def __str__(self):
return self.getName()
PNG = ImageFormat("PNG")
PNM = ImageFormat("PNM")
_nameFormatDict = dict()
_nameFormatDict[str.upper(str(PNG))] = PNG
_nameFormatDict[str.upper(str(PNM))] = PNM
def _getDelimitedFormatNameString():
return ", ".join(map(str,list(_nameFormatDict.keys())))
def getFormatFromName(formatName, ext=None):
"""
Returns the C{{ImageFormat}} object which corresponds
to a specified image-format name (string).
@type formatName: str
@param formatName: The name of an image format, one of: {0:s}
@type ext: str
@param ext: File name extension for error message string.
""".format(_getDelimitedFormatNameString())
if str.upper(formatName) in _nameFormatDict:
return _nameFormatDict[str.upper(formatName)]
raise \
ValueError(
(
"No image format found which matched extension '{0:s}';" +
" valid image file formats are: {1:s}"
).format(ext, _getDelimitedFormatNameString())
)
def getFormatFromExtension(fileName):
"""
Returns the C{ImageFormat} object which corresponds
to a specified file name. Uses the C{fileName} extension
to try and deduce the corresponding C{ImageFormat} object.
@type fileName: str
@param fileName: A file name.
@rtype: C{ImageFormat}
@return: An C{ImageFormat} object corresponding to the
specified file name (and corresponding file name extension).
"""
(base, ext) = os.path.splitext(fileName)
if (len(ext) > 0):
formatName = str.lstrip(ext, ".")
else:
raise ValueError(
"Could not determine image format from file "
+
"name " + fileName + ", no extension."
)
return getFormatFromName(formatName, ext)
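# --- Hedged usage sketch ------------------------------------------------------
# Mapping file names to their ImageFormat objects; the file names are arbitrary
# examples.
if __name__ == "__main__":
    print(getFormatFromExtension("snapshot.png"))   # -> PNG
    print(getFormatFromExtension("frame0001.pnm"))  # -> PNM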
|
py | 1a4ee28baef5afe0460398e82cc5a4f7774f5e87 | from InquirerPy.utils import color_print
import sys, psutil, time, cursor, valclient, ctypes, traceback, os, subprocess
from .utilities.killable_thread import Thread
from .utilities.config.app_config import Config
from .utilities.config.modify_config import Config_Editor
from .utilities.processes import Processes
from .utilities.rcs import Riot_Client_Services
from .utilities.systray import Systray
from .utilities.version_checker import Checker
from .utilities.logging import Logger
from .utilities.program_data import Program_Data
from .localization.localization import Localizer
from .presence.presence import Presence
from .webserver import server
# weird console window management stuff
kernel32 = ctypes.WinDLL('kernel32')
user32 = ctypes.WinDLL('user32')
hWnd = kernel32.GetConsoleWindow()
kernel32.SetConsoleMode(kernel32.GetStdHandle(-10), (0x4|0x80|0x20|0x2|0x10|0x1|0x00|0x100)) #disable inputs to console
kernel32.SetConsoleMode(kernel32.GetStdHandle(-11), 7) #allow for ANSI sequences
class Startup:
def __init__(self):
if not Processes.is_program_already_running():
cursor.hide()
Logger.create_logger()
Program_Data.update_file_location()
self.config = Config.fetch_config()
if "locale" in self.config.keys():
if self.config["locale"][0] == "":
config = Localizer.prompt_locale(self.config)
Config.modify_config(config)
Systray.restart()
self.installs = Program_Data.fetch_installs()
Localizer.set_locale(self.config)
self.config = Config.check_config()
Localizer.config = self.config
Logger.debug(self.config)
self.client = None
if Localizer.get_config_value("region",0) == "": # try to autodetect region on first launch
self.check_region()
ctypes.windll.kernel32.SetConsoleTitleW(f"valorant-rpc {Localizer.get_config_value('version')}")
color_print([("Red", Localizer.get_localized_text("prints","startup","wait_for_rpc"))])
try:
self.presence = Presence(self.config)
Startup.clear_line()
except Exception as e:
traceback.print_exc()
color_print([("Cyan",f"{Localizer.get_localized_text('prints','startup','discord_not_detected')} ({e})")])
if not Processes.are_processes_running():
color_print([("Red", Localizer.get_localized_text("prints","startup","starting_valorant"))])
self.start_game()
os._exit(1)
self.run()
def run(self):
self.presence.update_presence("startup")
Checker.check_version(self.config)
if not Processes.are_processes_running():
color_print([("Red", Localizer.get_localized_text("prints","startup","starting_valorant"))])
self.start_game()
self.setup_client()
self.systray = Systray(self.client,self.config)
self.dispatch_systray()
if self.client.fetch_presence() is None:
self.wait_for_presence()
self.check_run_cli()
self.dispatch_presence()
self.dispatch_webserver()
color_print([("LimeGreen",f"{Localizer.get_localized_text('prints','startup','startup_successful')}\n")])
time.sleep(5)
user32.ShowWindow(hWnd, 0) #hide window
self.systray_thread.join()
self.presence_thread.stop()
def dispatch_webserver(self):
server.client = self.client
server.config = self.config
self.webserver_thread = Thread(target=server.start,daemon=True)
self.webserver_thread.start()
def dispatch_presence(self):
self.presence_thread = Thread(target=self.presence.main_loop,daemon=True)
self.presence_thread.start()
def dispatch_systray(self):
self.systray_thread = Thread(target=self.systray.run)
self.systray_thread.start()
def setup_client(self):
self.client = valclient.Client(region=Localizer.get_config_value("region",0))
self.client.activate()
self.presence.client = self.client
def wait_for_presence(self):
presence_timeout = Localizer.get_config_value("startup","presence_timeout")
presence_timer = 0
print()
while self.client.fetch_presence() is None:
Startup.clear_line()
color_print([("Cyan", "["),("White",f"{presence_timer}"),("Cyan", f"] {Localizer.get_localized_text('prints','startup','waiting_for_presence')}")])
presence_timer += 1
if presence_timer >= presence_timeout:
self.systray.exit()
os._exit(1)
time.sleep(1)
Startup.clear_line()
Startup.clear_line()
def start_game(self):
path = Riot_Client_Services.get_rcs_path()
launch_timeout = Localizer.get_config_value("startup","game_launch_timeout")
launch_timer = 0
psutil.subprocess.Popen([path, "--launch-product=valorant", "--launch-patchline=live"])
print()
while not Processes.are_processes_running():
Startup.clear_line()
color_print([("Cyan", "["),("White",f"{launch_timer}"),("Cyan", f"] {Localizer.get_localized_text('prints','startup','waiting_for_valorant')}")])
launch_timer += 1
if launch_timer >= launch_timeout:
self.systray.exit()
os._exit(1)
time.sleep(1)
Startup.clear_line()
def check_run_cli(self):
if Localizer.get_config_value("startup","auto_launch_skincli"):
skincli_path = self.installs.get("valorant-skin-cli")
if skincli_path is not None:
subprocess.Popen(f"start {skincli_path}", shell=True)
def check_region(self):
color_print([("Red bold",Localizer.get_localized_text("prints","startup","autodetect_region"))])
client = valclient.Client(region="na")
client.activate()
sessions = client.riotclient_session_fetch_sessions()
for _,session in sessions.items():
if session["productId"] == "valorant":
launch_args = session["launchConfiguration"]["arguments"]
for arg in launch_args:
if "-ares-deployment" in arg:
region = arg.replace("-ares-deployment=","")
self.config[Localizer.get_config_key("region")][0] = region
Config.modify_config(self.config)
color_print([("LimeGreen",f"{Localizer.get_localized_text('prints','startup','autodetected_region')} {Localizer.get_config_value('region',0)}")])
time.sleep(5)
Systray.restart()
@staticmethod
def clear_line():
sys.stdout.write("\033[F") # move cursor up one line
sys.stdout.write("\r\033[K") |
py | 1a4ee2bdbbbe9c5c6b6c6dde9718ad62056554f7 | # Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import struct
import dns.exception
import dns.immutable
import dns.rdata
_pows = tuple(10**i for i in range(0, 11))
# default values are in centimeters
_default_size = 100.0
_default_hprec = 1000000.0
_default_vprec = 1000.0
# for use by from_wire()
_MAX_LATITUDE = 0x80000000 + 90 * 3600000
_MIN_LATITUDE = 0x80000000 - 90 * 3600000
_MAX_LONGITUDE = 0x80000000 + 180 * 3600000
_MIN_LONGITUDE = 0x80000000 - 180 * 3600000
def _exponent_of(what, desc):
if what == 0:
return 0
exp = None
for (i, pow) in enumerate(_pows):
if what < pow:
exp = i - 1
break
if exp is None or exp < 0:
raise dns.exception.SyntaxError("%s value out of bounds" % desc)
return exp
def _float_to_tuple(what):
if what < 0:
sign = -1
what *= -1
else:
sign = 1
what = round(what * 3600000) # pylint: disable=round-builtin
degrees = int(what // 3600000)
what -= degrees * 3600000
minutes = int(what // 60000)
what -= minutes * 60000
seconds = int(what // 1000)
what -= int(seconds * 1000)
what = int(what)
return (degrees, minutes, seconds, what, sign)
def _tuple_to_float(what):
value = float(what[0])
value += float(what[1]) / 60.0
value += float(what[2]) / 3600.0
value += float(what[3]) / 3600000.0
return float(what[4]) * value
def _encode_size(what, desc):
what = int(what)
exponent = _exponent_of(what, desc) & 0xF
base = what // pow(10, exponent) & 0xF
return base * 16 + exponent
def _decode_size(what, desc):
exponent = what & 0x0F
if exponent > 9:
raise dns.exception.FormError("bad %s exponent" % desc)
base = (what & 0xF0) >> 4
if base > 9:
raise dns.exception.FormError("bad %s base" % desc)
return base * pow(10, exponent)
def _check_coordinate_list(value, low, high):
if value[0] < low or value[0] > high:
raise ValueError(f'not in range [{low}, {high}]')
if value[1] < 0 or value[1] > 59:
raise ValueError('bad minutes value')
if value[2] < 0 or value[2] > 59:
raise ValueError('bad seconds value')
if value[3] < 0 or value[3] > 999:
raise ValueError('bad milliseconds value')
if value[4] != 1 and value[4] != -1:
raise ValueError('bad hemisphere value')
@dns.immutable.immutable
class LOC(dns.rdata.Rdata):
"""LOC record"""
# see: RFC 1876
__slots__ = ['latitude', 'longitude', 'altitude', 'size',
'horizontal_precision', 'vertical_precision']
def __init__(self, rdclass, rdtype, latitude, longitude, altitude,
size=_default_size, hprec=_default_hprec,
vprec=_default_vprec):
"""Initialize a LOC record instance.
The parameters I{latitude} and I{longitude} may be either a 4-tuple
of integers specifying (degrees, minutes, seconds, milliseconds),
or they may be floating point values specifying the number of
degrees. The other parameters are floats. Size, horizontal precision,
and vertical precision are specified in centimeters."""
super().__init__(rdclass, rdtype)
if isinstance(latitude, int):
latitude = float(latitude)
if isinstance(latitude, float):
latitude = _float_to_tuple(latitude)
_check_coordinate_list(latitude, -90, 90)
self.latitude = tuple(latitude)
if isinstance(longitude, int):
longitude = float(longitude)
if isinstance(longitude, float):
longitude = _float_to_tuple(longitude)
_check_coordinate_list(longitude, -180, 180)
self.longitude = tuple(longitude)
self.altitude = float(altitude)
self.size = float(size)
self.horizontal_precision = float(hprec)
self.vertical_precision = float(vprec)
def to_text(self, origin=None, relativize=True, **kw):
if self.latitude[4] > 0:
lat_hemisphere = 'N'
else:
lat_hemisphere = 'S'
if self.longitude[4] > 0:
long_hemisphere = 'E'
else:
long_hemisphere = 'W'
text = "%d %d %d.%03d %s %d %d %d.%03d %s %0.2fm" % (
self.latitude[0], self.latitude[1],
self.latitude[2], self.latitude[3], lat_hemisphere,
self.longitude[0], self.longitude[1], self.longitude[2],
self.longitude[3], long_hemisphere,
self.altitude / 100.0
)
# do not print default values
if self.size != _default_size or \
self.horizontal_precision != _default_hprec or \
self.vertical_precision != _default_vprec:
text += " {:0.2f}m {:0.2f}m {:0.2f}m".format(
self.size / 100.0, self.horizontal_precision / 100.0,
self.vertical_precision / 100.0
)
return text
@classmethod
def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True,
relativize_to=None):
latitude = [0, 0, 0, 0, 1]
longitude = [0, 0, 0, 0, 1]
size = _default_size
hprec = _default_hprec
vprec = _default_vprec
latitude[0] = tok.get_int()
t = tok.get_string()
if t.isdigit():
latitude[1] = int(t)
t = tok.get_string()
if '.' in t:
(seconds, milliseconds) = t.split('.')
if not seconds.isdigit():
raise dns.exception.SyntaxError(
'bad latitude seconds value')
latitude[2] = int(seconds)
l = len(milliseconds)
if l == 0 or l > 3 or not milliseconds.isdigit():
raise dns.exception.SyntaxError(
'bad latitude milliseconds value')
if l == 1:
m = 100
elif l == 2:
m = 10
else:
m = 1
latitude[3] = m * int(milliseconds)
t = tok.get_string()
elif t.isdigit():
latitude[2] = int(t)
t = tok.get_string()
if t == 'S':
latitude[4] = -1
elif t != 'N':
raise dns.exception.SyntaxError('bad latitude hemisphere value')
longitude[0] = tok.get_int()
t = tok.get_string()
if t.isdigit():
longitude[1] = int(t)
t = tok.get_string()
if '.' in t:
(seconds, milliseconds) = t.split('.')
if not seconds.isdigit():
raise dns.exception.SyntaxError(
'bad longitude seconds value')
longitude[2] = int(seconds)
l = len(milliseconds)
if l == 0 or l > 3 or not milliseconds.isdigit():
raise dns.exception.SyntaxError(
'bad longitude milliseconds value')
if l == 1:
m = 100
elif l == 2:
m = 10
else:
m = 1
longitude[3] = m * int(milliseconds)
t = tok.get_string()
elif t.isdigit():
longitude[2] = int(t)
t = tok.get_string()
if t == 'W':
longitude[4] = -1
elif t != 'E':
raise dns.exception.SyntaxError('bad longitude hemisphere value')
t = tok.get_string()
if t[-1] == 'm':
t = t[0: -1]
altitude = float(t) * 100.0 # m -> cm
tokens = tok.get_remaining(max_tokens=3)
if len(tokens) >= 1:
value = tokens[0].unescape().value
if value[-1] == 'm':
value = value[0: -1]
size = float(value) * 100.0 # m -> cm
if len(tokens) >= 2:
value = tokens[1].unescape().value
if value[-1] == 'm':
value = value[0: -1]
hprec = float(value) * 100.0 # m -> cm
if len(tokens) >= 3:
value = tokens[2].unescape().value
if value[-1] == 'm':
value = value[0: -1]
vprec = float(value) * 100.0 # m -> cm
# Try encoding these now so we raise if they are bad
_encode_size(size, "size")
_encode_size(hprec, "horizontal precision")
_encode_size(vprec, "vertical precision")
return cls(rdclass, rdtype, latitude, longitude, altitude,
size, hprec, vprec)
def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
milliseconds = (self.latitude[0] * 3600000 +
self.latitude[1] * 60000 +
self.latitude[2] * 1000 +
self.latitude[3]) * self.latitude[4]
latitude = 0x80000000 + milliseconds
milliseconds = (self.longitude[0] * 3600000 +
self.longitude[1] * 60000 +
self.longitude[2] * 1000 +
self.longitude[3]) * self.longitude[4]
longitude = 0x80000000 + milliseconds
altitude = int(self.altitude) + 10000000
size = _encode_size(self.size, "size")
hprec = _encode_size(self.horizontal_precision, "horizontal precision")
vprec = _encode_size(self.vertical_precision, "vertical precision")
wire = struct.pack("!BBBBIII", 0, size, hprec, vprec, latitude,
longitude, altitude)
file.write(wire)
@classmethod
def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
(version, size, hprec, vprec, latitude, longitude, altitude) = \
parser.get_struct("!BBBBIII")
if version != 0:
raise dns.exception.FormError("LOC version not zero")
if latitude < _MIN_LATITUDE or latitude > _MAX_LATITUDE:
raise dns.exception.FormError("bad latitude")
if latitude > 0x80000000:
latitude = (latitude - 0x80000000) / 3600000
else:
latitude = -1 * (0x80000000 - latitude) / 3600000
if longitude < _MIN_LONGITUDE or longitude > _MAX_LONGITUDE:
raise dns.exception.FormError("bad longitude")
if longitude > 0x80000000:
longitude = (longitude - 0x80000000) / 3600000
else:
longitude = -1 * (0x80000000 - longitude) / 3600000
altitude = float(altitude) - 10000000.0
size = _decode_size(size, "size")
hprec = _decode_size(hprec, "horizontal precision")
vprec = _decode_size(vprec, "vertical precision")
return cls(rdclass, rdtype, latitude, longitude, altitude,
size, hprec, vprec)
@property
def float_latitude(self):
"latitude as a floating point value"
return _tuple_to_float(self.latitude)
@property
def float_longitude(self):
"longitude as a floating point value"
return _tuple_to_float(self.longitude)
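# --- Hedged usage sketch (not part of the dnspython module) -------------------
# Building a LOC rdata directly from floating point coordinates and printing
# its presentation form. The coordinates are arbitrary example values; the
# altitude is stored in centimeters (to_text() above divides by 100).
if __name__ == "__main__":
    import dns.rdataclass
    import dns.rdatatype

    loc = LOC(dns.rdataclass.IN, dns.rdatatype.LOC,
              latitude=37.7749, longitude=-122.4194,
              altitude=1600)  # 16.00 m
    print(loc.to_text())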
|
py | 1a4ee54abafbf9efecd45766d5fd2dca856e2139 | from backend.blockchain.block import Block
from backend.wallet.transactions import Transaction
from backend.wallet.wallet import Wallet
from backend.config import MINING_REWARD_INPUT
class Blockchain:
def __init__(self):
self.chain = [Block.genesis()]
def add_block(self, data):
self.chain.append(Block.mine_block(self.chain[-1], data))
def __repr__(self):
return f'Blockchain: {self.chain}'
def replace_chain(self, chain):
if len(chain) <= len(self.chain):
raise Exception('Cannot replace. The incoming chain must be longer')
try:
Blockchain.is_valid_chain(chain)
except Exception as e:
raise Exception(f'Cannot replace. The incoming chain is invalid: {e}')
self.chain = chain
def to_json(self):
return list(map(lambda block: block.to_json(), self.chain))
@staticmethod
def from_json(chain_json):
blockchain = Blockchain()
blockchain.chain = list(
map(lambda block_json: Block.from_json(block_json), chain_json)
)
return blockchain
@staticmethod
def is_valid_chain(chain):
if chain[0] != Block.genesis():
raise Exception('The genesis block must be valid')
for i in range(1, len(chain)):
block = chain[i]
last_block = chain[i-1]
Block.is_valid_block(last_block, block)
Blockchain.is_valid_transaction_chain(chain)
@staticmethod
def is_valid_transaction_chain(chain):
transaction_ids = set()
for i in range(len(chain)):
block = chain[i]
has_mining_reward = False
for transaction_json in block.data:
transaction = Transaction.from_json(transaction_json)
if transaction.id in transaction_ids:
raise Exception(f'Transaction {transaction.id} is not unique')
transaction_ids.add(transaction.id)
if transaction.input == MINING_REWARD_INPUT:
if has_mining_reward:
raise Exception(
'There can be only one mining reward per block. '\
f'Check block with hash: {block.hash}'
)
has_mining_reward = True
else:
historic_blockchain = Blockchain()
historic_blockchain.chain = chain[0:i]
historic_balance = Wallet.calculate_balance(
historic_blockchain,
transaction.input['address']
)
if historic_balance != transaction.input['amount']:
raise Exception(f'Transaction {transaction.id} has an invalid input amount')
transaction.is_valid_transaction(transaction)
def main():
blockchain = Blockchain()
blockchain.add_block('one')
blockchain.add_block('two')
print(blockchain)
    print(f'blockchain.py __name__: {__name__}')
if __name__ == '__main__':
main()
|
py | 1a4ee5b77d68a621a69eea8b1a6064272fae7fbd | # coding: utf8
# !/usr/bin/env python
import hunspell
import pandas as pd
from math import log
import matplotlib.pyplot as plt
import seaborn as sns
import codecs
import pickle
import re
import unicodedata
from ast import literal_eval
def getScriptPath():
return "/home/alexis/Documents/EPFL/MS3/Project/python"
def getIdxOfWord(ws, w):
"""Return index of word in sentence"""
try:
wIdx = ws.index(w)
except:
wIdx = -1
return wIdx
def stem(stemmer, word):
"""
Computes a possible stem for a given word
:param word: string
The word to be stemmed
:return: string
The last possible stem in list, or the word itself if no stem found
"""
wstem = stemmer.stem(word)
if len(wstem) > 0: # and wstem[-1] not in stopwords
return unicode(wstem[-1], 'utf8')
else:
return word
def storeCount(array, key):
"""Increments value for key in store by one, or sets to 1 if key nonexistent."""
if key in array:
array[key] += 1
else:
array[key] = 1
def storeIncrement(store, key, incr):
"""
Increment value for key in store by given increment.
:param incr: float
"""
if key in store:
store[key] += incr
else:
store[key] = incr
def idxForMaxKeyValPair(array):
maxV = array[0][1]
i = 0
maxVIdx = 0
for k, v in array:
if v > maxV:
maxV = v
maxVIdx = i
i += 1
return maxVIdx
def keyForMaxValue(_dict):
maxK = ''
maxV = 0
for k, v in _dict.iteritems():
if v > maxV:
maxV = v
maxK = k
return maxK
def sortUsingList(tosort, reflist):
"""
Sorts tosort by order of reflist.
Example: tosort: ['a', 'b', 'c'], reflist: [1, 3, 2]
Return: ['a', 'c', 'b']
:param tosort:
:param reflist:
:return:
"""
return [x for (y, x) in sorted(zip(reflist, tosort))]
def sortNTopByVal(tosort, top, descending=False):
"""
Sort dictionary by descending values and return top elements.
Return list of tuples.
"""
return sorted([(k, v) for k, v in tosort.items()], key=lambda x: x[1], reverse=descending)[:top]
def buildSentsByChar(chars, sents):
"""
NOT NEEDED ANY MORE
Build map of chars to list of indices where characters occur in sents.
"""
    char_sent_map = {char: list() for char in chars}  # separate list per key (dict.fromkeys would share one list)
for ix, sent in enumerate(sents):
for char, ix_lst in char_sent_map.iteritems():
if char in sent['nostop']:
ix_lst.append(ix)
return char_sent_map
def writeData(bookfile, char_list, wsent, sentences):
"""
Write data relevant to book to pickle files
"""
file_prefix = '../books-txt/predicted-data/'
name_prefix = bookfile.split('/')[-1][:-4] # TODO get without .txt
# write list to file, one element per line
with codecs.open(file_prefix + name_prefix + '-chars.p', mode='wb') as f:
pickle.dump(char_list, f)
# write characters sentences dict to file in json format
with codecs.open(file_prefix + name_prefix + '-charsents.p', mode='wb') as f:
pickle.dump(wsent, f)
# write sentences dict to file in json format
with codecs.open(file_prefix + name_prefix + '-sents.p', mode='wb') as f:
pickle.dump(sentences, f)
def getSurroundings(array, idx, window=2):
"""
Return words +-2 from idx
"""
surroundings = []
if idx > 1:
surroundings.append(array[idx - 2])
else:
surroundings.append('---')
if idx > 0:
surroundings.append(array[idx - 1])
else:
surroundings.append('---')
if idx < len(array) - 1:
surroundings.append(array[idx + 1])
else:
surroundings.append('---')
if idx < len(array) - 2:
surroundings.append(array[idx + 2])
else:
surroundings.append('---')
return surroundings
def getWindow(lst, index, window):
"""
:param lst: Some list
    :param index: index at center of window
:param window: window size -> +- window on each side
Total size of 2*window+1
"""
min_idx = index-window if index-window >= 0 else 0
max_idx = index+window if index+window < len(lst) else len(lst)-1
return range(min_idx, max_idx+1)
def removeAccents(in_str):
encoding = "utf-8"
if(is_ascii(in_str)):
in_str = in_str.decode(encoding)
in_str = unicodedata.normalize('NFKD', in_str)
in_str = in_str.encode('ASCII', 'ignore')
return in_str
def is_ascii(mystr):
try:
mystr.decode('ascii')
return True
except UnicodeDecodeError:
return False
def camelSplit(name):
"""
Returns the string split if written in Camel case
"""
return re.sub('(?!^)([A-Z][a-z]+)', r' \1', name).split()
def objFromByte(r):
try:
return literal_eval(r.content.decode('utf-8'))
except ValueError:
return None
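# --- Hedged usage sketch of a couple of the pure helpers above ----------------
# The dictionary and list are arbitrary example values.
if __name__ == "__main__":
    print(sortNTopByVal({'alice': 3, 'bob': 1, 'carol': 2}, 2, descending=True))
    # -> [('alice', 3), ('carol', 2)]
    print(getWindow(['a', 'b', 'c', 'd', 'e'], 1, 2))
    # -> [0, 1, 2, 3] (indices of the window around position 1)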
|
py | 1a4ee5e445d720d82b5349f1982a641b0ea91a05 | #!/usr/bin/env python3
#
# This file is part of GreatFET
from __future__ import print_function
import ast
import argparse
from greatfet.utils import GreatFETArgumentParser, log_silent, log_verbose
def int_auto_base(s):
"""
Allows the user to pass an integer argument on the command line e.g. in decimal, or in hex with 0x notation.
Used with argparse like `type=int_auto_base`, since argparse's `type` argument accepts any function.
"""
# base=0 means autodetect the base from the prefix (if any).
return int(s, base=0)
def main():
# Set up a simple argument parser.
parser = GreatFETArgumentParser(description="""Utility for chipcon debugging via GreatFET
(See /firmware/common/swra.c for pin mappings)""",
verbose_by_default=True)
parser.add_argument('--chip-id', action='store_true', # Short options (one dash) should always be one letter
help="Print the chip ID of the connected device.")
parser.add_argument('-a', '--address', dest='address', metavar='<n>', type=int_auto_base,
help="Starting address (default: 0)", default=0)
parser.add_argument('-l', '--length', dest='length', metavar='<n>', type=int_auto_base,
help="Length of data to read")
parser.add_argument('-r', '--read', metavar='<filename>', type=argparse.FileType('wb'),
help="Read data into file")
parser.add_argument('--no-erase', dest='erase', default=True, action='store_false',
help="Do not erase the flash before performing a write operation")
parser.add_argument('--no-verify', dest='verify', action='store_false', default=True,
help="Do not verify the flash after performing a write operation")
parser.add_argument('-E', '--mass-erase', action='store_true', help="Erase the entire flash memory")
parser.add_argument('-w', '--write', metavar='<filename>', type=argparse.FileType('rb'),
help="Write data from file")
args = parser.parse_args()
log_function = log_verbose if args.verbose else log_silent
device = parser.find_specified_device()
chipcon = device.create_programmer('chipcon')
chipcon.debug_init()
if args.chip_id:
chip_id(chipcon)
if args.read:
if not args.length:
            parser.error("argument -l/--length: expected one argument")
read_flash(chipcon, args.read, args.address, args.length, log_function)
if args.mass_erase:
mass_erase_flash(chipcon, log_function)
if args.write:
program_flash(chipcon, args.write, args.address, args.erase, args.verify, log_function)
def chip_id(programmer):
print("Chip ID:", programmer.get_chip_id())
def read_flash(programmer, out_file, start_address, length, log_function):
log_function("Reading {} bytes starting at address {:02x}...".format(length, start_address))
data = programmer.read_flash(start_address=start_address, length=length)
out_file.write(data)
def mass_erase_flash(programmer, log_function):
log_function("Erasing entire flash...")
programmer.mass_erase_flash()
def program_flash(programmer, in_file, start_address, erase, verify, log_function):
log_function("Writing data to flash...")
image_array = in_file.read()
programmer.program_flash(image_array, erase=erase, verify=verify, start=start_address)
if __name__ == '__main__':
main()
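# Hedged CLI sketch -- the installed entry-point name is an assumption; the
# flags come from the argument parser above:
#
#   <chipcon-tool> --chip-id                      # print the chip ID
#   <chipcon-tool> -r dump.bin -a 0 -l 0x8000     # read 32 KiB of flash
#   <chipcon-tool> -w firmware.bin --no-verify    # write without verifying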
|
py | 1a4ee5f33ac952a69269f886fd9349a367c49c4e | # Copyright 2019 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ros2cli.node.strategy import add_arguments
from ros2cli.node.strategy import NodeStrategy
from ros2component.api import container_node_name_completer
from ros2component.api import find_container_node_names
from ros2component.api import get_components_in_container
from ros2component.api import get_components_in_containers
from ros2component.verb import VerbExtension
from ros2node.api import get_node_names
class ListVerb(VerbExtension):
"""Output a list of running containers and components."""
def add_arguments(self, parser, cli_name):
add_arguments(parser)
argument = parser.add_argument(
'container_node_name', nargs='?', default=None,
help='Name of the container node to list components from')
argument.completer = container_node_name_completer
parser.add_argument(
'--containers-only', action='store_true',
help='List found container nodes only')
def main(self, *, args):
with NodeStrategy(args) as node:
container_node_names = find_container_node_names(
node=node, node_names=get_node_names(node=node)
)
if args.container_node_name is not None:
if args.container_node_name not in [n.full_name for n in container_node_names]:
return "Unable to find container node '" + args.container_node_name + "'"
if not args.containers_only:
ok, outcome = get_components_in_container(
node=node, remote_container_node_name=args.container_node_name
)
if not ok:
return f'{outcome} when listing components in {args.container_node_name}'
if any(outcome):
print(*[
f'{component.uid} {component.name}' for component in outcome
], sep='\n')
else:
results = get_components_in_containers(node=node, remote_containers_node_names=[
n.full_name for n in container_node_names
])
for container_node_name, (ok, outcome) in results.items():
print(container_node_name)
if not args.containers_only:
if not ok:
print(f'{outcome} when listing components')
continue
if any(outcome):
print(*[
f' {component.uid} {component.name}' for component in outcome
], sep='\n')
|
py | 1a4ee64a0df471118e1fd37ef09181500b43edfd | 
<a href="https://hub.callysto.ca/jupyter/hub/user-redirect/git-pull?repo=https%3A%2F%2Fgithub.com%2Fcallysto%2Fcurriculum-notebooks&branch=master&subPath=Mathematics/PythagoreanTheorem/pythagorean-theorem.ipynb&depth=1" target="_parent"><img src="https://raw.githubusercontent.com/callysto/curriculum-notebooks/master/open-in-callysto-button.svg?sanitize=true" width="123" height="24" alt="Open in Callysto"/></a>
%%html
<script>
function code_toggle() {
if (code_shown){
$('div.input').hide('500');
$('#toggleButton').val('Show Code')
} else {
$('div.input').show('500');
$('#toggleButton').val('Hide Code')
}
code_shown = !code_shown
}
$( document ).ready(function(){
code_shown=false;
$('div.input').hide()
});
</script>
<form action="javascript:code_toggle()"><input type="submit" id="toggleButton" value="Show Code"></form>
from IPython.display import Image
from IPython.display import IFrame
import ipywidgets as widgets
import IPython
# Pythagorean Theorem
This notebook will cover the Pythagorean theorem, including its applications and a proof of the theorem.
**Note:** You should have a solid understanding of square roots and squaring numbers before moving on to this notebook. This notebook assumes you know these concepts, though it also gives you more practice with them.
## Introduction
Say you have 2 sides of a right angle triangle and are trying to figure out the third. How can we do this? Thankfully that's where the Pythagorean theorem comes in!
<img style="float: right;" src="images/PythagoreanTriangle.png" width="50%" height="700">
### Terminology
**Hypotenuse:** the longest side of a right angle triangle, which is the side opposite the right angle
**Legs:** the other two sides of the triangle that are not the hypotenuse
### What is this theorem?
When you draw a right angle triangle with a square on each side like this diagram, there's a relationship between the areas of the squares. You should notice that the areas of the squares on the two legs added together are equal to the area of the largest square on the hypotenuse. In this example, the area of the red square is $9 \text{ cm}^2$, the area of the blue square is $16 \text{ cm}^2$, and the area of the yellow square is $25 \text{ cm}^2$.
$$\text{Notice that } \color{red}{9 \text{ cm}^2} + \color{blue}{16 \text{ cm}^2} = \color{yellow}{25 \text{ cm}^2}$$
$$\text{But } \color{red}{3 \text{ cm}} + \color{blue}{4 \text{ cm}} \neq \color{yellow}{5 \text{ cm}}$$
This relationship actually works for all right angle triangles!
**The Pythagorean theorem is $a^2 + b^2 = c^2$ where $a$ and $b$ are the legs and $c$ is the hypotenuse. It does not matter which leg is $a$ or $b$**.
**Fact:** The Pythagorean theorem is named after the Greek mathematician Pythagoras.
*Pythagorean Triples are sets of three numbers that create a right angle triangle like this one so 3,4,5 is a Pythagorean triple*
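If you want to check a triple yourself, here is a short Python cell (added as an extra illustration, not one of the notebook's original activities):
# Extra illustration: verify that 3, 4, 5 satisfies a² + b² = c²
a, b, c = 3, 4, 5
print(a**2 + b**2 == c**2)  # prints True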
## Example 1
<img style="float: left;" src="images/PythagoreanTriangle2.png" width="45%" height="auto">
##### Question 1: What are the lengths of the legs of the triangle on the left?
The side length of a square is the square root of its area. <br>
The side length of the red square is $\sqrt{4 \text{ m}^2} = 2 \text{ m}$. <br>
The side length of the blue square is $\sqrt{9 \text{ m}^2} = 3 \text{ m}$. <br>
Therefore the lengths of the legs are $2 \text{ m}$ and $3 \text{ m}$.
##### Question 2: What is the area of the yellow square in the diagram to the left?
Let's use the Pythagorean theorem. The area of the two smaller squares added together is equal to the area of the larger square. <br>
The area of the red square is $ 4 \text{ m}^2$ and the area of the blue square is $ 9 \text{ m}^2$. <br>
Now we add them together: $ 4 \text{ m}^2 + 9 \text{ m}^2 = 13 \text{ m}^2$. <br>
The area of the yellow square is $ 13 \text{ m}^2$.
##### Question 3: What is the length of the hypotenuse of the triangle to the left?
Now we know the area of the large yellow square is $ 13 \text{ m}^2$, so the side length of the square is $\sqrt{13} \text{ m}$. <br>
The hypotenuse of the triangle has the same length as the length of the side of the yellow square. <br>
Therefore the length of the hypotenuse is $\sqrt{13} \text{ m}$.
## Proof
Not convinced that this relationship works for all right angle triangles? Watch the visual proof from mathsisfun.com below.
%%html
<iframe width="560" height="315" src="https://www.youtube.com/embed/_87RbSoELW8" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
### Algebraic proof
We will work through the proof of a² + b² = c² together. Let's look at the diagram below.
<img style="float: center;" src="images/PythagoreanProof.png" width="30%" height="auto">
You can see the 4 identical right angle triangles within a square, and the sides of each triangle are labelled just like our first example. a = 3, b = 4, and c = 5.
#### Area of the large square
The area of the large square is its side length squared, which is $(3 + 4)^2 = 7^2 = 49.$
#### Area of the pieces
The area of the smaller yellow square in the middle is $ 5^2 = 25.$ <br>
The area of one blue triangle is $\frac{3 \times 4}{2}$ and since there's 4 of them, the area of all 4 triangles is
$$\frac{4 \times (3 \times 4)}{2} = \frac{4 \times 12}{2} = \frac{48}{2} = 24.$$ <br>
Now we add those together to get $ 25 + 24 = 49.$
#### Areas are equal
You can see that $ 49 = 49. $ This is because the area of the large square takes up the exact same space as the areas of all 4 blue triangles and the yellow square.
This doesn't just work for these numbers, though; it works for any numbers that create right angle triangles! If you want to see the full proof without numbers, you can check it out at [mathsisfun.com](https://www.mathsisfun.com/geometry/pythagorean-theorem-proof.html).
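Here is one more optional Python cell (an addition for illustration) that checks that the two area calculations above agree:
# Optional check of the algebraic proof with a = 3, b = 4, c = 5
outer = (3 + 4)**2                     # area of the large square
pieces = 5**2 + 4 * (3 * 4) // 2       # inner square plus the four triangles
print(outer, pieces, outer == pieces)  # 49 49 True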
## Example 2
Let's go through an example of a question without the squares. What is the length of the hypotenuse of the triangle below?
<img style="float: center;" src="images/PythagoreanTriangle4.png" width="40%" height="auto">
Recall the Pythagorean theorem: $a^2 + b^2 = c^2$. <br>
Now let's put the values we know into the theorem. The length of the hypotenuse is the value of c.
$$\begin{align*}
(2 \text{ cm})^2 + (5 \text{ cm})^2 & = c^2 \\
4 \text{ cm}^2 + 25 \text{ cm}^2 & = c^2 \\
29 \text{ cm}^2 & = c^2 \\
\sqrt{29 \text { cm}^2} & = \sqrt{c^2} \\
\sqrt{29} \text{ cm} & = c \\
\end{align*}$$
Let's approximate the answer to one decimal place using a calculator: $\sqrt{29} \text{ cm} \approx 5.4 \text{ cm}$. <br>
The length of the hypotenuse is $\sqrt{29} \text{ cm}$ or approximately $5.4 \text{ cm}$.
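If you have Python handy, a quick cell like this (an extra check, not part of the original example) reproduces the calculation:
import math
# Extra check: hypotenuse of a right angle triangle with legs 2 cm and 5 cm
c = math.sqrt(2**2 + 5**2)
print(round(c, 1))  # 5.4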
**********
## Practice
#### Question 1
<img style="float: left;" src="images/PythagoreanTriangle5.png" width="300">
answer1 = widgets.RadioButtons(options=['9 m', '6 m','6.4 m','5.4 m'],
value=None, description= 'Hypotenuse')
def display1():
IPython.display.clear_output()
print("What is the length of the hypotenuse of the triangle above?")
print("Round to one decimal place when necessary.")
IPython.display.display(answer1)
def check1(a):
display1()
if answer1.value == '6.4 m':
print("Correct! Great job! The theorem properly filled out looks like this: 16 m² + 25 m² = 41 m²")
else:
print("Sorry, that's not right, try again. Pythagorean Theorem is a² + b² = c².")
display1()
answer1.observe(check1, 'value')
#### Question 2
Let's try a more practical problem for the Pythagorean theorem. Say you have a table whose shortest side is 3.10 m long. If the table is held on an angle, can it fit through the door frame below? Round to 2 decimal places.
<img style="float: left;" src="images/PythagoreanTriangleDoor.png" width="200">
answer3 = widgets.RadioButtons(options=['2.00 m', '2.83 m','3.16 m','4.03 m'],
value=None, description= 'Diagonal')
def display3():
IPython.display.clear_output()
print("What is the diagonal of the door?")
print("Round to two decimal places when necessary.")
IPython.display.display(answer3)
def check3(a):
display3()
if answer3.value == '3.16 m':
print("Correct! Great job!")
else:
print("Sorry, that's not right, try again. Pythagorean Theorem is a² + b² = c².")
display3()
answer3.observe(check3, 'value')
answer2 = widgets.RadioButtons(options=['Yes, the table will fit.', 'No, the table will not fit'],
value=None)
def display2():
IPython.display.clear_output()
print("Is the length of the table smaller than the diagonal of the door?")
print("Round to two decimal places when necessary.")
IPython.display.display(answer2)
def check2(a):
display2()
if answer2.value == 'Yes, the table will fit.':
print("That's right! The table will fit through the door on an angle.")
else:
print("Sorry, that's not right, the table will be able to fit in the door because 3.1 m is less than 3.16 m.")
display2()
answer2.observe(check2, 'value')
What else would knowing how to find the hypotenuse be helpful for?
## Extend Your Knowledge
We can use the Pythagorean theorem for more than just finding the length of the hypotenuse given the two legs. We can find the length of one leg given the other leg and the hypotenuse.
### Example
Given this right angled triangle below, what is the missing side length?
<img style="float: left;" src="images/PythagoreanTriangle6.png" width=200>
Let's start by filling in the information we know into the Pythagorean theorem.
$$\begin{align*}
a^2 + b^2 & = c^2 \\
a^2 + (\sqrt{20 \text{ units}})^2 & = (6 \text{ units})^2 \\
\end{align*}$$
Now let's solve this equation for the missing variable. In this example, we will solve for $a$.
$$\begin{align*}
a^2 + (\sqrt{20 \text{ units}})^2 & = (6 \text{ units})^2 \\
a^2 + 20 \text{ units}^2 & = 36 \text{ units}^2 \tag{apply the power of 2 to the bases} \\
a^2 + 20 \text{ units}^2 - 20 \text{ units}^2 & = 36 \text{ units}^2 - 20 \text{ units}^2 \tag{subtract 20 units² from both sides} \\
\sqrt{a^2} & = \sqrt{16 \text{ units}^2} \tag{square root both sides} \\
a & = 4 \text{ units}
\end{align*}$$
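A short Python cell (added as an optional check) confirms the arithmetic:
import math
# Optional check: leg a when the hypotenuse is 6 units and the other leg is √20 units
a = math.sqrt(6**2 - 20)
print(a)  # 4.0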
## Practice
Now you try to calculate the length of the missing leg.
<img style="float: left;" src="images/PythagoreanTriangle8.png" width="200">
answer4 = widgets.RadioButtons(options=['8 m', '9 m','8.3 m','7.8 m'],
value=None, description= 'Side Length')
def display4():
IPython.display.clear_output()
print("What is the length of the leg labelled a above?")
print("Round to one decimal place when necessary.")
IPython.display.display(answer4)
def check4(a):
display4()
if answer4.value == '8 m':
print("Correct! If we divide each side length by 2, you might notice that this triangle is the same one \n as the very first triangle we looked at in this notebook!")
else:
print("Sorry, that's not right, try again. Pythagorean Theorem is a² + b² = c². We are looking for a.")
display4()
answer4.observe(check4, 'value')
## Checking Right angles
We can check if a triangle is a right angle triangle by checking whether its sides fit the Pythagorean theorem. If they don't, then it isn't a right angle triangle. Let's look at an acute and an obtuse triangle and compare their sides in the Pythagorean theorem. You know, just to make sure.
Look at the three triangles below. One is a right angle triangle, one is an acute triangle, and one is an obtuse triangle. Fill in the table below by clicking on the box you want to fill (where it's written 'nan') and typing in your answer. The longest side is side c.
<img style="float: left;" src="images/ThreeTriangles.png" width="600">
import pandas as pd
import qgrid
table = pd.DataFrame(index=pd.Series(['Right', 'Acute', 'Obtuse']), columns=pd.Series(['a²', 'b²','a² + b²', 'c²']))
table_widget = qgrid.QgridWidget(df =table, show_toolbar=False)
table_widget
answer5 = widgets.RadioButtons(options=['Yes','No'],
value=None)
def check5(a):
IPython.display.clear_output()
print("Does a² + b² = c² for all triangles?")
IPython.display.display(answer5)
if answer5.value == 'No':
print("That's right! The Pythagorean theorem only works for right angle triangles.")
else:
print("Actually, the Pythagorean theorem only works for right angle triangles.")
print("Now let's use this knowledge to check if triangles have a right angle or not!")
print("Does a² + b² = c² for all triangles?")
IPython.display.display(answer5)
answer5.observe(check5, 'value')
## Example
Let's go through an example together. Here is a triangle with all three sides labelled. Is this a right angle triangle?
<img style="float: left;" src="images/angle2.png" width="300">
Remember, the longest side is side c. Let's fill in the Pythagorean theorem and see if the left side equals the right. <br>
Since c is the largest side, a and b will be the legs.
$$\begin{align*}
\text{Let's start with the left side:} \\
a & = 7 \text{ m} \\
a^2 & = 49 \text{ m}^2 \\
b & = 10 \text{ m} \\
b^2& = 100 \text{ m}^2 \\
\text{Now let's add them together:} \\
a^2 + b^2 & = 49 \text{ m}^2 + 100 \text{ m}^2 \\
a^2 + b^2 & = 149 \text{ m}^2 \\
\end{align*}$$
**The left side equals 149 m²** <br>
$$\begin{align*}
\text{And now the right side:} \\
c & = 13 \text{ m} \\
c^2 & = 169 \text{ m}^2 \\
\end{align*}$$
**The right side equals 169 m²** <br>
149 m² does not equal 169 m², therefore this triangle is not a right angle triangle.
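To run checks like this with code, a small helper function (added here as an optional extra) compares the two sides of the theorem:
# Optional extra: True when a² + b² = c², with c taken as the longest side.
# The comparison is exact, so use a tolerance instead if the lengths are measured decimals.
def is_right_triangle(a, b, c):
    a, b, c = sorted([a, b, c])
    return a**2 + b**2 == c**2

print(is_right_triangle(7, 10, 13))  # False, matching the example above
print(is_right_triangle(3, 4, 5))    # True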
### Practice
Now it's your turn to check if this triangle below is a right angle triangle.
<img style="float: left;" src="images/angle1.png" width="200">
submit1 = widgets.Button(description='Submit', button_style='success')
answer6 = widgets.Text(value=None, placeholder='Your answer here', description='Left side')
def display6():
IPython.display.clear_output()
print("What is a² + b²?")
print("Type your answer below, and don't forget units! Eg: write 50 cm^2 or 50 units^2")
IPython.display.display(answer6, submit1)
submit1.on_click(check6)
def check6(a):
display6()
if answer6.value == '169 units^2':
print("That's right! Now let's move on to the right side.")
else:
if answer6.value == '169' or answer6.value == '169 units':
print("Don't forget your units!")
else:
print("Sorry, that's not right, try again before moving on to the right side.")
display6()
submit2 = widgets.Button(description='Submit', button_style='success')
answer7 = widgets.Text(value=None, placeholder='Your answer here', description='Right side')
def display7():
IPython.display.clear_output()
print("What is c²?")
print("Type your answer below, and don't forget units! Eg: write 50 cm^2 or 50 units^2")
IPython.display.display(answer7, submit2)
submit2.on_click(check7)
def check7(a):
display7()
if answer7.value == '169 units^2':
print("That's correct! Great job!")
elif answer7.value == '169' or answer7.value == '169 units':
print("Don't forget your units!")
else:
print("Sorry, try again.")
display7()
answer8 = widgets.RadioButtons(options=['Yes','No'],
value=None)
def check8(a):
IPython.display.clear_output()
print("Is this triangle a right angle triangle?")
IPython.display.display(answer8)
if answer8.value == 'Yes':
print("That's right! This is a right angle triangle")
else:
print("Actually, this triangle is a right angle triangle.")
print("Is this triangle a right angle triangle?")
IPython.display.display(answer8)
answer8.observe(check8, 'value')
### Word question

Bailey has four pieces of wood. Two of them are 3 inches long. The other two are 5 inches long. <br>
Bailey makes a rectangular picture frame using these pieces. Then the diagonal is measured to be 7 inches long. <br>
answer9 = widgets.RadioButtons(options=['Yes','No'],
value=None)
def check9(a):
IPython.display.clear_output()
print("Does the picture frame have a right angle corner?")
IPython.display.display(answer9)
if answer9.value == 'No':
print("That's right! The frame does not have a right angle corner.")
else:
print("Actually, the frame does not have a right angle corner.")
print("Does the picture frame have a right angle corner?")
IPython.display.display(answer9)
answer9.observe(check9, 'value')
## What did we learn?
Let's summarize what we have learned in this notebook:
* The Pythagorean theorem states: a² + b² = c²
* This theorem has been proven multiple ways
* This theorem can be used for multiple purposes
* Find the length of the hypotenuse
* Find the length of a side
* Confirm if there's a right angle
* Lots of situations in life need the Pythagorean theorem
This math concept will be used for many more years in school. Make sure to do lots of practice, even beyond this notebook so that you understand the Pythagorean theorem well.
[](https://github.com/callysto/curriculum-notebooks/blob/master/LICENSE.md) |
py | 1a4ee6be49c87e3e7c6db9128fda6255a22981b2 | import torch
import torch.nn as nn
import torch.nn.functional as F
class ContrastiveLoss(nn.Module):
"""
Contrastive loss function.
Based on: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
"""
def __init__(self, margin: float = 2.0):
super(ContrastiveLoss, self).__init__()
self.margin = margin
self.eps = 1e-9
def forward(self, output1: torch.Tensor, output2: torch.Tensor, label: torch.Tensor):
euclidean_distance = F.pairwise_distance(output1, output2)
losses = 0.5 * (label.float() * euclidean_distance
+ (1 + (-1 * label)).float() * F.relu(self.margin
- (euclidean_distance + self.eps).sqrt()).pow(2))
loss_contrastive = torch.mean(losses)
return loss_contrastive
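# Minimal usage sketch (added for illustration; the batch size, embedding width and margin
# below are arbitrary choices, not values required by this module):
if __name__ == "__main__":
    emb1, emb2 = torch.randn(8, 128), torch.randn(8, 128)
    labels = torch.randint(0, 2, (8,))
    criterion = ContrastiveLoss(margin=2.0)
    print(criterion(emb1, emb2, labels).item())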
|
py | 1a4ee8282e66db47914dddb192389c9d553a1e69 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import jmespath
from chart.tests.helm_template_generator import render_chart
class ResourceQuotaTest(unittest.TestCase):
def test_resource_quota_template(self):
docs = render_chart(
values={
"quotas": {
"configmaps": "10",
"persistentvolumeclaims": "4",
"pods": "4",
"replicationcontrollers": "20",
"secrets": "10",
"services": "10",
}
},
show_only=["templates/resourcequota.yaml"],
)
assert "ResourceQuota" == jmespath.search("kind", docs[0])
assert "20" == jmespath.search("spec.hard.replicationcontrollers", docs[0])
def test_resource_quota_are_not_added_by_default(self):
docs = render_chart(
show_only=["templates/resourcequota.yaml"],
)
assert docs == []
|
py | 1a4ee9682dd3bc362426874ff7e4156b41bbf5db | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import OrderNotFound
class acx(Exchange):
def describe(self):
return self.deep_extend(super(acx, self).describe(), {
'id': 'acx',
'name': 'ACX',
'countries': ['AU'],
'rateLimit': 1000,
'version': 'v2',
'has': {
'CORS': True,
'fetchTickers': True,
'fetchOHLCV': True,
'withdraw': True,
'fetchOrder': True,
},
'timeframes': {
'1m': '1',
'5m': '5',
'15m': '15',
'30m': '30',
'1h': '60',
'2h': '120',
'4h': '240',
'12h': '720',
'1d': '1440',
'3d': '4320',
'1w': '10080',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/30247614-1fe61c74-9621-11e7-9e8c-f1a627afa279.jpg',
'extension': '.json',
'api': 'https://acx.io/api',
'www': 'https://acx.io',
'doc': 'https://acx.io/documents/api_v2',
},
'api': {
'public': {
'get': [
'depth', # Get depth or specified market Both asks and bids are sorted from highest price to lowest.
'k_with_pending_trades', # Get K data with pending trades, which are the trades not included in K data yet, because there's delay between trade generated and processed by K data generator
'k', # Get OHLC(k line) of specific market
'markets', # Get all available markets
'order_book', # Get the order book of specified market
'order_book/{market}',
'tickers', # Get ticker of all markets
'tickers/{market}', # Get ticker of specific market
'timestamp', # Get server current time, in seconds since Unix epoch
'trades', # Get recent trades on market, each trade is included only once Trades are sorted in reverse creation order.
'trades/{market}',
],
},
'private': {
'get': [
'members/me', # Get your profile and accounts info
'deposits', # Get your deposits history
'deposit', # Get details of specific deposit
'deposit_address', # Where to deposit The address field could be empty when a new address is generating(e.g. for bitcoin), you should try again later in that case.
'orders', # Get your orders, results is paginated
'order', # Get information of specified order
'trades/my', # Get your executed trades Trades are sorted in reverse creation order.
'withdraws', # Get your cryptocurrency withdraws
'withdraw', # Get your cryptocurrency withdraw
],
'post': [
'orders', # Create a Sell/Buy order
'orders/multi', # Create multiple sell/buy orders
'orders/clear', # Cancel all my orders
'order/delete', # Cancel an order
'withdraw', # Create a withdraw
],
},
},
'fees': {
'trading': {
'tierBased': False,
'percentage': True,
'maker': 0.2 / 100,
'taker': 0.2 / 100,
},
'funding': {
'tierBased': False,
'percentage': True,
'withdraw': {}, # There is only 1% fee on withdrawals to your bank account.
},
},
'exceptions': {
'2002': InsufficientFunds,
'2003': OrderNotFound,
},
})
async def fetch_markets(self, params={}):
markets = await self.publicGetMarkets(params)
result = []
for i in range(0, len(markets)):
market = markets[i]
id = market['id']
symbol = market['name']
baseId = self.safe_string(market, 'base_unit')
quoteId = self.safe_string(market, 'quote_unit')
if (baseId is None) or (quoteId is None):
ids = symbol.split('/')
baseId = ids[0].lower()
quoteId = ids[1].lower()
base = baseId.upper()
quote = quoteId.upper()
base = self.safe_currency_code(base)
quote = self.safe_currency_code(quote)
# todo: find out their undocumented precision and limits
precision = {
'amount': 8,
'price': 8,
}
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'precision': precision,
'info': market,
})
return result
async def fetch_balance(self, params={}):
await self.load_markets()
response = await self.privateGetMembersMe(params)
balances = self.safe_value(response, 'accounts')
result = {'info': balances}
for i in range(0, len(balances)):
balance = balances[i]
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_float(balance, 'balance')
account['used'] = self.safe_float(balance, 'locked')
result[code] = account
return self.parse_balance(result)
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
if limit is not None:
request['limit'] = limit # default = 300
orderbook = await self.publicGetDepth(self.extend(request, params))
timestamp = self.safe_timestamp(orderbook, 'timestamp')
return self.parse_order_book(orderbook, timestamp)
def parse_ticker(self, ticker, market=None):
timestamp = self.safe_timestamp(ticker, 'at')
ticker = ticker['ticker']
symbol = None
if market:
symbol = market['symbol']
last = self.safe_float(ticker, 'last')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high'),
'low': self.safe_float(ticker, 'low'),
'bid': self.safe_float(ticker, 'buy'),
'bidVolume': None,
'ask': self.safe_float(ticker, 'sell'),
'askVolume': None,
'vwap': None,
'open': self.safe_float(ticker, 'open'),
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': self.safe_float(ticker, 'vol'),
'quoteVolume': None,
'info': ticker,
}
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
response = await self.publicGetTickers(params)
ids = list(response.keys())
result = {}
for i in range(0, len(ids)):
id = ids[i]
market = None
symbol = id
if id in self.markets_by_id:
market = self.markets_by_id[id]
symbol = market['symbol']
else:
base = id[0:3]
quote = id[3:6]
base = base.upper()
quote = quote.upper()
base = self.safe_currency_code(base)
quote = self.safe_currency_code(quote)
symbol = base + '/' + quote
result[symbol] = self.parse_ticker(response[id], market)
return result
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = await self.publicGetTickersMarket(self.extend(request, params))
return self.parse_ticker(response, market)
def parse_trade(self, trade, market=None):
timestamp = self.parse8601(self.safe_string(trade, 'created_at'))
id = self.safe_string(trade, 'tid')
symbol = None
if market is not None:
symbol = market['symbol']
return {
'info': trade,
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': None,
'side': None,
'order': None,
'takerOrMaker': None,
'price': self.safe_float(trade, 'price'),
'amount': self.safe_float(trade, 'volume'),
'cost': self.safe_float(trade, 'funds'),
'fee': None,
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = await self.publicGetTrades(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None):
return [
ohlcv[0] * 1000,
ohlcv[1],
ohlcv[2],
ohlcv[3],
ohlcv[4],
ohlcv[5],
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
if limit is None:
limit = 500 # default is 30
request = {
'market': market['id'],
'period': self.timeframes[timeframe],
'limit': limit,
}
if since is not None:
request['timestamp'] = int(since / 1000)
response = await self.publicGetK(self.extend(request, params))
return self.parse_ohlcvs(response, market, timeframe, since, limit)
def parse_order_status(self, status):
statuses = {
'done': 'closed',
'wait': 'open',
'cancel': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
symbol = None
if market is not None:
symbol = market['symbol']
else:
marketId = self.safe_string(order, 'market')
symbol = self.markets_by_id[marketId]['symbol']
timestamp = self.parse8601(self.safe_string(order, 'created_at'))
status = self.parse_order_status(self.safe_string(order, 'state'))
type = self.safe_string(order, 'type')
side = self.safe_string(order, 'side')
id = self.safe_string(order, 'id')
return {
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'status': status,
'symbol': symbol,
'type': type,
'side': side,
'price': self.safe_float(order, 'price'),
'amount': self.safe_float(order, 'volume'),
'filled': self.safe_float(order, 'executed_volume'),
'remaining': self.safe_float(order, 'remaining_volume'),
'trades': None,
'fee': None,
'info': order,
}
async def fetch_order(self, id, symbol=None, params={}):
await self.load_markets()
request = {
'id': int(id),
}
response = await self.privateGetOrder(self.extend(request, params))
return self.parse_order(response)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
request = {
'market': self.market_id(symbol),
'side': side,
'volume': str(amount),
'ord_type': type,
}
if type == 'limit':
request['price'] = str(price)
response = await self.privatePostOrders(self.extend(request, params))
marketId = self.safe_value(response, 'market')
market = self.safe_value(self.markets_by_id, marketId)
return self.parse_order(response, market)
async def cancel_order(self, id, symbol=None, params={}):
await self.load_markets()
request = {
'id': id,
}
response = await self.privatePostOrderDelete(self.extend(request, params))
order = self.parse_order(response)
status = order['status']
if status == 'closed' or status == 'canceled':
raise OrderNotFound(self.id + ' ' + self.json(order))
return order
async def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
# they have XRP but no docs on memo/tag
request = {
'currency': currency['id'],
'sum': amount,
'address': address,
}
response = await self.privatePostWithdraw(self.extend(request, params))
# withdrawal response is undocumented
return {
'info': response,
'id': None,
}
def nonce(self):
return self.milliseconds()
def encode_params(self, params):
if 'orders' in params:
orders = params['orders']
query = self.urlencode(self.keysort(self.omit(params, 'orders')))
for i in range(0, len(orders)):
order = orders[i]
keys = list(order.keys())
for k in range(0, len(keys)):
key = keys[k]
value = order[key]
query += '&orders%5B%5D%5B' + key + '%5D=' + str(value)
return query
return self.urlencode(self.keysort(params))
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
request = '/api/' + self.version + '/' + self.implode_params(path, params)
if 'extension' in self.urls:
request += self.urls['extension']
query = self.omit(params, self.extract_params(path))
url = self.urls['api'] + request
if api == 'public':
if query:
url += '?' + self.urlencode(query)
else:
self.check_required_credentials()
nonce = str(self.nonce())
query = self.encode_params(self.extend({
'access_key': self.apiKey,
'tonce': nonce,
}, params))
auth = method + '|' + request + '|' + query
signed = self.hmac(self.encode(auth), self.encode(self.secret))
suffix = query + '&signature=' + signed
if method == 'GET':
url += '?' + suffix
else:
body = suffix
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return
if code == 400:
error = self.safe_value(response, 'error')
errorCode = self.safe_string(error, 'code')
feedback = self.id + ' ' + self.json(response)
exceptions = self.exceptions
if errorCode in exceptions:
raise exceptions[errorCode](feedback)
# fallback to default error handler
|
py | 1a4eea10c1445d818b821b60a94ed8576ff3605e | # -*- coding: utf-8 -*-
# Copyright (C) 2012 Anaconda, Inc
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import absolute_import, division, print_function, unicode_literals
from collections import defaultdict
from logging import DEBUG, getLogger
from ._vendor.auxlib.decorators import memoize
from ._vendor.toolz import concat, groupby
from .base.constants import ChannelPriority, MAX_CHANNEL_PRIORITY
from .base.context import context
from .common.compat import iteritems, iterkeys, itervalues, odict, on_win, text_type
from .common.io import time_recorder
from .common.logic import Clauses, get_sat_solver_cls, minimal_unsatisfiable_subset
from .common.toposort import toposort
from .exceptions import InvalidSpec, ResolvePackageNotFound, UnsatisfiableError
from .models.channel import Channel, MultiChannel
from .models.enums import NoarchType
from .models.match_spec import MatchSpec
from .models.records import PackageRecord
from .models.version import VersionOrder
log = getLogger(__name__)
stdoutlog = getLogger('conda.stdoutlog')
# used in conda build
Unsatisfiable = UnsatisfiableError
ResolvePackageNotFound = ResolvePackageNotFound
get_sat_solver_cls = memoize(get_sat_solver_cls)
def dashlist(iterable, indent=2):
return ''.join('\n' + ' ' * indent + '- ' + str(x) for x in iterable)
class Resolve(object):
def __init__(self, index, sort=False, processed=False, channels=()):
self.index = index
self.channels = channels
self._channel_priorities_map = self._make_channel_priorities(channels) if channels else {}
self._channel_priority = context.channel_priority
self._solver_ignore_timestamps = context.solver_ignore_timestamps
groups = groupby("name", itervalues(index))
trackers = defaultdict(list)
for name in groups:
unmanageable_precs = [prec for prec in groups[name] if prec.is_unmanageable]
if unmanageable_precs:
log.debug("restricting to unmanageable packages: %s", name)
groups[name] = unmanageable_precs
tf_precs = (prec for prec in groups[name] if prec.track_features)
for prec in tf_precs:
for feature_name in prec.track_features:
trackers[feature_name].append(prec)
self.groups = groups # Dict[package_name, List[PackageRecord]]
self.trackers = trackers # Dict[track_feature, List[PackageRecord]]
self._cached_find_matches = {} # Dict[MatchSpec, Set[PackageRecord]]
self.ms_depends_ = {} # Dict[PackageRecord, List[MatchSpec]]
self._reduced_index_cache = {}
self._strict_channel_cache = {}
if sort:
for group in itervalues(groups):
group.sort(key=self.version_key, reverse=True)
def default_filter(self, features=None, filter=None):
# TODO: fix this import; this is bad
from .core.subdir_data import make_feature_record
if filter is None:
filter = {}
else:
filter.clear()
filter.update({make_feature_record(fstr): False for fstr in iterkeys(self.trackers)})
if features:
filter.update({make_feature_record(fstr): True for fstr in features})
return filter
def valid(self, spec_or_prec, filter, optional=True):
"""Tests if a package, MatchSpec, or a list of both has satisfiable
dependencies, assuming cyclic dependencies are always valid.
Args:
spec_or_prec: a package record, a MatchSpec, or an iterable of these.
filter: a dictionary of (fkey,valid) pairs, used to consider a subset
of dependencies, and to eliminate repeated searches.
optional: if True (default), do not enforce optional specifications
when considering validity. If False, enforce them.
Returns:
True if the full set of dependencies can be satisfied; False otherwise.
If filter is supplied and update is True, it will be updated with the
search results.
"""
def v_(spec):
return v_ms_(spec) if isinstance(spec, MatchSpec) else v_fkey_(spec)
def v_ms_(ms):
return (optional and ms.optional
or any(v_fkey_(fkey) for fkey in self.find_matches(ms)))
def v_fkey_(prec):
val = filter.get(prec)
if val is None:
filter[prec] = True
try:
depends = self.ms_depends(prec)
except InvalidSpec as e:
val = filter[prec] = False
else:
val = filter[prec] = all(v_ms_(ms) for ms in depends)
return val
result = v_(spec_or_prec)
return result
def valid2(self, spec_or_prec, filter_out, optional=True):
def is_valid(_spec_or_prec):
if isinstance(_spec_or_prec, MatchSpec):
return is_valid_spec(_spec_or_prec)
else:
return is_valid_prec(_spec_or_prec)
def is_valid_spec(_spec):
return optional and _spec.optional or any(
is_valid_prec(_prec) for _prec in self.find_matches(_spec)
)
def is_valid_prec(prec):
val = filter_out.get(prec)
if val is None:
filter_out[prec] = False
try:
has_valid_deps = all(is_valid_spec(ms) for ms in self.ms_depends(prec))
except InvalidSpec as e:
val = filter_out[prec] = "invalid dep specs"
else:
val = filter_out[prec] = False if has_valid_deps else "invalid depends specs"
return not val
return is_valid(spec_or_prec)
def invalid_chains(self, spec, filter, optional=True):
"""Constructs a set of 'dependency chains' for invalid specs.
A dependency chain is a tuple of MatchSpec objects, starting with
the requested spec, proceeding down the dependency tree, ending at
a specification that cannot be satisfied. Uses self.valid_ as a
filter, both to prevent chains and to allow other routines to
prune the list of valid packages with additional criteria.
Args:
spec: a package key or MatchSpec
filter: a dictionary of (prec, valid) pairs to be used when
testing for package validity.
optional: if True (default), do not enforce optional specifications
when considering validity. If False, enforce them.
Returns:
A generator of tuples, empty if the MatchSpec is valid.
"""
def chains_(spec, names):
if spec.name in names:
return
names.add(spec.name)
if self.valid(spec, filter, optional):
return
precs = self.find_matches(spec)
found = False
for prec in precs:
for m2 in self.ms_depends(prec):
for x in chains_(m2, names):
found = True
yield (spec,) + x
if not found:
yield (spec,)
return chains_(spec, set())
def invalid_chains2(self, spec, filter_out, optional=True):
def chains_(spec, names):
if spec.name in names:
return
names.add(spec.name)
if self.valid2(spec, filter_out, optional):
return
precs = self.find_matches(spec)
found = False
for prec in precs:
for m2 in self.ms_depends(prec):
for x in chains_(m2, names):
found = True
yield (spec,) + x
if not found:
yield (spec,)
return chains_(spec, set())
def verify_specs(self, specs):
"""Perform a quick verification that specs and dependencies are reasonable.
Args:
specs: An iterable of strings or MatchSpec objects to be tested.
Returns:
Nothing, but if there is a conflict, an error is thrown.
Note that this does not attempt to resolve circular dependencies.
"""
non_tf_specs = []
bad_deps = []
feature_names = set()
for ms in specs:
_feature_names = ms.get_exact_value('track_features')
if _feature_names:
feature_names.update(_feature_names)
else:
non_tf_specs.append(ms)
filter = self.default_filter(feature_names)
for ms in non_tf_specs:
bad_deps.extend(self.invalid_chains(ms, filter.copy()))
if bad_deps:
raise ResolvePackageNotFound(bad_deps)
return non_tf_specs, feature_names
def find_conflicts(self, specs):
"""Perform a deeper analysis on conflicting specifications, by attempting
to find the common dependencies that might be the cause of conflicts.
Args:
specs: An iterable of strings or MatchSpec objects to be tested.
It is assumed that the specs conflict.
Returns:
Nothing, because it always raises an UnsatisfiableError.
Strategy:
If we're here, we know that the specs conflict. This could be because:
- One spec conflicts with another; e.g.
['numpy 1.5*', 'numpy >=1.6']
- One spec conflicts with a dependency of another; e.g.
['numpy 1.5*', 'scipy 0.12.0b1']
- Each spec depends on *the same package* but in a different way; e.g.,
['A', 'B'] where A depends on numpy 1.5, and B on numpy 1.6.
Technically, all three of these cases can be boiled down to the last
one if we treat the spec itself as one of the "dependencies". There
might be more complex reasons for a conflict, but this code only
considers the ones above.
The purpose of this code, then, is to identify packages (like numpy
above) that all of the specs depend on *but in different ways*. We
then identify the dependency chains that lead to those packages.
"""
sdeps = {}
# For each spec, assemble a dictionary of dependencies, with package
# name as key, and all of the matching packages as values.
for ms in specs:
rec = sdeps.setdefault(ms, {})
slist = [ms]
while slist:
ms2 = slist.pop()
deps = rec.setdefault(ms2.name, set())
for fkey in self.find_matches(ms2):
if fkey not in deps:
deps.add(fkey)
slist.extend(ms3 for ms3 in self.ms_depends(fkey) if ms3.name != ms.name)
# Find the list of dependencies they have in common. And for each of
# *those*, find the individual packages that they all share. Those need
# to be removed as conflict candidates.
commkeys = set.intersection(*(set(s.keys()) for s in sdeps.values()))
commkeys = {k: set.intersection(*(v[k] for v in sdeps.values())) for k in commkeys}
# and find the dependency chains that lead to them.
bad_deps = []
for ms, sdep in iteritems(sdeps):
filter = {}
for mn, v in sdep.items():
if mn != ms.name and mn in commkeys:
# Mark this package's "unique" dependencies as invalid
for fkey in v - commkeys[mn]:
filter[fkey] = False
# Find the dependencies that lead to those invalid choices
ndeps = set(self.invalid_chains(ms, filter, False))
# This may produce some additional invalid chains that we
# don't care about. Select only those that terminate in our
# predetermined set of "common" keys.
ndeps = [nd for nd in ndeps if nd[-1].name in commkeys]
if ndeps:
bad_deps.extend(ndeps)
else:
# This means the package *itself* was the common conflict.
bad_deps.append((ms,))
raise UnsatisfiableError(bad_deps)
def _get_strict_channel(self, package_name):
try:
channel_name = self._strict_channel_cache[package_name]
except KeyError:
all_channel_names = set(prec.channel.name for prec in self.groups[package_name])
by_cp = {self._channel_priorities_map.get(cn, 1): cn for cn in all_channel_names}
highest_priority = sorted(by_cp)[0] # highest priority is the lowest number
channel_name = self._strict_channel_cache[package_name] = by_cp[highest_priority]
return channel_name
@time_recorder(module_name=__name__)
def get_reduced_index(self, specs):
# TODO: fix this import; this is bad
from .core.subdir_data import make_feature_record
strict_channel_priority = context.channel_priority == ChannelPriority.STRICT
cache_key = strict_channel_priority, frozenset(specs)
if cache_key in self._reduced_index_cache:
return self._reduced_index_cache[cache_key]
if log.isEnabledFor(DEBUG):
log.debug('Retrieving packages for: %s', dashlist(sorted(text_type(s) for s in specs)))
specs, features = self.verify_specs(specs)
filter_out = {prec: False if val else "feature not enabled"
for prec, val in iteritems(self.default_filter(features))}
snames = set()
top_level_spec = None
cp_filter_applied = set() # values are package names
def filter_group(_specs):
# all _specs should be for the same package name
name = next(iter(_specs)).name
group = self.groups.get(name, ())
# implement strict channel priority
if strict_channel_priority and name not in cp_filter_applied:
sole_source_channel_name = self._get_strict_channel(name)
for prec in group:
if prec.channel.name != sole_source_channel_name:
filter_out[prec] = "removed due to strict channel priority"
cp_filter_applied.add(name)
# Prune packages that don't match any of the patterns
# or which have unsatisfiable dependencies
nold = nnew = 0
for prec in group:
if not filter_out.setdefault(prec, False):
nold += 1
if not self.match_any(_specs, prec):
filter_out[prec] = "incompatible with required spec %s" % top_level_spec
continue
unsatisfiable_dep_specs = tuple(
ms for ms in self.ms_depends(prec)
if not any(not filter_out.get(rec, False) for rec in self.find_matches(ms))
)
if unsatisfiable_dep_specs:
filter_out[prec] = "unsatisfiable dependencies %s" % " ".join(
str(s) for s in unsatisfiable_dep_specs
)
continue
filter_out[prec] = False
nnew += 1
reduced = nnew < nold
if reduced:
log.debug('%s: pruned from %d -> %d' % (name, nold, nnew))
if any(ms.optional for ms in _specs):
return reduced
elif nnew == 0:
# Indicates that a conflict was found; we can exit early
return None
# Perform the same filtering steps on any dependencies shared across
# *all* packages in the group. Even if just one of the packages does
# not have a particular dependency, it must be ignored in this pass.
# Otherwise, we might do more filtering than we should---and it is
# better to have extra packages here than missing ones.
if reduced or name not in snames:
snames.add(name)
_dep_specs = groupby(lambda s: s.name, (
dep_spec
for prec in group if not filter_out.get(prec, False)
for dep_spec in self.ms_depends(prec) if not dep_spec.optional
))
_dep_specs.pop("*", None) # discard track_features specs
for deps in itervalues(_dep_specs):
if len(deps) >= nnew:
res = filter_group(set(deps))
if res:
reduced = True
elif res is None:
# Indicates that a conflict was found; we can exit early
return None
return reduced
# Iterate on pruning until no progress is made. We've implemented
# what amounts to "double-elimination" here; packages get one additional
# chance after their first "False" reduction. This catches more instances
# where one package's filter affects another. But we don't have to be
# perfect about this, so performance matters.
for _ in range(2):
snames.clear()
slist = list(specs)
reduced = False
while slist:
s = slist.pop()
top_level_spec = s
reduced = filter_group([s])
if reduced:
slist.append(s)
elif reduced is None:
break
if reduced is None:
# This filter reset means that unsatisfiable indexes leak through.
filter_out = {prec: False if val else "feature not enabled"
for prec, val in iteritems(self.default_filter(features))}
# TODO: raise unsatisfiable exception here
# Messaging to users should be more descriptive.
# 1. Are there no direct matches?
# 2. Are there no matches for first-level dependencies?
# 3. Have the first level dependencies been invalidated?
break
# Determine all valid packages in the dependency graph
reduced_index2 = {prec: prec for prec in (make_feature_record(fstr) for fstr in features)}
processed_specs = set()
specs_queue = set(specs)
while specs_queue:
this_spec = specs_queue.pop()
processed_specs.add(this_spec)
add_these_precs2 = tuple(
prec for prec in self.find_matches(this_spec)
if prec not in reduced_index2 and self.valid2(prec, filter_out)
)
if strict_channel_priority and add_these_precs2:
strict_channel_name = self._get_strict_channel(add_these_precs2[0].name)
add_these_precs2 = tuple(
prec for prec in add_these_precs2 if prec.channel.name == strict_channel_name
)
reduced_index2.update((prec, prec) for prec in add_these_precs2)
# We do not pull packages into the reduced index due
# to a track_features dependency. Remember, a feature
# specifies a "soft" dependency: it must be in the
# environment, but it is not _pulled_ in. The SAT
# logic doesn't do a perfect job of capturing this
# behavior, but keeping these packages out of the
# reduced index helps. Of course, if _another_
# package pulls it in by dependency, that's fine.
specs_queue.update(
ms for prec in add_these_precs2 for ms in self.ms_depends(prec)
if "track_features" not in ms and ms not in processed_specs
)
self._reduced_index_cache[cache_key] = reduced_index2
return reduced_index2
def match_any(self, mss, prec):
return any(ms.match(prec) for ms in mss)
def find_matches(self, spec):
# type: (MatchSpec) -> Set[PackageRecord]
res = self._cached_find_matches.get(spec, None)
if res is not None:
return res
spec_name = spec.get_exact_value('name')
if spec_name:
candidate_precs = self.groups.get(spec_name, ())
elif spec.get_exact_value('track_features'):
feature_names = spec.get_exact_value('track_features')
candidate_precs = concat(
self.trackers.get(feature_name, ()) for feature_name in feature_names
)
else:
candidate_precs = itervalues(self.index)
res = frozenset(p for p in candidate_precs if spec.match(p))
self._cached_find_matches[spec] = res
return res
def ms_depends(self, prec):
# type: (PackageRecord) -> List[MatchSpec]
deps = self.ms_depends_.get(prec)
if deps is None:
deps = [MatchSpec(d) for d in prec.combined_depends]
deps.extend(MatchSpec(track_features=feat) for feat in prec.features)
self.ms_depends_[prec] = deps
return deps
def version_key(self, prec, vtype=None):
channel = prec.channel
channel_priority = self._channel_priorities_map.get(channel.name, 1) # TODO: ask @mcg1969 why the default value is 1 here # NOQA
valid = 1 if channel_priority < MAX_CHANNEL_PRIORITY else 0
version_comparator = VersionOrder(prec.get('version', ''))
build_number = prec.get('build_number', 0)
build_string = prec.get('build')
ts = prec.get('timestamp', 0)
if self._channel_priority != ChannelPriority.DISABLED:
vkey = [valid, -channel_priority, version_comparator, build_number]
else:
vkey = [valid, version_comparator, -channel_priority, build_number]
if self._solver_ignore_timestamps:
vkey.append(build_string)
else:
vkey.extend((ts, build_string))
return vkey
@staticmethod
def _make_channel_priorities(channels):
priorities_map = odict()
for priority_counter, chn in enumerate(concat(
(Channel(cc) for cc in c._channels) if isinstance(c, MultiChannel) else (c,)
for c in (Channel(c) for c in channels)
)):
channel_name = chn.name
if channel_name in priorities_map:
continue
priorities_map[channel_name] = min(priority_counter, MAX_CHANNEL_PRIORITY - 1)
return priorities_map
def get_pkgs(self, ms, emptyok=False): # pragma: no cover
# legacy method for conda-build
ms = MatchSpec(ms)
precs = self.find_matches(ms)
if not precs and not emptyok:
raise ResolvePackageNotFound([(ms,)])
return sorted(precs, key=self.version_key)
@staticmethod
def to_sat_name(val):
# val can be a PackageRecord or MatchSpec
if isinstance(val, PackageRecord):
return val.dist_str()
elif isinstance(val, MatchSpec):
return '@s@' + text_type(val) + ('?' if val.optional else '')
else:
raise NotImplementedError()
@staticmethod
def to_feature_metric_id(prec_dist_str, feat):
return '@fm@%s@%s' % (prec_dist_str, feat)
def push_MatchSpec(self, C, spec):
spec = MatchSpec(spec)
sat_name = self.to_sat_name(spec)
m = C.from_name(sat_name)
if m is not None:
# the spec has already been pushed onto the clauses stack
return sat_name
simple = spec._is_single()
nm = spec.get_exact_value('name')
tf = frozenset(_tf for _tf in (
f.strip() for f in spec.get_exact_value('track_features') or ()
) if _tf)
if nm:
tgroup = libs = self.groups.get(nm, [])
elif tf:
assert len(tf) == 1
k = next(iter(tf))
tgroup = libs = self.trackers.get(k, [])
else:
tgroup = libs = self.index.keys()
simple = False
if not simple:
libs = [fkey for fkey in tgroup if spec.match(fkey)]
if len(libs) == len(tgroup):
if spec.optional:
m = True
elif not simple:
ms2 = MatchSpec(track_features=tf) if tf else MatchSpec(nm)
m = C.from_name(self.push_MatchSpec(C, ms2))
if m is None:
sat_names = [self.to_sat_name(prec) for prec in libs]
if spec.optional:
ms2 = MatchSpec(track_features=tf) if tf else MatchSpec(nm)
sat_names.append('!' + self.to_sat_name(ms2))
m = C.Any(sat_names)
C.name_var(m, sat_name)
return sat_name
@time_recorder(module_name=__name__)
def gen_clauses(self):
C = Clauses(sat_solver_cls=get_sat_solver_cls(context.sat_solver))
for name, group in iteritems(self.groups):
group = [self.to_sat_name(prec) for prec in group]
# Create one variable for each package
for sat_name in group:
C.new_var(sat_name)
# Create one variable for the group
m = C.new_var(self.to_sat_name(MatchSpec(name)))
# Exactly one of the package variables, OR
# the negation of the group variable, is true
C.Require(C.ExactlyOne, group + [C.Not(m)])
# If a package is installed, its dependencies must be as well
for prec in itervalues(self.index):
nkey = C.Not(self.to_sat_name(prec))
for ms in self.ms_depends(prec):
C.Require(C.Or, nkey, self.push_MatchSpec(C, ms))
if log.isEnabledFor(DEBUG):
log.debug("gen_clauses returning with clause count: %d", C.get_clause_count())
return C
def generate_spec_constraints(self, C, specs):
result = [(self.push_MatchSpec(C, ms),) for ms in specs]
if log.isEnabledFor(DEBUG):
log.debug(
"generate_spec_constraints returning with clause count: %d",
C.get_clause_count())
return result
def generate_feature_count(self, C):
result = {self.push_MatchSpec(C, MatchSpec(track_features=name)): 1
for name in iterkeys(self.trackers)}
if log.isEnabledFor(DEBUG):
log.debug(
"generate_feature_count returning with clause count: %d", C.get_clause_count())
return result
def generate_update_count(self, C, specs):
return {'!'+ms.target: 1 for ms in specs if ms.target and C.from_name(ms.target)}
def generate_feature_metric(self, C):
eq = {} # a C.minimize() objective: Dict[varname, coeff]
# Given a pair (prec, feature), assign a "1" score IF:
# - The prec is installed
# - The prec does NOT require the feature
# - At least one package in the group DOES require the feature
# - A package that tracks the feature is installed
for name, group in iteritems(self.groups):
prec_feats = {self.to_sat_name(prec): set(prec.features) for prec in group}
active_feats = set.union(*prec_feats.values()).intersection(self.trackers)
for feat in active_feats:
clause_id_for_feature = self.push_MatchSpec(C, MatchSpec(track_features=feat))
for prec_sat_name, features in prec_feats.items():
if feat not in features:
feature_metric_id = self.to_feature_metric_id(prec_sat_name, feat)
C.name_var(C.And(prec_sat_name, clause_id_for_feature), feature_metric_id)
eq[feature_metric_id] = 1
return eq
def generate_removal_count(self, C, specs):
return {'!'+self.push_MatchSpec(C, ms.name): 1 for ms in specs}
def generate_install_count(self, C, specs):
return {self.push_MatchSpec(C, ms.name): 1 for ms in specs if ms.optional}
def generate_package_count(self, C, missing):
return {self.push_MatchSpec(C, nm): 1 for nm in missing}
def generate_version_metrics(self, C, specs, include0=False):
# each of these are weights saying how well packages match the specs
# format for each: a C.minimize() objective: Dict[varname, coeff]
eqc = {} # channel
eqv = {} # version
eqb = {} # build number
eqt = {} # timestamp
sdict = {} # Dict[package_name, PackageRecord]
for s in specs:
s = MatchSpec(s) # needed for testing
sdict.setdefault(s.name, [])
# # TODO: this block is important! can't leave it commented out
# rec = sdict.setdefault(s.name, [])
# if s.target:
# dist = Dist(s.target)
# if dist in self.index:
# if self.index[dist].get('priority', 0) < MAX_CHANNEL_PRIORITY:
# rec.append(dist)
for name, targets in iteritems(sdict):
pkgs = [(self.version_key(p), p) for p in self.groups.get(name, [])]
pkey = None
# keep in mind that pkgs is already sorted according to version_key (a tuple,
# so composite sort key). Later entries in the list are, by definition,
# greater in some way, so simply comparing with != suffices.
for version_key, prec in pkgs:
if targets and any(prec == t for t in targets):
continue
if pkey is None:
ic = iv = ib = it = 0
# valid package, channel priority
elif pkey[0] != version_key[0] or pkey[1] != version_key[1]:
ic += 1
iv = ib = it = 0
# version
elif pkey[2] != version_key[2]:
iv += 1
ib = it = 0
# build number
elif pkey[3] != version_key[3]:
ib += 1
it = 0
elif not self._solver_ignore_timestamps and pkey[4] != version_key[4]:
it += 1
prec_sat_name = self.to_sat_name(prec)
if ic or include0:
eqc[prec_sat_name] = ic
if iv or include0:
eqv[prec_sat_name] = iv
if ib or include0:
eqb[prec_sat_name] = ib
if it or include0:
eqt[prec_sat_name] = it
pkey = version_key
return eqc, eqv, eqb, eqt
def dependency_sort(self, must_have):
# type: (Dict[package_name, PackageRecord]) -> List[PackageRecord]
assert isinstance(must_have, dict)
digraph = {} # Dict[package_name, Set[dependent_package_names]]
for package_name, prec in iteritems(must_have):
if prec in self.index:
digraph[package_name] = set(ms.name for ms in self.ms_depends(prec))
# There are currently at least three special cases to be aware of.
# 1. The `toposort()` function, called below, contains special case code to remove
# any circular dependency between python and pip.
# 2. conda/plan.py has special case code for menuinst
# Always link/unlink menuinst first/last on windows in case a subsequent
# package tries to import it to create/remove a shortcut
# 3. On windows, python noarch packages need an implicit dependency on conda added, if
# conda is in the list of packages for the environment. Python noarch packages
# that have entry points use conda's own conda.exe python entry point binary. If conda
# is going to be updated during an operation, the unlink / link order matters.
# See issue #6057.
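# Rough illustration of the toposort input (hypothetical): a digraph such as
# {'pip': {'python'}, 'python': set()} yields the order ['python', 'pip'], so
# dependencies are linked before their dependents.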
if on_win and 'conda' in digraph:
for package_name, dist in iteritems(must_have):
record = self.index.get(dist)
if hasattr(record, 'noarch') and record.noarch == NoarchType.python:
digraph[package_name].add('conda')
sorted_keys = toposort(digraph)
must_have = must_have.copy()
# Take all of the items in the sorted keys
# Don't fail if the key does not exist
result = [must_have.pop(key) for key in sorted_keys if key in must_have]
# Take any keys that were not sorted
result.extend(must_have.values())
return result
def environment_is_consistent(self, installed):
log.debug('Checking if the current environment is consistent')
if not installed:
return None, []
sat_name_map = {} # Dict[sat_name, PackageRecord]
specs = []
for prec in installed:
sat_name_map[self.to_sat_name(prec)] = prec
specs.append(MatchSpec('%s %s %s' % (prec.name, prec.version, prec.build)))
r2 = Resolve({prec: prec for prec in installed}, True, True, channels=self.channels)
C = r2.gen_clauses()
constraints = r2.generate_spec_constraints(C, specs)
solution = C.sat(constraints)
return bool(solution)
def get_conflicting_specs(self, specs):
if not specs:
return ()
reduced_index = self.get_reduced_index(specs)
# Check if satisfiable
def mysat(specs, add_if=False):
constraints = r2.generate_spec_constraints(C, specs)
return C.sat(constraints, add_if)
r2 = Resolve(reduced_index, True, True, channels=self.channels)
C = r2.gen_clauses()
solution = mysat(specs, True)
if solution:
return ()
else:
# This first result is just a single unsatisfiable core. There may be several.
unsat_specs = list(minimal_unsatisfiable_subset(specs, sat=mysat))
satisfiable_specs = set(specs) - set(unsat_specs)
# In this loop, we test each unsatisfiable spec individually against the satisfiable
# specs to ensure there are no other unsatisfiable specs in the set.
final_unsat_specs = set()
while unsat_specs:
this_spec = unsat_specs.pop(0)
final_unsat_specs.add(this_spec)
test_specs = satisfiable_specs | {this_spec}
C = r2.gen_clauses() # TODO: wasteful call, but Clauses() needs refactored
solution = mysat(test_specs, True)
if not solution:
these_unsat = minimal_unsatisfiable_subset(test_specs, sat=mysat)
if len(these_unsat) > 1:
unsat_specs.extend(these_unsat)
satisfiable_specs -= set(unsat_specs)
return tuple(final_unsat_specs)
def bad_installed(self, installed, new_specs):
log.debug('Checking if the current environment is consistent')
if not installed:
return None, []
sat_name_map = {} # Dict[sat_name, PackageRecord]
specs = []
for prec in installed:
sat_name_map[self.to_sat_name(prec)] = prec
specs.append(MatchSpec('%s %s %s' % (prec.name, prec.version, prec.build)))
new_index = {prec: prec for prec in itervalues(sat_name_map)}
r2 = Resolve(new_index, True, True, channels=self.channels)
C = r2.gen_clauses()
constraints = r2.generate_spec_constraints(C, specs)
solution = C.sat(constraints)
limit = xtra = None
if not solution or xtra:
def get_(name, snames):
if name not in snames:
snames.add(name)
for fn in self.groups.get(name, []):
for ms in self.ms_depends(fn):
get_(ms.name, snames)
# New addition: find the largest set of installed packages that
# are consistent with each other, and include those in the
# list of packages to maintain consistency with
snames = set()
eq_optional_c = r2.generate_removal_count(C, specs)
solution, _ = C.minimize(eq_optional_c, C.sat())
snames.update(sat_name_map[sat_name]['name']
for sat_name in (C.from_index(s) for s in solution)
if sat_name and sat_name[0] != '!' and '@' not in sat_name)
# Existing behavior: keep all specs and their dependencies
for spec in new_specs:
get_(MatchSpec(spec).name, snames)
if len(snames) < len(sat_name_map):
limit = snames
xtra = [rec for sat_name, rec in iteritems(sat_name_map)
if rec['name'] not in snames]
log.debug('Limiting solver to the following packages: %s', ', '.join(limit))
if xtra:
log.debug('Packages to be preserved: %s', xtra)
return limit, xtra
def restore_bad(self, pkgs, preserve):
if preserve:
sdict = {prec.name: prec for prec in pkgs}
pkgs.extend(p for p in preserve if p.name not in sdict)
def install_specs(self, specs, installed, update_deps=True):
specs = list(map(MatchSpec, specs))
snames = {s.name for s in specs}
log.debug('Checking satisfiability of current install')
limit, preserve = self.bad_installed(installed, specs)
for prec in installed:
if prec not in self.index:
continue
name, version, build = prec.name, prec.version, prec.build
schannel = prec.channel.canonical_name
if name in snames or limit is not None and name not in limit:
continue
# If update_deps=True, set the target package in MatchSpec so that
# the solver can minimize the version change. If update_deps=False,
# fix the version and build so that no change is possible.
if update_deps:
# TODO: fix target here
spec = MatchSpec(name=name, target=prec.dist_str())
else:
spec = MatchSpec(name=name, version=version,
build=build, channel=schannel)
specs.append(spec)
return specs, preserve
def install(self, specs, installed=None, update_deps=True, returnall=False):
specs, preserve = self.install_specs(specs, installed or [], update_deps)
pkgs = self.solve(specs, returnall=returnall, _remove=False)
self.restore_bad(pkgs, preserve)
return pkgs
def remove_specs(self, specs, installed):
nspecs = []
# There's an imperfect thing happening here. "specs" nominally contains
# a list of package names or track_feature values to be removed. But
# because of add_defaults_to_specs it may also contain version constraints
# like "python 2.7*", which are *not* asking for python to be removed.
# We need to separate these two kinds of specs here.
for s in map(MatchSpec, specs):
# Since '@' is an illegal version number, this ensures that all of
# these matches will never match an actual package. Combined with
# optional=True, this has the effect of forcing their removal.
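# e.g. (illustrative): a bare spec like 'numpy' becomes
# MatchSpec('numpy', version='@', optional=True), which can never match a real
# build, so the only way to "satisfy" it is to leave numpy uninstalled.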
if s._is_single():
nspecs.append(MatchSpec(s, version='@', optional=True))
else:
nspecs.append(MatchSpec(s, optional=True))
snames = set(s.name for s in nspecs if s.name)
limit, _ = self.bad_installed(installed, nspecs)
preserve = []
for prec in installed:
nm, ver = prec.name, prec.version
if nm in snames:
continue
elif limit is not None:
preserve.append(prec)
else:
# TODO: fix target here
nspecs.append(MatchSpec(name=nm,
version='>='+ver if ver else None,
optional=True,
target=prec.dist_str()))
return nspecs, preserve
def remove(self, specs, installed):
specs, preserve = self.remove_specs(specs, installed)
pkgs = self.solve(specs, _remove=True)
self.restore_bad(pkgs, preserve)
return pkgs
@time_recorder(module_name=__name__)
def solve(self, specs, returnall=False, _remove=False):
# type: (List[str], bool) -> List[PackageRecord]
if log.isEnabledFor(DEBUG):
log.debug('Solving for: %s', dashlist(sorted(text_type(s) for s in specs)))
# Find the compliant packages
log.debug("Solve: Getting reduced index of compliant packages")
len0 = len(specs)
specs = tuple(map(MatchSpec, specs))
reduced_index = self.get_reduced_index(specs)
if not reduced_index:
return False if reduced_index is None else ([[]] if returnall else [])
# Check if satisfiable
log.debug("Solve: determining satisfiability")
def mysat(specs, add_if=False):
constraints = r2.generate_spec_constraints(C, specs)
return C.sat(constraints, add_if)
r2 = Resolve(reduced_index, True, True, channels=self.channels)
C = r2.gen_clauses()
solution = mysat(specs, True)
if not solution:
specs = minimal_unsatisfiable_subset(specs, sat=mysat)
self.find_conflicts(specs)
speco = [] # optional packages
specr = [] # requested packages
speca = [] # all other packages
specm = set(r2.groups) # missing from specs
for k, s in enumerate(specs):
if s.name in specm:
specm.remove(s.name)
if not s.optional:
(speca if s.target or k >= len0 else specr).append(s)
elif any(r2.find_matches(s)):
s = MatchSpec(s.name, optional=True, target=s.target)
speco.append(s)
speca.append(s)
speca.extend(MatchSpec(s) for s in specm)
# Removed packages: minimize count
log.debug("Solve: minimize removed packages")
if _remove:
eq_optional_c = r2.generate_removal_count(C, speco)
solution, obj7 = C.minimize(eq_optional_c, solution)
log.debug('Package removal metric: %d', obj7)
# Requested packages: maximize versions
log.debug("Solve: maximize versions of requested packages")
eq_req_c, eq_req_v, eq_req_b, eq_req_t = r2.generate_version_metrics(C, specr)
solution, obj3a = C.minimize(eq_req_c, solution)
solution, obj3 = C.minimize(eq_req_v, solution)
log.debug('Initial package channel/version metric: %d/%d', obj3a, obj3)
# Track features: minimize feature count
log.debug("Solve: minimize track_feature count")
eq_feature_count = r2.generate_feature_count(C)
solution, obj1 = C.minimize(eq_feature_count, solution)
log.debug('Track feature count: %d', obj1)
# Featured packages: minimize number of featureless packages
# installed when a featured alternative is feasible.
# For example, package name foo exists with two built packages. One with
# 'track_features: 'feat1', and one with 'track_features': 'feat2'.
# The previous "Track features" minimization pass has chosen 'feat1' for the
# environment, but not 'feat2'. In this case, the 'feat2' version of foo is
# considered "featureless."
if not context.featureless_minimization_disabled_feature_flag:
log.debug("Solve: maximize number of packages that have necessary features")
eq_feature_metric = r2.generate_feature_metric(C)
solution, obj2 = C.minimize(eq_feature_metric, solution)
log.debug('Package misfeature count: %d', obj2)
# Requested packages: maximize builds
log.debug("Solve: maximize build numbers of requested packages")
solution, obj4 = C.minimize(eq_req_b, solution)
log.debug('Initial package build metric: %d', obj4)
# Optional installations: minimize count
if not _remove:
log.debug("Solve: minimize number of optional installations")
eq_optional_install = r2.generate_install_count(C, speco)
solution, obj49 = C.minimize(eq_optional_install, solution)
log.debug('Optional package install metric: %d', obj49)
# Dependencies: minimize the number of packages that need upgrading
log.debug("Solve: minimize number of necessary upgrades")
eq_u = r2.generate_update_count(C, speca)
solution, obj50 = C.minimize(eq_u, solution)
log.debug('Dependency update count: %d', obj50)
# Remaining packages: maximize versions, then builds
log.debug("Solve: maximize versions and builds of indirect dependencies")
eq_c, eq_v, eq_b, eq_t = r2.generate_version_metrics(C, speca)
solution, obj5a = C.minimize(eq_c, solution)
solution, obj5 = C.minimize(eq_v, solution)
solution, obj6 = C.minimize(eq_b, solution)
log.debug('Additional package channel/version/build metrics: %d/%d/%d',
obj5a, obj5, obj6)
# Maximize timestamps
log.debug("Solve: maximize timestamps")
eq_t.update(eq_req_t)
solution, obj6t = C.minimize(eq_t, solution)
log.debug('Timestamp metric: %d', obj6t)
# Prune unnecessary packages
log.debug("Solve: prune unnecessary packages")
eq_c = r2.generate_package_count(C, specm)
solution, obj7 = C.minimize(eq_c, solution, trymax=True)
log.debug('Weak dependency count: %d', obj7)
def clean(sol):
return [q for q in (C.from_index(s) for s in sol)
if q and q[0] != '!' and '@' not in q]
log.debug('Looking for alternate solutions')
nsol = 1
psolutions = []
psolution = clean(solution)
psolutions.append(psolution)
while True:
nclause = tuple(C.Not(C.from_name(q)) for q in psolution)
solution = C.sat((nclause,), True)
if solution is None:
break
nsol += 1
if nsol > 10:
log.debug('Too many solutions; terminating')
break
psolution = clean(solution)
psolutions.append(psolution)
if nsol > 1:
psols2 = list(map(set, psolutions))
common = set.intersection(*psols2)
diffs = [sorted(set(sol) - common) for sol in psols2]
if not context.json:
stdoutlog.info(
'\nWarning: %s possible package resolutions '
'(only showing differing packages):%s%s' %
('>10' if nsol > 10 else nsol,
dashlist(', '.join(diff) for diff in diffs),
'\n ... and others' if nsol > 10 else ''))
# def stripfeat(sol):
# return sol.split('[')[0]
new_index = {self.to_sat_name(prec): prec for prec in itervalues(self.index)}
if returnall:
if len(psolutions) > 1:
raise RuntimeError()
# TODO: clean up this mess
# return [sorted(Dist(stripfeat(dname)) for dname in psol) for psol in psolutions]
# return [sorted((new_index[sat_name] for sat_name in psol), key=lambda x: x.name)
# for psol in psolutions]
# return sorted(Dist(stripfeat(dname)) for dname in psolutions[0])
return sorted((new_index[sat_name] for sat_name in psolutions[0]), key=lambda x: x.name)
|
py | 1a4eea33a42ded6f1045718aa3a68932d6e2eb05 | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Base class for RPC testing."""
from enum import Enum
import logging
import optparse
import os
import pdb
import shutil
import sys
import tempfile
import time
from .authproxy import JSONRPCException
from . import coverage
from .test_node import TestNode
from .util import (
MAX_NODES,
PortSeed,
assert_equal,
check_json_precision,
connect_nodes_bi,
disconnect_nodes,
get_datadir_path,
initialize_datadir,
p2p_port,
set_node_times,
sync_blocks,
sync_mempools,
)
class TestStatus(Enum):
PASSED = 1
FAILED = 2
SKIPPED = 3
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77
class BitcoinTestFramework():
"""Base class for a stomp test script.
Individual stomp test scripts should subclass this class and override the set_test_params() and run_test() methods.
Individual tests can also override the following methods to customize the test setup:
- add_options()
- setup_chain()
- setup_network()
- setup_nodes()
The __init__() and main() methods should not be overridden.
This class also contains various public and private helper methods."""
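# A minimal subclass sketch (illustrative only; the test name and RPC calls
# shown are hypothetical, not part of this framework):
#
#   class ExampleTest(BitcoinTestFramework):
#       def set_test_params(self):
#           self.num_nodes = 2
#           self.setup_clean_chain = True
#       def run_test(self):
#           self.nodes[0].generate(10)
#           self.sync_all()
#
#   if __name__ == '__main__':
#       ExampleTest().main()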
def __init__(self):
"""Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method"""
self.setup_clean_chain = False
self.nodes = []
self.mocktime = 0
self.supports_cli = False
self.set_test_params()
assert hasattr(self, "num_nodes"), "Test must set self.num_nodes in set_test_params()"
def main(self):
"""Main function. This should not be overridden by the subclass test scripts."""
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave stompds and test.* datadir on exit or error")
parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop stompds after the test execution")
parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../../src"),
help="Source directory containing stompd/stomp-cli (default: %default)")
parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
help="Directory for caching pregenerated datadirs")
parser.add_option("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
parser.add_option("-l", "--loglevel", dest="loglevel", default="INFO",
help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
help="The seed to use for assigning port numbers (default: current process id)")
parser.add_option("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
parser.add_option("--configfile", dest="configfile",
help="Location of the test framework config file")
parser.add_option("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
help="Attach a python debugger if test fails")
parser.add_option("--usecli", dest="usecli", default=False, action="store_true",
help="use bitcoin-cli instead of RPC for all commands")
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
PortSeed.n = self.options.port_seed
os.environ['PATH'] = self.options.srcdir + ":" + self.options.srcdir + "/qt:" + os.environ['PATH']
check_json_precision()
self.options.cachedir = os.path.abspath(self.options.cachedir)
# Set up temp directory and start logging
if self.options.tmpdir:
self.options.tmpdir = os.path.abspath(self.options.tmpdir)
os.makedirs(self.options.tmpdir, exist_ok=False)
else:
self.options.tmpdir = tempfile.mkdtemp(prefix="test")
self._start_logging()
success = TestStatus.FAILED
try:
if self.options.usecli and not self.supports_cli:
raise SkipTest("--usecli specified but test does not support using CLI")
self.setup_chain()
self.setup_network()
time.sleep(5)
self.run_test()
success = TestStatus.PASSED
except JSONRPCException as e:
self.log.exception("JSONRPC error")
except SkipTest as e:
self.log.warning("Test Skipped: %s" % e.message)
success = TestStatus.SKIPPED
except AssertionError as e:
self.log.exception("Assertion failed")
except KeyError as e:
self.log.exception("Key error")
except Exception as e:
self.log.exception("Unexpected exception caught during testing")
except KeyboardInterrupt as e:
self.log.warning("Exiting after keyboard interrupt")
if success == TestStatus.FAILED and self.options.pdbonfailure:
print("Testcase failed. Attaching python debugger. Enter ? for help")
pdb.set_trace()
if not self.options.noshutdown:
self.log.info("Stopping nodes")
if self.nodes:
self.stop_nodes()
else:
for node in self.nodes:
node.cleanup_on_exit = False
self.log.info("Note: stompds were not stopped and may still be running")
if not self.options.nocleanup and not self.options.noshutdown and success != TestStatus.FAILED:
self.log.info("Cleaning up")
shutil.rmtree(self.options.tmpdir)
else:
self.log.warning("Not cleaning up dir %s" % self.options.tmpdir)
if success == TestStatus.PASSED:
self.log.info("Tests successful")
exit_code = TEST_EXIT_PASSED
elif success == TestStatus.SKIPPED:
self.log.info("Test skipped")
exit_code = TEST_EXIT_SKIPPED
else:
self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
self.log.error("Hint: Call {} '{}' to consolidate all logs".format(os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../combine_logs.py"), self.options.tmpdir))
exit_code = TEST_EXIT_FAILED
logging.shutdown()
sys.exit(exit_code)
# Methods to override in subclass test scripts.
def set_test_params(self):
"""Tests must this method to change default values for number of nodes, topology, etc"""
raise NotImplementedError
def add_options(self, parser):
"""Override this method to add command-line options to the test"""
pass
def setup_chain(self):
"""Override this method to customize blockchain setup"""
self.log.info("Initializing test directory " + self.options.tmpdir)
if self.setup_clean_chain:
self._initialize_chain_clean()
else:
self._initialize_chain()
def setup_network(self):
"""Override this method to customize test network topology"""
self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
for i in range(self.num_nodes - 1):
connect_nodes_bi(self.nodes, i, i + 1)
self.sync_all()
def setup_nodes(self):
"""Override this method to customize test node setup"""
extra_args = None
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.add_nodes(self.num_nodes, extra_args)
self.start_nodes()
def run_test(self):
"""Tests must override this method to define test logic"""
raise NotImplementedError
# Public helper methods. These can be accessed by the subclass test scripts.
def add_nodes(self, num_nodes, extra_args=None, rpchost=None, timewait=None, binary=None):
"""Instantiate TestNode objects"""
if extra_args is None:
extra_args = [[]] * num_nodes
if binary is None:
binary = [None] * num_nodes
assert_equal(len(extra_args), num_nodes)
assert_equal(len(binary), num_nodes)
for i in range(num_nodes):
self.nodes.append(TestNode(i, self.options.tmpdir, extra_args[i], rpchost, timewait=timewait, binary=binary[i], stderr=None, mocktime=self.mocktime, coverage_dir=self.options.coveragedir, use_cli=self.options.usecli))
def start_node(self, i, *args, **kwargs):
"""Start a stompd"""
node = self.nodes[i]
node.start(*args, **kwargs)
node.wait_for_rpc_connection()
time.sleep(10)
if self.options.coveragedir is not None:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def start_nodes(self, extra_args=None, *args, **kwargs):
"""Start multiple stompds"""
if extra_args is None:
extra_args = [None] * self.num_nodes
assert_equal(len(extra_args), self.num_nodes)
try:
for i, node in enumerate(self.nodes):
node.start(extra_args[i], *args, **kwargs)
for node in self.nodes:
node.wait_for_rpc_connection()
except:
# If one node failed to start, stop the others
self.stop_nodes()
raise
time.sleep(10)
if self.options.coveragedir is not None:
for node in self.nodes:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def stop_node(self, i):
"""Stop a stompd test node"""
self.nodes[i].stop_node()
self.nodes[i].wait_until_stopped()
def stop_nodes(self):
"""Stop multiple stompd test nodes"""
for node in self.nodes:
# Issue RPC to stop nodes
node.stop_node()
for node in self.nodes:
# Wait for nodes to stop
time.sleep(5)
node.wait_until_stopped()
def restart_node(self, i, extra_args=None):
"""Stop and start a test node"""
self.stop_node(i)
self.start_node(i, extra_args)
def assert_start_raises_init_error(self, i, extra_args=None, expected_msg=None, *args, **kwargs):
with tempfile.SpooledTemporaryFile(max_size=2**16) as log_stderr:
try:
self.start_node(i, extra_args, stderr=log_stderr, *args, **kwargs)
self.stop_node(i)
except Exception as e:
assert 'stompd exited' in str(e) # node must have shutdown
self.nodes[i].running = False
self.nodes[i].process = None
if expected_msg is not None:
log_stderr.seek(0)
stderr = log_stderr.read().decode('utf-8')
if expected_msg not in stderr:
raise AssertionError("Expected error \"" + expected_msg + "\" not found in:\n" + stderr)
else:
if expected_msg is None:
assert_msg = "stompd should have exited with an error"
else:
assert_msg = "stompd should have exited with expected error " + expected_msg
raise AssertionError(assert_msg)
def wait_for_node_exit(self, i, timeout):
self.nodes[i].process.wait(timeout)
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
disconnect_nodes(self.nodes[1], 2)
disconnect_nodes(self.nodes[2], 1)
self.sync_all([self.nodes[:2], self.nodes[2:]])
def join_network(self):
"""
Join the (previously split) network halves together.
"""
connect_nodes_bi(self.nodes, 1, 2)
self.sync_all()
def sync_all(self, node_groups=None):
if not node_groups:
node_groups = [self.nodes]
for group in node_groups:
sync_blocks(group)
sync_mempools(group)
def enable_mocktime(self):
"""Enable mocktime for the script.
mocktime may be needed for scripts that use the cached version of the
blockchain. If the cached version of the blockchain is used without
mocktime then the mempools will not sync due to IBD.
For backward compatibility of the python scripts with previous
versions of the cache, this helper function sets mocktime to Jan 1,
2014 + (201 * 10 * 60)"""
self.mocktime = 1454124732 + (201 * 10 * 60)
def disable_mocktime(self):
self.mocktime = 0
# Private helper methods. These should not be accessed by the subclass test scripts.
def _start_logging(self):
# Add logger and logging handlers
self.log = logging.getLogger('TestFramework')
self.log.setLevel(logging.DEBUG)
# Create file handler to log all messages
fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log')
fh.setLevel(logging.DEBUG)
# Create console handler to log messages to stderr. By default this logs only error messages, but can be configured with --loglevel.
ch = logging.StreamHandler(sys.stdout)
# User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int
ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
ch.setLevel(ll)
# Format logs the same as stompd's debug.log with microsecond precision (so log files can be concatenated and sorted)
formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000 %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
formatter.converter = time.gmtime
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
self.log.addHandler(fh)
self.log.addHandler(ch)
if self.options.trace_rpc:
rpc_logger = logging.getLogger("BitcoinRPC")
rpc_logger.setLevel(logging.DEBUG)
rpc_handler = logging.StreamHandler(sys.stdout)
rpc_handler.setLevel(logging.DEBUG)
rpc_logger.addHandler(rpc_handler)
def _initialize_chain(self):
"""Initialize a pre-mined blockchain for use by the test.
Create a cache of a 200-block-long chain (with wallet) for MAX_NODES
Afterward, create num_nodes copies from the cache."""
assert self.num_nodes <= MAX_NODES
create_cache = False
for i in range(MAX_NODES):
if not os.path.isdir(get_datadir_path(self.options.cachedir, i)):
create_cache = True
break
if create_cache:
self.log.debug("Creating data directories from cached datadir")
# find and delete old cache directories if any exist
for i in range(MAX_NODES):
if os.path.isdir(get_datadir_path(self.options.cachedir, i)):
shutil.rmtree(get_datadir_path(self.options.cachedir, i))
# Create cache directories, run bitcoinds:
for i in range(MAX_NODES):
datadir = initialize_datadir(self.options.cachedir, i)
args = [os.getenv("BITCOIND", "stompd"), "-spendzeroconfchange=1", "-server", "-keypool=1", "-datadir=" + datadir, "-discover=0"]
if i > 0:
args.append("-connect=127.0.0.1:" + str(p2p_port(0)))
self.nodes.append(TestNode(i, self.options.cachedir, extra_args=[], rpchost=None, timewait=None, binary=None, stderr=None, mocktime=self.mocktime, coverage_dir=None))
self.nodes[i].args = args
self.start_node(i)
# Wait for RPC connections to be ready
for node in self.nodes:
node.wait_for_rpc_connection()
# Create a 200-block-long chain; each of the 4 first nodes
# gets 25 mature blocks and 25 immature.
# Note: To preserve compatibility with older versions of
# initialize_chain, only 4 nodes will generate coins.
#
# blocks are created with timestamps 60 seconds apart
# starting from 201 minutes in the past
self.enable_mocktime()
block_time = self.mocktime - (201 * 60)
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(self.nodes, block_time)
self.nodes[peer].generate(1)
block_time += 60
# Must sync before next peer starts generating blocks
sync_blocks(self.nodes)
# Shut them down, and clean up cache directories:
self.stop_nodes()
self.nodes = []
self.disable_mocktime()
def cache_path(n, *paths):
return os.path.join(get_datadir_path(self.options.cachedir, n), "regtest", *paths)
for i in range(MAX_NODES):
for entry in os.listdir(cache_path(i)):
if entry not in ['wallet.dat', 'chainstate', 'blocks', 'sporks', 'zerocoin', 'backups']:
os.remove(cache_path(i, entry))
for i in range(self.num_nodes):
from_dir = get_datadir_path(self.options.cachedir, i)
to_dir = get_datadir_path(self.options.tmpdir, i)
shutil.copytree(from_dir, to_dir)
initialize_datadir(self.options.tmpdir, i) # Overwrite port/rpcport in bitcoin.conf
def _initialize_chain_clean(self):
"""Initialize empty blockchain for use by the test.
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization."""
for i in range(self.num_nodes):
initialize_datadir(self.options.tmpdir, i)
class ComparisonTestFramework(BitcoinTestFramework):
"""Test framework for doing p2p comparison testing
Sets up some stompd binaries:
- 1 binary: test binary
- 2 binaries: 1 test binary, 1 ref binary
- n>2 binaries: 1 test binary, n-1 ref binaries"""
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("BITCOIND", "stompd"),
help="stompd binary to test")
parser.add_option("--refbinary", dest="refbinary",
default=os.getenv("BITCOIND", "stompd"),
help="stompd binary to use for reference nodes (if any)")
def setup_network(self):
extra_args = [['-whitelist=127.0.0.1']] * self.num_nodes
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.add_nodes(self.num_nodes, extra_args,
binary=[self.options.testbinary] +
[self.options.refbinary] * (self.num_nodes - 1))
self.start_nodes()
class SkipTest(Exception):
"""This exception is raised to skip a test"""
def __init__(self, message):
self.message = message
|
py | 1a4eebc47ff7aa5b63d0638ff8085f4fd4c45232 | from __future__ import (absolute_import, division, print_function)
from playbook.event import Event
def test_event_instance():
event = Event()
assert isinstance(event, Event)
def test_event_headers():
event = Event(headers={'k1': 'v1', 'k2': 'v2'})
assert event.headers == {'k1': 'v1', 'k2': 'v2'}
def test_event_payload():
event = Event(payload='value')
assert event.payload == 'value'
def test_event_headers_payload():
event = Event(headers={'k1': 'v1', 'k2': 'v2'}, payload='value')
assert event.headers == {'k1': 'v1', 'k2': 'v2'}
assert event.payload == 'value'
def test_event_to_dict():
event = Event(headers={'k1': 'v1', 'k2': 'v2'}, payload='value')
assert event.to_dict() == {
'headers': {'k1': 'v1', 'k2': 'v2'},
'payload': 'value'
}
|
py | 1a4eebd7c276019734a77c8d0fb3b9d204932978 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
def fft_wavenumbers(x, y, shape_dat, shape_pdat):
"""
Compute the wavenumbers.
Parameters
----------
x : 1D array
Coordinates along x direction.
y : 1D array
Coordinates along y direction.
shape_dat : tuple
Shape of the input data.
shape_pdat : tuple
Shape of the pad.
Returns
-------
u : array
Wavenumber grid along the x direction.
v : array
Wavenumber grid along the y direction.
"""
dx = (np.amax(x) - np.amin(x))/(shape_dat[0] - 1)
dy = (np.amax(y) - np.amin(y))/(shape_dat[1] - 1)
fx = 2*np.pi*np.fft.fftfreq(shape_pdat[0], dx)
fy = 2*np.pi*np.fft.fftfreq(shape_pdat[1], dy)
v,u=np.meshgrid(fy, fx)
return (u,v)
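# Usage sketch (assumptions: `data`, `x`, `y` are numpy arrays supplied by the
# caller; this is not part of the module API). The wavenumber grids typically
# build frequency-domain operators, e.g. a horizontal derivative:
#   fdata, mask = fft_pad_data(data)
#   u, v = fft_wavenumbers(x, y, data.shape, fdata.shape)
#   ddx = ifft_unpad_data(fdata * 1j * u, mask, data.shape)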
def fft_pad_data(data, mode='edge'):
"""
Pad the data and compute its 2D discrete Fourier transform.
Parameters
----------
data : 2D array
Input data.
mode : str, optional
The padding mode passed to numpy.pad. The default is 'edge'.
Returns
-------
fpdat : 2D array
The 2D Fourier transform of the padded data.
mask : 2D boolean array
The mask used to unpad the data back to its original shape.
"""
n_points=int(2**(np.ceil(np.log(np.max(data.shape))/np.log(2))))
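# e.g. (illustrative): a 700 x 900 grid is padded symmetrically up to 1024 x 1024.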
nx, ny = data.shape
padx = int((n_points - nx)/2)
pady = int((n_points - ny)/2)
padded_data = np.pad(data, ((padx, padx), (pady, pady)),mode)
mask = np.zeros_like(padded_data, dtype=bool)
mask[padx:padx+data.shape[0], pady:pady+data.shape[1]] = True
fpdat = np.fft.fft2(padded_data)
return (fpdat,mask)
def ifft_unpad_data(data_p, mask, shape_dat):
'''
Inverse-transform the padded spectrum and unpad it to the original data shape.
Parameters
----------
data_p : 2D array
Padded data.
mask : boolean
The mask that will be used to unpad the data.
shape_dat : tuple
Shape of the original data.
Returns
-------
data : array
Unpadded data.
'''
ifft_data = np.real(np.fft.ifft2(data_p))
data = ifft_data[mask]
return np.reshape(data, shape_dat)
def butter2d_lp(shape, f, n):
"""
Designs a lowpass 2D Butterworth filter.
Modified from Peirce JW (2009) Generating stimuli for neuroscience using
PsychoPy. Front. Neuroinform. 2:10.
doi:10.3389/neuro.11.010.2008.
Parameters
----------
shape : tuple
Size of the filter.
f : float
Relative cutoff frequency of the filter.
n : int
Order of the filter; the higher n, the sharper the transition.
Returns
-------
filt : 2D array
Filter kernel centered.
"""
rows, cols = shape
x = np.linspace(-0.5, 0.5, cols)
y = np.linspace(-0.5, 0.5, rows)
radius = np.sqrt((x**2)[np.newaxis] + (y**2)[:, np.newaxis])
filt = 1 / (1.0 + (radius / f)**(2*n))
return (filt)
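# Low-pass filtering sketch (f=0.1 and n=2 are illustrative values, not
# defaults). The kernel is centered, so it is shifted back with
# np.fft.ifftshift before multiplying the unshifted FFT from fft_pad_data:
#   fdata, mask = fft_pad_data(data)
#   filt = butter2d_lp(fdata.shape, f=0.1, n=2)
#   smoothed = ifft_unpad_data(fdata * np.fft.ifftshift(filt), mask, data.shape)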
def plot_wav(decomp):
"""
Plot the data in DWT domain
Parameters
----------
decomp : list
Data in the wavelet domain (approximation and detail coefficients).
Returns
-------
None.
"""
plt.figure(figsize=(10,10))
gs = GridSpec(4, 4)
ax = plt.subplot(gs[0, 0])
plt.imshow(decomp[0])
plt.xticks([])
plt.yticks([])
ax = plt.subplot(gs[1,0])
plt.imshow(decomp[1][0])
plt.xticks([])
plt.yticks([])
ax = plt.subplot(gs[0, 1])
plt.imshow(decomp[1][1])
plt.xticks([])
plt.yticks([])
ax = plt.subplot(gs[1, 1])
plt.imshow(decomp[1][2])
plt.xticks([])
plt.yticks([])
ax = plt.subplot(gs[2:,:2])
plt.imshow(decomp[2][0])
plt.xticks([])
plt.yticks([])
ax = plt.subplot(gs[:2,2:])
plt.imshow(decomp[2][1])
plt.xticks([])
plt.yticks([])
ax = plt.subplot(gs[2:,2:])
plt.imshow(decomp[2][2])
plt.xticks([])
plt.yticks([])
plt.tight_layout()
return |
py | 1a4eed8c4e06753bc353deb25b842ef021867838 | #!/usr/bin/env python2.7
#
# Copyright (c) 2016, Daniel Bolgheroni.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import cmd
import signal
import shlex
from time import sleep
from pyfirmata import Arduino, serial
from conf import Config
class Sh(cmd.Cmd):
prompt = 'rswtch> '
intro = 'type \'help\' to see available commands'
def default(self, line):
print(line + ": not found")
def do_EOF(self, line):
exit(0)
# overwrite help, since commands are simple, do not need independent
# help for each command
def do_help(self, line):
print("{0:<16} {1}".format("COMMAND", "DESCRIPTION"))
print("{0:<16} {1}".format("annotate n \"c\"", "annotate c in channel n (use quotes)"))
print("{0:<16} {1}".format("down n", "turn off the n channel"))
print("{0:<16} {1}".format("help", "this help"))
print("{0:<16} {1}".format("reset n", "turn the n channel off and on again after 2 seconds"))
print("{0:<16} {1}".format("status", "display the status of all channels, including annotations"))
print("{0:<16} {1}".format("toggle n", "turn the n channel off if its on, and vice-versa"))
print("{0:<16} {1}".format("up n", "turn on the n channel"))
### commands
# up
def do_up(self, line):
parser = shlex.shlex(line)
c = parser.get_token()
try:
channels[c].up()
except KeyError:
print("no channel")
# down
def do_down(self, line):
parser = shlex.shlex(line)
c = parser.get_token()
try:
channels[c].down()
except KeyError:
print("no channel")
# toggle
def do_toggle(self, line):
parser = shlex.shlex(line)
c = parser.get_token()
try:
channels[c].toggle()
except KeyError:
print("no channel")
# reset
def do_reset(self, line):
parser = shlex.shlex(line)
c = parser.get_token()
try:
channels[c].reset()
except KeyError:
print("no channel")
# status
def do_status(self, line):
status()
def do_annotate(self, line):
parser = shlex.shlex(line, posix=True)
c = parser.get_token()
try:
channels[c].annotation = parser.get_token()
except KeyError:
print("no channel")
# quit
def do_quit(self, line):
exit(0)
# handle ^C
@staticmethod
def handle_sigint(signum, frame):
exit(0)
class Channel():
# the relay module uses inverted logic, so
# 1 to bring pin down and 0 bring pin up
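# i.e. pin.write(0) leaves the channel 'up' and pin.write(1) brings it 'down'
# (see the status property below).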
def __init__(self, pin, boardname):
self.__pin = pin
self.boardname = boardname
self.annotation = None
# up by default
self.__pin.write(0)
def up(self):
self.__pin.write(0)
def down(self):
self.__pin.write(1)
def toggle(self):
if self.__pin.read() == 0:
self.__pin.write(1)
else:
self.__pin.write(0)
def reset(self):
self.__pin.write(1)
sleep(2)
self.__pin.write(0)
@property
def status(self):
return 'up' if self.__pin.read() == 0 else 'down'
def status():
print("{0:>2} {1:<6} {2:<20.20} {3:<40.40}"
.format("CH", "STATUS", "BOARD", "ANNOTATION"))
print("{0:>2} {1:<6} {2:<20.20} {3:<40.40}"
.format("1", ch1.status, ch1.boardname, ch1.annotation))
print("{0:>2} {1:<6} {2:<20.20} {3:<40.40}"
.format("2", ch2.status, ch2.boardname, ch2.annotation))
print("{0:>2} {1:<6} {2:<20.20} {3:<40.40}"
.format("3", ch3.status, ch3.boardname, ch3.annotation))
print("{0:>2} {1:<6} {2:<20.20} {3:<40.40}"
.format("4", ch4.status, ch4.boardname, ch4.annotation))
if __name__ == '__main__':
opts = argparse.ArgumentParser()
opts.add_argument("-v", action="store_true",
help="shows board Firmata firmware version")
opts.add_argument("-f",
help="specify config file")
opts.add_argument("dev", help="serial device")
args = opts.parse_args()
# init Firmata module
try:
board = Arduino(args.dev)
except serial.serialutil.SerialException:
print("could not open port {0}".format(args.dev))
exit(1)
# try to get board firmata version
# this fails most of the times
if args.v:
v = board.get_firmata_version()
try:
print("{0}.{1}".format(v[0], v[1]))
exit(0)
except (NameError, TypeError):
print("could not get board firmata version")
exit(1)
# handle configuration file
if args.f:
config = Config(args.f)
else:
config = Config()
# turn off board led
led = board.get_pin('d:13:o')
led.write(0)
# configuring pins
ch1 = Channel(board.get_pin('d:9:o'), config.get_boardname(1))
ch2 = Channel(board.get_pin('d:8:o'), config.get_boardname(2))
ch3 = Channel(board.get_pin('d:7:o'), config.get_boardname(3))
ch4 = Channel(board.get_pin('d:6:o'), config.get_boardname(4))
channels = {'1': ch1, '2': ch2, '3': ch3, '4': ch4}
# start shell
signal.signal(signal.SIGINT, Sh.handle_sigint)
Sh().cmdloop()
|
py | 1a4eedcad66d9dc4d0bb7974c22aa6ceb62f8e27 | from django.contrib import admin
from .models import (
EnsemblRegulatoryFeature,
GeneInterval,
TadSet,
TadInterval,
TadBoundaryInterval,
VistaEnhancer,
)
# Register your models here.
admin.site.register(EnsemblRegulatoryFeature)
admin.site.register(GeneInterval)
admin.site.register(TadSet)
admin.site.register(TadInterval)
admin.site.register(TadBoundaryInterval)
admin.site.register(VistaEnhancer)
|
py | 1a4eef2f23daf8c01c2e172de77a6c5722803066 | import os
from strange_case.configurators import *
from strange_case.tests import *
def test_provides_decorator():
a = {'dont_change_me': 'not changed'}
@provides('dont_change_me')
def should_do_nothing(source_file, config):
config['dont_change_me'] = 'changed'
return config
should_do_nothing(None, a)
assert a['dont_change_me'] == 'not changed'
@provides('change_me')
def should_do_something(source_file, config):
config['change_me'] = 'changed'
return config
should_do_something(None, a)
assert a['change_me'] == 'changed'
@will_test(file_types)
def test_file_types_folder(config):
source_file = get_test_file('a_folder')
config = file_types(source_file, config)
assert config['type'] == 'folder'
@will_test(file_types)
def test_file_types_root(config):
source_file = config['site_path']
config = file_types(source_file, config)
assert config['type'] == 'root'
@will_test(file_types)
def test_file_types_from_glob1(config):
config.update({
'file_types': [
('text', ('*.txt',)),
('bin', ('*.bin',)),
],
})
source_file = get_test_file('a_folder/a_file.txt')
config = file_types(source_file, config)
assert config['type'] == 'text'
@will_test(file_types)
def test_file_types_from_glob2(config):
config.update({
'file_types': [
('text', ('*.txt',)),
('bin', ('*.bin',)),
],
})
source_file = get_test_file('a_folder/a_file.bin')
config = file_types(source_file, config)
assert config['type'] == 'bin'
@will_test()
def test_file_types_from_default_type(config):
source_file = get_test_file('a_folder/a_file.txt')
config.update({
'default_type': 'file',
})
config = file_types(source_file, config)
assert config['type'] == 'file'
@will_test(file_types)
def test_file_types_no_match(config):
source_file = get_test_file('a_folder/a_file.txt')
config.update({
'default_type': None,
'file_types': (
('page', ['*.j2']),
),
})
assert None == file_types(source_file, config)
@will_test(file_types, folder_config_file)
def test_folder_config_file(config):
source_file = get_test_file('a_folder')
config.update({
'config_file': 'config.yaml',
})
config = file_types(source_file, config)
config = folder_config_file(source_file, config)
assert config['test'] == 'test'
@will_test(file_types, folder_config_file)
def test_folder_config_file_missing_config_file(config):
source_file = get_test_file('a_folder')
config.update({
'config_file': 'HUH.yml',
})
config = file_types(source_file, config)
config = folder_config_file(source_file, config)
assert not 'test' in config
@will_test(file_types, folder_config_file, ignore)
def test_folder_config_file_ignore(config):
source_file = get_test_file('a_folder')
config.update({
'config_file': 'ignore_config.yaml',
})
config = file_types(source_file, config)
config == folder_config_file(source_file, config)
assert None == ignore(source_file, config)
@will_test(front_matter_config)
def test_front_matter_config_success(config):
source_file = get_test_file('a_folder/page.j2')
config.update({
'type': 'page',
'override': 'wrong',
})
config = front_matter_config(source_file, config)
assert config['front'] == 'matter'
assert config['override'] == 'overridden'
@will_test(front_matter_config)
def test_front_matter_config_ticks(config):
source_file = get_test_file('a_folder/page_ticks.j2')
config.update({
'type': 'page',
'modified': 1,
})
config = front_matter_config(source_file, config)
assert config['ticks'] == 2
assert config['modified'] == 2
@will_test(front_matter_config)
def test_front_matter_config_ignore_doesnt_exist(config):
source_file = get_test_file('a_folder/not_a_file.j2')
config.update({
'type': 'page',
'modified': 1,
})
config = front_matter_config(source_file, config)
assert config['modified'] == 1
@will_test(front_matter_config)
def test_front_matter_config_bad1(config):
source_file = get_test_file('a_folder/bad_page1.j2')
config.update({
'type': 'page',
})
config = front_matter_config(source_file, config)
assert not 'front' in config
@will_test(front_matter_config)
def test_front_matter_config_bad2(config):
source_file = get_test_file('a_folder/bad_page2.j2')
config.update({
'type': 'page',
})
config = front_matter_config(source_file, config)
assert not 'front' in config
@will_test(ignore)
def test_ignore_true(config):
source_file = get_test_file('a_folder/a_file.txt')
config.update({
'ignore': True,
})
assert ignore(source_file, config) is None
@will_test(ignore)
def test_ignore_false(config):
source_file = get_test_file('a_folder/a_file.txt')
config.update({
'ignore': False,
})
assert ignore(source_file, config) == config
@will_test(ignore)
def test_ignore_true_pattern_match(config):
source_file = get_test_file('a_folder/a_file.txt')
config.update({
'ignore': ('*.txt'),
})
assert ignore(source_file, config) is None
@will_test(ignore)
def test_ignore_false_pattern_match(config):
source_file = get_test_file('a_folder/a_file.txt')
config.update({
'ignore': ('*.bfg',),
})
assert ignore(source_file, config) == config
@will_test(file_types, folder_config_file)
def test_merge_files_config(folder_config):
source_file = get_test_file('a_folder')
folder_config.update({
'config_file': 'files_config.yaml'
})
folder_config = file_types(source_file, folder_config)
folder_config = folder_config_file(source_file, folder_config)
def _folder_config():
config = {}
config.update(folder_config)
return config
config = _folder_config()
assert len(config['files'].keys()) == 3
config = _folder_config()
source_file = get_test_file('a_folder/a_file.txt')
config = merge_files_config(source_file, config)
assert 'files' not in config
assert config['is_a_file'] is True
config = _folder_config()
source_file = get_test_file('a_folder/bad_page1.j2')
config = merge_files_config(source_file, config)
assert 'files' not in config
assert 'is_a_file' not in config
config = _folder_config()
source_file = get_test_file('a_folder/page.j2')
config = merge_files_config(source_file, config)
assert 'files' not in config
assert config['is_a_file'] is False
@will_test(setdefault_name)
def test_setdefault_name_not_setup(config):
source_file = get_test_file('a_folder/page.j2')
config = setdefault_name(source_file, config)
assert config['name'] == 'page'
@will_test(setdefault_name)
def test_setdefault_name_remove_extension(config):
source_file = get_test_file('a_folder/page.j2')
config.update({
'rename_extensions': {
'.j2': '.html',
},
'html_extension': '.html',
})
config = setdefault_name(source_file, config)
assert config['name'] == 'page'
@will_test(setdefault_name)
def test_setdefault_name_keep_extension(config):
source_file = get_test_file('a_folder/a_file.txt')
config.update({
'rename_extensions': {
'.j2': '.html',
},
'html_extension': '.html',
})
config = setdefault_name(source_file, config)
assert config['name'] == 'a_file_txt'
@will_test(setdefault_target_name)
def test_setdefault_target_name_dont_rename_extension(config):
source_file = get_test_file('a_folder/a_file.txt')
config.update({
'rename_extensions': {
'.j2': '.html',
},
})
config = setdefault_target_name(source_file, config)
assert config['target_name'] == 'a_file.txt'
@will_test(skip_if_not_modified)
def test_skip_if_not_modified_not_modified(config):
source_file = get_test_file('a_folder/a_file.txt')
mtime = os.stat(source_file).st_mtime
config.update({
'file_mtimes': {source_file: mtime}
})
config = skip_if_not_modified(source_file, config)
assert config['skip'] is True
@will_test(skip_if_not_modified)
def test_skip_if_not_modified_is_modified(config):
source_file = get_test_file('a_folder/a_file.txt')
mtime = os.stat(source_file).st_mtime
config.update({
'file_mtimes': {source_file: mtime - 1}
})
config = skip_if_not_modified(source_file, config)
assert config['skip'] is False
@will_test(is_index, setdefault_target_name, setdefault_iterable)
def test_setdefault_iterable_true(config):
source_file = get_test_file('a_folder/page.j2')
config.update({
'rename_extensions': {
'.j2': '.html',
},
'index.html': 'page.html'
})
config = setdefault_target_name(source_file, config)
config = is_index(source_file, config)
assert config['target_name'] == config['index.html']
config = setdefault_iterable(source_file, config)
assert config['iterable'] is False
@will_test(is_index, setdefault_target_name, setdefault_iterable)
def test_setdefault_iterable_false(config):
source_file = get_test_file('a_folder/page.j2')
config.update({
'rename_extensions': {
'.j2': '.html',
},
'index.html': 'index.html'
})
config = setdefault_target_name(source_file, config)
config = is_index(source_file, config)
assert config['target_name'] != config['index.html']
config = setdefault_iterable(source_file, config)
assert config['iterable'] is True
@will_test(setdefault_iterable)
def test_setdefault_iterable_override_true(config):
source_file = get_test_file('a_folder/bad_page1.j2')
config.update({
'index.html': 'bad_page1.j2',
'iterable': True
})
config = setdefault_iterable(source_file, config)
assert config['iterable'] is True
@will_test(setdefault_iterable)
def test_setdefault_iterable_override_false(config):
source_file = get_test_file('a_folder/page.j2')
config.update({
'index.html': 'page.j2',
'iterable': False
})
config = setdefault_iterable(source_file, config)
assert config['iterable'] is False
@will_test(setdefault_name, setdefault_target_name, is_index, set_url)
def test_set_url(config):
source_file = get_test_file('a_folder/page.j2')
config.update({
'rename_extensions': {
'.j2': '.html',
},
})
config = setdefault_target_name(source_file, config)
config = is_index(source_file, config)
config = set_url(source_file, config)
assert config['url'] == 'page.html'
@will_test(setdefault_name, setdefault_target_name, is_index, set_url)
def test_set_url_index(config):
source_file = get_test_file('a_folder/index.j2')
config.update({
'rename_extensions': {
'.j2': '.html',
},
'index.html': 'index.html'
})
config = setdefault_name(source_file, config)
config = setdefault_target_name(source_file, config)
config = is_index(source_file, config)
assert config['target_name'] == config['index.html']
config = set_url(source_file, config)
assert config['url'] == ''
@will_test(setdefault_name, setdefault_target_name, is_index, set_url)
def test_set_url_cant_override(config):
source_file = get_test_file('a_folder/bad_page1.j2')
config.update({
'url': 'bad_page1',
})
config = setdefault_name(source_file, config)
config = setdefault_target_name(source_file, config)
config = is_index(source_file, config)
config = set_url(source_file, config)
assert config['url'] == 'bad_page1.html'
@will_test(override)
def test_override_preserves_local_config(config):
source_file = get_test_file('a_folder/bad_page1.j2')
config.update({
'title': 'old title',
'override': {
'title': 'Overridden'
}
})
config = override(source_file, config)
assert config['title'] == 'old title'
|
py | 1a4ef01ee6b35a6b3ac294bd64ccdf8655266890 | from hdmf.utils import docval, getargs
from hdmf.container import Container
CORE_NAMESPACE = 'test_core'
class Foo(Container):
@docval({'name': 'name', 'type': str, 'doc': 'the name of this Foo'},
{'name': 'my_data', 'type': ('array_data', 'data'), 'doc': 'some data'},
{'name': 'attr1', 'type': str, 'doc': 'an attribute'},
{'name': 'attr2', 'type': int, 'doc': 'another attribute'},
{'name': 'attr3', 'type': float, 'doc': 'a third attribute', 'default': 3.14})
def __init__(self, **kwargs):
name, my_data, attr1, attr2, attr3 = getargs('name', 'my_data', 'attr1', 'attr2', 'attr3', kwargs)
super(Foo, self).__init__(name=name)
self.__data = my_data
self.__attr1 = attr1
self.__attr2 = attr2
self.__attr3 = attr3
def __eq__(self, other):
attrs = ('name', 'my_data', 'attr1', 'attr2', 'attr3')
return all(getattr(self, a) == getattr(other, a) for a in attrs)
def __str__(self):
attrs = ('name', 'my_data', 'attr1', 'attr2', 'attr3')
return '<' + ','.join('%s=%s' % (a, getattr(self, a)) for a in attrs) + '>'
@property
def my_data(self):
return self.__data
@property
def attr1(self):
return self.__attr1
@property
def attr2(self):
return self.__attr2
@property
def attr3(self):
return self.__attr3
def __hash__(self):
return hash(self.name)
class FooBucket(Container):
@docval({'name': 'name', 'type': str, 'doc': 'the name of this bucket'},
{'name': 'foos', 'type': list, 'doc': 'the Foo objects in this bucket', 'default': list()})
def __init__(self, **kwargs):
name, foos = getargs('name', 'foos', kwargs)
super(FooBucket, self).__init__(name=name)
self.__foos = foos
for f in self.__foos:
f.parent = self
def __eq__(self, other):
return self.name == other.name and set(self.foos) == set(other.foos)
def __str__(self):
foo_str = "[" + ",".join(str(f) for f in self.foos) + "]"
return 'name=%s, foos=%s' % (self.name, foo_str)
@property
def foos(self):
return self.__foos
|
py | 1a4ef17fcc8fcd298f18f6a0229e28e37a809415 | from pid import PID
from lowpass import LowPassFilter
from yaw_controller import YawController
import rospy
GAS_DENSITY = 2.858
ONE_MPH = 0.44704
class Controller(object):
def __init__(self, vehicle_mass,
fuel_capacity,
brake_deadband,
decel_limit,
accel_limit,
wheel_radius,
wheel_base,
steer_ratio,
max_lat_accel,
max_steer_angle):
self.yaw_controller = YawController(wheel_base,
steer_ratio,
0.1,
max_lat_accel,
max_steer_angle)
kp = 0.3
ki = 0.1
kd = 0.0
mn = 0. # minimum throttle value
mx = 0.2 #max throttle
self.throttle_controller = PID(kp, ki, kd, mn, mx)
tau = 0.5 # 1/(2pi*tau) = cutoff frequency
ts = 0.02 # sample time
self.vel_lpf = LowPassFilter(tau, ts)
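# e.g. (illustrative arithmetic): tau = 0.5 gives a cutoff of roughly
# 1 / (2*pi*0.5) ~= 0.32 Hz, so velocity noise faster than ~0.3 Hz is damped.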
self.vehicle_mass = vehicle_mass
self.fuel_capacity = fuel_capacity
self.brake_deadband = brake_deadband
self.decel_limit = decel_limit
self.accel_limit = accel_limit
self.wheel_radius = wheel_radius
self.last_time = rospy.get_time()
def control(self, current_vel, dbw_enabled, linear_vel, angular_vel):
if not dbw_enabled:
self.throttle_controller.reset()
return 0., 0., 0.
current_vel = self.vel_lpf.filt(current_vel)
# rospy.logwarn("Angular vel: {0}".format(angular_vel))
# rospy.logwarn("Current vel: {0}".format(current_vel))
# rospy.logwarn("Target vel: {0}".format(linear_vel))
steering = self.yaw_controller.get_steering(linear_vel, angular_vel, current_vel)
# could add damping --> based on target_ang_vel - current_ang_vel
vel_error = linear_vel - current_vel
self.last_vel = current_vel
current_time = rospy.get_time()
sample_time = current_time - self.last_time
self.last_time = current_time
throttle = self.throttle_controller.step(vel_error, sample_time)
brake = 0
if linear_vel == 0. and current_vel <0.1:
throttle = 0
brake = 700 # N*m to hold car in place if stopped at light; acc ~ 1 m/s^2
elif linear_vel <.1 and vel_error < 0:
throttle = 0
decel = max(vel_error, self.decel_limit)
brake = abs(decel)*self.vehicle_mass*self.wheel_radius # Torque N*m
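# e.g. (illustrative numbers): with vehicle_mass ~= 1736 kg, wheel_radius
# ~= 0.2413 m and a 1 m/s^2 deceleration, the commanded torque is about
# 1736 * 0.2413 ~= 419 N*m.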
return throttle, brake, steering
|
py | 1a4ef1fd2913145127635b7aeb5c6c6950045eb0 | import base64
import unittest
import zlib
from os.path import abspath, basename, dirname, join
from robot.utils.asserts import assert_equal, assert_true
from robot.utils.platform import PY2
from robot.result import Keyword, Message, TestCase, TestSuite
from robot.result.executionerrors import ExecutionErrors
from robot.model import Statistics
from robot.reporting.jsmodelbuilders import *
from robot.reporting.stringcache import StringIndex
try:
long
except NameError:
long = int
CURDIR = dirname(abspath(__file__))
def decode_string(string):
string = string if PY2 else string.encode('ASCII')
return zlib.decompress(base64.b64decode(string)).decode('UTF-8')
def remap(model, strings):
if isinstance(model, StringIndex):
if strings[model].startswith('*'):
# Strip the asterisk from a raw string.
return strings[model][1:]
return decode_string(strings[model])
elif isinstance(model, (int, long, type(None))):
return model
elif isinstance(model, tuple):
return tuple(remap(item, strings) for item in model)
else:
raise AssertionError("Item '%s' has invalid type '%s'" % (model, type(model)))
class TestBuildTestSuite(unittest.TestCase):
def test_default_suite(self):
self._verify_suite(TestSuite())
def test_suite_with_values(self):
suite = TestSuite('Name', 'Doc', {'m1': 'v1', 'M2': 'V2'}, None, 'Message',
'20111204 19:00:00.000', '20111204 19:00:42.001')
self._verify_suite(suite, 'Name', 'Doc', ('m1', '<p>v1</p>', 'M2', '<p>V2</p>'),
message='Message', start=0, elapsed=42001)
def test_relative_source(self):
self._verify_suite(TestSuite(source='non-existing'), source='non-existing')
source = join(CURDIR, 'test_jsmodelbuilders.py')
self._verify_suite(TestSuite(source=source), source=source,
relsource=basename(source))
def test_suite_html_formatting(self):
self._verify_suite(TestSuite(name='*xxx*', doc='*bold* <&>',
metadata={'*x*': '*b*', '<': '>'}),
name='*xxx*', doc='<b>bold</b> <&>',
metadata=('*x*', '<p><b>b</b></p>', '<', '<p>></p>'))
def test_default_test(self):
self._verify_test(TestCase())
def test_test_with_values(self):
test = TestCase('Name', '*Doc*', ['t1', 't2'], '1 minute', 'PASS', 'Msg',
'20111204 19:22:22.222', '20111204 19:22:22.333')
test.setup.config(kwname='setup', type='setup')
test.teardown.config(kwname='td', type='teardown')
k1 = self._verify_keyword(test.setup, type=1, kwname='setup')
k2 = self._verify_keyword(test.teardown, type=2, kwname='td')
self._verify_test(test, 'Name', '<b>Doc</b>', ('t1', 't2'),
'1 minute', 1, 'Msg', 0, 111, (k1, k2))
def test_name_escaping(self):
kw = Keyword('quote:"', 'and *url* https://url.com', '*"Doc"*',)
self._verify_keyword(kw, 0, 'quote:"', 'and *url* https://url.com', '<b>"Doc"</b>')
test = TestCase('quote:" and *url* https://url.com', '*"Doc"*',)
self._verify_test(test, 'quote:" and *url* https://url.com', '<b>"Doc"</b>')
suite = TestSuite('quote:" and *url* https://url.com', '*"Doc"*',)
self._verify_suite(suite, 'quote:" and *url* https://url.com', '<b>"Doc"</b>')
def test_default_keyword(self):
self._verify_keyword(Keyword())
def test_keyword_with_values(self):
kw = Keyword('KW Name', 'libname', 'http://doc', ('arg1', 'arg2'),
('${v1}', '${v2}'), ('tag1', 'tag2'), '1 second', 'setup',
'PASS', '20111204 19:42:42.000', '20111204 19:42:42.042')
self._verify_keyword(kw, 1, 'KW Name', 'libname',
'<a href="http://doc">http://doc</a>',
'arg1, arg2', '${v1}, ${v2}', 'tag1, tag2',
'1 second', 1, 0, 42)
def test_default_message(self):
self._verify_message(Message())
self._verify_min_message_level('INFO')
def test_message_with_values(self):
msg = Message('Message', 'DEBUG', timestamp='20111204 22:04:03.210')
self._verify_message(msg, 'Message', 1, 0)
self._verify_min_message_level('DEBUG')
def test_warning_linking(self):
msg = Message('Message', 'WARN', timestamp='20111204 22:04:03.210',
parent=TestCase().body.create_keyword())
self._verify_message(msg, 'Message', 3, 0)
links = self.context._msg_links
assert_equal(len(links), 1)
key = (msg.message, msg.level, msg.timestamp)
assert_equal(remap(links[key], self.context.strings), 't1-k1')
def test_error_linking(self):
msg = Message('ERROR Message', 'ERROR', timestamp='20150609 01:02:03.004',
parent=TestCase().body.create_keyword().body.create_keyword())
self._verify_message(msg, 'ERROR Message', 4, 0)
links = self.context._msg_links
assert_equal(len(links), 1)
key = (msg.message, msg.level, msg.timestamp)
assert_equal(remap(links[key], self.context.strings), 't1-k1-k1')
def test_message_with_html(self):
self._verify_message(Message('<img>'), '<img>')
self._verify_message(Message('<b></b>', html=True), '<b></b>')
def test_nested_structure(self):
suite = TestSuite()
suite.setup.config(kwname='setup', type='setup')
suite.teardown.config(kwname='td', type='teardown')
K1 = self._verify_keyword(suite.setup, type=1, kwname='setup')
K2 = self._verify_keyword(suite.teardown, type=2, kwname='td')
suite.suites = [TestSuite()]
suite.suites[0].tests = [TestCase(tags=['crit', 'xxx'])]
t = self._verify_test(suite.suites[0].tests[0], tags=('crit', 'xxx'))
suite.tests = [TestCase(), TestCase(status='PASS')]
S1 = self._verify_suite(suite.suites[0],
status=0, tests=(t,), stats=(1, 0, 1, 0))
suite.tests[0].body = [Keyword(type=Keyword.FOR_TYPE), Keyword()]
suite.tests[0].body[0].body = [Keyword(type=Keyword.FOR_ITEM_TYPE), Message()]
k = self._verify_keyword(suite.tests[0].body[0].body[0], type=4)
m = self._verify_message(suite.tests[0].body[0].messages[0])
k1 = self._verify_keyword(suite.tests[0].body[0], type=3, body=(k, m))
suite.tests[0].body[1].body = [Message(), Message('msg', level='TRACE')]
m1 = self._verify_message(suite.tests[0].body[1].messages[0])
m2 = self._verify_message(suite.tests[0].body[1].messages[1], 'msg', level=0)
k2 = self._verify_keyword(suite.tests[0].body[1], body=(m1, m2))
T1 = self._verify_test(suite.tests[0], body=(k1, k2))
T2 = self._verify_test(suite.tests[1], status=1)
self._verify_suite(suite, status=0, keywords=(K1, K2), suites=(S1,),
tests=(T1, T2), stats=(3, 1, 2, 0))
self._verify_min_message_level('TRACE')
def test_timestamps(self):
suite = TestSuite(starttime='20111205 00:33:33.333')
suite.setup.config(kwname='s1', starttime='20111205 00:33:33.334')
suite.setup.body.create_message('Message', timestamp='20111205 00:33:33.343')
suite.setup.body.create_message(level='DEBUG', timestamp='20111205 00:33:33.344')
suite.tests.create(starttime='20111205 00:33:34.333')
context = JsBuildingContext()
model = SuiteBuilder(context).build(suite)
self._verify_status(model[5], start=0)
self._verify_status(model[-2][0][8], start=1)
self._verify_mapped(model[-2][0][-1], context.strings,
((8, 10, 2, 'Message'), (8, 11, 1, '')))
self._verify_status(model[-3][0][4], start=1000)
def test_if(self):
test = TestSuite().tests.create()
if_ = test.body.create_if(condition='$x > 0', branch_status='NOT RUN')
else_if = if_.orelse.config(condition='$y > 0', branch_status='PASS')
else_ = else_if.orelse.config()
else_.body.create_keyword('z')
exp_if = (
5, '$x > 0', '', '', '', '', '', '', (3, None, 0), ()
)
exp_else_if = (
6, '$y > 0', '', '', '', '', '', '', (1, None, 0), ()
)
exp_else = (
7, '', '', '', '', '', '', '', (0, None, 0),
((0, 'z', '', '', '', '', '', '', (0, None, 0), ()),)
)
self._verify_test(test, body=(exp_if, exp_else_if, exp_else))
def _verify_status(self, model, status=0, start=None, elapsed=0):
assert_equal(model, (status, start, elapsed))
def _verify_suite(self, suite, name='', doc='', metadata=(), source='',
relsource='', status=2, message='', start=None, elapsed=0,
suites=(), tests=(), keywords=(), stats=(0, 0, 0, 0)):
status = (status, start, elapsed, message) \
if message else (status, start, elapsed)
doc = '<p>%s</p>' % doc if doc else ''
return self._build_and_verify(SuiteBuilder, suite, name, source,
relsource, doc, metadata, status,
suites, tests, keywords, stats)
def _get_status(self, *elements):
return elements if elements[-1] else elements[:-1]
def _verify_test(self, test, name='', doc='', tags=(), timeout='',
status=0, message='', start=None, elapsed=0, body=()):
status = (status, start, elapsed, message) \
if message else (status, start, elapsed)
doc = '<p>%s</p>' % doc if doc else ''
return self._build_and_verify(TestBuilder, test, name, timeout,
doc, tags, status, body)
def _verify_keyword(self, keyword, type=0, kwname='', libname='', doc='',
args='', assign='', tags='', timeout='', status=0,
start=None, elapsed=0, body=()):
status = (status, start, elapsed)
doc = '<p>%s</p>' % doc if doc else ''
return self._build_and_verify(KeywordBuilder, keyword, type, kwname,
libname, timeout, doc, args, assign, tags,
status, body)
def _verify_message(self, msg, message='', level=2, timestamp=None):
return self._build_and_verify(MessageBuilder, msg, 8, timestamp, level, message)
def _verify_min_message_level(self, expected):
assert_equal(self.context.min_level, expected)
def _build_and_verify(self, builder_class, item, *expected):
self.context = JsBuildingContext(log_path=join(CURDIR, 'log.html'))
model = builder_class(self.context).build(item)
self._verify_mapped(model, self.context.strings, expected)
return expected
def _verify_mapped(self, model, strings, expected):
mapped_model = tuple(remap(model, strings))
assert_equal(mapped_model, expected)
class TestSplitting(unittest.TestCase):
def test_test_keywords(self):
suite = self._get_suite_with_tests()
expected, _ = self._build_and_remap(suite)
expected_split = [expected[-3][0][-1], expected[-3][1][-1]]
expected[-3][0][-1], expected[-3][1][-1] = 1, 2
model, context = self._build_and_remap(suite, split_log=True)
assert_equal(context.strings, ('*', '*suite', '*t1', '*t2'))
assert_equal(model, expected)
assert_equal([strings for _, strings in context.split_results],
[('*', '*t1-k1', '*t1-k1-k1', '*t1-k2'), ('*', '*t2-k1')])
assert_equal([self._to_list(remap(*res)) for res in context.split_results],
expected_split)
def _get_suite_with_tests(self):
suite = TestSuite(name='suite')
suite.tests = [TestCase('t1'), TestCase('t2')]
suite.tests[0].body = [Keyword('t1-k1'), Keyword('t1-k2')]
suite.tests[0].body[0].body = [Keyword('t1-k1-k1')]
suite.tests[1].body = [Keyword('t2-k1')]
return suite
def _build_and_remap(self, suite, split_log=False):
context = JsBuildingContext(split_log=split_log)
model = remap(SuiteBuilder(context).build(suite), context.strings)
return self._to_list(model), context
def _to_list(self, model):
return list(self._to_list(item) if isinstance(item, tuple) else item
for item in model)
def test_suite_keywords(self):
suite = self._get_suite_with_keywords()
expected, _ = self._build_and_remap(suite)
expected_split = [expected[-2][0][-1], expected[-2][1][-1]]
expected[-2][0][-1], expected[-2][1][-1] = 1, 2
model, context = self._build_and_remap(suite, split_log=True)
assert_equal(context.strings, ('*', '*root', '*k1', '*k2'))
assert_equal(model, expected)
assert_equal([strings for _, strings in context.split_results],
[('*', '*k1-k2'), ('*',)])
assert_equal([self._to_list(remap(*res)) for res in context.split_results],
expected_split)
def _get_suite_with_keywords(self):
suite = TestSuite(name='root')
suite.setup.config(kwname='k1')
suite.teardown.config(kwname='k2')
suite.setup.body.create_keyword('k1-k2')
return suite
def test_nested_suite_and_test_keywords(self):
suite = self._get_nested_suite_with_tests_and_keywords()
expected, _ = self._build_and_remap(suite)
expected_split = [expected[-4][0][-3][0][-1], expected[-4][0][-3][1][-1],
expected[-4][1][-3][0][-1], expected[-4][1][-2][0][-1],
expected[-2][0][-1], expected[-2][1][-1]]
(expected[-4][0][-3][0][-1], expected[-4][0][-3][1][-1],
expected[-4][1][-3][0][-1], expected[-4][1][-2][0][-1],
expected[-2][0][-1], expected[-2][1][-1]) = 1, 2, 3, 4, 5, 6
model, context = self._build_and_remap(suite, split_log=True)
assert_equal(model, expected)
assert_equal([self._to_list(remap(*res)) for res in context.split_results],
expected_split)
def _get_nested_suite_with_tests_and_keywords(self):
suite = self._get_suite_with_keywords()
sub = TestSuite(name='suite2')
suite.suites = [self._get_suite_with_tests(), sub]
sub.setup.config(kwname='kw')
sub.setup.body.create_keyword('skw').body.create_message('Message')
sub.tests.create('test', doc='tdoc').body.create_keyword('koowee', doc='kdoc')
return suite
def test_message_linking(self):
suite = self._get_suite_with_keywords()
msg1 = suite.setup.body[0].body.create_message(
'Message 1', 'WARN', timestamp='20111204 22:04:03.210'
)
msg2 = suite.tests.create().body.create_keyword().body.create_message(
'Message 2', 'ERROR', timestamp='20111204 22:04:04.210'
)
context = JsBuildingContext(split_log=True)
SuiteBuilder(context).build(suite)
errors = ErrorsBuilder(context).build(ExecutionErrors([msg1, msg2]))
assert_equal(remap(errors, context.strings),
((8, -1000, 3, 'Message 1', 's1-k1-k1'),
(8, 0, 4, 'Message 2', 's1-t1-k1')))
assert_equal(remap(context.link(msg1), context.strings), 's1-k1-k1')
assert_equal(remap(context.link(msg2), context.strings), 's1-t1-k1')
assert_true('*s1-k1-k1' in context.strings)
assert_true('*s1-t1-k1' in context.strings)
for res in context.split_results:
assert_true('*s1-k1-k1' not in res[1])
assert_true('*s1-t1-k1' not in res[1])
class TestPruneInput(unittest.TestCase):
def setUp(self):
self.suite = TestSuite()
self.suite.setup.config(kwname='s')
self.suite.teardown.config(kwname='t')
s1 = self.suite.suites.create()
s1.setup.config(kwname='s1')
tc = s1.tests.create()
tc.setup.config(kwname='tcs')
tc.teardown.config(kwname='tct')
tc.body = [Keyword(), Keyword(), Keyword()]
tc.body[0].body = [Keyword(), Keyword(), Message(), Message(), Message()]
tc.body[0].teardown.config(kwname='kt')
s2 = self.suite.suites.create()
t1 = s2.tests.create()
t2 = s2.tests.create()
t1.body = [Keyword()]
t2.body = [Keyword(), Keyword()]
def test_no_pruning(self):
SuiteBuilder(JsBuildingContext(prune_input=False)).build(self.suite)
assert_equal(self.suite.setup.kwname, 's')
assert_equal(self.suite.teardown.kwname, 't')
assert_equal(self.suite.suites[0].setup.kwname, 's1')
assert_equal(self.suite.suites[0].teardown.kwname, None)
assert_equal(self.suite.suites[0].tests[0].setup.kwname, 'tcs')
assert_equal(self.suite.suites[0].tests[0].teardown.kwname, 'tct')
assert_equal(len(self.suite.suites[0].tests[0].body), 3)
assert_equal(len(self.suite.suites[0].tests[0].body[0].body), 5)
assert_equal(len(self.suite.suites[0].tests[0].body[0].messages), 3)
assert_equal(self.suite.suites[0].tests[0].body[0].teardown.kwname, 'kt')
assert_equal(len(self.suite.suites[1].tests[0].body), 1)
assert_equal(len(self.suite.suites[1].tests[1].body), 2)
def test_prune_suites_from_suite(self):
suite = self.suite
assert_equal(len(suite.suites), 2)
assert_equal(len(suite.tests), 0)
SuiteBuilder(JsBuildingContext(prune_input=True)).build(suite)
assert_equal(len(suite.suites), 0)
assert_equal(len(suite.tests), 0)
def test_prune_test_from_suite(self):
suite = self.suite.suites[0]
assert_equal(len(suite.suites), 0)
assert_equal(len(suite.tests), 1)
SuiteBuilder(JsBuildingContext(prune_input=True)).build(suite)
assert_equal(len(suite.suites), 0)
assert_equal(len(suite.tests), 0)
def test_prune_test(self):
test = self.suite.suites[0].tests[0]
assert_equal(len(test.body), 3)
TestBuilder(JsBuildingContext(prune_input=True)).build(test)
assert_equal(len(test.body), 0)
def test_prune_keyword(self):
kw = self.suite.suites[0].tests[0].body[0]
assert_equal(len(kw.body), 5)
assert_equal(len(kw.messages), 3)
KeywordBuilder(JsBuildingContext(prune_input=True)).build(kw)
assert_equal(len(kw.body), 0)
assert_equal(len(kw.messages), 0)
def test_prune_errors(self):
errors = ExecutionErrors([Message(), Message()])
ErrorsBuilder(JsBuildingContext(prune_input=False)).build(errors)
assert_equal(len(errors), 2)
ErrorsBuilder(JsBuildingContext(prune_input=True)).build(errors)
assert_equal(len(errors), 0)
class TestBuildStatistics(unittest.TestCase):
def test_total_stats(self):
all = self._build_statistics()[0][0]
self._verify_stat(all, 2, 2, 1, 'All Tests', '00:00:33')
def test_tag_stats(self):
        comb, t1, t2, t3 = self._build_statistics()[1]
self._verify_stat(t2, 2, 0, 0, 't2', '00:00:22',
doc='doc', links='t:url')
self._verify_stat(comb, 2, 0, 0, 'name', '00:00:22',
info='combined', combined='t1&t2')
self._verify_stat(t1, 2, 2, 0, 't1', '00:00:33')
self._verify_stat(t3, 0, 1, 1, 't3', '00:00:01')
def test_suite_stats(self):
root, sub1, sub2 = self._build_statistics()[2]
self._verify_stat(root, 2, 2, 1, 'root', '00:00:42', name='root', id='s1')
self._verify_stat(sub1, 1, 1, 1, 'root.sub1', '00:00:10', name='sub1', id='s1-s1')
self._verify_stat(sub2, 1, 1, 0, 'root.sub2', '00:00:30', name='sub2', id='s1-s2')
def _build_statistics(self):
return StatisticsBuilder().build(self._get_statistics())
def _get_statistics(self):
return Statistics(self._get_suite(),
suite_stat_level=2,
tag_stat_combine=[('t1&t2', 'name')],
tag_doc=[('t2', 'doc')],
tag_stat_link=[('?2', 'url', '%1')])
def _get_suite(self):
ts = lambda s, ms=0: '20120816 16:09:%02d.%03d' % (s, ms)
suite = TestSuite(name='root', starttime=ts(0), endtime=ts(42))
sub1 = TestSuite(name='sub1', starttime=ts(0), endtime=ts(10))
sub2 = TestSuite(name='sub2')
suite.suites = [sub1, sub2]
sub1.tests = [
TestCase(tags=['t1', 't2'], status='PASS', starttime=ts(0), endtime=ts(1, 500)),
TestCase(tags=['t1', 't3'], status='FAIL', starttime=ts(2), endtime=ts(3, 499)),
TestCase(tags=['t3'], status='SKIP', starttime=ts(3, 560), endtime=ts(3, 560))
]
sub2.tests = [
TestCase(tags=['t1', 't2'], status='PASS', starttime=ts(10), endtime=ts(30))
]
sub2.suites.create(name='below suite stat level')\
.tests.create(tags=['t1'], status='FAIL', starttime=ts(30), endtime=ts(40))
return suite
def _verify_stat(self, stat, pass_, fail, skip, label, elapsed, **attrs):
attrs.update({'pass': pass_, 'fail': fail, 'skip': skip,
'label': label, 'elapsed': elapsed})
assert_equal(stat, attrs)
class TestBuildErrors(unittest.TestCase):
def setUp(self):
msgs = [Message('Error', 'ERROR', timestamp='20111206 14:33:00.000'),
Message('Warning', 'WARN', timestamp='20111206 14:33:00.042')]
self.errors = ExecutionErrors(msgs)
def test_errors(self):
context = JsBuildingContext()
model = ErrorsBuilder(context).build(self.errors)
model = remap(model, context.strings)
assert_equal(model, ((8, 0, 4, 'Error'), (8, 42, 3, 'Warning')))
def test_linking(self):
self.errors.messages.create('Linkable', 'WARN',
timestamp='20111206 14:33:00.001')
context = JsBuildingContext()
msg = TestSuite().tests.create().body.create_keyword().body.create_message(
'Linkable', 'WARN', timestamp='20111206 14:33:00.001'
)
MessageBuilder(context).build(msg)
model = ErrorsBuilder(context).build(self.errors)
model = remap(model, context.strings)
assert_equal(model, ((8, -1, 4, 'Error'),
(8, 41, 3, 'Warning'),
(8, 0, 3, 'Linkable', 's1-t1-k1')))
if __name__ == '__main__':
unittest.main()
|
py | 1a4ef23cbac9c347971854e8b21e9c01b4434a15 | # Copyright (c) 2020 original authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from expertai.nlapi.v1 import constants
from expertai.nlapi.v1.errors import ExpertAiRequestError, MissingParametersError
from expertai.nlapi.v1.object_mapper import ObjectMapper
from expertai.nlapi.v1.request import ExpertAiRequest
from expertai.nlapi.v1.response import ExpertAiResponse
from expertai.nlapi.v1.validate import ExpertAiValidation
class ExpertAiClient:
def __init__(self):
self.response_class = ExpertAiResponse
self._endpoint_path = ""
def urlpath_keywords(self, endpoint_path):
return re.findall(r"\{(\w+)\}", endpoint_path)
def verify_request(self, endpoint_path, **kwargs):
"""
Verify that the user has set all the required parameters.
Some of the endpoint url paths are parameterised, therefore
the user has to provide some value when setting up the
endpoint method
"""
required_params = self.urlpath_keywords(endpoint_path)
if not required_params:
return
params = kwargs.get("params") or {}
missing_params = set(required_params).difference(set(params.keys()))
if required_params and missing_params:
raise MissingParametersError(
"Missing request parameters: {}".format(
",".join(*[missing_params])
)
)
ExpertAiValidation().check_parameters(params=params)
def get_method_name_for_endpoint(self, endpoint_path):
return dict(constants.URLS_AND_METHODS).get(endpoint_path)
def create_request(self, endpoint_path, params=None, body=None):
http_method_name = self.get_method_name_for_endpoint(endpoint_path)
if params:
self.verify_request(endpoint_path, params=params)
endpoint_path = endpoint_path.format(**params)
return ExpertAiRequest(
endpoint_path=endpoint_path,
http_method_name=http_method_name,
body=body,
)
def process_response(self, response):
if not response.successful:
raise ExpertAiRequestError(
"Response status code: {}".format(response.status_code)
)
elif response.bad_request:
return ExpertAiRequestError(
response.bad_request_message(response.json)
)
return ObjectMapper().read_json(response.json)
def full_analysis(self, params, body):
request = self.create_request(
endpoint_path=constants.FULL_ANALYSIS_PATH,
params=params,
body=body,
)
response = self.response_class(response=request.send())
return self.process_response(response)
def specific_resource_analysis(self, params, body):
request = self.create_request(
endpoint_path=constants.SPECIFIC_RESOURCE_ANALYSIS_PATH,
params=params,
body=body,
)
response = self.response_class(response=request.send())
return self.process_response(response)
def iptc_media_topics_classification(self, params, body):
request = self.create_request(
endpoint_path=constants.IPTC_MEDIA_TOPICS_CLASSIFICATION_PATH,
params=params,
body=body,
)
response = self.response_class(response=request.send())
return self.process_response(response)
def contexts(self):
request = self.create_request(endpoint_path=constants.CONTEXTS_PATH)
response = self.response_class(response=request.send())
return self.process_response(response)
def contexts_standard(self):
request = self.create_request(
endpoint_path=constants.CONTEXTS_STANDARD_PATH
)
response = self.response_class(response=request.send())
return self.process_response(response)
def iptc_taxonomies_list(self):
request = self.create_request(
endpoint_path=constants.TAXONOMIES_LIST_PATH
)
response = self.response_class(response=request.send())
return self.process_response(response)
def iptc_taxonomies(self):
request = self.create_request(
endpoint_path=constants.IPTC_TAXONOMIES_PATH
)
response = self.response_class(response=request.send())
return self.process_response(response)
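# Usage sketch (an assumption, not part of the original module): parameter names must
# match the placeholders in the endpoint paths defined in expertai.nlapi.v1.constants,
# and credentials come from the SDK's own configuration.
#   client = ExpertAiClient()
#   output = client.specific_resource_analysis(
#       params={"language": "en", "resource": "entities"},
#       body={"document": {"text": "Some text to analyze."}})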
|
py | 1a4ef28909708b18a00d7a467213fca9e9e25d51 | """
Renderer Module
This module defines the PlotlyRenderer class and a single function,
fig_to_plotly, which is intended to be the main way that user's will interact
with the matplotlylib package.
"""
from __future__ import absolute_import
import six
import warnings
import plotly.graph_objs as go
from plotly.matplotlylib.mplexporter import Renderer
from plotly.matplotlylib import mpltools
# Warning format
def warning_on_one_line(msg, category, filename, lineno, file=None, line=None):
return "%s:%s: %s:\n\n%s\n\n" % (filename, lineno, category.__name__, msg)
warnings.formatwarning = warning_on_one_line
class PlotlyRenderer(Renderer):
"""A renderer class inheriting from base for rendering mpl plots in plotly.
A renderer class to be used with an exporter for rendering matplotlib
plots in Plotly. This module defines the PlotlyRenderer class which handles
the creation of the JSON structures that get sent to plotly.
All class attributes available are defined in __init__().
Basic Usage:
# (mpl code) #
fig = gcf()
    renderer = PlotlyRenderer()
exporter = Exporter(renderer)
exporter.run(fig) # ... et voila
"""
def __init__(self):
"""Initialize PlotlyRenderer obj.
PlotlyRenderer obj is called on by an Exporter object to draw
matplotlib objects like figures, axes, text, etc.
All class attributes are listed here in the __init__ method.
"""
self.plotly_fig = go.Figure()
self.mpl_fig = None
self.current_mpl_ax = None
self.bar_containers = None
self.current_bars = []
self.axis_ct = 0
self.x_is_mpl_date = False
self.mpl_x_bounds = (0, 1)
self.mpl_y_bounds = (0, 1)
self.msg = "Initialized PlotlyRenderer\n"
def open_figure(self, fig, props):
"""Creates a new figure by beginning to fill out layout dict.
The 'autosize' key is set to false so that the figure will mirror
sizes set by mpl. The 'hovermode' key controls what shows up when you
mouse around a figure in plotly, it's set to show the 'closest' point.
        Positional arguments:
fig -- a matplotlib.figure.Figure object.
props.keys(): [
'figwidth',
'figheight',
'dpi'
]
"""
self.msg += "Opening figure\n"
self.mpl_fig = fig
self.plotly_fig["layout"] = go.Layout(
width=int(props["figwidth"] * props["dpi"]),
height=int(props["figheight"] * props["dpi"]),
autosize=False,
hovermode="closest",
)
self.mpl_x_bounds, self.mpl_y_bounds = mpltools.get_axes_bounds(fig)
margin = go.layout.Margin(
l=int(self.mpl_x_bounds[0] * self.plotly_fig["layout"]["width"]),
r=int((1 - self.mpl_x_bounds[1]) * self.plotly_fig["layout"]["width"]),
t=int((1 - self.mpl_y_bounds[1]) * self.plotly_fig["layout"]["height"]),
b=int(self.mpl_y_bounds[0] * self.plotly_fig["layout"]["height"]),
pad=0,
)
self.plotly_fig["layout"]["margin"] = margin
def close_figure(self, fig):
"""Closes figure by cleaning up data and layout dictionaries.
The PlotlyRenderer's job is to create an appropriate set of data and
layout dictionaries. When the figure is closed, some cleanup and
repair is necessary. This method removes inappropriate dictionary
entries, freeing up Plotly to use defaults and best judgements to
complete the entries. This method is called by an Exporter object.
Positional arguments:
fig -- a matplotlib.figure.Figure object.
"""
self.plotly_fig["layout"]["showlegend"] = False
self.msg += "Closing figure\n"
def open_axes(self, ax, props):
"""Setup a new axes object (subplot in plotly).
Plotly stores information about subplots in different 'xaxis' and
'yaxis' objects which are numbered. These are just dictionaries
included in the layout dictionary. This function takes information
from the Exporter, fills in appropriate dictionary entries,
and updates the layout dictionary. PlotlyRenderer keeps track of the
number of plots by incrementing the axis_ct attribute.
Setting the proper plot domain in plotly is a bit tricky. Refer to
the documentation for mpltools.convert_x_domain and
mpltools.convert_y_domain.
Positional arguments:
ax -- an mpl axes object. This will become a subplot in plotly.
props.keys() -- [
'axesbg', (background color for axes obj)
'axesbgalpha', (alpha, or opacity for background)
'bounds', ((x0, y0, width, height) for axes)
'dynamic', (zoom/pan-able?)
'axes', (list: [xaxis, yaxis])
'xscale', (log, linear, or date)
'yscale',
'xlim', (range limits for x)
'ylim',
'xdomain' (xdomain=xlim, unless it's a date)
'ydomain'
]
"""
self.msg += " Opening axes\n"
self.current_mpl_ax = ax
self.bar_containers = [
c
for c in ax.containers # empty is OK
if c.__class__.__name__ == "BarContainer"
]
self.current_bars = []
self.axis_ct += 1
# set defaults in axes
xaxis = go.layout.XAxis(
anchor="y{0}".format(self.axis_ct), zeroline=False, ticks="inside"
)
yaxis = go.layout.YAxis(
anchor="x{0}".format(self.axis_ct), zeroline=False, ticks="inside"
)
# update defaults with things set in mpl
mpl_xaxis, mpl_yaxis = mpltools.prep_xy_axis(
ax=ax, props=props, x_bounds=self.mpl_x_bounds, y_bounds=self.mpl_y_bounds
)
xaxis.update(mpl_xaxis)
yaxis.update(mpl_yaxis)
bottom_spine = mpltools.get_spine_visible(ax, "bottom")
top_spine = mpltools.get_spine_visible(ax, "top")
left_spine = mpltools.get_spine_visible(ax, "left")
right_spine = mpltools.get_spine_visible(ax, "right")
xaxis["mirror"] = mpltools.get_axis_mirror(bottom_spine, top_spine)
yaxis["mirror"] = mpltools.get_axis_mirror(left_spine, right_spine)
xaxis["showline"] = bottom_spine
        yaxis["showline"] = left_spine
# put axes in our figure
self.plotly_fig["layout"]["xaxis{0}".format(self.axis_ct)] = xaxis
self.plotly_fig["layout"]["yaxis{0}".format(self.axis_ct)] = yaxis
# let all subsequent dates be handled properly if required
if "type" in dir(xaxis) and xaxis["type"] == "date":
self.x_is_mpl_date = True
def close_axes(self, ax):
"""Close the axes object and clean up.
Bars from bar charts are given to PlotlyRenderer one-by-one,
thus they need to be taken care of at the close of each axes object.
The self.current_bars variable should be empty unless a bar
chart has been created.
Positional arguments:
ax -- an mpl axes object, not required at this time.
"""
self.draw_bars(self.current_bars)
self.msg += " Closing axes\n"
self.x_is_mpl_date = False
def draw_bars(self, bars):
# sort bars according to bar containers
mpl_traces = []
for container in self.bar_containers:
mpl_traces.append(
[
bar_props
for bar_props in self.current_bars
if bar_props["mplobj"] in container
]
)
for trace in mpl_traces:
self.draw_bar(trace)
def draw_bar(self, coll):
"""Draw a collection of similar patches as a bar chart.
After bars are sorted, an appropriate data dictionary must be created
to tell plotly about this data. Just like draw_line or draw_markers,
draw_bar translates patch/path information into something plotly
understands.
Positional arguments:
patch_coll -- a collection of patches to be drawn as a bar chart.
"""
tol = 1e-10
trace = [mpltools.make_bar(**bar_props) for bar_props in coll]
widths = [bar_props["x1"] - bar_props["x0"] for bar_props in trace]
heights = [bar_props["y1"] - bar_props["y0"] for bar_props in trace]
vertical = abs(sum(widths[0] - widths[iii] for iii in range(len(widths)))) < tol
horizontal = (
abs(sum(heights[0] - heights[iii] for iii in range(len(heights)))) < tol
)
if vertical and horizontal:
# Check for monotonic x. Can't both be true!
x_zeros = [bar_props["x0"] for bar_props in trace]
if all(
(x_zeros[iii + 1] > x_zeros[iii] for iii in range(len(x_zeros[:-1])))
):
orientation = "v"
else:
orientation = "h"
elif vertical:
orientation = "v"
else:
orientation = "h"
if orientation == "v":
self.msg += " Attempting to draw a vertical bar chart\n"
old_heights = [bar_props["y1"] for bar_props in trace]
for bar in trace:
bar["y0"], bar["y1"] = 0, bar["y1"] - bar["y0"]
new_heights = [bar_props["y1"] for bar_props in trace]
# check if we're stacked or not...
for old, new in zip(old_heights, new_heights):
if abs(old - new) > tol:
self.plotly_fig["layout"]["barmode"] = "stack"
self.plotly_fig["layout"]["hovermode"] = "x"
x = [bar["x0"] + (bar["x1"] - bar["x0"]) / 2 for bar in trace]
y = [bar["y1"] for bar in trace]
bar_gap = mpltools.get_bar_gap(
[bar["x0"] for bar in trace], [bar["x1"] for bar in trace]
)
if self.x_is_mpl_date:
x = [bar["x0"] for bar in trace]
formatter = (
self.current_mpl_ax.get_xaxis()
.get_major_formatter()
.__class__.__name__
)
x = mpltools.mpl_dates_to_datestrings(x, formatter)
else:
self.msg += " Attempting to draw a horizontal bar chart\n"
old_rights = [bar_props["x1"] for bar_props in trace]
for bar in trace:
bar["x0"], bar["x1"] = 0, bar["x1"] - bar["x0"]
new_rights = [bar_props["x1"] for bar_props in trace]
# check if we're stacked or not...
for old, new in zip(old_rights, new_rights):
if abs(old - new) > tol:
self.plotly_fig["layout"]["barmode"] = "stack"
self.plotly_fig["layout"]["hovermode"] = "y"
x = [bar["x1"] for bar in trace]
y = [bar["y0"] + (bar["y1"] - bar["y0"]) / 2 for bar in trace]
bar_gap = mpltools.get_bar_gap(
[bar["y0"] for bar in trace], [bar["y1"] for bar in trace]
)
bar = go.Bar(
orientation=orientation,
x=x,
y=y,
xaxis="x{0}".format(self.axis_ct),
yaxis="y{0}".format(self.axis_ct),
opacity=trace[0]["alpha"], # TODO: get all alphas if array?
marker=go.bar.Marker(
color=trace[0]["facecolor"], # TODO: get all
line=dict(width=trace[0]["edgewidth"]),
),
) # TODO ditto
if len(bar["x"]) > 1:
self.msg += " Heck yeah, I drew that bar chart\n"
self.plotly_fig.add_trace(bar),
if bar_gap is not None:
self.plotly_fig["layout"]["bargap"] = bar_gap
else:
self.msg += " Bar chart not drawn\n"
warnings.warn(
"found box chart data with length <= 1, "
"assuming data redundancy, not plotting."
)
def draw_marked_line(self, **props):
"""Create a data dict for a line obj.
This will draw 'lines', 'markers', or 'lines+markers'.
props.keys() -- [
'coordinates', ('data', 'axes', 'figure', or 'display')
'data', (a list of xy pairs)
'mplobj', (the matplotlib.lines.Line2D obj being rendered)
'label', (the name of the Line2D obj being rendered)
'linestyle', (linestyle dict, can be None, see below)
'markerstyle', (markerstyle dict, can be None, see below)
]
props['linestyle'].keys() -- [
'alpha', (opacity of Line2D obj)
'color', (color of the line if it exists, not the marker)
'linewidth',
'dasharray', (code for linestyle, see DASH_MAP in mpltools.py)
'zorder', (viewing precedence when stacked with other objects)
]
props['markerstyle'].keys() -- [
'alpha', (opacity of Line2D obj)
'marker', (the mpl marker symbol, see SYMBOL_MAP in mpltools.py)
'facecolor', (color of the marker face)
'edgecolor', (color of the marker edge)
'edgewidth', (width of marker edge)
'markerpath', (an SVG path for drawing the specified marker)
'zorder', (viewing precedence when stacked with other objects)
]
"""
self.msg += " Attempting to draw a line "
line, marker = {}, {}
if props["linestyle"] and props["markerstyle"]:
self.msg += "... with both lines+markers\n"
mode = "lines+markers"
elif props["linestyle"]:
self.msg += "... with just lines\n"
mode = "lines"
elif props["markerstyle"]:
self.msg += "... with just markers\n"
mode = "markers"
if props["linestyle"]:
color = mpltools.merge_color_and_opacity(
props["linestyle"]["color"], props["linestyle"]["alpha"]
)
# print(mpltools.convert_dash(props['linestyle']['dasharray']))
line = go.scatter.Line(
color=color,
width=props["linestyle"]["linewidth"],
dash=mpltools.convert_dash(props["linestyle"]["dasharray"]),
)
if props["markerstyle"]:
marker = go.scatter.Marker(
opacity=props["markerstyle"]["alpha"],
color=props["markerstyle"]["facecolor"],
symbol=mpltools.convert_symbol(props["markerstyle"]["marker"]),
size=props["markerstyle"]["markersize"],
line=dict(
color=props["markerstyle"]["edgecolor"],
width=props["markerstyle"]["edgewidth"],
),
)
if props["coordinates"] == "data":
marked_line = go.Scatter(
mode=mode,
name=(
str(props["label"])
if isinstance(props["label"], six.string_types)
else props["label"]
),
x=[xy_pair[0] for xy_pair in props["data"]],
y=[xy_pair[1] for xy_pair in props["data"]],
xaxis="x{0}".format(self.axis_ct),
yaxis="y{0}".format(self.axis_ct),
line=line,
marker=marker,
)
if self.x_is_mpl_date:
formatter = (
self.current_mpl_ax.get_xaxis()
.get_major_formatter()
.__class__.__name__
)
marked_line["x"] = mpltools.mpl_dates_to_datestrings(
marked_line["x"], formatter
)
self.plotly_fig.add_trace(marked_line),
self.msg += " Heck yeah, I drew that line\n"
else:
self.msg += " Line didn't have 'data' coordinates, " "not drawing\n"
warnings.warn(
"Bummer! Plotly can currently only draw Line2D "
"objects from matplotlib that are in 'data' "
"coordinates!"
)
def draw_image(self, **props):
"""Draw image.
Not implemented yet!
"""
self.msg += " Attempting to draw image\n"
self.msg += " Not drawing image\n"
warnings.warn(
"Aw. Snap! You're gonna have to hold off on "
"the selfies for now. Plotly can't import "
"images from matplotlib yet!"
)
def draw_path_collection(self, **props):
"""Add a path collection to data list as a scatter plot.
Current implementation defaults such collections as scatter plots.
Matplotlib supports collections that have many of the same parameters
in common like color, size, path, etc. However, they needn't all be
the same. Plotly does not currently support such functionality and
therefore, the style for the first object is taken and used to define
the remaining paths in the collection.
props.keys() -- [
'paths', (structure: [vertices, path_code])
'path_coordinates', ('data', 'axes', 'figure', or 'display')
'path_transforms', (mpl transform, including Affine2D matrix)
'offsets', (offset from axes, helpful if in 'data')
'offset_coordinates', ('data', 'axes', 'figure', or 'display')
'offset_order',
'styles', (style dict, see below)
'mplobj' (the collection obj being drawn)
]
props['styles'].keys() -- [
'linewidth', (one or more linewidths)
'facecolor', (one or more facecolors for path)
'edgecolor', (one or more edgecolors for path)
'alpha', (one or more opacites for path)
'zorder', (precedence when stacked)
]
"""
self.msg += " Attempting to draw a path collection\n"
        if props["offset_coordinates"] == "data":
markerstyle = mpltools.get_markerstyle_from_collection(props)
scatter_props = {
"coordinates": "data",
"data": props["offsets"],
"label": None,
"markerstyle": markerstyle,
"linestyle": None,
}
self.msg += " Drawing path collection as markers\n"
self.draw_marked_line(**scatter_props)
else:
self.msg += " Path collection not linked to 'data', " "not drawing\n"
warnings.warn(
"Dang! That path collection is out of this "
"world. I totally don't know what to do with "
"it yet! Plotly can only import path "
"collections linked to 'data' coordinates"
)
def draw_path(self, **props):
"""Draw path, currently only attempts to draw bar charts.
This function attempts to sort a given path into a collection of
horizontal or vertical bar charts. Most of the actual code takes
place in functions from mpltools.py.
props.keys() -- [
'data', (a list of verticies for the path)
'coordinates', ('data', 'axes', 'figure', or 'display')
'pathcodes', (code for the path, structure: ['M', 'L', 'Z', etc.])
'style', (style dict, see below)
'mplobj' (the mpl path object)
]
props['style'].keys() -- [
'alpha', (opacity of path obj)
'edgecolor',
'facecolor',
'edgewidth',
'dasharray', (style for path's enclosing line)
'zorder' (precedence of obj when stacked)
]
"""
self.msg += " Attempting to draw a path\n"
is_bar = mpltools.is_bar(self.current_mpl_ax.containers, **props)
if is_bar:
self.current_bars += [props]
else:
self.msg += " This path isn't a bar, not drawing\n"
warnings.warn(
"I found a path object that I don't think is part "
"of a bar chart. Ignoring."
)
def draw_text(self, **props):
"""Create an annotation dict for a text obj.
Currently, plotly uses either 'page' or 'data' to reference
annotation locations. These refer to 'display' and 'data',
respectively for the 'coordinates' key used in the Exporter.
Appropriate measures are taken to transform text locations to
reference one of these two options.
props.keys() -- [
'text', (actual content string, not the text obj)
'position', (an x, y pair, not an mpl Bbox)
'coordinates', ('data', 'axes', 'figure', 'display')
'text_type', ('title', 'xlabel', or 'ylabel')
'style', (style dict, see below)
'mplobj' (actual mpl text object)
]
props['style'].keys() -- [
'alpha', (opacity of text)
'fontsize', (size in points of text)
'color', (hex color)
'halign', (horizontal alignment, 'left', 'center', or 'right')
'valign', (vertical alignment, 'baseline', 'center', or 'top')
'rotation',
'zorder', (precedence of text when stacked with other objs)
]
"""
self.msg += " Attempting to draw an mpl text object\n"
if not mpltools.check_corners(props["mplobj"], self.mpl_fig):
warnings.warn(
"Looks like the annotation(s) you are trying \n"
"to draw lies/lay outside the given figure size.\n\n"
"Therefore, the resulting Plotly figure may not be \n"
"large enough to view the full text. To adjust \n"
"the size of the figure, use the 'width' and \n"
"'height' keys in the Layout object. Alternatively,\n"
"use the Margin object to adjust the figure's margins."
)
align = props["mplobj"]._multialignment
if not align:
align = props["style"]["halign"] # mpl default
if "annotations" not in self.plotly_fig["layout"]:
self.plotly_fig["layout"]["annotations"] = []
if props["text_type"] == "xlabel":
self.msg += " Text object is an xlabel\n"
self.draw_xlabel(**props)
elif props["text_type"] == "ylabel":
self.msg += " Text object is a ylabel\n"
self.draw_ylabel(**props)
elif props["text_type"] == "title":
self.msg += " Text object is a title\n"
self.draw_title(**props)
else: # just a regular text annotation...
self.msg += " Text object is a normal annotation\n"
            if props["coordinates"] != "data":
self.msg += (
" Text object isn't linked to 'data' " "coordinates\n"
)
x_px, y_px = (
props["mplobj"].get_transform().transform(props["position"])
)
x, y = mpltools.display_to_paper(x_px, y_px, self.plotly_fig["layout"])
xref = "paper"
yref = "paper"
xanchor = props["style"]["halign"] # no difference here!
yanchor = mpltools.convert_va(props["style"]["valign"])
else:
self.msg += " Text object is linked to 'data' " "coordinates\n"
x, y = props["position"]
axis_ct = self.axis_ct
xaxis = self.plotly_fig["layout"]["xaxis{0}".format(axis_ct)]
yaxis = self.plotly_fig["layout"]["yaxis{0}".format(axis_ct)]
if (
xaxis["range"][0] < x < xaxis["range"][1]
and yaxis["range"][0] < y < yaxis["range"][1]
):
xref = "x{0}".format(self.axis_ct)
yref = "y{0}".format(self.axis_ct)
else:
self.msg += (
" Text object is outside "
"plotting area, making 'paper' reference.\n"
)
x_px, y_px = (
props["mplobj"].get_transform().transform(props["position"])
)
x, y = mpltools.display_to_paper(
x_px, y_px, self.plotly_fig["layout"]
)
xref = "paper"
yref = "paper"
xanchor = props["style"]["halign"] # no difference here!
yanchor = mpltools.convert_va(props["style"]["valign"])
annotation = go.layout.Annotation(
text=(
str(props["text"])
if isinstance(props["text"], six.string_types)
else props["text"]
),
opacity=props["style"]["alpha"],
x=x,
y=y,
xref=xref,
yref=yref,
align=align,
xanchor=xanchor,
yanchor=yanchor,
showarrow=False, # change this later?
font=go.layout.annotation.Font(
color=props["style"]["color"], size=props["style"]["fontsize"]
),
)
self.plotly_fig["layout"]["annotations"] += (annotation,)
self.msg += " Heck, yeah I drew that annotation\n"
def draw_title(self, **props):
"""Add a title to the current subplot in layout dictionary.
If there exists more than a single plot in the figure, titles revert
to 'page'-referenced annotations.
props.keys() -- [
'text', (actual content string, not the text obj)
'position', (an x, y pair, not an mpl Bbox)
'coordinates', ('data', 'axes', 'figure', 'display')
'text_type', ('title', 'xlabel', or 'ylabel')
'style', (style dict, see below)
'mplobj' (actual mpl text object)
]
props['style'].keys() -- [
'alpha', (opacity of text)
'fontsize', (size in points of text)
'color', (hex color)
'halign', (horizontal alignment, 'left', 'center', or 'right')
'valign', (vertical alignment, 'baseline', 'center', or 'top')
'rotation',
'zorder', (precedence of text when stacked with other objs)
]
"""
self.msg += " Attempting to draw a title\n"
if len(self.mpl_fig.axes) > 1:
self.msg += (
" More than one subplot, adding title as " "annotation\n"
)
x_px, y_px = props["mplobj"].get_transform().transform(props["position"])
x, y = mpltools.display_to_paper(x_px, y_px, self.plotly_fig["layout"])
annotation = go.layout.Annotation(
text=props["text"],
font=go.layout.annotation.Font(
color=props["style"]["color"], size=props["style"]["fontsize"]
),
xref="paper",
yref="paper",
x=x,
y=y,
xanchor="center",
yanchor="bottom",
showarrow=False, # no arrow for a title!
)
self.plotly_fig["layout"]["annotations"] += (annotation,)
else:
self.msg += (
" Only one subplot found, adding as a " "plotly title\n"
)
self.plotly_fig["layout"]["title"] = props["text"]
titlefont = dict(
size=props["style"]["fontsize"], color=props["style"]["color"]
)
self.plotly_fig["layout"]["titlefont"] = titlefont
def draw_xlabel(self, **props):
"""Add an xaxis label to the current subplot in layout dictionary.
props.keys() -- [
'text', (actual content string, not the text obj)
'position', (an x, y pair, not an mpl Bbox)
'coordinates', ('data', 'axes', 'figure', 'display')
'text_type', ('title', 'xlabel', or 'ylabel')
'style', (style dict, see below)
'mplobj' (actual mpl text object)
]
props['style'].keys() -- [
'alpha', (opacity of text)
'fontsize', (size in points of text)
'color', (hex color)
'halign', (horizontal alignment, 'left', 'center', or 'right')
'valign', (vertical alignment, 'baseline', 'center', or 'top')
'rotation',
'zorder', (precedence of text when stacked with other objs)
]
"""
self.msg += " Adding xlabel\n"
axis_key = "xaxis{0}".format(self.axis_ct)
self.plotly_fig["layout"][axis_key]["title"] = str(props["text"])
titlefont = dict(size=props["style"]["fontsize"], color=props["style"]["color"])
self.plotly_fig["layout"][axis_key]["titlefont"] = titlefont
def draw_ylabel(self, **props):
"""Add a yaxis label to the current subplot in layout dictionary.
props.keys() -- [
'text', (actual content string, not the text obj)
'position', (an x, y pair, not an mpl Bbox)
'coordinates', ('data', 'axes', 'figure', 'display')
'text_type', ('title', 'xlabel', or 'ylabel')
'style', (style dict, see below)
'mplobj' (actual mpl text object)
]
props['style'].keys() -- [
'alpha', (opacity of text)
'fontsize', (size in points of text)
'color', (hex color)
'halign', (horizontal alignment, 'left', 'center', or 'right')
'valign', (vertical alignment, 'baseline', 'center', or 'top')
'rotation',
'zorder', (precedence of text when stacked with other objs)
]
"""
self.msg += " Adding ylabel\n"
axis_key = "yaxis{0}".format(self.axis_ct)
self.plotly_fig["layout"][axis_key]["title"] = props["text"]
titlefont = dict(size=props["style"]["fontsize"], color=props["style"]["color"])
self.plotly_fig["layout"][axis_key]["titlefont"] = titlefont
def resize(self):
"""Revert figure layout to allow plotly to resize.
By default, PlotlyRenderer tries its hardest to precisely mimic an
mpl figure. However, plotly is pretty good with aesthetics. By
running PlotlyRenderer.resize(), layout parameters are deleted. This
lets plotly choose them instead of mpl.
"""
self.msg += "Resizing figure, deleting keys from layout\n"
for key in ["width", "height", "autosize", "margin"]:
try:
del self.plotly_fig["layout"][key]
except (KeyError, AttributeError):
pass
def strip_style(self):
self.msg += "Stripping mpl style is no longer supported\n"
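# Usage sketch (mirrors the "Basic Usage" block in the class docstring; fig is an
# existing matplotlib figure and Exporter comes from plotly.matplotlylib.mplexporter):
#   renderer = PlotlyRenderer()
#   Exporter(renderer).run(fig)
#   plotly_figure = renderer.plotly_fig   # the converted go.Figure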
|
py | 1a4ef32a26e23d945e2a7b0cc51a8cabe86c755f | from .avl_tree_st import *
from .bst_st import *
from .bst import *
from .linear_probing_ht import *
from .seperate_chaining_ht import *
from .sequential_search_st import *
from ._nodes import *
|
py | 1a4ef35c706acb82c5c6b8c3a11ce6c999eb7248 | #!/usr/bin/env python
# -*- encoding:utf-8 -*-
"""
gh_lists.py MILESTONE
Functions for Github API requests.
"""
from __future__ import print_function, division, absolute_import
import os
import re
import sys
import json
import collections
import argparse
from urllib2 import urlopen
Issue = collections.namedtuple('Issue', ('id', 'title', 'url'))
def main():
p = argparse.ArgumentParser(usage=__doc__.lstrip())
p.add_argument('--project', default='holgern/pyedflib')
p.add_argument('milestone')
args = p.parse_args()
getter = CachedGet('gh_cache.json')
try:
milestones = get_milestones(getter, args.project)
if args.milestone not in milestones:
msg = "Milestone {0} not available. Available milestones: {1}"
msg = msg.format(args.milestone, u", ".join(sorted(milestones)))
p.error(msg)
issues = get_issues(getter, args.project, args.milestone)
issues.sort()
finally:
getter.save()
prs = [x for x in issues if u'/pull/' in x.url]
issues = [x for x in issues if x not in prs]
def print_list(title, items):
print()
print(title)
print("-"*len(title))
print()
for issue in items:
msg = u"- `#{0} <{1}>`__: {2}"
title = re.sub(u"\s+", u" ", issue.title.strip())
if len(title) > 60:
remainder = re.sub(u"\s.*$", u"...", title[60:])
if len(remainder) > 20:
remainder = title[:80] + u"..."
else:
title = title[:60] + remainder
msg = msg.format(issue.id, issue.url, title)
print(msg)
print()
msg = u"Issues closed for {0}".format(args.milestone)
print_list(msg, issues)
msg = u"Pull requests for {0}".format(args.milestone)
print_list(msg, prs)
return 0
def get_milestones(getter, project):
url = "https://api.github.com/repos/{project}/milestones".format(project=project)
raw_data, info = getter.get(url)
data = json.loads(raw_data)
milestones = {}
for ms in data:
milestones[ms[u'title']] = ms[u'number']
return milestones
def get_issues(getter, project, milestone):
milestones = get_milestones(getter, project)
mid = milestones[milestone]
url = "https://api.github.com/repos/{project}/issues?milestone={mid}&state=closed&sort=created&direction=asc"
url = url.format(project=project, mid=mid)
raw_datas = []
while True:
raw_data, info = getter.get(url)
raw_datas.append(raw_data)
if 'link' not in info:
break
m = re.search('<(.*?)>; rel="next"', info['link'])
if m:
url = m.group(1)
continue
break
issues = []
for raw_data in raw_datas:
data = json.loads(raw_data)
for issue_data in data:
issues.append(Issue(issue_data[u'number'],
issue_data[u'title'],
issue_data[u'html_url']))
return issues
class CachedGet(object):
def __init__(self, filename):
self.filename = filename
if os.path.isfile(filename):
print("[gh_lists] using {0} as cache (remove it if you want fresh data)".format(filename),
file=sys.stderr)
with open(filename, 'rb') as f:
self.cache = json.load(f)
else:
self.cache = {}
def get(self, url):
url = unicode(url)
if url not in self.cache:
print("[gh_lists] get:", url, file=sys.stderr)
req = urlopen(url)
if req.getcode() != 200:
raise RuntimeError()
data = req.read()
info = dict(req.info())
self.cache[url] = (data, info)
req.close()
else:
print("[gh_lists] get (cached):", url, file=sys.stderr)
return self.cache[url]
def save(self):
tmp = self.filename + ".new"
with open(tmp, 'wb') as f:
json.dump(self.cache, f)
os.rename(tmp, self.filename)
if __name__ == "__main__":
sys.exit(main())
|
py | 1a4ef52df0ce260ffb4b3cc5d1703144aeb17455 | #!/usr/bin/env python
MAX_JOBS = 3
SECOND = 1
MINUTE = 60 * SECOND
HOUR = 60 * MINUTE
DAY = 24 * HOUR
MIN_DATE = -8640000000000000
MAX_DATE = 8640000000000000
def time(hour: int, minute: int = 0, seconds: int = 0):
return hour * HOUR + minute * MINUTE + seconds * SECOND
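# Example: time(1, 30) == 1 * HOUR + 30 * MINUTE == 5400 seconds.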
|
wsgi | 1a4ef629a7695d67c31e188c9891a6842d9afd1d | import sys
sys.path.append('C:/xampp/htdocs/pythonTest')
from app import app as application |
py | 1a4ef7ce607fd20e8019704ff26c2c4ea6c16dcf | import os
import numpy as np
import music21
import math
# Parameter n_pitch: pitch range goes from 0 to 127
MAX_PITCH = 128
# Parameter d_[duration]_[dots]:
SIGN_DUR = "d"
# Parameter v_[velocity]: velocity (loudness) of the following notes, ranges from 4, 8, 12, ... up to 128 (in steps of 4)
SIGN_VELO = "v"
MIN_VELO = 0
MAX_VELO = 128
# Parameter t_[tempo]: tempo of the following notes, ranges from 24, 28, 32, ... up to 160 (in steps of 4)
SIGN_TEMP0 = "t"
MIN_TEMP0 = 24
MAX_TEMPO = 128
# marker for the end of the piece (End Of File)
SIGN_EOF = "\n"
# new note
SIGN_NOTE = "n"
# marker for the wait time
SIGN_WAIT = "w"
# triple-dotted breve and triple-dotted 32nd note
THREE_DOTTED_BREVE = 15
THREE_DOTTED_32ND = 0.21875
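# Illustrative token stream produced by this encoding (values are assumed, the token
# format is taken from piano_roll_to_encoded below):
#   "t_120 v_100 d_quarter_0 n_60 w_4 \n"
# i.e. tempo 120, velocity 100, an undotted quarter note, MIDI pitch 60, wait 4 time
# steps, end of piece.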
def load_midi(data_path, sample_freq=4, piano_range=(33, 93), transpo_range=10, stretching_range=10):
text = ""
vocab = set()
if os.path.isfile(data_path):
        # the given path is a single MIDI file
file_extension = os.path.splitext(data_path)[1]
if file_extension == ".midi" or file_extension == ".mid":
text = parse_midi(file_path=data_path, piano_range=piano_range, sample_freq=sample_freq,
transpo_range=transpo_range, stretching_range=stretching_range)
vocab = set(text.split(" "))
else:
        # load each file individually
        for file in os.listdir(data_path):
            file_path = os.path.join(data_path, file)
            file_extension = os.path.splitext(file_path)[1]
            # check that file_path is not another directory and that the extension matches (.mid or .midi)
if os.path.isfile(file_path) and (file_extension == ".midi" or file_extension == ".mid"):
encoded_midi = parse_midi(file_path=file_path, piano_range=piano_range, sample_freq=sample_freq,
transpo_range=transpo_range, stretching_range=stretching_range)
if len(encoded_midi) > 0:
words = set(encoded_midi.split(" "))
vocab = vocab | words
text += encoded_midi + " "
# letztes Leerzeichen wird entfernt
text = text[:-1]
return text, vocab
def parse_midi(file_path: str, piano_range, sample_freq, transpo_range, stretching_range):
midi_file_path = None
print(f"> Parse MIDI-File: {file_path}")
    # a file including its path can also be passed as the parameter:
    midi_dir = os.path.dirname(file_path)
    midi_name = os.path.basename(file_path).split(".")[0]
    # if a txt file for this MIDI file already exists, load it instead
midi_txt_name = os.path.join(midi_dir, midi_name + ".txt")
if (os.path.isfile(midi_txt_name)):
midi_file_path = open(midi_txt_name, "r")
encoded_midi = midi_file_path.read()
else:
        # load the MIDI file with music21
midi = music21.midi.MidiFile()
midi.open(file_path)
midi.read()
midi.close()
        # convert the MIDI file into a list of notes and chords
encoded_midi = midi_to_encoded(midifile=midi, piano_range=piano_range, sample_freq=sample_freq,
transpo_range=transpo_range, stretching_range=stretching_range)
if len(encoded_midi) > 0:
            # create a new txt file
midi_file_path = open(midi_txt_name, "w+")
midi_file_path.write(encoded_midi)
midi_file_path.flush()
if midi_file_path: midi_file_path.close()
return encoded_midi
def midi_to_encoded(midifile, piano_range, sample_freq, transpo_range, stretching_range):
try:
stream = music21.midi.translate.midiFileToStream(midifile)
except:
return []
piano_roll = midi_to_piano_roll(midi_stream=stream, sample_freq=sample_freq, piano_range=piano_range,
transpo_range=transpo_range, stretching_range=stretching_range)
encoded = piano_roll_to_encoded(piano_roll)
return " ".join(encoded)
def piano_roll_to_encoded(piano_roll):
    # convert the piano_roll into a list of strings that represent the notes
encoded = {}
counter = 0
for version in piano_roll:
        # reset the last tempo, velocity and duration to -1
_tempo = -1
_velo = -1
_duration = -1.0
version_encoded = []
for i in range(len(version)):
            # the tempo information is stored in the last row
            tempo = version[i, -1][0]
            # add the new tempo
if tempo != 0 and tempo != _tempo:
version_encoded.append(SIGN_TEMP0 + "_" + str(int(tempo)))
_tempo = tempo
            # continue with the current time step
            for next_step in range(len(version[i]) - 1):
                duration = version[i, next_step][0]
                velo = int(version[i, next_step][1])
                # new velocity
if velo != 0 and velo != _velo:
version_encoded.append(SIGN_VELO + "_" + str(velo))
_velo = velo
                # new duration
if duration != 0 and duration != _duration:
duration_tuple = music21.duration.durationTupleFromQuarterLength(duration)
version_encoded.append(SIGN_DUR + "_" + duration_tuple.type + "_" + str(duration_tuple.dots))
_duration = duration
                # add a new note
if velo != 0 and duration != 0:
version_encoded.append(SIGN_NOTE + "_" + str(next_step))
            # end of this time step
            if (len(version_encoded) > 0) and version_encoded[-1][0] == SIGN_WAIT:
                # increase the wait time by 1
version_encoded[-1] = "w_" + str(int(version_encoded[-1].split("_")[1]) + 1)
else:
version_encoded.append("w_1")
        # mark the end of the piece
version_encoded.append(SIGN_EOF)
        # check whether this version of the MIDI file has already been added
version_encoded_str = " ".join(version_encoded)
if version_encoded_str not in encoded:
encoded[version_encoded_str] = counter
counter += 1
return encoded.keys()
def write(encoded_midi, path):
    # create a MIDI file from the given encoded MIDI data
midi = encoded_to_midi(encoded_midi)
midi.open(path, "wb")
midi.write()
midi.close()
def encoded_to_midi(note_encoded, ts_duration=0.25):
notes = []
velo = 100
duration = "16th"
dots = 0
ts = 0
for note in note_encoded.split(" "):
if len(note) == 0:
continue
elif note[0] == SIGN_WAIT:
wait_counter = int(note.split("_")[1])
ts += wait_counter
elif note[0] == SIGN_NOTE:
pitch = int(note.split("_")[1])
note = music21.note.Note(pitch)
note.duration = music21.duration.Duration(type=duration, dots=dots)
note.offset = ts * ts_duration
note.volume.velocity = velo
notes.append(note)
elif note[0] == SIGN_DUR:
duration = note.split("_")[1]
dots = int(note.split("_")[2])
elif note[0] == SIGN_VELO:
velo = int(note.split("_")[1])
elif note[0] == SIGN_TEMP0:
if note.split("_")[1] != "":
tempo = int(note.split("_")[1])
if tempo > 0:
mark = music21.tempo.MetronomeMark(number=tempo)
mark.offset = ts * ts_duration
notes.append(mark)
piano = music21.instrument.fromString("Piano")
notes.insert(0, piano)
piano_stream = music21.stream.Stream(notes)
main_stream = music21.stream.Stream([piano_stream])
midi_file = music21.midi.translate.streamToMidiFile(main_stream)
return midi_file
def midi_parse_notes(midi_stream, sample_freq):
note_filter = music21.stream.filters.ClassFilter('Note')
events = []
notes_list = midi_stream.recurse().addFilter(note_filter)
for note in notes_list:
pitch = note.pitch.midi
dur = note.duration.quarterLength
velo = note.volume.velocity
        # round down
offset = math.floor(note.offset * sample_freq)
events.append((pitch, dur, velo, offset))
return events
def midi_parse_chords(midi_stream, sample_freq):
chord_filter = music21.stream.filters.ClassFilter('Chord')
events = []
chords_list = midi_stream.recurse().addFilter(chord_filter)
for chord in chords_list:
pitches_in_chord = chord.pitches
for p in pitches_in_chord:
pitch = p.midi
dur = chord.duration.quarterLength
velo = chord.volume.velocity
offset = math.floor(chord.offset * sample_freq)
events.append((pitch, dur, velo, offset))
return events
def midi_parse_metronome(midi_stream, sample_freq):
metro_filter = music21.stream.filters.ClassFilter('MetronomeMark')
events = []
metro_list = midi_stream.recurse().addFilter(metro_filter)
for metro in metro_list:
time = int(metro.number)
offset = math.floor(metro.offset * sample_freq)
events.append((time, offset))
return events
def midi_to_notes(midi_stream, sample_freq, transpo_range):
notes = []
notes += midi_parse_notes(midi_stream=midi_stream, sample_freq=sample_freq)
notes += midi_parse_chords(midi_stream=midi_stream, sample_freq=sample_freq)
    # transpose all notes into the desired range
transposed_notes = transpose_notes(notes, transpo_range)
return transposed_notes
def transpose_notes(notes, transpo_range):
transpos = []
first_key = -math.floor(transpo_range/2)
last_key = math.ceil(transpo_range/2)
for key in range(first_key, last_key):
notes_in_key = []
for n in notes:
pitch, dur, velo, offset = n
new_pitch = pitch + key
notes_in_key.append((new_pitch, dur, velo, offset))
transpos.append(notes_in_key)
return transpos
def midi_to_piano_roll(midi_stream, sample_freq, piano_range, transpo_range, stretching_range):
    # compute the number of time steps in the piano roll
    time_steps = math.floor(midi_stream.duration.quarterLength * sample_freq) + 1
    # MIDI file --> list of (pitch, duration, velocity, offset) tuples
transpos = midi_to_notes(midi_stream=midi_stream, sample_freq=sample_freq, transpo_range=transpo_range)
time_events = midi_parse_metronome(midi_stream=midi_stream, sample_freq=sample_freq)
time_stretches = stretch_time(time_events=time_events, stretching_range=stretching_range)
piano_roll_notes = notes_to_piano_roll(transpositions=transpos, time_stretches=time_stretches,
time_steps=time_steps, piano_range=piano_range)
return piano_roll_notes
def notes_to_piano_roll(transpositions, time_stretches, time_steps, piano_range):
performances = []
min_pitch, max_pitch = piano_range
for t in range(len(transpositions)):
for s in range(len(time_stretches)):
            # new piano roll with the computed size
            # additional last dimension to store duration and velocity per cell
piano_roll = np.zeros((time_steps, MAX_PITCH + 1, 2))
for note in transpositions[t]:
pitch, dur, velo, offset = note
if dur == 0.0:
continue
pitch = clamp_pitch(pitch=pitch, max=max_pitch, min=min_pitch)
piano_roll[offset, pitch][0] = clamp_duration(dur)
piano_roll[offset, pitch][1] = discretize_value(val=velo, bins=32, range_=(MIN_VELO, MAX_VELO))
for time_events in time_stretches[s]:
time, offset = time_events
piano_roll[offset, -1][0] = discretize_value(val=time, bins=100, range_=(MIN_TEMP0, MAX_TEMPO))
performances.append(piano_roll)
return performances
def stretch_time(time_events, stretching_range):
stretches = []
slower_time = -math.floor(stretching_range/2)
faster_time = math.ceil(stretching_range/2)
for stretch_time in range(slower_time, faster_time):
time_events_in_stretch = []
for e in time_events:
time, offset = e
s_time = time + 0.05 * stretch_time * MAX_TEMPO
time_events_in_stretch.append((s_time, offset))
stretches.append(time_events_in_stretch)
return stretches
def discretize_value(val, bins, range_):
min_val, max_val = range_
val = int(max(min_val, val))
val = int(min(val, max_val))
bin_size = (max_val/bins)
return math.floor(val/bin_size) * bin_size
def clamp_pitch(pitch, max, min):
while pitch < min:
pitch += 12
while pitch >= max:
pitch -= 12
return pitch
def clamp_duration(dur, max=THREE_DOTTED_BREVE, min=THREE_DOTTED_32ND):
    # if the given duration (dur) is longer than the maximum (three-dotted breve)
if dur > max:
dur = max
    # if the duration is shorter than the minimum (three-dotted 32nd)
if dur < min:
dur = min
dur_tuple = music21.duration.durationTupleFromQuarterLength(dur)
if dur_tuple.type == "inexpressible":
duration_clos_type = music21.duration.quarterLengthToClosestType(dur)[0]
dur = music21.duration.typeToDuration[duration_clos_type]
return dur
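
# Illustrative usage sketch (not part of the original module; the input file
# name and parameter values below are assumptions, not values from this repo):
#
#   stream = music21.converter.parse("example.mid")
#   rolls = midi_to_piano_roll(midi_stream=stream, sample_freq=4,
#                              piano_range=(33, 93), transpo_range=10,
#                              stretching_range=10)
#   # rolls is a list of numpy arrays of shape (time_steps, MAX_PITCH + 1, 2),
#   # one per transposition/time-stretch combination.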
|
py | 1a4ef8080feeb40a931ddd3b62646150ab0f759a | from click.testing import CliRunner
from facilyst.__main__ import cli
def test_print_cli_cmd():
runner = CliRunner()
result = runner.invoke(cli)
assert result.exit_code == 0
|
py | 1a4ef874ff38f85c1629a9a54cf5d3428f9aecc1 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
import time
class account_voucher_line(osv.osv):
def _supplier_invoice_number(self, cursor, user, ids, name, arg, context=None):
res = {}
cursor.execute("""SELECT vl.id, i.supplier_invoice_number
FROM account_voucher_line vl
inner join account_move_line ml on vl.move_line_id = ml.id
left outer join account_invoice i on ml.move_id = i.move_id
WHERE vl.id IN %s""",(tuple(ids),))
for line_id, supplier_invoice_number in cursor.fetchall():
res[line_id] = supplier_invoice_number
return res
_inherit = 'account.voucher.line'
_columns = {
'supplier_invoice_number': fields.function(_supplier_invoice_number, string='Supplier Invoice Number', type='char'),
}
account_voucher_line()
class account_voucher(osv.osv):
_inherit = 'account.voucher'
_columns = {
'date_cheque':fields.date('Cheque Date', readonly=True, select=True, states={'draft':[('readonly',False)]}),
'number_cheque':fields.char('Cheque No.', size=64),
}
_defaults = {
'date_cheque': lambda *a: time.strftime('%Y-%m-%d'),
}
account_voucher()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
py | 1a4ef959cbd75da1e5895b39f9e0e440da77f5df | # -*- coding: utf-8 -*-
"""
meraki
This file was automatically generated for meraki by APIMATIC v2.0 ( https://apimatic.io ).
"""
class CreateNetworkSmTargetGroupsModel(object):
"""Implementation of the 'createNetworkSmTargetGroups' model.
TODO: type model description here.
Attributes:
name (string): The name of this target group
scope (string): The scope and tag options of the target group. Comma
separated values beginning with one of withAny, withAll,
withoutAny, withoutAll, all, none, followed by tags
"""
# Create a mapping from Model property names to API property names
_names = {
"name":'name',
"scope":'scope'
}
def __init__(self,
name=None,
scope=None,
additional_properties = {}):
"""Constructor for the CreateNetworkSmTargetGroupsModel class"""
# Initialize members of the class
self.name = name
self.scope = scope
# Add additional model properties to the instance
self.additional_properties = additional_properties
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
name = dictionary.get('name')
scope = dictionary.get('scope')
# Clean out expected properties from dictionary
for key in cls._names.values():
if key in dictionary:
del dictionary[key]
# Return an object of this model
return cls(name,
scope,
dictionary)
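
# Illustrative usage (not part of the generated SDK; the payload values are
# made-up examples):
#
#   payload = {'name': 'All owned devices', 'scope': 'withAny, tag1, tag2'}
#   model = CreateNetworkSmTargetGroupsModel.from_dictionary(payload)
#   # model.name == 'All owned devices', model.scope == 'withAny, tag1, tag2'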
|
py | 1a4ef9ddb749911de5f8676a98d22413676cb87e | #@+leo-ver=5-thin
#@+node:tbrown.20091029123555.5319: * @file ../plugins/attrib_edit.py
#@+<< docstring >>
#@+node:tbrown.20091009210724.10972: ** << docstring >>
r""" Edits user attributes in a Qt frame.
This plugin creates a frame for editing attributes similar to::
Name: Fred Blogs
Home: 555-555-5555
Work: 555-555-5556
``attrib_edit`` is also intended to provide attribute editing for
other plugins, see below.
The editor panel appears in the Log pane in its own tab. If the free_layout
system is active you can move it into its own pane (e.g. below the body text)
by right clicking the pane dividers.
The attributes can be stored in different ways, three modes are implemented
currently:
v.u mode
These attributes are stored in the "unknownAttributes" (uA) data for
each node, accessed via v.u.
Field:
Attributes are lines starting (no whitespace) with "AttributeName:" in
the body text.
@Child
Attributes are the head strings of child nodes when the head string
starts with '@AttributeName' where the first letter (second character)
must be capitalized.
The plugin defines the following commands, available either in the
plugin's sub-menu in the Plugins menu, or as ``Alt-X attrib-edit-*``.
attrib-edit-modes
Select which attribute setting / getting modes to use. More than one mode
can be used at the same time.
You can also control which modes are active by listing them
with the @data attrib_edit_active_modes setting. For example::
Field:
@Child
# v.u mode
would cause only the "Field:" and "@Child" modes to be active be default.
attrib-edit-manage
Select which attributes, from all attributes seen so
far in this outline, to include on the current node.
attrib-edit-scan
Scan the entire outline for attributes so ``attrib-edit-manage``
has the complete list.
attrib-edit-create
Create a new attribute on the current node. If Field: or \@Child modes
are active, they simply remind you how to create an attribute in the log pane.
If the "v.u mode" mode is active, you're prompted for a path for the attribute.
For example::
addressbook First
to store the attribute in v.u['addressbook']['_edit']['First']
As a convenience, entering a path like::
todo metadata created|creator|revised
would create::
v.u.['todo']['metadata']['_edit']['created']
v.u.['todo']['metadata']['_edit']['creator']
v.u.['todo']['metadata']['_edit']['revised']
**Technical details**
See the source for complete documentation for use with other
plugins. Here are some points of interest:
- In addition to ``v.u['addressbook']['_edit']['first']``, paths
like ``v.u['addressbook']['_edit']['_int']['age']`` may be used
to identify type, although currently there's no difference in
the edit widget.
- In the future the plugin may allow other plugins to register
to provide attribute path information, instead of just
scanning for ['_edit'] entries in v.u.
- Currently there's no sorting of the attributes in "v.u mode", which is
a problem for some applications. It's unclear where the
desired order would be stored, without even more repetition
in v.u. When other plugins can register to manipulate the
attribute list each plugin could address this, with unordered
presentation in the absence of the client plugin.
"""
#@-<< docstring >>
# Written by TNB.
from leo.core import leoGlobals as g
from leo.core.leoQt import isQt6, QtConst, QtCore, QtWidgets
from leo.core.leoQt import DialogCode, Orientation
#
# Fail fast, right after all imports.
g.assertUi('qt') # May raise g.UiTypeException, caught by the plugins manager.
#@+others
#@+node:tbrown.20091009210724.10975: ** init
def init():
"""Return True if the plugin has loaded successfully."""
if g.app.gui.guiName() != "qt":
print('attrib_edit.py plugin not loading because gui is not Qt')
return False
g.registerHandler('after-create-leo-frame', onCreate)
g.plugin_signon(__name__)
return True
#@+node:tbrown.20091009210724.10976: ** onCreate
def onCreate(tag, key):
c = key.get('c')
attrib_edit_Controller(c)
#@+node:tbrown.20091103080354.1400: ** class AttributeGetter
class AttributeGetter:
implementations = []
typeMap = {
'_int': int,
'_float': float,
'_bool': bool,
}
@classmethod
def register(cls, subclass):
cls.implementations.append(subclass)
def __init__(self, c):
self.c = c
def name(self):
return "ABSTRACT VIRTUAL BASE CLASS"
def getAttribs(self, v):
raise NotImplementedError
def setAttrib(self, v, path, value):
raise NotImplementedError
def delAttrib(self, v, path):
raise NotImplementedError
def helpCreate(self):
"""either a string telling user how to add an attribute, or
True if the Getter needs to help the user create an attribute"""
return "ABSTRACT VIRTUAL BASE CLASS"
def longDescrip(self, path):
"""give the long description of the attribute on path 'path'.
ASSUMES: path determines name
E.g. attribute named 'count' might be described as 'address.people.count'
"""
raise NotImplementedError
#@+node:tbrown.20091103080354.1402: ** class AttributeGetterUA
class AttributeGetterUA(AttributeGetter):
#@+others
#@+node:tbrown.20091103080354.1409: *3* recSearch
def recSearch(self, d, path, ans):
"""recursive search of tree of dicts for values whose
key path is like [*][*][*]['_edit'][*] or
[*][*][*]['_edit']['_int'][*]
Modifies list ans
"""
for k in d:
if isinstance(d[k], dict):
if k not in ('_edit', '_view'):
self.recSearch(d[k], path + [k], ans)
else:
# k == '_edit' or '_view'
for ek in d[k]:
if ek in self.typeMap:
# ek is '_int' or similar
type_ = self.typeMap[ek]
for ekt in d[k][ek]:
ans.append((self,
ekt, d[k][ek][ekt], tuple(path + ['_edit', ek, ekt]),
type_, k != '_edit'))
else:
ans.append((self,
ek, d[k][ek], tuple(path + ['_edit', ek]), str, k != '_edit'))
#@+node:tbrown.20091103080354.1410: *3* getAttribs
def getAttribs(self, v):
"""Return a list of tuples describing editable uAs.
(class, name, value, path, type, readonly)
e.g.
(AttributeGetterUA, 'created', '2009-09-23', ('stickynotes','_edit','created'), str, False),
(AttributeGetterUA, 'cars', 2, ('inventory','_edit','_int','cars'), int, False)
Changes should be written back to
v.uA['stickynotes']['_edit']['created'] and
v.uA['inventory']['_edit']['_int']['cars'] respectively
"""
ans = []
d = v.u
self.recSearch(d, [], ans)
return ans
#@+node:tbrown.20091103080354.1430: *3* setAttrib
def setAttrib(self, v, path, value):
"""copy value into dict a on path,
e.g. a['one']['more']['level'] = value
"""
a = v.u
for i in path[:-1]:
a = a.setdefault(i, {})
a[path[-1]] = value
#@+node:tbrown.20091103080354.1438: *3* delAttrib
def delAttrib(self, v, path):
a = v.u
for i in path[:-1]:
try:
a = a[i]
except KeyError:
return
try:
del a[path[-1]]
except KeyError:
pass
#@+node:tbrown.20091103080354.1411: *3* name
def name(self):
return "v.u mode"
#@+node:tbrown.20091103080354.1431: *3* helpCreate
def helpCreate(self):
"""does the Getter need to help the user create an attribute?"""
return True
#@+node:tbrown.20091103080354.1432: *3* createAttrib
def createAttrib(self, v, gui_parent=None):
path, ok = QtWidgets.QInputDialog.getText(gui_parent,
"Enter attribute path",
"Enter path to attribute (space separated words)")
ns = str(path).split()
if not ok or not ns:
g.es("Cancelled")
return
#FIXME type_ = {True: '_view', False: '_edit'}[readonly]
type_ = '_edit'
if '|' in ns[-1]:
nslist = [ns[:-1] + [i.strip()] for i in ns[-1].split('|')]
else:
nslist = [ns]
for ns in nslist:
if type_ not in ns:
ns.insert(-1, type_)
self.setAttrib(v, ns, '')
#FIXME self.attrPaths.add(tuple(ns))
#@+node:tbrown.20091103080354.1433: *3* longDescrip
def longDescrip(self, path):
return '.'.join([j for j in path if j not in ('_edit', '_view')])
#@-others
AttributeGetter.register(AttributeGetterUA)
#@+node:tbrown.20091103080354.1420: ** class AttributeGetterAt
class AttributeGetterAt(AttributeGetter):
#@+others
#@+node:tbrown.20091103080354.1422: *3* getAttribs
def getAttribs(self, v):
"""Return a list of tuples describing editable uAs.
(class, name, value, path, type, readonly)
e.g.
(AttributeGetterUA, 'created', '2009-09-23', ('stickynotes','_edit','created'), str, False),
(AttributeGetterUA, 'cars', 2, ('inventory','_edit','_int','cars'), int, False)
Changes should be written back to
v.uA['stickynotes']['_edit']['created'] and
v.uA['inventory']['_edit']['_int']['cars'] respectively
"""
ans = []
for n in v.children:
if n.h and n.h[0] == '@' and ('A' <= n.h[1] <= 'Z'):
words = n.h[1:].split(None, 1)
if not words:
continue
if len(words) == 1:
words.append('')
ans.append((self, words[0], words[1], words[0], str, False))
return ans
#@+node:tbrown.20091103080354.6237: *3* setAttrib
def setAttrib(self, v, path, value):
for n in v.children:
if n.h[0] == '@' and ('A' <= n.h[1] <= 'Z'):
words = n.h[1:].split(None, 1)
if len(words) == 1:
words.append('')
if words[0] == path:
n.h = "@%s %s" % (path, value)
break
else:
p = self.c.vnode2position(v)
n = p.insertAsLastChild()
n.h = "@%s %s" % (path, value)
#@+node:tbrown.20091103080354.6244: *3* delAttrib
def delAttrib(self, v, path):
for n in v.children:
if n.h[0] == '@' and ('A' <= n.h[1] <= 'Z'):
words = n.h[1:].split(None, 1)
if not words:
continue
if words[0] == path:
p = self.c.vnode2position(n)
p.doDelete()
break
#@+node:tbrown.20091103080354.1423: *3* name
def name(self):
return "@Child"
#@+node:tbrown.20091103080354.1443: *3* helpCreate
def helpCreate(self):
return "Add a child named '@AttributeName'"
#@+node:tbrown.20091103080354.1435: *3* longName
def longDescrip(self, path):
return path
#@-others
AttributeGetter.register(AttributeGetterAt)
#@+node:tbrown.20091103080354.1427: ** class AttributeGetterColon
class AttributeGetterColon(AttributeGetter):
#@+others
#@+node:tbrown.20091103080354.1428: *3* getAttribs
def getAttribs(self, v):
ans = []
parts = v.b.split('\n', 100)
for i in parts[:99]:
if not i or i[0].isspace():
continue
words = i.split(None, 1)
if words and words[0] and words[0][-1] == ':':
if len(words) == 1:
words.append('')
ans.append((self, words[0][:-1], words[1], words[0][:-1], str, False))
return ans
#@+node:tbrown.20091103080354.6246: *3* setAttrib
def setAttrib(self, v, path, value):
parts = v.b.split('\n', 100)
for n, i in enumerate(parts[:99]):
words = i.split(None, 1)
if words and words[0] and words[0][-1] == ':' and words[0][:-1] == path:
parts[n] = "%s: %s" % (path, value)
v.b = '\n'.join(parts)
break
else:
v.b = "%s: %s\n%s" % (path, value, v.b)
#@+node:tbrown.20091103080354.6248: *3* delAttrib
def delAttrib(self, v, path):
parts = v.b.split('\n', 100)
for n, i in enumerate(parts[:99]):
words = i.split(None, 1)
if words and words[0] and words[0][-1] == ':' and words[0][:-1] == path:
del parts[n]
v.b = '\n'.join(parts)
break
#@+node:tbrown.20091103080354.1429: *3* name
def name(self):
return "Field:"
#@+node:tbrown.20091103080354.1441: *3* helpCreate
def helpCreate(self):
return "Add 'AttributeName:' to the text"
#@+node:tbrown.20091103080354.1437: *3* longName
def longDescrip(self, path):
return path
#@-others
AttributeGetter.register(AttributeGetterColon)
#@+node:tbrown.20091028131637.1353: ** class ListDialog
class ListDialog(QtWidgets.QDialog):
#@+others
#@+node:tbrown.20091028131637.1354: *3* __init__ (attrib_edit.py)
def __init__(self, parent, title, text, entries):
self.entries = entries
super().__init__(parent)
vbox = QtWidgets.QVBoxLayout()
sa = QtWidgets.QScrollArea()
salo = QtWidgets.QVBoxLayout()
frame = QtWidgets.QFrame()
frame.setLayout(salo)
self.buttons = []
for entry in entries:
hbox = QtWidgets.QHBoxLayout()
cb = QtWidgets.QCheckBox(entry[0])
self.buttons.append(cb)
if entry[1]:
cb.setChecked(True if isQt6 else QtConst.Checked)
hbox.addWidget(cb)
salo.addLayout(hbox)
sa.setWidget(frame)
vbox.addWidget(sa)
hbox = QtWidgets.QHBoxLayout()
ok = QtWidgets.QPushButton("Ok")
cancel = QtWidgets.QPushButton("Cancel")
ok.clicked.connect(self.writeBack)
cancel.clicked.connect(self.reject)
# QtCore.QObject.connect(ok, QtCore.SIGNAL('clicked(bool)'), self.writeBack)
# QtCore.QObject.connect(cancel, QtCore.SIGNAL('clicked(bool)'), self.reject)
hbox.addWidget(ok)
hbox.addWidget(cancel)
vbox.addLayout(hbox)
self.setLayout(vbox)
#@+node:tbrown.20091028131637.1359: *3* writeBack
def writeBack(self, event=None):
for n, i in enumerate(self.buttons):
self.entries[n][1] = (i.isChecked())
self.accept()
#@-others
#@+node:tbrown.20091010211613.5257: ** class editWatcher
class editWatcher:
"""class to supply widget for editing attribute and handle
its textChanged signal"""
def __init__(self, c, v, class_, name, value, path, type_):
"""v - node whose attribute we edit
name - name of edited attribute
value - initial value of edited attribute
path - dictionary key path to attribute in v.u
type_ - attribute type
"""
self.c = c
self.v = v
self.class_ = class_
self.name = name
self.value = value
self.path = path
self.type_ = type_
self._widget = None
def widget(self):
"""return widget for editing this attribute"""
if not self._widget:
self._widget = w = QtWidgets.QLineEdit(str(self.value))
w.textChanged.connect(self.updateValue)
self._widget.focusOutEvent = self.lostFocus
# see lostFocus()
return self._widget
def updateValue(self, newValue):
"""copy value from widget to v.u"""
self.class_.setAttrib(self.v, self.path, self.type_(newValue))
self.v.setDirty()
def lostFocus(self, event):
"""Can activate this in in widget(), but it stops tabbing through
the attributes - unless we can check that none of our siblings
has focus..."""
sibs = self._widget.parent().findChildren(QtWidgets.QLineEdit)
for i in sibs:
if i.hasFocus():
break
else:
self.c.redraw()
#X def setValue(a, path, value):
#X """copy value into dict a on path,
#X e.g. a['one']['more']['level'] = value
#X """
#X for i in path[:-1]:
#X a = a.setdefault(i, {})
#X a[path[-1]] = value
#@+node:tbrown.20091009210724.10979: ** class attrib_edit_Controller
class attrib_edit_Controller:
"""A per-commander class that manages attribute editing."""
#@+others
#@+node:tbrown.20091009210724.10981: *3* __init__ & reloadSettings (attrib_edit_Controller)
def __init__(self, c):
self.c = c
c.attribEditor = self
self.pname = "_attrib_edit_frame" # used to tag out panel
self.reloadSettings()
self.attrPaths = set() # set of tuples (getter-class, path)
self.handlers = [
('select3', self.updateEditor),
]
for i in self.handlers:
g.registerHandler(i[0], i[1])
# 'body' or 'tab' mode
# self.guiMode = c.config.getString('attrib-edit-placement') or 'tab'
self.guiMode = 'tab'
# body mode in not compatible with nested_splitter, causes hard crash
if self.guiMode == 'body':
self.holder = QtWidgets.QSplitter(Orientation.Vertical)
self.holder.setMinimumWidth(300)
parent = c.frame.top.leo_body_frame.parent()
self.holder.addWidget(c.frame.top.leo_body_frame)
parent.addWidget(self.holder)
self.parent = self.holder
elif self.guiMode == 'tab':
self.parent = QtWidgets.QFrame()
self.holder = QtWidgets.QHBoxLayout()
self.parent.setLayout(self.holder)
c.frame.log.createTab('Attribs', widget=self.parent)
def reloadSettings(self):
c = self.c
c.registerReloadSettings(self)
active = c.config.getData('attrib_edit_active_modes') or []
self.getsetters = []
for i in AttributeGetter.implementations:
s = i(c)
self.getsetters.append([s, (s.name() in active)])
if not active:
self.getsetters[0][1] = True # turn on the first one
#@+node:tbrown.20091009210724.10983: *3* __del__
def __del__(self):
for i in self.handlers:
g.unregisterHandler(i[0], i[1])
#@+node:tbrown.20091009210724.11210: *3* initForm
def initForm(self):
"""set up self.form, the blank form layout before adding edit widgets"""
self.editors = []
w = self.holder
for i in w.parent().findChildren(QtCore.QObject):
if i.objectName() == self.pname:
i.hide()
i.deleteLater()
pnl = QtWidgets.QFrame()
pnl.setObjectName(self.pname)
self.form = QtWidgets.QFormLayout()
self.form.setVerticalSpacing(0)
pnl.setLayout(self.form)
pnl.setAutoFillBackground(True)
w.addWidget(pnl)
#@+node:tbrown.20091009210724.11047: *3* updateEditor
def updateEditor(self, tag, k):
"""update edit panel when new node selected"""
if k['c'] != self.c:
return # not our problem
self.updateEditorInt()
#@+node:tbrown.20091028100922.1493: *3* updateEditorInt
def updateEditorInt(self):
c = self.c
self.initForm()
for attr in self.getAttribs():
class_, name, value, path, type_, readonly = attr
if readonly:
self.form.addRow(QtWidgets.QLabel(name), QtWidgets.QLabel(str(value)))
else:
editor = editWatcher(c, c.currentPosition().v, class_, name, value, path, type_)
self.editors.append(editor)
self.form.addRow(QtWidgets.QLabel(name), editor.widget())
#@+node:tbrown.20091103080354.1405: *3* recSearch (not used)
# def JUNKrecSearch(self, d, path, ans):
# """recursive search of tree of dicts for values whose
# key path is like [*][*][*]['_edit'][*] or
# [*][*][*]['_edit']['_int'][*]
# Modifies list ans
# """
# for k in d:
# if isinstance(d[k], dict):
# if k not in ('_edit', '_view'):
# self.recSearch(d[k], path+[k], ans)
# else:
# # k == '_edit' or '_view'
# for ek in d[k]:
# if ek in self.typeMap:
# # ek is '_int' or similar
# type_ = self.typeMap[ek]
# for ekt in d[k][ek]:
# ans.append((ekt, d[k][ek][ekt], tuple(path+['_edit',ek,ekt]),
# type_, k != '_edit'))
# else:
# ans.append((ek, d[k][ek], tuple(path+['_edit',ek]), str, k != '_edit'))
#@+node:tbrown.20091103080354.1406: *3* getAttribs
def getAttribs(self, v=None):
"""Return a list of tuples describing editable uAs.
(class, name, value, path, type, readonly)
e.g.
(class, 'created', '2009-09-23', ('stickynotes','_edit','created'), str, False),
(class, 'cars', 2, ('inventory','_edit','_int','cars'), int, False)
Changes should be written back to
v.uA['stickynotes']['_edit']['created'] and
v.uA['inventory']['_edit']['_int']['cars'] respectively
"""
ans = []
if not v:
v = self.c.currentPosition().v
for getter, isOn in self.getsetters:
if not isOn:
continue
ans.extend(getter.getAttribs(v))
for ns in ans:
self.attrPaths.add((ns[0], ns[1], ns[3])) # class, name, path
return ans
#@+node:tbrown.20091029101116.1413: *3* addAttrib
def addAttrib(self, attrib):
attrib[0].setAttrib(self.c.currentPosition().v, attrib[2], '')
#@+node:tbrown.20091029101116.1414: *3* delAttrib
def delAttrib(self, attrib):
attrib[0].delAttrib(self.c.currentPosition().v, attrib[2])
#@+node:tbrown.20091029101116.1424: *3* scanAttribs
def scanAttribs(self):
"""scan all of c for attrbutes"""
for v in self.c.all_unique_nodes():
self.getAttribs(v) # updates internal list of attribs
g.es("%d attributes found" % len(self.attrPaths))
#@+node:tbrown.20091011151836.14788: *3* createAttrib
def createAttrib(self, event=None, readonly=False):
ans = []
for getter, isOn in self.getsetters:
if not isOn:
continue
if getter.helpCreate() is True:
ans.append(getter)
else:
g.es("For '%s' attributes:\n %s" % (getter.name(), getter.helpCreate()))
if len(ans) > 1:
            g.error('Error: more than one attribute type (%s) active' %
', '.join([i.name() for i in ans]))
elif ans:
ans[0].createAttrib(self.c.currentPosition().v, gui_parent=self.parent)
self.updateEditorInt()
self.c.currentPosition().v.setDirty()
self.c.redraw()
#@+node:tbrown.20091028131637.1358: *3* manageAttrib
def manageAttrib(self):
attribs = [(i[0], i[1], i[3]) for i in self.getAttribs()]
dat = []
for attr in self.attrPaths:
txt = attr[0].longDescrip(attr[2])
active = attr in attribs
dat.append([txt, active, attr])
if not dat:
g.es('No attributes seen (yet)')
return
dat.sort(key=lambda x: x[0])
res = ListDialog(self.parent, "Enter attribute path",
"Enter path to attribute (space separated words)", dat)
res.exec_()
if res.result() == DialogCode.Rejected:
return
# check for deletions
for i in dat:
if i[2] in attribs and not i[1]:
res = QtWidgets.QMessageBox(QtWidgets.QMessageBox.Question,
"Really delete attributes?", "Really delete attributes?",
QtWidgets.QMessageBox.Ok | QtWidgets.QMessageBox.Cancel, self.parent)
if res.exec_() == QtWidgets.QMessageBox.Cancel:
return
break
# apply changes
for i in dat:
if i[2] in attribs and not i[1]:
self.delAttrib(i[2])
elif i[2] not in attribs and i[1]:
self.addAttrib(i[2])
self.updateEditorInt()
self.c.redraw()
#@+node:tbrown.20091103080354.1415: *3* manageModes
def manageModes(self):
modes = [[i[0].name(), i[1]] for i in self.getsetters]
res = ListDialog(self.parent, "Enter attribute path",
"Enter path to attribute (space separated words)",
modes)
res.exec_()
if res.result() == DialogCode.Rejected:
return
for n, i in enumerate(modes):
self.getsetters[n][1] = i[1]
self.updateEditorInt()
#@-others
#@+node:tbrown.20091029101116.1415: ** cmd_Modes (attrib_edit_Controller)
@g.command('attrib-edit-modes')
def cmd_Modes(event):
c = event.get('c')
c.attribEditor.manageModes()
#@+node:tbrown.20091103080354.1413: ** cmd_Manage (attrib_edit_Controller)
@g.command('attrib-edit-manage')
def cmd_Manage(event):
c = event.get('c')
c.attribEditor.manageAttrib()
#@+node:tbrown.20091029101116.1419: ** cmd_Create (attrib_edit_Controller)
@g.command('attrib-edit-create')
def cmd_Create(event):
c = event.get('c')
c.attribEditor.createAttrib()
#@+node:tbrown.20091029101116.1421: ** cmd_CreateReadonly (attrib_edit_Controller)
def Xcmd_CreateReadonly(c):
c.attribEditor.createAttrib(readonly=True)
#@+node:tbrown.20091029101116.1426: ** cmd_Scan (attrib_edit_Controller)
@g.command('attrib-edit-scan')
def cmd_Scan(event):
c = event.get('c')
c.attribEditor.scanAttribs()
#@-others
#@@language python
#@@tabwidth -4
#@-leo
|
py | 1a4efb7a5fc37dbd8b6d2e13bbabca7c817aa116 | # -*- coding: utf-8 -*-
"""
Created on Sun Jun 24 15:08:07 2018
@author: joshu
"""
import math
# Because you used the import you access methods by referencing the module
print("ceil(4.4) = ", math.ceil(4.4))
print("floor(4.4) = ", math.floor(4.4))
print("fabs(-4.4) = ", math.fabs(-4.4))
# Factorial = 1 * 2 * 3 * 4
print("factorial(4) = ", math.factorial(4))
# Return remainder of division
print("fmod(5,4) = ", math.fmod(5,4))
# Receive a float and return an int
print("trunc(4.2) = ", math.trunc(4.2))
# Returns x^y
print("pow(2,2) = ", math.pow(2,2))
# Return the square root
print("sqrt(4) = ", math.sqrt(4))
# Special Values
print("math.e = ", math.e)
print("math.pi = ", math.pi)
# Return e^x
print("exp(4) = ", math.factorial(4))
# Return the natural log; e * e * e ~= 20, so log(20) tells
# you that e^3 ~= 20
print("log(20) = ", math.log(20))
# You can define the base and 10^3 = 1000
print("log(1000,10) = ", math.log(1000,10))
# You can also use base 10 like this
print("log10(1000) = ", math.log10(1000))
# We have the following trig functions
# sin, cos, tan, asin, acos, atan, atan2, asinh, acosh,
# atanh, sinh, cosh, tanh
# Convert radians to degrees and vice versa
print("degrees(1.5708) = ", math.degrees(1.5708))
print("radians(90) = ", math.radians(90)) |
py | 1a4efbee70ca3e5aee929f3363d4ac393b947a6e | """
pommesdispatch
---------------
A bottom-up fundamental power market model for the German electricity sector
"""
__version__ = "0.1.0"
__author__ = (
"Johannes Kochems, Yannick Werner, "
"Johannes Giehl, Benjamin Grosse"
)
__credits__ = (
"Sophie Westphal, Flora von Mikulicz-Radecki, Carla Spiller, "
"Fabian Büllesbach, Timona Ghosh, Paul Verwiebe, "
"Leticia Encinas Rosa, Joachim Müller-Kirchenbauer"
)
|
py | 1a4efd1a32e9606d09bed738aaf1dc2c56e6e5c4 | #coding: utf-8
""" git branch [-r | -a] [--abbrev=n | --no-abbrev\n
git branch [--set-upstream | --track | --no-track] [-l][-f] <branchname> <startpoint>
git branch (-m | -M) [<oldbranch>] <newbranch>
git branch (-d | -D) [-r] <branchname>…
git branch --edit-description [<branchname>]"""
import sys,os
import dulwich
from dulwich import porcelain
from dulwich.walk import Walker
from gittle import Gittle
import argparse
from git.gitutils import _get_repo, find_revision_sha, is_ancestor, merge_base, can_ff, any_one, count_commits_between, get_remote_tracking_branch, GitError
def branch(args):
repo=_get_repo()
parser = argparse.ArgumentParser(prog='git branch'
, description="List, create, or delete branches")
#list
list_grp=parser.add_mutually_exclusive_group(required= False)
    list_grp.add_argument('-r','--remotes',action='store_true',help='list or delete remote-tracking branches')
list_grp.add_argument('-a','--all',action='store_true',help='list both remote and local branches')
# move type commands
move_type=parser.add_mutually_exclusive_group(required=False)
move_type.add_argument('-m','--move', nargs='+', metavar=('[oldbranch]','newbranch'), help='move/rename oldbranch or HEAD')
move_type.add_argument('-M',nargs='+',metavar=('[oldbranch]','newbranch'),help='move/rename even if branch already exists')
# delete type commands
delete_flags=parser.add_mutually_exclusive_group(required=False)
    delete_flags.add_argument('-d','--delete', nargs=1, metavar=('branchname'), help='delete branchname; TODO: branch must be fully merged with upstream')
delete_flags.add_argument('-D',nargs=1,metavar=('branchname'),help='Delete a branch irrespective of its merged status.')
# misc flags
parser.add_argument('-v','--verbose',action='count', help='When in list mode, show sha1 and commit subject line for each head, along with relationship to upstream branch (if any). If given twice, print the name of the upstream branch, as well (see also git remote show <remote>).')
parser.add_argument('-f','--force',action='store_true', help='Reset <branchname> to <startpoint> if <branchname> exists already. Without -f git branch refuses to change an existing branch.')
abbrevgrp=parser.add_mutually_exclusive_group()
abbrevgrp.add_argument('--abbrev',action='store',nargs='?',help='set number of characters to display in sha',type=int,default=7)
abbrevgrp.add_argument('--no-abbrev',action='store_const',help='do not abbreviate sha ',const=40,dest='abbrev')
track_flags=parser.add_mutually_exclusive_group(required=False )
track_flags.add_argument('--set-upstream',action='store', nargs=2, metavar=('branchname','upstream') ,help='set branchname to track upstream')
    track_flags.add_argument('--no-track', nargs='+',metavar=('branchname','startpoint'),help='set existing branch to not track, or create new branch that does not track')
# add_branch
parser.add_argument('branchname',nargs='?')
parser.add_argument('startpoint',nargs='?')
parser.add_argument('--edit_description',action='store',nargs='?',metavar='branchname', const=repo.active_branch)
result = parser.parse_args(args)
# combine args
edit_description=result.edit_description
delete_branchname=result.delete or result.D
move_branchname = result.move or result.M
no_track=result.no_track
add_branchname = (result.branchname, result.startpoint or repo.active_branch)
set_upstream= result.set_upstream
force = result.force or result.D or result.M
mutual_exclusive_list=( delete_branchname,
move_branchname,
edit_description,
result.branchname,
set_upstream,
no_track)
list_flag=not any_one(mutual_exclusive_list)
if not any_one((list_flag,)+ mutual_exclusive_list):
        raise GitError('too many options specified.\n' + parser.format_help())
if list_flag:
branch_list(result)
elif delete_branchname:
delete_branch(delete_branchname[0], force , result.remotes, result.verbose)
elif move_branchname:
move_branch(move_branchname, force, result.verbose)
elif add_branchname[0]:
create_branch(add_branchname[0],add_branchname[1],force,False )
elif edit_description:
edit_branch_description(edit_description)
elif set_upstream:
add_tracking(set_upstream[0], *( ['origin']+set_upstream[1].split('/'))[-2:])
print set_upstream[0], format_tracking_branch_desc(repo,set_upstream[0])
elif no_track:
if len(no_track)==1:
remove_tracking(no_track[0])
else:
create_branch(no_track[0],no_track[1],force,True)
#print result
def format_tracking_branch_desc(repo,branchname):
try:
remote=get_remote_tracking_branch(repo,branchname)
mysha=repo.branches[branchname]
theirsha=repo.remote_branches[remote]
ahead,behind=count_commits_between(repo,mysha, theirsha)
return '+{}/-{} relative to {} ({})'.format(ahead,behind,remote,theirsha)
except KeyError:
return ''
def edit_branch_description(branchname, description=None):
description = description or raw_input('enter description:')
config = _get_repo().repo.get_config()
    if branchname not in _get_repo().branches:
        raise GitError('{} is not an existing branch'.format(branchname))
config.set(('branch',branchname),'description',description)
config.write_to_path()
def branch_list(result):
# TODO: tracking branches
N=result.abbrev
repo = _get_repo()
if not result.remotes:
for key,value in repo.branches.iteritems():
dispval=value[0:N] #todo, --abbrev=n
commitmsg=(repo[value].message if result.verbose else '').strip()
tracking=get_remote_tracking_branch(repo,key)
trackmsg=''
diffmsg=trackingsha=''
if tracking:
trackingsha=repo.remote_branches[tracking]
ahead,behind= count_commits_between(repo,value,trackingsha)
diffmsg='+{}/-{} compare to'.format(ahead,behind) if result.verbose else ''
trackmsg='[{} {} {}]'.format(diffmsg,tracking,trackingsha[0:N])
print ('* ' if repo.active_branch == key else '') + key, dispval, trackmsg, commitmsg
if result.remotes or result.all:
for key, value in repo.remote_branches.iteritems():
dispval=value[0:N] #todo, --abbrev=n
commitmsg=(repo[value].message if result.verbose else '').strip()
print ('* ' if repo.active_branch == key else '') + key, dispval, commitmsg
def delete_branch(delete_branchname,force=False,remote=None, verbose=0):
'''delete a branch.
if remote=True, then look in refs/remotes, otherwise check refs/heads
for local, check if it has a remote tracking branch, and only allow delete if upstream has merged
'''
print 'delete',delete_branchname,force,remote
repo=_get_repo()
if remote:
qualified_branch=repo._format_ref_remote(delete_branchname)
else:
qualified_branch=repo._format_ref_branch(delete_branchname)
if delete_branchname == repo.active_branch:
        raise GitError('Cannot delete active branch.')
remote_tracking_branch=get_remote_tracking_branch(repo,delete_branchname)
if remote_tracking_branch and not force:
#see if local is ahead of remote
commits_ahead=count_commits_between(repo,
repo.refs[qualified_branch],
repo.remote_branches[remote_tracking_branch]
)[0]
if commits_ahead:
raise GitError('{0} is ahead of {1} by {2} commits.\nuse git branch -D\n'.format(delete_branchname,
remote_tracking_branch,
commits_ahead))
print 'removing {} (was {})\n'.format(delete_branchname,repo.refs[qualified_branch])
del repo.repo.refs[qualified_branch]
if not remote:
remove_tracking(delete_branchname)
#todo reflog
def move_branch(movebranch,force,verbose):
'''move oldbranch (or active_branch) to newbranch. update config if needed'''
repo=_get_repo()
oldbranch,newbranch=([repo.active_branch]+movebranch)[-2:]
if oldbranch not in repo.branches:
raise GitError('{} does not exist in branches'.format(oldbranch))
if newbranch in repo.branches and not force:
raise GitError('{} already exists. use -M to force overwriting'.format(newbranch))
if newbranch != oldbranch:
print 'Renaming {} ({}) to {}\n'.format(oldbranch,repo.branches[oldbranch],newbranch)
repo.add_ref(repo._format_ref_branch(newbranch),repo._format_ref_branch(oldbranch))
del repo.repo.refs[repo._format_ref_branch(oldbranch)]
#todo: reflog
if oldbranch == repo.active_branch:
repo.active_branch=newbranch
def remove_tracking(branchname):
'''remove branch entry from config'''
# Get repo's config
config = _get_repo().repo.get_config()
try:
del config[('branch', branchname)]['remote']
del config[('branch', branchname)]['merge']
if not config[('branch', branchname)]:
del config[('branch', branchname)]
except KeyError:
pass
# Write to disk
config.write_to_path()
def add_tracking(branchname, remote, remotebranch):
# Get repo's config
config = _get_repo().repo.get_config()
# Add new entries for remote
config.set(('branch', branchname), 'remote', remote)
config.set(('branch', branchname), 'merge', 'refs/heads/'+remotebranch)
# Write to disk
config.write_to_path()
def create_branch(new_branch, base_rev, force=False ,no_track=False ):
"""Try creating a new branch which tracks the given remote
if such a branch does not exist then branch off a local branch
"""
repo=_get_repo()
# Already exists
if new_branch in repo.branches:
if not force:
raise GitError("branch %s already exists\n use --force to overwrite anyway" % new_branch)
# fork with new sha
new_ref = repo._format_ref_branch(new_branch)
base_sha=find_revision_sha(repo,base_rev)
repo.repo.refs[new_ref] = base_sha
#handle tracking, only if this was a remote
tracking,remote_branch =( ['origin']+base_rev.split('/'))[-2:] #branch-> origin/branch. remote/branch stays as is
qualified_remote_branch=os.path.sep.join([tracking,remote_branch])
if qualified_remote_branch in repo.remote_branches and not base_rev in repo.branches:
if not no_track:
add_tracking(new_branch,tracking,remote_branch)
else:
remove_tracking(new_branch)
#todo reflog
return new_ref
def test():
import os
os.chdir('../..')
def run(cmd):
print 'branch ', cmd
branch(cmd.split())
print ''
#run('-d test')
run('')
run('-f test origin/master')
run('')
print 'delete test: should delete'
run('-d test')
print 'set to remote'
run('test origin/master')
run('-v')
try:
run('test dev')
except GitError:
pass
else:
print 'did not error!'
run('-f test dev')
run('-v')
run('-m test test2')
if __name__=='__main__':
branch(sys.argv[1:]) |
py | 1a4efd62b8fabb7d8074a58859fb780336076565 | """
This file offers the methods to automatically retrieve the graph Listeria monocytogenes Scott.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def ListeriaMonocytogenesScott(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Listeria monocytogenes Scott graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.0
- homology.v11.5
- physical.links.v11.0
- physical.links.v11.5
- links.v11.0
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of the Listeria monocytogenes Scott graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="ListeriaMonocytogenesScott",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
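
# Illustrative usage (hypothetical; assumes network access or a populated
# cache under graphs/string so the STRING files can be retrieved):
#
#   graph = ListeriaMonocytogenesScott(directed=False, load_nodes=True)
#   # graph is an ensmallen Graph instance built from the retrieved edge list.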
|
bzl | 1a4efda44231fbe3f6973d17ee854fa10e5412a4 | # This file is automatically updated by emsdk/scripts/update_bazel_workspace.sh
# DO NOT MODIFY
EMSCRIPTEN_TAGS = {
"3.1.9": struct(
hash = "edabe25af34554d19c046078f853999b074259ca",
sha_linux = "89fa75c981e47ad19942b8236d2604b2666dfd516a08626aaa1bfb0d657c87bf",
sha_mac = "6c7f59dd84d1484f1dfa041d71cc79fc97db8d15834b6220e5868bd9bd373a24",
sha_mac_arm64 = "13a258de0daaa3c09a53e21a67414cbf5fa5706f955767fe791a059ed5eb90bf",
sha_win = "0857b03919b948558f9a57d15cf2b220852cc070359c386da0e6e4831c7ac5e0",
),
"3.1.8": struct(
hash = "8c9e0a76ebed2c5e88a718d43e8b62452def3771",
sha_linux = "6b170777eb523e62972ad458e533b1853cd0c4e02f6f2cf4cd68e109499ccd9b",
sha_mac = "ede01fe160c3b8443f53f94dbad530e0e7e8197a1b874c7bb9038b187279080c",
sha_mac_arm64 = "9ecc8678f948875e7f64defeababc0320f98e103547f395c390c01d76e5a1d64",
sha_win = "039d27d4ae43b50d0858dbc4dcf412f572351e98e1056d7fdcdf2aab1740557e",
),
"3.1.7": struct(
hash = "d0e637fe48197587d981f79e8114757731d0c2a9",
sha_linux = "d941738a3c755d6d530bab66d38325515b9dbaa588d2db2b8a63b2a8a1961e52",
sha_mac = "597aacdb25d422094427014d3a97e8b91ec80df2255a66e0986414bf71aaf37d",
sha_mac_arm64 = "a0b2db0269c55e854d1007a59f95b8e5f14d32309e76f985ea9afe481b2bd6e6",
sha_win = "cb44339db27b694862efb37539d41eaff7253c93c0882cf7d9aaf4afeaa82912",
),
"3.1.6": struct(
hash = "8791c3e936141cbc2dd72d76290ea9b2726d39f3",
sha_linux = "f43dfe707dff18fa7a08dbfe2fa3f8d46afb65ccba9bbe554465d83d5d80e388",
sha_mac = "13a01080ff042560b9a9b1b2c9fc5f8c154710bc41db8bbd907a9e53c286afd0",
sha_mac_arm64 = "7ae97e85593b037c345b539e7f8b8952b82c001be982219060c83f0834bb6827",
sha_win = "e7005c0a5439e532cb64f34ba90405792288a1ed8845cdafcedd3de5af6fd3f2",
),
"3.1.5": struct(
hash = "2dee36c7163f7394ab9341854ef5281501dd97d0",
sha_linux = "6641703b7da1805aa5a8488d231ae7fedfe27f1a5a33e7d05a2ee5902ab84180",
sha_mac = "9dba57f09702a7eed53f3f71cdd8a4ed1202ca5a5f4449249c2d98a285b26f75",
sha_mac_arm64 = "0093b4d47c9eb9c8bab5b3048c68855255b5e5a8bfd78f4183424009489327e6",
sha_win = "849edc42b494f670df4763dbc8ebbb5464ac28787482668c3f6e27588a77cb3a",
),
"3.1.4": struct(
hash = "39e60dda6945cfcd6487725bdb1361ae7975173f",
sha_linux = "4a57c0d60eeb4e021de61c8497f0b595a0a9db0235f1640a528de752409f4fcf",
sha_mac = "f28a9a4f42f67de1d5c4d8a288f29e5082bbf4fcb172e0c6e248695163372478",
sha_mac_arm64 = "be35043edad7a7022f7b174e8efc90e2db54ba4fd71288760bea4db082835f56",
sha_win = "d97ff247bdfc7e839610cbcd87d30a65018f964d183d5b852b6021d43c5d199a",
),
"3.1.3": struct(
hash = "2ddc66235392b37e5b33477fd86cbe01a14b8aa2",
sha_linux = "8b840819eb88f9178c11bad25859ce448a0559e485823a863a6add21380636ca",
sha_mac = "0cb3f9bfbcc744233eae9d20036155738409405eacf8a3d4f9beefc5919d809a",
sha_mac_arm64 = "ee2772f380419df17d154e00388a16bcddc78c7af035c16a2ee534d6ecf099aa",
sha_win = "c0549e1dbaa581ae66934c38beebd4250cd450cc2778e9a602cd9431bc81bc37",
),
"3.1.2": struct(
hash = "6626e25d6d866cf283147ca68d54ac9326fe399f",
sha_linux = "4fb53364a2ba1de8978445aa26b2204bfd215b41da5d7df04f231040b197010a",
sha_mac = "a8e347accb1ff402d96a128912ac8cda1731611c9f89095fee0ad39a6a18bbc3",
sha_mac_arm64 = "4374f5c852d0403b0a3b0e9dc8a3856a340e9d82ecf0f20aa8b36c6179d31fc8",
sha_win = "e96f6ab8252fefa42f461676311d4c4e2d96fdc2e876ece07d9d7a49ef31aef0",
),
"3.1.1": struct(
hash = "5ee64de9809592480da01372880ea11debd6c740",
sha_linux = "ba94c5ecabacbedc89665a742c37c4c132c739aea46aa66fd744cb72b260c870",
sha_mac = "8b5f8cec55af0e6816a08d8d1e8b873f96d0e0504fdd6e8deb2fc024957d1aa7",
sha_win = "6cbe976aff6155cf1c48707f0520b5aa6a7770860e9b1964bfca3e5923ce7225",
),
"3.1.0": struct(
hash = "562e3a0af169e6dea5e6dbecac2255d67c2c8b94",
sha_linux = "0714344e32e244e6d44d9ea75937633ab1338e417a232fb66d6dcd7d4b704e8c",
sha_mac = "f6c1cad729ed799e1df09eacf5aa80cce9861d69ec6d9581c17e4ba8d9b064ce",
sha_win = "756c41cbcab4ae6077cca30834d16151392b8c19ab186c13d42d7d05d6d727cc",
),
"3.0.1": struct(
hash = "91b7a67a486d2430e73423a38d950d8a550826ed",
sha_linux = "25fd430268596229c4ac38e188d7c2b31f75c2ec8172b1351d763e37c830c6af",
sha_mac = "52ec2204115b727cc4de38b5eeae147eead12b299b98e5a88653d12958cae4d4",
sha_win = "0e072736b471c9a07cdf534ba4da46b3b6545b63c8a6cbb0ef7d544251e15092",
),
"3.0.0": struct(
hash = "7fbe748230f2ce99abbf975d9ad997699efb3153",
sha_linux = "10646b64daea15354f14f89f7e79937f420b77f31bda7c4b174de2474835950f",
sha_mac = "ebb17bc91c6a72ca06d17337d27aa1a2be4c9af4c68644c221712123f663b8ab",
sha_win = "0d4f2ff5d88a8eef5ed769ee4ffc5d5574143911d2e0079325cdc5206c9e9bb1",
),
"2.0.34": struct(
hash = "d8fc1b92dbc0ce8d740a7adb937c5137ba4755e0",
sha_linux = "a6304e3a52c172eb178c6f9817d74aa3ee411e97ef00bcae0884377799c49954",
sha_mac = "975ae11000100362baf19d161fec04d82e1f7c9fb7d43c43864ddd65a47f1780",
sha_win = "8167a44bb895a0fdc153836bed91bf387be57f2dc1b8f103bf70e68923b61d39",
),
"2.0.33": struct(
hash = "cef8850d57278271766fb2163eebcb07354018e7",
sha_linux = "958a0f4b1533e877c1a5ed3c13cb8baabc80e791d45858c2c94ac62325ada953",
sha_mac = "8ecb248653d44c3748e23c089cb9f0e3d4eee7cda13fdec27ec0113b896e34c4",
sha_mac_arm64 = "1ec6f3d7afa5e10f3af996e26d9c3a66f02ae49e48e512a4b5d6b7165c61290f",
sha_win = "6b6b2831f8b338488f787b4a8c34700277bf3988358dbb54426f017155603ac9",
),
"2.0.32": struct(
hash = "74646397e3c5010824ad60d1de86c6bcbe334dff",
sha_linux = "236b3954e71d3bb30d347c655b9f47f2a091aa2e61046e1912c8da90152f4ca1",
sha_mac = "6a03267574534948e3b041e5d3e31bd757751ef17912eb6e90b96a47da03afb6",
sha_win = "2f8fbf0db097d67d0c364946faceec27c569c5c2d7b22068eef8db55645aba36",
),
"2.0.31": struct(
hash = "597724ca3f6cd6e84bea73f1f519a3953b5c273d",
sha_linux = "ef70c7733aa0df41cb4c812f5a89bf6b2ed13ca8aa252872396c0be271156d9e",
sha_mac = "77e57c3e98758488ef676f8f58a85faa0bd65a1d326a91771ad83d7cb0e373ca",
sha_win = "541605b740afccd08a39f5ae815978f699f350d621a1b2dfba0763970b56aee4",
),
"2.0.30": struct(
hash = "c69458f1bbf3ef5b8da4e934de210659cc9bca04",
sha_linux = "ee1c8270096a728966ae38af548047d1f64c18318e06ba75952e657136f02537",
sha_mac = "574a5819308eba6c8be6a780e26dff415a0e7178d3f44162dd8dca87eb40d4a7",
sha_win = "242d244f4f5f5af08e6e6ac9c143aebf1b7bb2a23fd2992350731e59acfee07c",
),
"2.0.29": struct(
hash = "c2369dc425725fff86ba90a9007a4603ddf7941b",
sha_linux = "7df4a8f3e25820becadfa7f1fe0d78e764102ec3ee50c474ca1634ed90d48890",
sha_mac = "d998521ba95882a27792f0113ea2c972fbb891c240649f4c994f0260c0e1a213",
sha_win = "c64aa3f2af6503f6711b2322986a45784e00d7c7fe13ec3f5c4f740472d065a0",
),
"2.0.28": struct(
hash = "866055ea639d64dfedc625d28ec981e47ce37168",
sha_linux = "7dca7704eb14e367bb67e9abc9eaf59e75f59b74e32422e04556de10897a9a86",
sha_mac = "370f76493e3805e2538290b698a381f04b6d78a77771e48fc0099cf89dad985f",
sha_win = "e913c50ea5f196d36971f7cf5b1cf9a9ca27ce0818aba56be3a66e31e95c0e5b",
),
"2.0.27": struct(
hash = "1ac46e3b84955231ab4a4f4cbe0c7ac28c86b8cc",
sha_linux = "3e124e278de168cf22e03b93b2f14a65a86777e428cdaab7e5e1c2289eb41605",
sha_mac = "388262b9e1042ef9a3a1945d5a23dcd634c8042a225e8fdf80bcc2c1cb7e05cc",
sha_win = "762276a332432e717afb988310d21ae10e36facc1e05bfd77042a364fb43cc3c",
),
"2.0.26": struct(
hash = "823d37b15d1ab61bc9ac0665ceef6951d3703842",
sha_linux = "996e16d368a99dd4dd12126acbcb8bea9a607b5257cc7b747c4afc2f036fd8cf",
sha_mac = "8b2d7e84cc449531e88034beb31da89a0b61ccaeaa1584ffb6da7842c6348fdc",
sha_win = "095e772764d7f8c0f8228bda4b8500ae43aac2303567da5cdc9f8623f70a5743",
),
"2.0.25": struct(
hash = "f6f001b08fbb67935379cf13d17fd9bfdbaff791",
sha_linux = "06d8e2f3d4f4b35a57de9c15e62a559c941cfba1dd7ec02353d815904d912c3b",
sha_mac = "6541bf3a648aae7df84de424ff392dd1513ab5450203c84f72a6a03e321a301b",
sha_win = "267fbfa809ec0eb911c1962b1b9768675cb82228e694a5f9ef570232ee71db76",
),
"2.0.24": struct(
hash = "6ab7fc5622a67e6111d07c4ba61c8d3c8fc33ed2",
sha_linux = "e5daa0e87f3afd2197e7975297cb0cd4c245edccb964ca5f1f32ee7d985bf440",
sha_mac = "e4b7f2a7b71d6ac4610ee7b14743570e0dfba3668dc6b4f984cbe7a135888527",
sha_win = "db2aad422a3ca2295be6101b0151eeee55dcea29ba1f31b4594c02ba46591cbe",
),
"2.0.23": struct(
hash = "77b065ace39e6ab21446e13f92897f956c80476a",
sha_linux = "7713a9a5572d839aea9eaa84a7c4779d11c6c8818ee64a0f443b62081fae6d47",
sha_mac = "b793087462d581e25c8c267fca9d30519619e3272480862a56cc316a32c7afab",
sha_win = "b8885cbb41a39e4734861462e05ee58c7ff7562016a842bcee2603f229940e8b",
),
"2.0.22": struct(
hash = "6465a9acb820207acf7da44661a7de52d0a1ae3c",
sha_linux = "c079781124e763c53c9fc73781fcee40296ce3314276836bc694f07bd331a859",
sha_mac = "ab95574dfc685b0300e37bea36aba413045bbfa2ab06b93eceb881670489eec1",
sha_win = "ba142e7e380596cba763e3a414de6511bcb86de48e4b48cf393b1ea449a24aaa",
),
"2.0.21": struct(
hash = "72f4ec97fbc7ec16c15ae68a75b0a257b2835160",
sha_linux = "741264f33f96ba4b785ed0b133861ebdfefbaefab76ddcfe7bde6522829d6f70",
sha_mac = "b07c0d65ee7e2799170c6f3b2aacebfe070c2e4975088bcd1b3a4140fecd8418",
sha_win = "dc3cbf47aa4be52a92526f1790a013734ecbd407f7f36286ed0283c96355999a",
),
"2.0.20": struct(
hash = "e0c15cd14170f407a9eb27fcbad22931dc67feb7",
sha_linux = "a196504fd1095836ca3961208338ff9e292be7729ea529bc19800aa7c966d34a",
sha_mac = "6cdbf17ed61486b38ea79d3f31d74483e7388d1e7468518dccba3f24e0ddd4c4",
sha_win = "4d22a32c219dbe18c55b635d014b9eaf7da60536171b7af37d9a8099fd33794b",
),
"2.0.19": struct(
hash = "9b9ff2dabfb4a7fbacbc004c0bead12a60f9d05c",
sha_linux = "bd7c2a38ac88d219a1ab5003ddbf8fdc66a6ba55bc69f99077346edf2753b4ea",
sha_mac = "6cc44029c9052855a55938eb6496b5659da4b1ce9cb34502b740af5993a94f93",
sha_win = "a1fa8b1c387b9307f9b87c43dc83c0ff1bc04b9f29fbe4f39aff2dd946ca4b70",
),
"2.0.18": struct(
hash = "c2ac7520fad29a7937ed60ab6a95b08eb374c7ba",
sha_linux = "e9f777de592f606b10104b2efe5179a7a8f44e3a9dffa1e3aaf73e05eb8893d7",
sha_mac = "86b1dd62e424e3788bf132292a694a25ca9b0875d06f50d0f5d424593697452c",
sha_win = "49ce07bda6be070251db44a08fcc05cae21ffdbd7522423a0c79bde635e87e28",
),
"2.0.17": struct(
hash = "f5c45e60392b82f603e3a8039c62db294fab02d2",
sha_linux = "b40a4874057e4cace600f8ee9787dcbe236e3dc5b2fff5c2ecb0e867e426f99c",
sha_mac = "081f61abf7d5ac0ec31aaffc5550013d4093ea4ea39520b7a32b7448d2a6ee70",
sha_win = "45d06e597e6a1185a76200bd0481495e7298800a4805045d9cdbcce6311c91b2",
),
"2.0.16": struct(
hash = "80d9674f2fafa6b9346d735c42d5c52b8cc8aa8e",
sha_linux = "e527638b224d9a30dc7e5fa4b9bd2eb2ab76ad306739ba8cacf5a5e333933a2a",
sha_mac = "061020eb0e3ee0611dc5a0008ccc7778168a4f838d49ca41c0aad8c52c1a01c9",
sha_win = "99364ed0388f928e0594f790662bf3a30c2894b0eff81797e1b64f62128561cb",
),
"2.0.15": struct(
hash = "89202930a98fe7f9ed59b574469a9471b0bda7dd",
sha_linux = "7ff49fc63adf29970f6e7af1df445d7f554bdbbb2606db1cb5d3567ce69df1db",
sha_mac = "e35cced1514ad0da40584f8dd6f76aabf847ce0fa82c6dc8dd9442fb74ed6d0d",
sha_win = "31d5f8107c87833cea57edc57613bba4b36b16152772f744c5ad204594b4e666",
),
"2.0.14": struct(
hash = "fc5562126762ab26c4757147a3b4c24e85a7289e",
sha_linux = "e466cd47ddd4bf0acd645412fdf08eda6d232484e48e5a2643e08062a7a4cf56",
sha_mac = "1c554c08459b7025638ca4eddba0d35babe8c26b202a70a74e9442d577896211",
sha_win = "428bc6094671937af96f26d803871fc5cd83d4d2b1c1df45fa6873a9bc5cac51",
),
"2.0.13": struct(
hash = "ce0e4a4d1cab395ee5082a60ebb4f3891a94b256",
sha_linux = "8986ed886e111c661099c5147126b8a379a4040aab6a1f572fe01f0f9b99a343",
sha_mac = "88c91332c8c76fed14ebf0edc9a08f586012f54f04ad61e5b1b6d02bf96bdeab",
sha_win = "9fb3b945b7bd56e34d17ec04de4cce475f26c49d161aee9d9c0b8b1434591f88",
),
}
|
py | 1a4efdbe24aadb8b2ab65b93f2069c4510a44a49 | import pandas as pd
class Metric():
"""
Create a dataframe from items, which is the data
fetched by Perceval
:param items: A list of dictionaries.
Each element is a Perceval dictionary, obtained from a JSON
file or from Perceval directly.
"""
def __init__(self, items):
flat_items = self._flatten_data(items)
self.raw_df = pd.DataFrame(flat_items)
def _flatten_data(self, items):
raise NotImplementedError
|
py | 1a4efe6cd18d249bb564f8ac9003830ae53913c8 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Compute (upsampled) Nifti label image from bundle and centroid.
Each voxel will have the label of its nearest centroid point.
"""
import argparse
import logging
import nibabel as nib
import numpy as np
from scilpy.io.streamlines import load_tractogram_with_reference
from scilpy.io.utils import (add_overwrite_arg,
add_reference_arg,
assert_inputs_exist,
assert_outputs_exist,
add_verbose_arg)
from scilpy.tractanalysis.streamlines_metrics import compute_tract_counts_map
from scilpy.tractanalysis.distance_to_centroid import min_dist_to_centroid
def _build_arg_parser():
p = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
p.add_argument('in_bundle',
help='Fiber bundle file.')
p.add_argument('in_centroid',
help='Centroid streamline corresponding to bundle.')
p.add_argument('output_map',
help='Nifti image with corresponding labels.')
p.add_argument('--upsample',
type=float, default=2,
help='Upsample reference grid by this factor. '
'[%(default)s]')
add_reference_arg(p)
add_overwrite_arg(p)
add_verbose_arg(p)
return p
def main():
parser = _build_arg_parser()
args = parser.parse_args()
assert_inputs_exist(parser,
[args.in_bundle, args.in_centroid],
optional=args.reference)
assert_outputs_exist(parser, args, args.output_map)
sft_bundle = load_tractogram_with_reference(parser, args, args.in_bundle)
sft_centroid = load_tractogram_with_reference(parser, args,
args.in_centroid)
if not len(sft_bundle.streamlines):
logging.error('Empty bundle file {}. '
'Skipping'.format(args.in_bundle))
raise ValueError
if not len(sft_centroid.streamlines):
logging.error('Centroid file {} should contain one streamline. '
'Skipping'.format(args.in_centroid))
raise ValueError
sft_bundle.to_vox()
bundle_streamlines_vox = sft_bundle.streamlines
bundle_streamlines_vox._data *= args.upsample
sft_centroid.to_vox()
centroid_streamlines_vox = sft_centroid.streamlines
centroid_streamlines_vox._data *= args.upsample
upsampled_shape = [s * args.upsample for s in sft_bundle.dimensions]
tdi_mask = compute_tract_counts_map(bundle_streamlines_vox,
upsampled_shape) > 0
tdi_mask_nzr = np.nonzero(tdi_mask)
tdi_mask_nzr_ind = np.transpose(tdi_mask_nzr)
min_dist_ind, _ = min_dist_to_centroid(tdi_mask_nzr_ind,
centroid_streamlines_vox[0])
# Save the (upscaled) labels mask
labels_mask = np.zeros(tdi_mask.shape)
labels_mask[tdi_mask_nzr] = min_dist_ind + 1 # 0 is background value
rescaled_affine = sft_bundle.affine
rescaled_affine[:3, :3] /= args.upsample
labels_img = nib.Nifti1Image(labels_mask, rescaled_affine)
upsampled_spacing = sft_bundle.voxel_sizes / args.upsample
labels_img.header.set_zooms(upsampled_spacing)
nib.save(labels_img, args.output_map)
if __name__ == '__main__':
main()
|
py | 1a4efe9a3eb186b01ee0b46646d168d4c783fedf | """
Import as:
import im.airflow.devops.dags.im_infra as imaddimin
"""
import os
import airflow
from airflow import DAG
from airflow.operators.bash import BashOperator
P1_AIRFLOW_WORKER_DB_LOADER_QUEUE = os.environ[
"P1_AIRFLOW_WORKER_DB_LOADER_QUEUE"
]
STAGE = os.environ["STAGE"]
SEND_EMAIL = STAGE not in ["LOCAL", "TEST"]
default_args = {
"owner": "airflow",
"start_date": airflow.utils.dates.days_ago(1),
"email": [],
"email_on_failure": SEND_EMAIL,
"email_on_retry": SEND_EMAIL,
}
dag = DAG(
"IM_INFRA",
default_args=default_args,
schedule_interval=None,
max_active_runs=1,
)
# Create EDGAR DB schema.
test = BashOperator(
task_id="test",
bash_command='bash -c "/app/im/devops/docker_build/entrypoints/entrypoint_worker.sh '
"im/app/transform/convert_s3_to_sql.py "
"--provider kibot "
"--symbol AAPL "
"--frequency T "
"--contract_type continuous "
"--asset_class stocks "
'--exchange NYSE"',
dag=dag,
queue=P1_AIRFLOW_WORKER_DB_LOADER_QUEUE,
)
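# Note (not in the original DAG): only a single task is declared, so no
# dependencies are set. Additional tasks would typically be chained with the
# bitshift operator, e.g. `test >> some_downstream_task` (hypothetical name).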
|
py | 1a4efead71cfd6b690d3a27fa7d981fe8c022a16 | import aspose.slides as slides
#ExStart:ExtractEmbeddedFileDataFromOLEObject
dataDir = "./examples/data/"
outDir = "./examples/out/"
with slides.Presentation(dataDir + "shapes_ole_objects.pptx") as pres:
objectnum = 0
for slide in pres.slides:
for shape in slide.shapes:
if type(shape) is slides.OleObjectFrame:
objectnum += 1
data = shape.embedded_data.embedded_file_data
extension = shape.embedded_data.embedded_file_extension
with open(outDir + "shapes_ole_objects{idx}_out{ex}".format(idx = str(objectnum), ex = extension), "wb") as fs:
fs.write(data)
#ExEnd:ExtractEmbeddedFileDataFromOLEObject |
py | 1a4efed412029b2687ffb076d45a0169ab71510c | from estring.emoji.emoji import dtbfn
from estring.emoji.emoji import dtb
from estring.emoji.emoji import d
from estring.emoji.emoji import emoji
from estring.emoji.emoji import _kl
from estring.emoji.emoji import _vl
import sys
_dkl = _kl
def tab_strict(dkl,kl,vl,cmd):
tabs = []
for i in range(len(kl)):
dk = dkl[i]
k = kl[i]
v = vl[i]
if(cmd==dk):
tabs.append((v,'value'))
elif(dk.startswith(cmd)):
tabs.append((k,'key'))
else:
pass
return(tabs)
def tab_loose(dkl,kl,vl,cmd):
tabs = []
for i in range(len(kl)):
dk = dkl[i]
k = kl[i]
v = vl[i]
if(cmd==dk):
tabs.append((v,'value'))
elif(cmd in dk):
tabs.append((k,'key'))
else:
pass
return(tabs)
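# Comment added for clarity (not in the original module): tab_strict() matches
# keys by prefix (dk.startswith(cmd)) while tab_loose() matches by substring
# (cmd in dk); in both, an exact key match yields the emoji value itself
# instead of the key name.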
def parr(tabs):
for t in tabs:
if(t[1]=='value'):
print(t[0])
else:
print("< "+t[0].strip(';')+" >")
cmd = ""
try:
cmd = sys.argv[1]
except:
cmd = ""
else:
pass
def loose():
tabs = tab_loose(_dkl,_kl,_vl,cmd)
parr(tabs)
def strict():
tabs = tab_strict(_dkl,_kl,_vl,cmd)
parr(tabs)
|
py | 1a4eff018f644849c4e108efa48e2c0be24ce3e6 | # Copyright 2018, Kay Hayen, mailto:[email protected]
#
# Python test originally created or extracted from other peoples work. The
# parts from me are licensed as below. It is at least Free Software where
# it's copied from other people. In these cases, that will normally be
# indicated.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pybench import Test
class TupleSlicing(Test):
version = 2.0
operations = 3 * 25 * 10 * 7
rounds = 500
def test(self):
r = range(25)
t = tuple(range(100))
for i in xrange(self.rounds):
for j in r:
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
m = t[50:]
m = t[:25]
m = t[50:55]
m = t[:-1]
m = t[1:]
m = t[-10:]
m = t[:10]
def calibrate(self):
r = range(25)
t = tuple(range(100))
for i in xrange(self.rounds):
for j in r:
pass
class SmallTuples(Test):
version = 2.0
operations = 5*(1 + 3 + 6 + 2)
rounds = 90000
def test(self):
for i in xrange(self.rounds):
t = (1,2,3,4,5,6)
a,b,c,d,e,f = t
a,b,c,d,e,f = t
a,b,c,d,e,f = t
a,b,c = t[:3]
a,b,c = t[:3]
a,b,c = t[:3]
a,b,c = t[:3]
a,b,c = t[:3]
a,b,c = t[:3]
l = list(t)
t = tuple(l)
t = (1,2,3,4,5,6)
a,b,c,d,e,f = t
a,b,c,d,e,f = t
a,b,c,d,e,f = t
a,b,c = t[:3]
a,b,c = t[:3]
a,b,c = t[:3]
a,b,c = t[:3]
a,b,c = t[:3]
a,b,c = t[:3]
l = list(t)
t = tuple(l)
t = (1,2,3,4,5,6)
a,b,c,d,e,f = t
a,b,c,d,e,f = t
a,b,c,d,e,f = t
a,b,c = t[:3]
a,b,c = t[:3]
a,b,c = t[:3]
a,b,c = t[:3]
a,b,c = t[:3]
a,b,c = t[:3]
l = list(t)
t = tuple(l)
t = (1,2,3,4,5,6)
a,b,c,d,e,f = t
a,b,c,d,e,f = t
a,b,c,d,e,f = t
a,b,c = t[:3]
a,b,c = t[:3]
a,b,c = t[:3]
a,b,c = t[:3]
a,b,c = t[:3]
a,b,c = t[:3]
l = list(t)
t = tuple(l)
t = (1,2,3,4,5,6)
a,b,c,d,e,f = t
a,b,c,d,e,f = t
a,b,c,d,e,f = t
a,b,c = t[:3]
a,b,c = t[:3]
a,b,c = t[:3]
a,b,c = t[:3]
a,b,c = t[:3]
a,b,c = t[:3]
l = list(t)
t = tuple(l)
def calibrate(self):
for i in xrange(self.rounds):
pass
|
py | 1a4eff04868ffc3f74593eaa6cf993de2739a701 | from rest_framework import serializers
from estoque.models import Item
class ItemSerializer(serializers.ModelSerializer):
class Meta:
model = Item
fields = ['id', 'nome', 'quantidade', 'valor'] |
py | 1a4f00572fe405355a70cd96b2099432f07487cb | from os import name
from django.urls import path
from . import views
from django.conf import settings
urlpatterns = [
path('',views.index, name='index'),
path('counter', views.counter, name='counter')
] |
py | 1a4f021d73a31dce7f922cc517fdcbcb2257ee23 | from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
from tensorflow.contrib.layers.python import layers as tf_layers
from rsa import *
from cka import *
import matplotlib.pyplot as plt
# heiner activation maximization filters early layers
# based on https://github.com/zonghua94/mnist/blob/master/mnist_cnn.py
def compute_accuracy(v_x, v_y):
global prediction
y_pre = sess.run(prediction, feed_dict={x: v_x})
correct_prediction = tf.equal(tf.argmax(y_pre, 1), tf.argmax(v_y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
result = sess.run(accuracy, feed_dict={x: v_x, y: v_y})
return result
def conv_block(inp, cweight, bweight, reuse, scope, activation=tf.nn.relu, max_pool_pad='VALID', residual=False):
""" Perform, conv, batch norm, nonlinearity, and max pool """
stride, no_stride = [1,2,2,1], [1,1,1,1]
conv_output = tf.nn.conv2d(inp, cweight, stride, 'SAME') + bweight
normed = tf_layers.batch_norm(conv_output, activation_fn=activation, reuse=reuse, scope=scope)
return normed
def reshape_elems_of_list(layers, shape = (10000, -1)):
reshaped_layers = []
for layer in layers:
layer = np.reshape(layer, shape)
reshaped_layers.append(layer)
return reshaped_layers
# load mnist data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])
keep_prob = tf.placeholder(tf.float32)
# reshape(data you want to reshape, [-1, reshape_height, reshape_width, image_channels]); channels=1 when the image is black and white, =3 when the image is RGB
x_image = tf.reshape(x, [-1, 28, 28, 1])
weights = {}
convolution = True
if convolution:
dtype = tf.float32
conv_initializer = tf.contrib.layers.xavier_initializer_conv2d(dtype=dtype)
weights['conv1'] = tf.get_variable('conv1', [3, 3, 1, 64], initializer=conv_initializer, dtype=dtype)
weights['b1'] = tf.Variable(tf.zeros([64]))
weights['conv2'] = tf.get_variable('conv2', [3, 3, 64, 64], initializer=conv_initializer, dtype=dtype)
weights['b2'] = tf.Variable(tf.zeros([64]))
weights['conv3'] = tf.get_variable('conv3', [3, 3, 64, 64], initializer=conv_initializer, dtype=dtype)
weights['b3'] = tf.Variable(tf.zeros([64]))
weights['conv4'] = tf.get_variable('conv4', [3, 3, 64, 64], initializer=conv_initializer, dtype=dtype)
weights['b4'] = tf.Variable(tf.zeros([64]))
weights['w5'] = tf.Variable(tf.random_normal([64, 10]), name='w5')
weights['b5'] = tf.Variable(tf.zeros([10]), name='b5')
tvars = tf.trainable_variables()
scope = ""
hidden1 = conv_block(x_image, weights['conv1'], weights['b1'], False, scope + '0')
hidden2 = conv_block(hidden1, weights['conv2'], weights['b2'], False, scope + '1')
hidden3 = conv_block(hidden2, weights['conv3'], weights['b3'], False, scope + '2')
hidden4 = conv_block(hidden3, weights['conv4'], weights['b4'], False, scope + '3')
hidden4 = tf.reduce_mean(hidden4, [1, 2])
out = tf.matmul(hidden4, weights['w5']) + weights['b5']
prediction = tf.nn.softmax(out)
tvars = tf.trainable_variables()
layer_names = ["Pooling layer 1", "Pooling layer 2", "Pooling layer 3", "Pooling layer 4", "Logits/Head"]
else:
weights = {}
dims = [200, 100, 50, 20]
weights['w1'] = tf.Variable(tf.truncated_normal([784, dims[0]], stddev=0.01))
weights['b1'] = tf.Variable(tf.zeros(dims[0]))
for i, dim in enumerate(dims):
if i == len(dims) -1:
break
weights['w'+str(i+2)] = tf.Variable(tf.truncated_normal([dims[i], dims[i+1]], stddev=0.01))
weights['b'+str(i+2)] = tf.Variable(tf.zeros(dims[i+1]))
weights['w5'] = tf.Variable(tf.random_normal([dims[-1], 10]), name='w5')
weights['b5'] = tf.Variable(tf.zeros([10]), name='b5')
x_image = tf.reshape(x_image, [-1, 784])
hidden1 = tf.nn.relu(tf_layers.batch_norm(tf.matmul(x_image, weights['w1']) + weights['b1']))
hidden2 = tf.nn.relu(tf_layers.batch_norm(tf.matmul(hidden1, weights['w2']) + weights['b2']))
hidden3 = tf.nn.relu(tf_layers.batch_norm(tf.matmul(hidden2, weights['w3']) + weights['b3']))
hidden4 = tf.nn.relu(tf_layers.batch_norm(tf.matmul(hidden3, weights['w4']) + weights['b4']))
out = tf.matmul(hidden4, weights['w5']) + weights['b5']
prediction = tf.nn.softmax(out)
layer_names = [f"Hidden Layer {i+1} FC {dim}" for i, dim in enumerate(dims)]
layer_names.append("Logits/Head")
# calculate the loss
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y * tf.log(prediction), reduction_indices=[1]))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)#, var_list=g_vars)
N = 100
test_images = mnist.test.images[:N]
test_labels = mnist.test.labels[:N]
for sim_measure in ["cka", "euclidean"]:
# init session
sess = tf.Session()
sess.run(tf.global_variables_initializer())
start = sess.run([hidden1, hidden2, hidden3, hidden4, out],
feed_dict={x: test_images, y: test_labels})
prev = start
similarities = []
similarities_prev = []
steps = []
all_representations = []
labels = []
colors = []
for i in range(200):
batch_x, batch_y = mnist.train.next_batch(100)
sess.run(train_step, feed_dict={x: batch_x, y: batch_y, keep_prob: 0.5})
if i % 5 == 0:
steps.append(i)
representations = sess.run([hidden1, hidden2, hidden3, hidden4, out],
feed_dict={x: test_images, y: test_labels})
labels = labels + [f"{i} ({j+1})" for j in range(5)]
colors = colors + list(range(5))
peter = representations[0].reshape((N,-1))
all_representations = all_representations + [r.reshape((N,-1)) for r in representations]
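            # Comment added for clarity (not in the original script): compare each
            # layer's current representation to its state at initialization (start)
            # and to the previous checkpoint (prev), using CKA similarity or an
            # RSA dissimilarity measure depending on sim_measure.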
if sim_measure == "cka":
similarities_of_step = [kernel_CKA(np.reshape(s, (N, -1)), np.reshape(r, (N, -1))) for s, r in zip(start, representations)]
similarities_of_step_prev = [kernel_CKA(np.reshape(s, (N, -1)), np.reshape(r, (N, -1))) for s, r in zip(prev, representations)]
else:
print(np.mean(start[0]), np.mean(representations[0]))
similarities_of_step = [rsa(np.array([np.reshape(s, (N, -1)), np.reshape(r, (N, -1))]), sim_measure) for
s, r in zip(start, representations)]
similarities_of_step_prev = [rsa(np.array([np.reshape(s, (N, -1)), np.reshape(r, (N, -1))]), sim_measure)
for s, r in zip(prev, representations)]
similarities.append(similarities_of_step)
similarities_prev.append(similarities_of_step_prev)
prev = representations.copy()
print(i, compute_accuracy(mnist.test.images, mnist.test.labels))
plot_rsa(all_representations, labels, colors)
similarities = np.array(similarities).transpose()
similarities_prev = np.array(similarities_prev).transpose()
fig = plt.figure(figsize=(8, 2.5))
if sim_measure == "cka":
plt.title(f"CKA similarity before and after training")
plt.ylabel("Similarity")
else:
plt.title(f"RSA ({sim_measure}) dissimilarity before and after training")
plt.ylabel("Dissimilarity")
plt.xlabel("Number of training steps")
#plt.yscale('symlog', linthreshy=0.015)
plt.ylim(-0.05, 1.05)
for i in range(len(similarities)):
plt.plot(steps, similarities[i], label=layer_names[i])
#plt.plot(range(len(similarities_prev[i])), similarities_prev[i], label=layer_names[i]+" to prev")
plt.legend(bbox_to_anchor=(1.04, 0.5), loc="center left", borderaxespad=0)
plt.show()
|
py | 1a4f02328abfcdf3b9338df1d99692421d39cb43 | # Generated by Django 2.1.3 on 2018-11-23 05:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('staf', '0004_auto_20181103_0056'),
]
operations = [
migrations.AlterField(
model_name='unit',
name='symbol',
field=models.CharField(max_length=255),
),
]
|
py | 1a4f03b7c2ff99660c2740898945614e2082254c | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import wrappers_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.aiplatform.v1.schema.predict.prediction",
manifest={"VideoActionRecognitionPredictionResult",},
)
class VideoActionRecognitionPredictionResult(proto.Message):
r"""Prediction output format for Video Action Recognition.
Attributes:
id (str):
The resource ID of the AnnotationSpec that
had been identified.
display_name (str):
The display name of the AnnotationSpec that
had been identified.
time_segment_start (google.protobuf.duration_pb2.Duration):
The beginning, inclusive, of the video's time
segment in which the AnnotationSpec has been
identified. Expressed as a number of seconds as
measured from the start of the video, with
fractions up to a microsecond precision, and
with "s" appended at the end.
time_segment_end (google.protobuf.duration_pb2.Duration):
The end, exclusive, of the video's time
segment in which the AnnotationSpec has been
identified. Expressed as a number of seconds as
measured from the start of the video, with
fractions up to a microsecond precision, and
with "s" appended at the end.
confidence (google.protobuf.wrappers_pb2.FloatValue):
            The Model's confidence in the correctness of this
            prediction; a higher value means higher confidence.
"""
id = proto.Field(proto.STRING, number=1,)
display_name = proto.Field(proto.STRING, number=2,)
time_segment_start = proto.Field(
proto.MESSAGE, number=4, message=duration_pb2.Duration,
)
time_segment_end = proto.Field(
proto.MESSAGE, number=5, message=duration_pb2.Duration,
)
confidence = proto.Field(proto.MESSAGE, number=6, message=wrappers_pb2.FloatValue,)
__all__ = tuple(sorted(__protobuf__.manifest))
|
py | 1a4f03eea2ab6afa95c4390b96dc06051614e307 | """
mss.tutorials.tutorial_waypoints
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This Python script generates an automated demonstration of how to use and interact with waypoints
when activating/creating a flight track.
This file is part of mss.
:copyright: Copyright 2021 Hrithik Kumar Verma
:copyright: Copyright 2021-2022 by the mss team, see AUTHORS.
:license: APACHE-2.0, see LICENSE for details.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pyautogui as pag
import multiprocessing
import sys
import datetime
from sys import platform
from pyscreeze import ImageNotFoundException
from tutorials import screenrecorder as sr
from mslib.msui import mss_pyui
def initial_ops():
"""
Executes the initial operations such as closing all opened windows and showing the desktop.
"""
pag.sleep(5)
if platform == "linux" or platform == "linux2":
pag.hotkey('winleft', 'd')
print("\n INFO : Automation is running on Linux system..\n")
elif platform == "darwin":
pag.hotkey('option', 'command', 'm')
print("\n INFO : Automation is running on Mac OS..\n")
elif platform == "win32":
pag.hotkey('win', 'd')
print("\n INFO : Automation is running on Windows OS..\n")
else:
pag.alert(text="Sorry, no support on this platform!", title="Platform Exception", button='OK')
def call_recorder():
"""
Calls the screen recorder class to start the recording of the automation.
"""
sr.main()
def call_mss():
"""
Calls the main MSS GUI window since operations are to be performed on it only.
"""
mss_pyui.main()
def automate_waypoints():
"""
    This is the main automation script of the MSS waypoints tutorial. It will be recorded and saved
    to a file whose name carries a date-time stamp and a .mp4 extension (codec).
"""
# Giving time for loading of the MSS GUI.
pag.sleep(15)
# Maximizing the window
try:
if platform == 'linux' or platform == 'linux2':
pag.hotkey('winleft', 'up')
elif platform == 'darwin':
pag.hotkey('ctrl', 'command', 'f')
elif platform == 'win32':
pag.hotkey('win', 'up')
except Exception:
print("\nException : Enable Shortcuts for your system or try again!")
pag.sleep(2)
pag.hotkey('ctrl', 'h')
pag.sleep(5)
# Adding waypoints
try:
x, y = pag.locateCenterOnScreen('pictures/add_waypoint.PNG')
pag.click(x, y, interval=2)
except ImageNotFoundException:
print("\nException : Clickable button/option not found on the screen.")
pag.move(-50, 150, duration=1)
pag.click(interval=2)
pag.sleep(1)
pag.move(65, 65, duration=1)
pag.click(interval=2)
pag.sleep(1)
pag.move(-150, 30, duration=1)
x1, y1 = pag.position()
pag.click(interval=2)
pag.sleep(1)
pag.move(200, 150, duration=1)
pag.click(interval=2)
x2, y2 = pag.position()
pag.sleep(3)
# Moving waypoints
try:
x, y = pag.locateCenterOnScreen('pictures/move_waypoint.PNG')
pag.click(x, y, interval=2)
except ImageNotFoundException:
print("\n Exception : Move Waypoint button could not be located on the screen")
pag.moveTo(x2, y2, duration=1)
pag.click(interval=2)
pag.dragRel(100, 150, duration=1)
pag.moveTo(x1, y1, duration=1)
pag.dragRel(35, -50, duration=1)
x1, y1 = pag.position()
# Deleting waypoints
try:
x, y = pag.locateCenterOnScreen('pictures/remove_waypoint.PNG')
pag.click(x, y, interval=2)
except ImageNotFoundException:
print("\n Exception : Remove Waypoint button could not be located on the screen")
pag.moveTo(x1, y1, duration=1)
pag.click(duration=1)
pag.press('left')
pag.sleep(3)
if platform == 'linux' or platform == 'linux2' or platform == 'win32':
pag.press('enter', interval=1)
elif platform == 'darwin':
pag.press('return', interval=1)
pag.sleep(2)
# Changing map to Global
try:
if platform == 'linux' or platform == 'linux2' or platform == 'darwin':
x, y = pag.locateCenterOnScreen('pictures/europe(cyl).PNG')
pag.click(x, y, interval=2)
elif platform == 'win32':
x, y = pag.locateCenterOnScreen('pictures/europe(cyl)win.PNG')
pag.click(x, y, interval=2)
except ImageNotFoundException:
print("\n Exception : Map change dropdown could not be located on the screen")
pag.press('down', presses=2, interval=0.5)
if platform == 'linux' or platform == 'linux2' or platform == 'win32':
pag.press('enter', interval=1)
elif platform == 'darwin':
pag.press('return', interval=1)
pag.sleep(5)
# Zooming into the map
try:
x, y = pag.locateCenterOnScreen('pictures/zoom.PNG')
pag.click(x, y, interval=2)
except ImageNotFoundException:
print("\n Exception : Zoom button could not be located on the screen")
pag.move(150, 200, duration=1)
pag.dragRel(400, 250, duration=2)
pag.sleep(5)
# Panning into the map
try:
x, y = pag.locateCenterOnScreen('pictures/pan.PNG')
pag.click(x, y, interval=2)
except ImageNotFoundException:
print("\n Exception : Pan button could not be located on the screen")
pag.moveRel(400, 400, duration=1)
pag.dragRel(-100, -50, duration=2)
pag.sleep(5)
pag.move(-20, -25, duration=1)
pag.dragRel(90, 50, duration=2)
pag.sleep(5)
# Switching to the previous appearance of the map
try:
x, y = pag.locateCenterOnScreen('pictures/previous.PNG')
pag.click(x, y, interval=2)
except ImageNotFoundException:
print("\n Exception : Previous button could not be located on the screen")
pag.sleep(5)
# Switching to the next appearance of the map
try:
x, y = pag.locateCenterOnScreen('pictures/next.PNG')
pag.click(x, y, interval=2)
except ImageNotFoundException:
print("\n Exception : Next button could not be located on the screen")
pag.sleep(5)
# Resetting the map to the original size
try:
x, y = pag.locateCenterOnScreen('pictures/home.PNG')
pag.click(x, y, interval=2)
except ImageNotFoundException:
print("\n Exception : Home button could not be located on the screen")
pag.sleep(5)
# Saving the figure
try:
x, y = pag.locateCenterOnScreen('pictures/save.PNG')
pag.click(x, y, interval=2)
except ImageNotFoundException:
print("\n Exception : Save button could not be located on the screen")
current_time = datetime.datetime.now().strftime('%d-%m-%Y %H-%M-%S')
fig_filename = f'Fig_{current_time}.PNG'
pag.sleep(3)
if platform == 'win32':
pag.write(fig_filename, interval=0.25)
pag.press('enter', interval=1)
if platform == 'linux' or platform == 'linux2':
pag.hotkey('altleft', 'tab') # if the save file system window is not in the forefront, use this statement.
# This can happen sometimes. At that time, you just need to uncomment it.
pag.write(fig_filename, interval=0.25)
pag.press('enter', interval=1)
elif platform == 'darwin':
pag.write(fig_filename, interval=0.25)
pag.press('return', interval=1)
print("\nAutomation is over for this tutorial. Watch next tutorial for other functions.")
# Close Everything!
try:
if platform == 'linux' or platform == 'linux2':
for _ in range(2):
pag.hotkey('altleft', 'f4')
pag.sleep(3)
pag.press('left')
pag.sleep(3)
pag.press('enter')
pag.sleep(2)
pag.keyDown('altleft')
pag.press('tab')
pag.press('left')
pag.keyUp('altleft')
pag.press('q')
if platform == 'win32':
for _ in range(2):
pag.hotkey('alt', 'f4')
pag.sleep(3)
pag.press('left')
pag.sleep(3)
pag.press('enter')
pag.sleep(2)
pag.hotkey('alt', 'tab')
pag.press('q')
elif platform == 'darwin':
for _ in range(2):
pag.hotkey('command', 'w')
pag.sleep(3)
pag.press('left')
pag.sleep(3)
pag.press('return')
pag.sleep(2)
pag.hotkey('command', 'tab')
pag.press('q')
except Exception:
print("Cannot automate : Enable Shortcuts for your system or try again")
pag.press('q')
def main():
"""
This function runs the above functions as different processes at the same time and can be
controlled from here. (This is the main process.)
"""
p1 = multiprocessing.Process(target=call_mss)
p2 = multiprocessing.Process(target=automate_waypoints)
p3 = multiprocessing.Process(target=call_recorder)
print("\nINFO : Starting Automation.....\n")
p3.start()
pag.sleep(5)
initial_ops()
p1.start()
p2.start()
p2.join()
p1.join()
p3.join()
print("\n\nINFO : Automation Completes Successfully!")
sys.exit()
if __name__ == '__main__':
main()
|
py | 1a4f048674d749cb459e4b4e61b63c7e584689f1 | from workspace.pipelines import pipelines
import workspace.util as util
JOB_SPEC_PATH = "package.json"
BUCKET_NAME = "gs://ivanmkc-test2/pipeline_staging"
pipeline_root = "{}/pipeline_root".format(BUCKET_NAME)
# TODO: Run in parallel
for pipeline in [
# pipelines.tabular.bqml_custom_predict,
pipelines.tabular.bq_automl,
# pipelines.tabular.bq_custom,
# pipelines.tabular.bqml_export_vertexai,
]:
print(f"Running pipeline: {pipeline.name}")
util.run_pipeline(
project_id="python-docs-samples-tests",
location="us-central1",
pipeline_root=pipeline_root,
pipeline=pipeline,
)
|
py | 1a4f05aec5570a36c3a9e352f25338862a01c2c1 | from typing import List
from matplotlib import pyplot as plt
import numpy as np
from scipy import stats
from scipy.optimize import curve_fit
def nice_string_output(
names: List[str], values: List[str], extra_spacing: int = 0,
):
max_values = len(max(values, key=len))
max_names = len(max(names, key=len))
string = ""
for name, value in zip(names, values):
string += "{0:s} {1:>{spacing}} \n".format(
name,
value,
spacing=extra_spacing + max_values + max_names - len(name),
)
return string[:-2]
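# Minimal usage sketch (illustrative, not part of the original module):
#   print(nice_string_output(['mu', 'sigma'], ['1.23', '0.45']))
# prints each name left-aligned and each value right-aligned in a column whose
# width depends on the longest name/value plus extra_spacing.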
def plot_gaussian(
data, ax: plt.Axes, nBins=100, textpos="l", legend=False, short_text=False
):
# make sure our data is an ndarray
if type(data) == list:
data = np.array(data)
### FITTING WITH A GAUSSIAN
def func_gauss(x, N, mu, sigma):
return N * stats.norm.pdf(x, mu, sigma)
counts, bin_edges = np.histogram(data, bins=nBins)
bin_centers = (bin_edges[1:] + bin_edges[:-1]) / 2
s_counts = np.sqrt(counts)
x = bin_centers[counts > 0]
y = counts[counts > 0]
sy = s_counts[counts > 0]
popt_gauss, pcov_gauss = curve_fit(
func_gauss, x, y, p0=[1, data.mean(), data.std()]
)
y_func = func_gauss(x, *popt_gauss)
pKS = stats.ks_2samp(y, y_func)
pKS_g1, pKS_g2 = pKS[0], pKS[1]
# print('LOOK! \n \n \n pKS is {} \n \n \n '.format(pKS_g2))
chi2_gauss = sum((y - y_func) ** 2 / sy ** 2)
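    # Degrees of freedom: number of bins minus the 3 fitted parameters (N, mu, sigma).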
NDOF_gauss = nBins - 3
prob_gauss = stats.chi2.sf(chi2_gauss, NDOF_gauss)
if short_text == True:
namesl = [
"Gauss_N",
"Gauss_Mu",
"Gauss_Sigma",
]
valuesl = [
"{:.3f} +/- {:.3f}".format(val, unc)
for val, unc in zip(popt_gauss, np.diagonal(pcov_gauss))
]
del namesl[0] # remove gauss n
del valuesl[0]
else:
namesl = [
"Gauss_N",
"Gauss_Mu",
"Gauss_Sigma",
"KS stat",
"KS_pval",
"Chi2 / NDOF",
"Prob",
]
valuesl = (
[
"{:.3f} +/- {:.3f}".format(val, unc)
for val, unc in zip(popt_gauss, np.diagonal(pcov_gauss))
]
+ ["{:.3f}".format(pKS_g1)]
+ ["{:.3f}".format(pKS_g2)]
+ ["{:.3f} / {}".format(chi2_gauss, NDOF_gauss)]
+ ["{:.3f}".format(prob_gauss)]
)
ax.errorbar(x, y, yerr=sy, xerr=0, fmt=".", elinewidth=1)
ax.plot(x, y_func, "--", label="Gaussian")
if textpos == "l":
ax.text(
0.02,
0.98,
nice_string_output(namesl, valuesl),
family="monospace",
transform=ax.transAxes,
fontsize=10,
verticalalignment="top",
alpha=0.5,
)
elif textpos == "r":
ax.text(
0.6,
0.98,
nice_string_output(namesl, valuesl),
family="monospace",
transform=ax.transAxes,
fontsize=10,
verticalalignment="top",
alpha=0.5,
)
if legend:
ax.legend(loc="center left")
return ax
if __name__ == '__main__':
samples = stats.expon.rvs(5.7, size=10000)
# samples = stats.poisson.rvs(mu=2, size=10000)
# samples = stats.cauchy.rvs(size=10000)
sums = np.zeros(1000)
for si in range(len(sums)):
sums[si] = np.mean(np.random.choice(samples, size=10))
fig, ax = plt.subplots()
plot_gaussian(sums, ax)
plt.show() |
py | 1a4f068d063dc4c5af0b6fcf14ff47a223c45376 | # -*- coding: utf-8 -*-
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'sphinx.ext.todo',
]
# TODO: Please Read!
# Uncomment the below if you use native CircuitPython modules such as
# digitalio, micropython and busio. List the modules you use. Without it, the
# autodoc module docs will fail to generate with a warning.
# autodoc_mock_imports = ["digitalio", "busio"]
intersphinx_mapping = {'python': ('https://docs.python.org/3.4', None),'CircuitPython': ('https://circuitpython.readthedocs.io/en/latest/', None)}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'nonblocking_timer Library'
copyright = u'2017 Michael Schneider'
author = u'Michael Schneider'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '.env', 'CODE_OF_CONDUCT.md']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
default_role = "any"
# If true, '()' will be appended to :func: etc. cross-reference text.
#
add_function_parentheses = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# If this is True, todo emits a warning for each TODO entries. The default is False.
todo_emit_warnings = True
napoleon_numpy_docstring = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), '.']
except:
html_theme = 'default'
html_theme_path = ['.']
else:
html_theme_path = ['.']
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
html_favicon = '_static/favicon.ico'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Nonblocking_timerLibrarydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'nonblocking_timerLibrary.tex', u'nonblocking_timer Library Documentation',
author, 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'nonblocking_timerlibrary', u'nonblocking_timer Library Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'nonblocking_timerLibrary', u' nonblocking_timer Library Documentation',
author, 'nonblocking_timerLibrary', 'One line description of project.',
'Miscellaneous'),
]
|
py | 1a4f06ba382b045660a5cfa3f88c4a196f9ca85b | # coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2016-2019 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
"""Basic tests for PlotWidget"""
__authors__ = ["T. Vincent"]
__license__ = "MIT"
__date__ = "03/01/2019"
import unittest
import logging
import numpy
import sys
from silx.utils.testutils import ParametricTestCase, parameterize
from silx.gui.utils.testutils import SignalListener
from silx.gui.utils.testutils import TestCaseQt
from silx.test.utils import test_options
from silx.gui import qt
from silx.gui.plot import PlotWidget
from silx.gui.plot.items.curve import CurveStyle
from silx.gui.colors import Colormap
from .utils import PlotWidgetTestCase
SIZE = 1024
"""Size of the test image"""
DATA_2D = numpy.arange(SIZE ** 2).reshape(SIZE, SIZE)
"""Image data set"""
logger = logging.getLogger(__name__)
class TestSpecialBackend(PlotWidgetTestCase, ParametricTestCase):
def __init__(self, methodName='runTest', backend=None):
TestCaseQt.__init__(self, methodName=methodName)
self.__backend = backend
def _createPlot(self):
return PlotWidget(backend=self.__backend)
def testPlot(self):
self.assertIsNotNone(self.plot)
class TestPlotWidget(PlotWidgetTestCase, ParametricTestCase):
"""Basic tests for PlotWidget"""
def testShow(self):
"""Most basic test"""
pass
def testSetTitleLabels(self):
"""Set title and axes labels"""
title, xlabel, ylabel = 'the title', 'x label', 'y label'
self.plot.setGraphTitle(title)
self.plot.getXAxis().setLabel(xlabel)
self.plot.getYAxis().setLabel(ylabel)
self.qapp.processEvents()
self.assertEqual(self.plot.getGraphTitle(), title)
self.assertEqual(self.plot.getXAxis().getLabel(), xlabel)
self.assertEqual(self.plot.getYAxis().getLabel(), ylabel)
def _checkLimits(self,
expectedXLim=None,
expectedYLim=None,
expectedRatio=None):
"""Assert that limits are as expected"""
xlim = self.plot.getXAxis().getLimits()
ylim = self.plot.getYAxis().getLimits()
ratio = abs(xlim[1] - xlim[0]) / abs(ylim[1] - ylim[0])
if expectedXLim is not None:
self.assertEqual(expectedXLim, xlim)
if expectedYLim is not None:
self.assertEqual(expectedYLim, ylim)
if expectedRatio is not None:
self.assertTrue(
numpy.allclose(expectedRatio, ratio, atol=0.01))
def testChangeLimitsWithAspectRatio(self):
self.plot.setKeepDataAspectRatio()
self.qapp.processEvents()
xlim = self.plot.getXAxis().getLimits()
ylim = self.plot.getYAxis().getLimits()
defaultRatio = abs(xlim[1] - xlim[0]) / abs(ylim[1] - ylim[0])
self.plot.getXAxis().setLimits(1., 10.)
self._checkLimits(expectedXLim=(1., 10.), expectedRatio=defaultRatio)
self.qapp.processEvents()
self._checkLimits(expectedXLim=(1., 10.), expectedRatio=defaultRatio)
self.plot.getYAxis().setLimits(1., 10.)
self._checkLimits(expectedYLim=(1., 10.), expectedRatio=defaultRatio)
self.qapp.processEvents()
self._checkLimits(expectedYLim=(1., 10.), expectedRatio=defaultRatio)
def testResizeWidget(self):
"""Test resizing the widget and receiving limitsChanged events"""
self.plot.resize(200, 200)
self.qapp.processEvents()
self.qWait(100)
xlim = self.plot.getXAxis().getLimits()
ylim = self.plot.getYAxis().getLimits()
listener = SignalListener()
self.plot.getXAxis().sigLimitsChanged.connect(listener.partial('x'))
self.plot.getYAxis().sigLimitsChanged.connect(listener.partial('y'))
# Resize without aspect ratio
self.plot.resize(200, 300)
self.qapp.processEvents()
self.qWait(100)
self._checkLimits(expectedXLim=xlim, expectedYLim=ylim)
self.assertEqual(listener.callCount(), 0)
# Resize with aspect ratio
self.plot.setKeepDataAspectRatio(True)
self.qapp.processEvents()
self.qWait(1000)
listener.clear() # Clean-up received signal
self.plot.resize(200, 200)
self.qapp.processEvents()
self.qWait(100)
self.assertNotEqual(listener.callCount(), 0)
def testAddRemoveItemSignals(self):
"""Test sigItemAdded and sigItemAboutToBeRemoved"""
listener = SignalListener()
self.plot.sigItemAdded.connect(listener.partial('add'))
self.plot.sigItemAboutToBeRemoved.connect(listener.partial('remove'))
self.plot.addCurve((1, 2, 3), (3, 2, 1), legend='curve')
self.assertEqual(listener.callCount(), 1)
curve = self.plot.getCurve('curve')
self.plot.remove('curve')
self.assertEqual(listener.callCount(), 2)
self.assertEqual(listener.arguments(callIndex=0), ('add', curve))
self.assertEqual(listener.arguments(callIndex=1), ('remove', curve))
def testGetItems(self):
"""Test getItems method"""
curve_x = 1, 2
self.plot.addCurve(curve_x, (3, 4))
image = (0, 1), (2, 3)
self.plot.addImage(image)
scatter_x = 10, 11
self.plot.addScatter(scatter_x, (12, 13), (0, 1))
marker_pos = 5, 5
self.plot.addMarker(*marker_pos)
marker_x = 6
self.plot.addXMarker(marker_x)
self.plot.addItem((0, 5), (2, 10), shape='rectangle')
items = self.plot.getItems()
self.assertEqual(len(items), 6)
self.assertTrue(numpy.all(numpy.equal(items[0].getXData(), curve_x)))
self.assertTrue(numpy.all(numpy.equal(items[1].getData(), image)))
self.assertTrue(numpy.all(numpy.equal(items[2].getXData(), scatter_x)))
self.assertTrue(numpy.all(numpy.equal(items[3].getPosition(), marker_pos)))
self.assertTrue(numpy.all(numpy.equal(items[4].getPosition()[0], marker_x)))
self.assertEqual(items[5].getType(), 'rectangle')
def testBackGroundColors(self):
self.plot.setVisible(True)
self.qWaitForWindowExposed(self.plot)
self.qapp.processEvents()
# Custom the full background
color = self.plot.getBackgroundColor()
self.assertTrue(color.isValid())
self.assertEqual(color, qt.QColor(255, 255, 255))
self.plot.setBackgroundColor("red")
color = self.plot.getBackgroundColor()
self.assertTrue(color.isValid())
self.qapp.processEvents()
# Custom the data background
color = self.plot.getDataBackgroundColor()
self.assertFalse(color.isValid())
self.plot.setDataBackgroundColor("red")
color = self.plot.getDataBackgroundColor()
self.assertTrue(color.isValid())
self.qapp.processEvents()
# Back to default
self.plot.setBackgroundColor('white')
self.plot.setDataBackgroundColor(None)
color = self.plot.getBackgroundColor()
self.assertTrue(color.isValid())
self.assertEqual(color, qt.QColor(255, 255, 255))
color = self.plot.getDataBackgroundColor()
self.assertFalse(color.isValid())
self.qapp.processEvents()
class TestPlotImage(PlotWidgetTestCase, ParametricTestCase):
"""Basic tests for addImage"""
def setUp(self):
super(TestPlotImage, self).setUp()
self.plot.getYAxis().setLabel('Rows')
self.plot.getXAxis().setLabel('Columns')
def testPlotColormapTemperature(self):
self.plot.setGraphTitle('Temp. Linear')
colormap = Colormap(name='temperature',
normalization='linear',
vmin=None,
vmax=None)
self.plot.addImage(DATA_2D, legend="image 1", colormap=colormap)
def testPlotColormapGray(self):
self.plot.setKeepDataAspectRatio(False)
self.plot.setGraphTitle('Gray Linear')
colormap = Colormap(name='gray',
normalization='linear',
vmin=None,
vmax=None)
self.plot.addImage(DATA_2D, legend="image 1", colormap=colormap)
def testPlotColormapTemperatureLog(self):
self.plot.setGraphTitle('Temp. Log')
colormap = Colormap(name='temperature',
normalization=Colormap.LOGARITHM,
vmin=None,
vmax=None)
self.plot.addImage(DATA_2D, legend="image 1", colormap=colormap)
def testPlotRgbRgba(self):
self.plot.setKeepDataAspectRatio(False)
self.plot.setGraphTitle('RGB + RGBA')
rgb = numpy.array(
(((0, 0, 0), (128, 0, 0), (255, 0, 0)),
((0, 128, 0), (0, 128, 128), (0, 128, 256))),
dtype=numpy.uint8)
self.plot.addImage(rgb, legend="rgb",
origin=(0, 0), scale=(10, 10),
resetzoom=False)
rgba = numpy.array(
(((0, 0, 0, .5), (.5, 0, 0, 1), (1, 0, 0, .5)),
((0, .5, 0, 1), (0, .5, .5, 1), (0, 1, 1, .5))),
dtype=numpy.float32)
self.plot.addImage(rgba, legend="rgba",
origin=(5, 5), scale=(10, 10),
resetzoom=False)
self.plot.resetZoom()
def testPlotColormapCustom(self):
self.plot.setKeepDataAspectRatio(False)
self.plot.setGraphTitle('Custom colormap')
colormap = Colormap(name=None,
normalization=Colormap.LINEAR,
vmin=None,
vmax=None,
colors=((0., 0., 0.), (1., 0., 0.),
(0., 1., 0.), (0., 0., 1.)))
self.plot.addImage(DATA_2D, legend="image 1", colormap=colormap,
resetzoom=False)
colormap = Colormap(name=None,
normalization=Colormap.LINEAR,
vmin=None,
vmax=None,
colors=numpy.array(
((0, 0, 0, 0), (0, 0, 0, 128),
(128, 128, 128, 128), (255, 255, 255, 255)),
dtype=numpy.uint8))
self.plot.addImage(DATA_2D, legend="image 2", colormap=colormap,
origin=(DATA_2D.shape[0], 0),
resetzoom=False)
self.plot.resetZoom()
def testImageOriginScale(self):
"""Test of image with different origin and scale"""
self.plot.setGraphTitle('origin and scale')
tests = [ # (origin, scale)
((10, 20), (1, 1)),
((10, 20), (-1, -1)),
((-10, 20), (2, 1)),
((10, -20), (-1, -2)),
(100, 2),
(-100, (1, 1)),
((10, 20), 2),
]
for origin, scale in tests:
with self.subTest(origin=origin, scale=scale):
self.plot.addImage(DATA_2D, origin=origin, scale=scale)
try:
ox, oy = origin
except TypeError:
ox, oy = origin, origin
try:
sx, sy = scale
except TypeError:
sx, sy = scale, scale
xbounds = ox, ox + DATA_2D.shape[1] * sx
ybounds = oy, oy + DATA_2D.shape[0] * sy
# Check limits without aspect ratio
xmin, xmax = self.plot.getXAxis().getLimits()
ymin, ymax = self.plot.getYAxis().getLimits()
self.assertEqual(xmin, min(xbounds))
self.assertEqual(xmax, max(xbounds))
self.assertEqual(ymin, min(ybounds))
self.assertEqual(ymax, max(ybounds))
# Check limits with aspect ratio
self.plot.setKeepDataAspectRatio(True)
xmin, xmax = self.plot.getXAxis().getLimits()
ymin, ymax = self.plot.getYAxis().getLimits()
self.assertTrue(round(xmin, 7) <= min(xbounds))
self.assertTrue(round(xmax, 7) >= max(xbounds))
self.assertTrue(round(ymin, 7) <= min(ybounds))
self.assertTrue(round(ymax, 7) >= max(ybounds))
self.plot.setKeepDataAspectRatio(False) # Reset aspect ratio
self.plot.clear()
self.plot.resetZoom()
def testPlotColormapDictAPI(self):
"""Test that the addImage API using a colormap dictionary is still
working"""
self.plot.setGraphTitle('Temp. Log')
colormap = {
'name': 'temperature',
'normalization': 'log',
'vmin': None,
'vmax': None
}
self.plot.addImage(DATA_2D, legend="image 1", colormap=colormap)
def testPlotComplexImage(self):
"""Test that a complex image is displayed as its absolute value."""
data = numpy.linspace(1, 1j, 100).reshape(10, 10)
self.plot.addImage(data, legend='complex')
image = self.plot.getActiveImage()
retrievedData = image.getData(copy=False)
self.assertTrue(
numpy.all(numpy.equal(retrievedData, numpy.absolute(data))))
def testPlotBooleanImage(self):
"""Test that a boolean image is displayed and converted to int8."""
data = numpy.zeros((10, 10), dtype=numpy.bool)
data[::2, ::2] = True
self.plot.addImage(data, legend='boolean')
image = self.plot.getActiveImage()
retrievedData = image.getData(copy=False)
self.assertTrue(numpy.all(numpy.equal(retrievedData, data)))
self.assertIs(retrievedData.dtype.type, numpy.int8)
def testPlotAlphaImage(self):
"""Test with an alpha image layer"""
data = numpy.random.random((10, 10))
alpha = numpy.linspace(0, 1, 100).reshape(10, 10)
self.plot.addImage(data, legend='image')
image = self.plot.getActiveImage()
image.setData(data, alpha=alpha)
self.qapp.processEvents()
self.assertTrue(numpy.array_equal(alpha, image.getAlphaData()))
class TestPlotCurve(PlotWidgetTestCase):
"""Basic tests for addCurve."""
# Test data sets
xData = numpy.arange(1000)
yData = -500 + 100 * numpy.sin(xData)
xData2 = xData + 1000
yData2 = xData - 1000 + 200 * numpy.random.random(1000)
def setUp(self):
super(TestPlotCurve, self).setUp()
self.plot.setGraphTitle('Curve')
self.plot.getYAxis().setLabel('Rows')
self.plot.getXAxis().setLabel('Columns')
self.plot.setActiveCurveHandling(False)
def testPlotCurveColorFloat(self):
color = numpy.array(numpy.random.random(3 * 1000),
dtype=numpy.float32).reshape(1000, 3)
self.plot.addCurve(self.xData, self.yData,
legend="curve 1",
replace=False, resetzoom=False,
color=color,
linestyle="", symbol="s")
self.plot.addCurve(self.xData2, self.yData2,
legend="curve 2",
replace=False, resetzoom=False,
color='green', linestyle="-", symbol='o')
self.plot.resetZoom()
def testPlotCurveColorByte(self):
color = numpy.array(255 * numpy.random.random(3 * 1000),
dtype=numpy.uint8).reshape(1000, 3)
self.plot.addCurve(self.xData, self.yData,
legend="curve 1",
replace=False, resetzoom=False,
color=color,
linestyle="", symbol="s")
self.plot.addCurve(self.xData2, self.yData2,
legend="curve 2",
replace=False, resetzoom=False,
color='green', linestyle="-", symbol='o')
self.plot.resetZoom()
def testPlotCurveColors(self):
color = numpy.array(numpy.random.random(3 * 1000),
dtype=numpy.float32).reshape(1000, 3)
self.plot.addCurve(self.xData, self.yData,
legend="curve 2",
replace=False, resetzoom=False,
color=color, linestyle="-", symbol='o')
self.plot.resetZoom()
# Test updating color array
# From array to array
newColors = numpy.ones((len(self.xData), 3), dtype=numpy.float32)
self.plot.addCurve(self.xData, self.yData,
legend="curve 2",
replace=False, resetzoom=False,
color=newColors, symbol='o')
# Array to single color
self.plot.addCurve(self.xData, self.yData,
legend="curve 2",
replace=False, resetzoom=False,
color='green', symbol='o')
# single color to array
self.plot.addCurve(self.xData, self.yData,
legend="curve 2",
replace=False, resetzoom=False,
color=color, symbol='o')
class TestPlotScatter(PlotWidgetTestCase, ParametricTestCase):
"""Basic tests for addScatter"""
def testScatter(self):
x = numpy.arange(100)
y = numpy.arange(100)
value = numpy.arange(100)
self.plot.addScatter(x, y, value)
self.plot.resetZoom()
def testScatterVisualization(self):
self.plot.addScatter((0, 1, 2, 3), (2, 0, 2, 1), (0, 1, 2, 3))
self.plot.resetZoom()
self.qapp.processEvents()
scatter = self.plot.getItems()[0]
for visualization in ('solid',
'points',
scatter.Visualization.SOLID,
scatter.Visualization.POINTS):
with self.subTest(visualization=visualization):
scatter.setVisualization(visualization)
self.qapp.processEvents()
class TestPlotMarker(PlotWidgetTestCase):
"""Basic tests for add*Marker"""
def setUp(self):
super(TestPlotMarker, self).setUp()
self.plot.getYAxis().setLabel('Rows')
self.plot.getXAxis().setLabel('Columns')
self.plot.getXAxis().setAutoScale(False)
self.plot.getYAxis().setAutoScale(False)
self.plot.setKeepDataAspectRatio(False)
self.plot.setLimits(0., 100., -100., 100.)
def testPlotMarkerX(self):
self.plot.setGraphTitle('Markers X')
markers = [
(10., 'blue', False, False),
(20., 'red', False, False),
(40., 'green', True, False),
(60., 'gray', True, True),
(80., 'black', False, True),
]
for x, color, select, drag in markers:
name = str(x)
if select:
name += " sel."
if drag:
name += " drag"
self.plot.addXMarker(x, name, name, color, select, drag)
self.plot.resetZoom()
def testPlotMarkerY(self):
self.plot.setGraphTitle('Markers Y')
markers = [
(-50., 'blue', False, False),
(-30., 'red', False, False),
(0., 'green', True, False),
(10., 'gray', True, True),
(80., 'black', False, True),
]
for y, color, select, drag in markers:
name = str(y)
if select:
name += " sel."
if drag:
name += " drag"
self.plot.addYMarker(y, name, name, color, select, drag)
self.plot.resetZoom()
def testPlotMarkerPt(self):
self.plot.setGraphTitle('Markers Pt')
markers = [
(10., -50., 'blue', False, False),
(40., -30., 'red', False, False),
(50., 0., 'green', True, False),
(50., 20., 'gray', True, True),
(70., 50., 'black', False, True),
]
for x, y, color, select, drag in markers:
name = "{0},{1}".format(x, y)
if select:
name += " sel."
if drag:
name += " drag"
self.plot.addMarker(x, y, name, name, color, select, drag)
self.plot.resetZoom()
def testPlotMarkerWithoutLegend(self):
self.plot.setGraphTitle('Markers without legend')
self.plot.getYAxis().setInverted(True)
# Markers without legend
self.plot.addMarker(10, 10)
self.plot.addMarker(10, 20)
self.plot.addMarker(40, 50, text='test', symbol=None)
self.plot.addMarker(40, 50, text='test', symbol='+')
self.plot.addXMarker(25)
self.plot.addXMarker(35)
self.plot.addXMarker(45, text='test')
self.plot.addYMarker(55)
self.plot.addYMarker(65)
self.plot.addYMarker(75, text='test')
self.plot.resetZoom()
def testPlotMarkerYAxis(self):
# Check only the API
legend = self.plot.addMarker(10, 10)
item = self.plot._getMarker(legend)
self.assertEqual(item.getYAxis(), "left")
legend = self.plot.addMarker(10, 10, yaxis="right")
item = self.plot._getMarker(legend)
self.assertEqual(item.getYAxis(), "right")
legend = self.plot.addMarker(10, 10, yaxis="left")
item = self.plot._getMarker(legend)
self.assertEqual(item.getYAxis(), "left")
legend = self.plot.addXMarker(10, yaxis="right")
item = self.plot._getMarker(legend)
self.assertEqual(item.getYAxis(), "right")
legend = self.plot.addXMarker(10, yaxis="left")
item = self.plot._getMarker(legend)
self.assertEqual(item.getYAxis(), "left")
legend = self.plot.addYMarker(10, yaxis="right")
item = self.plot._getMarker(legend)
self.assertEqual(item.getYAxis(), "right")
legend = self.plot.addYMarker(10, yaxis="left")
item = self.plot._getMarker(legend)
self.assertEqual(item.getYAxis(), "left")
self.plot.resetZoom()
# TestPlotItem ################################################################
class TestPlotItem(PlotWidgetTestCase):
"""Basic tests for addItem."""
# Polygon coordinates and color
polygons = [ # legend, x coords, y coords, color
('triangle', numpy.array((10, 30, 50)),
numpy.array((55, 70, 55)), 'red'),
('square', numpy.array((10, 10, 50, 50)),
numpy.array((10, 50, 50, 10)), 'green'),
('star', numpy.array((60, 70, 80, 60, 80)),
numpy.array((25, 50, 25, 40, 40)), 'blue'),
]
# Rectangle coordinantes and color
rectangles = [ # legend, x coords, y coords, color
('square 1', numpy.array((1., 10.)),
numpy.array((1., 10.)), 'red'),
('square 2', numpy.array((10., 20.)),
numpy.array((10., 20.)), 'green'),
('square 3', numpy.array((20., 30.)),
numpy.array((20., 30.)), 'blue'),
('rect 1', numpy.array((1., 30.)),
numpy.array((35., 40.)), 'black'),
('line h', numpy.array((1., 30.)),
numpy.array((45., 45.)), 'darkRed'),
]
def setUp(self):
super(TestPlotItem, self).setUp()
self.plot.getYAxis().setLabel('Rows')
self.plot.getXAxis().setLabel('Columns')
self.plot.getXAxis().setAutoScale(False)
self.plot.getYAxis().setAutoScale(False)
self.plot.setKeepDataAspectRatio(False)
self.plot.setLimits(0., 100., -100., 100.)
def testPlotItemPolygonFill(self):
self.plot.setGraphTitle('Item Fill')
for legend, xList, yList, color in self.polygons:
self.plot.addItem(xList, yList, legend=legend,
replace=False,
shape="polygon", fill=True, color=color)
self.plot.resetZoom()
def testPlotItemPolygonNoFill(self):
self.plot.setGraphTitle('Item No Fill')
for legend, xList, yList, color in self.polygons:
self.plot.addItem(xList, yList, legend=legend,
replace=False,
shape="polygon", fill=False, color=color)
self.plot.resetZoom()
def testPlotItemRectangleFill(self):
self.plot.setGraphTitle('Rectangle Fill')
for legend, xList, yList, color in self.rectangles:
self.plot.addItem(xList, yList, legend=legend,
replace=False,
shape="rectangle", fill=True, color=color)
self.plot.resetZoom()
def testPlotItemRectangleNoFill(self):
self.plot.setGraphTitle('Rectangle No Fill')
for legend, xList, yList, color in self.rectangles:
self.plot.addItem(xList, yList, legend=legend,
replace=False,
shape="rectangle", fill=False, color=color)
self.plot.resetZoom()
class TestPlotActiveCurveImage(PlotWidgetTestCase):
"""Basic tests for active curve and image handling"""
xData = numpy.arange(1000)
yData = -500 + 100 * numpy.sin(xData)
xData2 = xData + 1000
yData2 = xData - 1000 + 200 * numpy.random.random(1000)
def tearDown(self):
self.plot.setActiveCurveHandling(False)
super(TestPlotActiveCurveImage, self).tearDown()
def testActiveCurveAndLabels(self):
# Active curve handling off, no label change
self.plot.setActiveCurveHandling(False)
self.plot.getXAxis().setLabel('XLabel')
self.plot.getYAxis().setLabel('YLabel')
self.plot.addCurve((1, 2), (1, 2))
self.assertEqual(self.plot.getXAxis().getLabel(), 'XLabel')
self.assertEqual(self.plot.getYAxis().getLabel(), 'YLabel')
self.plot.addCurve((1, 2), (2, 3), xlabel='x1', ylabel='y1')
self.assertEqual(self.plot.getXAxis().getLabel(), 'XLabel')
self.assertEqual(self.plot.getYAxis().getLabel(), 'YLabel')
self.plot.clear()
self.assertEqual(self.plot.getXAxis().getLabel(), 'XLabel')
self.assertEqual(self.plot.getYAxis().getLabel(), 'YLabel')
# Active curve handling on, label changes
self.plot.setActiveCurveHandling(True)
self.plot.getXAxis().setLabel('XLabel')
self.plot.getYAxis().setLabel('YLabel')
# labels changed as active curve
self.plot.addCurve((1, 2), (1, 2), legend='1',
xlabel='x1', ylabel='y1')
self.plot.setActiveCurve('1')
self.assertEqual(self.plot.getXAxis().getLabel(), 'x1')
self.assertEqual(self.plot.getYAxis().getLabel(), 'y1')
# labels not changed as not active curve
self.plot.addCurve((1, 2), (2, 3), legend='2')
self.assertEqual(self.plot.getXAxis().getLabel(), 'x1')
self.assertEqual(self.plot.getYAxis().getLabel(), 'y1')
# labels changed
self.plot.setActiveCurve('2')
self.assertEqual(self.plot.getXAxis().getLabel(), 'XLabel')
self.assertEqual(self.plot.getYAxis().getLabel(), 'YLabel')
self.plot.setActiveCurve('1')
self.assertEqual(self.plot.getXAxis().getLabel(), 'x1')
self.assertEqual(self.plot.getYAxis().getLabel(), 'y1')
self.plot.clear()
self.assertEqual(self.plot.getXAxis().getLabel(), 'XLabel')
self.assertEqual(self.plot.getYAxis().getLabel(), 'YLabel')
def testPlotActiveCurveSelectionMode(self):
self.plot.clear()
self.plot.setActiveCurveHandling(True)
legend = "curve 1"
self.plot.addCurve(self.xData, self.yData,
legend=legend,
color="green")
# active curve should be None
self.assertEqual(self.plot.getActiveCurve(just_legend=True), None)
# active curve should be None when None is set as active curve
self.plot.setActiveCurve(legend)
current = self.plot.getActiveCurve(just_legend=True)
self.assertEqual(current, legend)
self.plot.setActiveCurve(None)
current = self.plot.getActiveCurve(just_legend=True)
self.assertEqual(current, None)
# testing it automatically toggles if there is only one
self.plot.setActiveCurveSelectionMode("legacy")
current = self.plot.getActiveCurve(just_legend=True)
self.assertEqual(current, legend)
# active curve should not change when None set as active curve
self.assertEqual(self.plot.getActiveCurveSelectionMode(), "legacy")
self.plot.setActiveCurve(None)
current = self.plot.getActiveCurve(just_legend=True)
self.assertEqual(current, legend)
# situation where no curve is active
self.plot.clear()
self.plot.setActiveCurveHandling(True)
self.assertEqual(self.plot.getActiveCurveSelectionMode(), "atmostone")
self.plot.addCurve(self.xData, self.yData,
legend=legend,
color="green")
self.assertEqual(self.plot.getActiveCurve(just_legend=True), None)
self.plot.addCurve(self.xData2, self.yData2,
legend="curve 2",
color="red")
self.assertEqual(self.plot.getActiveCurve(just_legend=True), None)
self.plot.setActiveCurveSelectionMode("legacy")
self.assertEqual(self.plot.getActiveCurve(just_legend=True), None)
# the first curve added should be active
self.plot.clear()
self.plot.addCurve(self.xData, self.yData,
legend=legend,
color="green")
self.assertEqual(self.plot.getActiveCurve(just_legend=True), legend)
self.plot.addCurve(self.xData2, self.yData2,
legend="curve 2",
color="red")
self.assertEqual(self.plot.getActiveCurve(just_legend=True), legend)
def testActiveCurveStyle(self):
"""Test change of active curve style"""
self.plot.setActiveCurveHandling(True)
self.plot.setActiveCurveStyle(color='black')
style = self.plot.getActiveCurveStyle()
self.assertEqual(style.getColor(), (0., 0., 0., 1.))
self.assertIsNone(style.getLineStyle())
self.assertIsNone(style.getLineWidth())
self.assertIsNone(style.getSymbol())
self.assertIsNone(style.getSymbolSize())
self.plot.addCurve(x=self.xData, y=self.yData, legend="curve1")
curve = self.plot.getCurve("curve1")
curve.setColor('blue')
curve.setLineStyle('-')
curve.setLineWidth(1)
curve.setSymbol('o')
curve.setSymbolSize(5)
# Check default current style
defaultStyle = curve.getCurrentStyle()
self.assertEqual(defaultStyle, CurveStyle(color='blue',
linestyle='-',
linewidth=1,
symbol='o',
symbolsize=5))
# Activate curve with highlight color=black
self.plot.setActiveCurve("curve1")
style = curve.getCurrentStyle()
self.assertEqual(style.getColor(), (0., 0., 0., 1.))
self.assertEqual(style.getLineStyle(), '-')
self.assertEqual(style.getLineWidth(), 1)
self.assertEqual(style.getSymbol(), 'o')
self.assertEqual(style.getSymbolSize(), 5)
# Change highlight to linewidth=2
self.plot.setActiveCurveStyle(linewidth=2)
style = curve.getCurrentStyle()
self.assertEqual(style.getColor(), (0., 0., 1., 1.))
self.assertEqual(style.getLineStyle(), '-')
self.assertEqual(style.getLineWidth(), 2)
self.assertEqual(style.getSymbol(), 'o')
self.assertEqual(style.getSymbolSize(), 5)
self.plot.setActiveCurve(None)
self.assertEqual(curve.getCurrentStyle(), defaultStyle)
def testActiveImageAndLabels(self):
# Active image handling always on, no API for toggling it
self.plot.getXAxis().setLabel('XLabel')
self.plot.getYAxis().setLabel('YLabel')
        # labels changed as active image
self.plot.addImage(numpy.arange(100).reshape(10, 10),
legend='1', xlabel='x1', ylabel='y1')
self.assertEqual(self.plot.getXAxis().getLabel(), 'x1')
self.assertEqual(self.plot.getYAxis().getLabel(), 'y1')
        # labels not changed as not active image
self.plot.addImage(numpy.arange(100).reshape(10, 10),
legend='2')
self.assertEqual(self.plot.getXAxis().getLabel(), 'x1')
self.assertEqual(self.plot.getYAxis().getLabel(), 'y1')
# labels changed
self.plot.setActiveImage('2')
self.assertEqual(self.plot.getXAxis().getLabel(), 'XLabel')
self.assertEqual(self.plot.getYAxis().getLabel(), 'YLabel')
self.plot.setActiveImage('1')
self.assertEqual(self.plot.getXAxis().getLabel(), 'x1')
self.assertEqual(self.plot.getYAxis().getLabel(), 'y1')
self.plot.clear()
self.assertEqual(self.plot.getXAxis().getLabel(), 'XLabel')
self.assertEqual(self.plot.getYAxis().getLabel(), 'YLabel')
##############################################################################
# Log
##############################################################################
class TestPlotEmptyLog(PlotWidgetTestCase):
"""Basic tests for log plot"""
def testEmptyPlotTitleLabelsLog(self):
self.plot.setGraphTitle('Empty Log Log')
self.plot.getXAxis().setLabel('X')
self.plot.getYAxis().setLabel('Y')
self.plot.getXAxis()._setLogarithmic(True)
self.plot.getYAxis()._setLogarithmic(True)
self.plot.resetZoom()
class TestPlotAxes(TestCaseQt, ParametricTestCase):
# Test data
xData = numpy.arange(1, 10)
yData = xData ** 2
def __init__(self, methodName='runTest', backend=None):
unittest.TestCase.__init__(self, methodName)
self.__backend = backend
def setUp(self):
super(TestPlotAxes, self).setUp()
self.plot = PlotWidget(backend=self.__backend)
# It is not needed to display the plot
# It saves a lot of time
# self.plot.show()
# self.qWaitForWindowExposed(self.plot)
def tearDown(self):
self.qapp.processEvents()
self.plot.setAttribute(qt.Qt.WA_DeleteOnClose)
self.plot.close()
del self.plot
super(TestPlotAxes, self).tearDown()
def testDefaultAxes(self):
axis = self.plot.getXAxis()
self.assertEqual(axis.getScale(), axis.LINEAR)
axis = self.plot.getYAxis()
self.assertEqual(axis.getScale(), axis.LINEAR)
axis = self.plot.getYAxis(axis="right")
self.assertEqual(axis.getScale(), axis.LINEAR)
def testOldPlotAxis_getterSetter(self):
"""Test silx API prior to silx 0.6"""
x = self.plot.getXAxis()
y = self.plot.getYAxis()
p = self.plot
tests = [
# setters
(p.setGraphXLimits, (10, 20), x.getLimits, (10, 20)),
(p.setGraphYLimits, (10, 20), y.getLimits, (10, 20)),
(p.setGraphXLabel, "foox", x.getLabel, "foox"),
(p.setGraphYLabel, "fooy", y.getLabel, "fooy"),
(p.setYAxisInverted, True, y.isInverted, True),
(p.setXAxisLogarithmic, True, x.getScale, x.LOGARITHMIC),
(p.setYAxisLogarithmic, True, y.getScale, y.LOGARITHMIC),
(p.setXAxisAutoScale, False, x.isAutoScale, False),
(p.setYAxisAutoScale, False, y.isAutoScale, False),
# getters
(x.setLimits, (11, 20), p.getGraphXLimits, (11, 20)),
(y.setLimits, (11, 20), p.getGraphYLimits, (11, 20)),
(x.setLabel, "fooxx", p.getGraphXLabel, "fooxx"),
(y.setLabel, "fooyy", p.getGraphYLabel, "fooyy"),
(y.setInverted, False, p.isYAxisInverted, False),
(x.setScale, x.LINEAR, p.isXAxisLogarithmic, False),
(y.setScale, y.LINEAR, p.isYAxisLogarithmic, False),
(x.setAutoScale, True, p.isXAxisAutoScale, True),
(y.setAutoScale, True, p.isYAxisAutoScale, True),
]
for testCase in tests:
setter, value, getter, expected = testCase
with self.subTest():
if setter is not None:
if not isinstance(value, tuple):
value = (value, )
setter(*value)
if getter is not None:
self.assertEqual(getter(), expected)
def testOldPlotAxis_Logarithmic(self):
"""Test silx API prior to silx 0.6"""
x = self.plot.getXAxis()
y = self.plot.getYAxis()
yright = self.plot.getYAxis(axis="right")
self.assertEqual(x.getScale(), x.LINEAR)
self.assertEqual(y.getScale(), x.LINEAR)
self.assertEqual(yright.getScale(), x.LINEAR)
self.plot.setXAxisLogarithmic(True)
self.assertEqual(x.getScale(), x.LOGARITHMIC)
self.assertEqual(y.getScale(), x.LINEAR)
self.assertEqual(yright.getScale(), x.LINEAR)
self.assertEqual(self.plot.isXAxisLogarithmic(), True)
self.assertEqual(self.plot.isYAxisLogarithmic(), False)
self.plot.setYAxisLogarithmic(True)
self.assertEqual(x.getScale(), x.LOGARITHMIC)
self.assertEqual(y.getScale(), x.LOGARITHMIC)
self.assertEqual(yright.getScale(), x.LOGARITHMIC)
self.assertEqual(self.plot.isXAxisLogarithmic(), True)
self.assertEqual(self.plot.isYAxisLogarithmic(), True)
yright.setScale(yright.LINEAR)
self.assertEqual(x.getScale(), x.LOGARITHMIC)
self.assertEqual(y.getScale(), x.LINEAR)
self.assertEqual(yright.getScale(), x.LINEAR)
self.assertEqual(self.plot.isXAxisLogarithmic(), True)
self.assertEqual(self.plot.isYAxisLogarithmic(), False)
def testOldPlotAxis_AutoScale(self):
"""Test silx API prior to silx 0.6"""
x = self.plot.getXAxis()
y = self.plot.getYAxis()
yright = self.plot.getYAxis(axis="right")
self.assertEqual(x.isAutoScale(), True)
self.assertEqual(y.isAutoScale(), True)
self.assertEqual(yright.isAutoScale(), True)
self.plot.setXAxisAutoScale(False)
self.assertEqual(x.isAutoScale(), False)
self.assertEqual(y.isAutoScale(), True)
self.assertEqual(yright.isAutoScale(), True)
self.assertEqual(self.plot.isXAxisAutoScale(), False)
self.assertEqual(self.plot.isYAxisAutoScale(), True)
self.plot.setYAxisAutoScale(False)
self.assertEqual(x.isAutoScale(), False)
self.assertEqual(y.isAutoScale(), False)
self.assertEqual(yright.isAutoScale(), False)
self.assertEqual(self.plot.isXAxisAutoScale(), False)
self.assertEqual(self.plot.isYAxisAutoScale(), False)
yright.setAutoScale(True)
self.assertEqual(x.isAutoScale(), False)
self.assertEqual(y.isAutoScale(), True)
self.assertEqual(yright.isAutoScale(), True)
self.assertEqual(self.plot.isXAxisAutoScale(), False)
self.assertEqual(self.plot.isYAxisAutoScale(), True)
def testOldPlotAxis_Inverted(self):
"""Test silx API prior to silx 0.6"""
x = self.plot.getXAxis()
y = self.plot.getYAxis()
yright = self.plot.getYAxis(axis="right")
self.assertEqual(x.isInverted(), False)
self.assertEqual(y.isInverted(), False)
self.assertEqual(yright.isInverted(), False)
self.plot.setYAxisInverted(True)
self.assertEqual(x.isInverted(), False)
self.assertEqual(y.isInverted(), True)
self.assertEqual(yright.isInverted(), True)
self.assertEqual(self.plot.isYAxisInverted(), True)
yright.setInverted(False)
self.assertEqual(x.isInverted(), False)
self.assertEqual(y.isInverted(), False)
self.assertEqual(yright.isInverted(), False)
self.assertEqual(self.plot.isYAxisInverted(), False)
def testLogXWithData(self):
self.plot.setGraphTitle('Curve X: Log Y: Linear')
self.plot.addCurve(self.xData, self.yData,
legend="curve",
replace=False, resetzoom=True,
color='green', linestyle="-", symbol='o')
axis = self.plot.getXAxis()
axis.setScale(axis.LOGARITHMIC)
self.assertEqual(axis.getScale(), axis.LOGARITHMIC)
def testLogYWithData(self):
self.plot.setGraphTitle('Curve X: Linear Y: Log')
self.plot.addCurve(self.xData, self.yData,
legend="curve",
replace=False, resetzoom=True,
color='green', linestyle="-", symbol='o')
axis = self.plot.getYAxis()
axis.setScale(axis.LOGARITHMIC)
self.assertEqual(axis.getScale(), axis.LOGARITHMIC)
axis = self.plot.getYAxis(axis="right")
self.assertEqual(axis.getScale(), axis.LOGARITHMIC)
def testLogYRightWithData(self):
self.plot.setGraphTitle('Curve X: Linear Y: Log')
self.plot.addCurve(self.xData, self.yData,
legend="curve",
replace=False, resetzoom=True,
color='green', linestyle="-", symbol='o')
axis = self.plot.getYAxis(axis="right")
axis.setScale(axis.LOGARITHMIC)
self.assertEqual(axis.getScale(), axis.LOGARITHMIC)
axis = self.plot.getYAxis()
self.assertEqual(axis.getScale(), axis.LOGARITHMIC)
def testLimitsChanged_setLimits(self):
self.plot.addCurve(self.xData, self.yData,
legend="curve",
replace=False, resetzoom=False,
color='green', linestyle="-", symbol='o')
listener = SignalListener()
self.plot.getXAxis().sigLimitsChanged.connect(listener.partial(axis="x"))
self.plot.getYAxis().sigLimitsChanged.connect(listener.partial(axis="y"))
self.plot.getYAxis(axis="right").sigLimitsChanged.connect(listener.partial(axis="y2"))
self.plot.setLimits(0, 1, 0, 1, 0, 1)
# at least one event per axis
self.assertEqual(len(set(listener.karguments(argumentName="axis"))), 3)
def testLimitsChanged_resetZoom(self):
self.plot.addCurve(self.xData, self.yData,
legend="curve",
replace=False, resetzoom=False,
color='green', linestyle="-", symbol='o')
listener = SignalListener()
self.plot.getXAxis().sigLimitsChanged.connect(listener.partial(axis="x"))
self.plot.getYAxis().sigLimitsChanged.connect(listener.partial(axis="y"))
self.plot.getYAxis(axis="right").sigLimitsChanged.connect(listener.partial(axis="y2"))
self.plot.resetZoom()
# at least one event per axis
self.assertEqual(len(set(listener.karguments(argumentName="axis"))), 3)
def testLimitsChanged_setXLimit(self):
self.plot.addCurve(self.xData, self.yData,
legend="curve",
replace=False, resetzoom=False,
color='green', linestyle="-", symbol='o')
listener = SignalListener()
axis = self.plot.getXAxis()
axis.sigLimitsChanged.connect(listener)
axis.setLimits(20, 30)
# at least one event per axis
self.assertEqual(listener.arguments(callIndex=-1), (20.0, 30.0))
self.assertEqual(axis.getLimits(), (20.0, 30.0))
def testLimitsChanged_setYLimit(self):
self.plot.addCurve(self.xData, self.yData,
legend="curve",
replace=False, resetzoom=False,
color='green', linestyle="-", symbol='o')
listener = SignalListener()
axis = self.plot.getYAxis()
axis.sigLimitsChanged.connect(listener)
axis.setLimits(20, 30)
# at least one event per axis
self.assertEqual(listener.arguments(callIndex=-1), (20.0, 30.0))
self.assertEqual(axis.getLimits(), (20.0, 30.0))
def testLimitsChanged_setYRightLimit(self):
self.plot.addCurve(self.xData, self.yData,
legend="curve",
replace=False, resetzoom=False,
color='green', linestyle="-", symbol='o')
listener = SignalListener()
axis = self.plot.getYAxis(axis="right")
axis.sigLimitsChanged.connect(listener)
axis.setLimits(20, 30)
# at least one event per axis
self.assertEqual(listener.arguments(callIndex=-1), (20.0, 30.0))
self.assertEqual(axis.getLimits(), (20.0, 30.0))
def testScaleProxy(self):
listener = SignalListener()
y = self.plot.getYAxis()
yright = self.plot.getYAxis(axis="right")
y.sigScaleChanged.connect(listener.partial("left"))
yright.sigScaleChanged.connect(listener.partial("right"))
yright.setScale(yright.LOGARITHMIC)
self.assertEqual(y.getScale(), y.LOGARITHMIC)
events = listener.arguments()
self.assertEqual(len(events), 2)
self.assertIn(("left", y.LOGARITHMIC), events)
self.assertIn(("right", y.LOGARITHMIC), events)
def testAutoScaleProxy(self):
listener = SignalListener()
y = self.plot.getYAxis()
yright = self.plot.getYAxis(axis="right")
y.sigAutoScaleChanged.connect(listener.partial("left"))
yright.sigAutoScaleChanged.connect(listener.partial("right"))
yright.setAutoScale(False)
self.assertEqual(y.isAutoScale(), False)
events = listener.arguments()
self.assertEqual(len(events), 2)
self.assertIn(("left", False), events)
self.assertIn(("right", False), events)
def testInvertedProxy(self):
listener = SignalListener()
y = self.plot.getYAxis()
yright = self.plot.getYAxis(axis="right")
y.sigInvertedChanged.connect(listener.partial("left"))
yright.sigInvertedChanged.connect(listener.partial("right"))
yright.setInverted(True)
self.assertEqual(y.isInverted(), True)
events = listener.arguments()
self.assertEqual(len(events), 2)
self.assertIn(("left", True), events)
self.assertIn(("right", True), events)
def testAxesDisplayedFalse(self):
"""Test coverage on setAxesDisplayed(False)"""
self.plot.setAxesDisplayed(False)
def testAxesDisplayedTrue(self):
"""Test coverage on setAxesDisplayed(True)"""
self.plot.setAxesDisplayed(True)
class TestPlotCurveLog(PlotWidgetTestCase, ParametricTestCase):
"""Basic tests for addCurve with log scale axes"""
# Test data
xData = numpy.arange(1000) + 1
yData = xData ** 2
def _setLabels(self):
self.plot.getXAxis().setLabel('X')
self.plot.getYAxis().setLabel('X * X')
def testPlotCurveLogX(self):
self._setLabels()
self.plot.getXAxis()._setLogarithmic(True)
self.plot.setGraphTitle('Curve X: Log Y: Linear')
self.plot.addCurve(self.xData, self.yData,
legend="curve",
replace=False, resetzoom=True,
color='green', linestyle="-", symbol='o')
def testPlotCurveLogY(self):
self._setLabels()
self.plot.getYAxis()._setLogarithmic(True)
self.plot.setGraphTitle('Curve X: Linear Y: Log')
self.plot.addCurve(self.xData, self.yData,
legend="curve",
replace=False, resetzoom=True,
color='green', linestyle="-", symbol='o')
def testPlotCurveLogXY(self):
self._setLabels()
self.plot.getXAxis()._setLogarithmic(True)
self.plot.getYAxis()._setLogarithmic(True)
self.plot.setGraphTitle('Curve X: Log Y: Log')
self.plot.addCurve(self.xData, self.yData,
legend="curve",
replace=False, resetzoom=True,
color='green', linestyle="-", symbol='o')
def testPlotCurveErrorLogXY(self):
self.plot.getXAxis()._setLogarithmic(True)
self.plot.getYAxis()._setLogarithmic(True)
# Every second error leads to negative number
errors = numpy.ones_like(self.xData)
errors[::2] = self.xData[::2] + 1
tests = [ # name, xerror, yerror
('xerror=3', 3, None),
('xerror=N array', errors, None),
('xerror=Nx1 array', errors.reshape(len(errors), 1), None),
('xerror=2xN array', numpy.array((errors, errors)), None),
('yerror=6', None, 6),
('yerror=N array', None, errors ** 2),
('yerror=Nx1 array', None, (errors ** 2).reshape(len(errors), 1)),
('yerror=2xN array', None, numpy.array((errors, errors)) ** 2),
]
for name, xError, yError in tests:
with self.subTest(name):
self.plot.setGraphTitle(name)
self.plot.addCurve(self.xData, self.yData,
legend=name,
xerror=xError, yerror=yError,
replace=False, resetzoom=True,
color='green', linestyle="-", symbol='o')
self.qapp.processEvents()
self.plot.clear()
self.plot.resetZoom()
self.qapp.processEvents()
def testPlotCurveToggleLog(self):
"""Add a curve with negative data and toggle log axis"""
arange = numpy.arange(1000) + 1
tests = [ # name, xData, yData
('x>0, some negative y', arange, arange - 500),
('x>0, y<0', arange, -arange),
('some negative x, y>0', arange - 500, arange),
('x<0, y>0', -arange, arange),
('some negative x and y', arange - 500, arange - 500),
('x<0, y<0', -arange, -arange),
]
for name, xData, yData in tests:
with self.subTest(name):
self.plot.addCurve(xData, yData, resetzoom=True)
self.qapp.processEvents()
# no log axis
xLim = self.plot.getXAxis().getLimits()
self.assertEqual(xLim, (min(xData), max(xData)))
yLim = self.plot.getYAxis().getLimits()
self.assertEqual(yLim, (min(yData), max(yData)))
# x axis log
self.plot.getXAxis()._setLogarithmic(True)
self.qapp.processEvents()
xLim = self.plot.getXAxis().getLimits()
yLim = self.plot.getYAxis().getLimits()
positives = xData > 0
if numpy.any(positives):
self.assertTrue(numpy.allclose(
xLim, (min(xData[positives]), max(xData[positives]))))
self.assertEqual(
yLim, (min(yData[positives]), max(yData[positives])))
else: # No positive x in the curve
self.assertEqual(xLim, (1., 100.))
self.assertEqual(yLim, (1., 100.))
# x axis and y axis log
self.plot.getYAxis()._setLogarithmic(True)
self.qapp.processEvents()
xLim = self.plot.getXAxis().getLimits()
yLim = self.plot.getYAxis().getLimits()
positives = numpy.logical_and(xData > 0, yData > 0)
if numpy.any(positives):
self.assertTrue(numpy.allclose(
xLim, (min(xData[positives]), max(xData[positives]))))
self.assertTrue(numpy.allclose(
yLim, (min(yData[positives]), max(yData[positives]))))
else: # No positive x and y in the curve
self.assertEqual(xLim, (1., 100.))
self.assertEqual(yLim, (1., 100.))
# y axis log
self.plot.getXAxis()._setLogarithmic(False)
self.qapp.processEvents()
xLim = self.plot.getXAxis().getLimits()
yLim = self.plot.getYAxis().getLimits()
positives = yData > 0
if numpy.any(positives):
self.assertEqual(
xLim, (min(xData[positives]), max(xData[positives])))
self.assertTrue(numpy.allclose(
yLim, (min(yData[positives]), max(yData[positives]))))
else: # No positive y in the curve
self.assertEqual(xLim, (1., 100.))
self.assertEqual(yLim, (1., 100.))
# no log axis
self.plot.getYAxis()._setLogarithmic(False)
self.qapp.processEvents()
xLim = self.plot.getXAxis().getLimits()
self.assertEqual(xLim, (min(xData), max(xData)))
yLim = self.plot.getYAxis().getLimits()
self.assertEqual(yLim, (min(yData), max(yData)))
self.plot.clear()
self.plot.resetZoom()
self.qapp.processEvents()
class TestPlotImageLog(PlotWidgetTestCase):
"""Basic tests for addImage with log scale axes."""
def setUp(self):
super(TestPlotImageLog, self).setUp()
self.plot.getXAxis().setLabel('Columns')
self.plot.getYAxis().setLabel('Rows')
def testPlotColormapGrayLogX(self):
self.plot.getXAxis()._setLogarithmic(True)
self.plot.setGraphTitle('CMap X: Log Y: Linear')
colormap = Colormap(name='gray',
normalization='linear',
vmin=None,
vmax=None)
self.plot.addImage(DATA_2D, legend="image 1",
origin=(1., 1.), scale=(1., 1.),
resetzoom=False, colormap=colormap)
self.plot.resetZoom()
def testPlotColormapGrayLogY(self):
self.plot.getYAxis()._setLogarithmic(True)
self.plot.setGraphTitle('CMap X: Linear Y: Log')
colormap = Colormap(name='gray',
normalization='linear',
vmin=None,
vmax=None)
self.plot.addImage(DATA_2D, legend="image 1",
origin=(1., 1.), scale=(1., 1.),
resetzoom=False, colormap=colormap)
self.plot.resetZoom()
def testPlotColormapGrayLogXY(self):
self.plot.getXAxis()._setLogarithmic(True)
self.plot.getYAxis()._setLogarithmic(True)
self.plot.setGraphTitle('CMap X: Log Y: Log')
colormap = Colormap(name='gray',
normalization='linear',
vmin=None,
vmax=None)
self.plot.addImage(DATA_2D, legend="image 1",
origin=(1., 1.), scale=(1., 1.),
resetzoom=False, colormap=colormap)
self.plot.resetZoom()
def testPlotRgbRgbaLogXY(self):
self.plot.getXAxis()._setLogarithmic(True)
self.plot.getYAxis()._setLogarithmic(True)
self.plot.setGraphTitle('RGB + RGBA X: Log Y: Log')
rgb = numpy.array(
(((0, 0, 0), (128, 0, 0), (255, 0, 0)),
((0, 128, 0), (0, 128, 128), (0, 128, 256))),
dtype=numpy.uint8)
self.plot.addImage(rgb, legend="rgb",
origin=(1, 1), scale=(10, 10),
resetzoom=False)
rgba = numpy.array(
(((0, 0, 0, .5), (.5, 0, 0, 1), (1, 0, 0, .5)),
((0, .5, 0, 1), (0, .5, .5, 1), (0, 1, 1, .5))),
dtype=numpy.float32)
self.plot.addImage(rgba, legend="rgba",
origin=(5., 5.), scale=(10., 10.),
resetzoom=False)
self.plot.resetZoom()
class TestPlotMarkerLog(PlotWidgetTestCase):
"""Basic tests for markers on log scales"""
# Test marker parameters
markers = [ # x, y, color, selectable, draggable
(10., 10., 'blue', False, False),
(20., 20., 'red', False, False),
(40., 100., 'green', True, False),
(40., 500., 'gray', True, True),
(60., 800., 'black', False, True),
]
def setUp(self):
super(TestPlotMarkerLog, self).setUp()
self.plot.getYAxis().setLabel('Rows')
self.plot.getXAxis().setLabel('Columns')
self.plot.getXAxis().setAutoScale(False)
self.plot.getYAxis().setAutoScale(False)
self.plot.setKeepDataAspectRatio(False)
self.plot.setLimits(1., 100., 1., 1000.)
self.plot.getXAxis()._setLogarithmic(True)
self.plot.getYAxis()._setLogarithmic(True)
def testPlotMarkerXLog(self):
self.plot.setGraphTitle('Markers X, Log axes')
for x, _, color, select, drag in self.markers:
name = str(x)
if select:
name += " sel."
if drag:
name += " drag"
self.plot.addXMarker(x, name, name, color, select, drag)
self.plot.resetZoom()
def testPlotMarkerYLog(self):
self.plot.setGraphTitle('Markers Y, Log axes')
for _, y, color, select, drag in self.markers:
name = str(y)
if select:
name += " sel."
if drag:
name += " drag"
self.plot.addYMarker(y, name, name, color, select, drag)
self.plot.resetZoom()
def testPlotMarkerPtLog(self):
self.plot.setGraphTitle('Markers Pt, Log axes')
for x, y, color, select, drag in self.markers:
name = "{0},{1}".format(x, y)
if select:
name += " sel."
if drag:
name += " drag"
self.plot.addMarker(x, y, name, name, color, select, drag)
self.plot.resetZoom()
class TestPlotItemLog(PlotWidgetTestCase):
"""Basic tests for items with log scale axes"""
# Polygon coordinates and color
polygons = [ # legend, x coords, y coords, color
('triangle', numpy.array((10, 30, 50)),
numpy.array((55, 70, 55)), 'red'),
('square', numpy.array((10, 10, 50, 50)),
numpy.array((10, 50, 50, 10)), 'green'),
('star', numpy.array((60, 70, 80, 60, 80)),
numpy.array((25, 50, 25, 40, 40)), 'blue'),
]
    # Rectangle coordinates and color
rectangles = [ # legend, x coords, y coords, color
('square 1', numpy.array((1., 10.)),
numpy.array((1., 10.)), 'red'),
('square 2', numpy.array((10., 20.)),
numpy.array((10., 20.)), 'green'),
('square 3', numpy.array((20., 30.)),
numpy.array((20., 30.)), 'blue'),
('rect 1', numpy.array((1., 30.)),
numpy.array((35., 40.)), 'black'),
('line h', numpy.array((1., 30.)),
numpy.array((45., 45.)), 'darkRed'),
]
def setUp(self):
super(TestPlotItemLog, self).setUp()
self.plot.getYAxis().setLabel('Rows')
self.plot.getXAxis().setLabel('Columns')
self.plot.getXAxis().setAutoScale(False)
self.plot.getYAxis().setAutoScale(False)
self.plot.setKeepDataAspectRatio(False)
self.plot.setLimits(1., 100., 1., 100.)
self.plot.getXAxis()._setLogarithmic(True)
self.plot.getYAxis()._setLogarithmic(True)
def testPlotItemPolygonLogFill(self):
self.plot.setGraphTitle('Item Fill Log')
for legend, xList, yList, color in self.polygons:
self.plot.addItem(xList, yList, legend=legend,
replace=False,
shape="polygon", fill=True, color=color)
self.plot.resetZoom()
def testPlotItemPolygonLogNoFill(self):
self.plot.setGraphTitle('Item No Fill Log')
for legend, xList, yList, color in self.polygons:
self.plot.addItem(xList, yList, legend=legend,
replace=False,
shape="polygon", fill=False, color=color)
self.plot.resetZoom()
def testPlotItemRectangleLogFill(self):
self.plot.setGraphTitle('Rectangle Fill Log')
for legend, xList, yList, color in self.rectangles:
self.plot.addItem(xList, yList, legend=legend,
replace=False,
shape="rectangle", fill=True, color=color)
self.plot.resetZoom()
def testPlotItemRectangleLogNoFill(self):
self.plot.setGraphTitle('Rectangle No Fill Log')
for legend, xList, yList, color in self.rectangles:
self.plot.addItem(xList, yList, legend=legend,
replace=False,
shape="rectangle", fill=False, color=color)
self.plot.resetZoom()
def suite():
testClasses = (TestPlotWidget,
TestPlotImage,
TestPlotCurve,
TestPlotScatter,
TestPlotMarker,
TestPlotItem,
TestPlotAxes,
TestPlotActiveCurveImage,
TestPlotEmptyLog,
TestPlotCurveLog,
TestPlotImageLog,
TestPlotMarkerLog,
TestPlotItemLog)
test_suite = unittest.TestSuite()
# Tests with matplotlib
for testClass in testClasses:
test_suite.addTest(parameterize(testClass, backend=None))
test_suite.addTest(parameterize(TestSpecialBackend, backend=u"mpl"))
if sys.version_info[0] == 2:
test_suite.addTest(parameterize(TestSpecialBackend, backend=b"mpl"))
if test_options.WITH_GL_TEST:
# Tests with OpenGL backend
for testClass in testClasses:
test_suite.addTest(parameterize(testClass, backend='gl'))
return test_suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
py | 1a4f079259f3e66f00dd42d2e9e57d043f853cc3 | class GalleryInfo:
"""Meta-data about a gallery."""
def __init__(self, meta_data: dict):
self.meta_data = meta_data
@property
def title(self) -> str:
if "title" in self.meta_data:
return self.meta_data["title"]
|
py | 1a4f0876e2404ab4eb5abae22c4c2ec27f4f0d12 | # Copyright 2014-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for specifying BSON codec options."""
import datetime
from abc import abstractmethod
from collections import namedtuple
from bson.py3compat import ABC, abc, abstractproperty, string_type
from bson.binary import (ALL_UUID_REPRESENTATIONS,
PYTHON_LEGACY,
UUID_REPRESENTATION_NAMES)
_RAW_BSON_DOCUMENT_MARKER = 101
def _raw_document_class(document_class):
"""Determine if a document_class is a RawBSONDocument class."""
marker = getattr(document_class, '_type_marker', None)
return marker == _RAW_BSON_DOCUMENT_MARKER
class TypeEncoder(ABC):
"""Base class for defining type codec classes which describe how a
custom type can be transformed to one of the types BSON understands.
Codec classes must implement the ``python_type`` attribute, and the
``transform_python`` method to support encoding.
"""
@abstractproperty
def python_type(self):
"""The Python type to be converted into something serializable."""
pass
@abstractmethod
def transform_python(self, value):
"""Convert the given Python object into something serializable."""
pass
class TypeDecoder(ABC):
"""Base class for defining type codec classes which describe how a
BSON type can be transformed to a custom type.
Codec classes must implement the ``bson_type`` attribute, and the
``transform_bson`` method to support decoding.
"""
@abstractproperty
def bson_type(self):
"""The BSON type to be converted into our own type."""
pass
@abstractmethod
def transform_bson(self, value):
"""Convert the given BSON value into our own type."""
pass
class TypeCodec(TypeEncoder, TypeDecoder):
"""Base class for defining type codec classes which describe how a
custom type can be transformed to/from one of the types BSON already
understands, and can encode/decode.
Codec classes must implement the ``python_type`` attribute, and the
``transform_python`` method to support encoding, as well as the
``bson_type`` attribute, and the ``transform_bson`` method to support
decoding.
"""
pass
class TypeRegistry(object):
"""Encapsulates type codecs used in encoding and / or decoding BSON, as
well as the fallback encoder. Type registries cannot be modified after
instantiation.
``TypeRegistry`` can be initialized with an iterable of type codecs, and
a callable for the fallback encoder::
>>> from bson.codec_options import TypeRegistry
>>> type_registry = TypeRegistry([Codec1, Codec2, Codec3, ...],
... fallback_encoder)
:Parameters:
- `type_codecs` (optional): iterable of type codec instances. If
``type_codecs`` contains multiple codecs that transform a single
python or BSON type, the transformation specified by the type codec
occurring last prevails.
- `fallback_encoder` (optional): callable that accepts a single,
unencodable python value and transforms it into a type that BSON can
encode.
"""
def __init__(self, type_codecs=None, fallback_encoder=None):
self.__type_codecs = list(type_codecs or [])
self._fallback_encoder = fallback_encoder
self._encoder_map = {}
self._decoder_map = {}
if self._fallback_encoder is not None:
if not callable(fallback_encoder):
raise TypeError("fallback_encoder %r is not a callable" % (
fallback_encoder))
for codec in self.__type_codecs:
is_valid_codec = False
if isinstance(codec, TypeEncoder):
self._validate_type_encoder(codec)
is_valid_codec = True
self._encoder_map[codec.python_type] = codec.transform_python
if isinstance(codec, TypeDecoder):
is_valid_codec = True
self._decoder_map[codec.bson_type] = codec.transform_bson
if not is_valid_codec:
raise TypeError(
"Expected an instance of %s, %s, or %s, got %r instead" % (
TypeEncoder.__name__, TypeDecoder.__name__,
TypeCodec.__name__, codec))
def _validate_type_encoder(self, codec):
from bson import _BUILT_IN_TYPES
for pytype in _BUILT_IN_TYPES:
if issubclass(codec.python_type, pytype):
err_msg = ("TypeEncoders cannot change how built-in types are "
"encoded (encoder %s transforms type %s)" %
(codec, pytype))
raise TypeError(err_msg)
def __repr__(self):
return ('%s(type_codecs=%r, fallback_encoder=%r)' % (
self.__class__.__name__, self.__type_codecs,
self._fallback_encoder))
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return ((self._decoder_map == other._decoder_map) and
(self._encoder_map == other._encoder_map) and
(self._fallback_encoder == other._fallback_encoder))
_options_base = namedtuple(
'CodecOptions',
('document_class', 'tz_aware', 'uuid_representation',
'unicode_decode_error_handler', 'tzinfo', 'type_registry'))
class CodecOptions(_options_base):
"""Encapsulates options used encoding and / or decoding BSON.
The `document_class` option is used to define a custom type for use
decoding BSON documents. Access to the underlying raw BSON bytes for
a document is available using the :class:`~bson.raw_bson.RawBSONDocument`
type::
>>> from bson.raw_bson import RawBSONDocument
>>> from bson.codec_options import CodecOptions
>>> codec_options = CodecOptions(document_class=RawBSONDocument)
>>> coll = db.get_collection('test', codec_options=codec_options)
>>> doc = coll.find_one()
>>> doc.raw
'\\x16\\x00\\x00\\x00\\x07_id\\x00[0\\x165\\x91\\x10\\xea\\x14\\xe8\\xc5\\x8b\\x93\\x00'
The document class can be any type that inherits from
:class:`~collections.MutableMapping`::
>>> class AttributeDict(dict):
... # A dict that supports attribute access.
... def __getattr__(self, key):
... return self[key]
... def __setattr__(self, key, value):
... self[key] = value
...
>>> codec_options = CodecOptions(document_class=AttributeDict)
>>> coll = db.get_collection('test', codec_options=codec_options)
>>> doc = coll.find_one()
>>> doc._id
ObjectId('5b3016359110ea14e8c58b93')
See :doc:`/examples/datetimes` for examples using the `tz_aware` and
`tzinfo` options.
See :class:`~bson.binary.UUIDLegacy` for examples using the
`uuid_representation` option.
:Parameters:
- `document_class`: BSON documents returned in queries will be decoded
to an instance of this class. Must be a subclass of
:class:`~collections.MutableMapping`. Defaults to :class:`dict`.
- `tz_aware`: If ``True``, BSON datetimes will be decoded to timezone
aware instances of :class:`~datetime.datetime`. Otherwise they will be
naive. Defaults to ``False``.
- `uuid_representation`: The BSON representation to use when encoding
and decoding instances of :class:`~uuid.UUID`. Defaults to
:data:`~bson.binary.PYTHON_LEGACY`.
- `unicode_decode_error_handler`: The error handler to apply when
a Unicode-related error occurs during BSON decoding that would
otherwise raise :exc:`UnicodeDecodeError`. Valid options include
'strict', 'replace', and 'ignore'. Defaults to 'strict'.
- `tzinfo`: A :class:`~datetime.tzinfo` subclass that specifies the
timezone to/from which :class:`~datetime.datetime` objects should be
encoded/decoded.
- `type_registry`: Instance of :class:`TypeRegistry` used to customize
encoding and decoding behavior.
.. warning:: Care must be taken when changing
`unicode_decode_error_handler` from its default value ('strict').
The 'replace' and 'ignore' modes should not be used when documents
retrieved from the server will be modified in the client application
and stored back to the server.
"""
def __new__(cls, document_class=dict,
tz_aware=False, uuid_representation=PYTHON_LEGACY,
unicode_decode_error_handler="strict",
tzinfo=None, type_registry=None):
if not (issubclass(document_class, abc.MutableMapping) or
_raw_document_class(document_class)):
raise TypeError("document_class must be dict, bson.son.SON, "
"bson.raw_bson.RawBSONDocument, or a "
"sublass of collections.MutableMapping")
if not isinstance(tz_aware, bool):
raise TypeError("tz_aware must be True or False")
if uuid_representation not in ALL_UUID_REPRESENTATIONS:
raise ValueError("uuid_representation must be a value "
"from bson.binary.ALL_UUID_REPRESENTATIONS")
        # Note: the original check passed None inside the isinstance() type
        # tuple, which raises TypeError when the handler is actually None.
        if (unicode_decode_error_handler is not None and
                not isinstance(unicode_decode_error_handler, string_type)):
            raise ValueError("unicode_decode_error_handler must be a string "
                             "or None")
if tzinfo is not None:
if not isinstance(tzinfo, datetime.tzinfo):
raise TypeError(
"tzinfo must be an instance of datetime.tzinfo")
if not tz_aware:
raise ValueError(
"cannot specify tzinfo without also setting tz_aware=True")
type_registry = type_registry or TypeRegistry()
if not isinstance(type_registry, TypeRegistry):
raise TypeError("type_registry must be an instance of TypeRegistry")
return tuple.__new__(
cls, (document_class, tz_aware, uuid_representation,
unicode_decode_error_handler, tzinfo, type_registry))
def _arguments_repr(self):
"""Representation of the arguments used to create this object."""
document_class_repr = (
'dict' if self.document_class is dict
else repr(self.document_class))
uuid_rep_repr = UUID_REPRESENTATION_NAMES.get(self.uuid_representation,
self.uuid_representation)
return ('document_class=%s, tz_aware=%r, uuid_representation=%s, '
'unicode_decode_error_handler=%r, tzinfo=%r, '
'type_registry=%r' %
(document_class_repr, self.tz_aware, uuid_rep_repr,
self.unicode_decode_error_handler, self.tzinfo,
self.type_registry))
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, self._arguments_repr())
def with_options(self, **kwargs):
"""Make a copy of this CodecOptions, overriding some options::
>>> from bson.codec_options import DEFAULT_CODEC_OPTIONS
>>> DEFAULT_CODEC_OPTIONS.tz_aware
False
>>> options = DEFAULT_CODEC_OPTIONS.with_options(tz_aware=True)
>>> options.tz_aware
True
.. versionadded:: 3.5
"""
return CodecOptions(
kwargs.get('document_class', self.document_class),
kwargs.get('tz_aware', self.tz_aware),
kwargs.get('uuid_representation', self.uuid_representation),
kwargs.get('unicode_decode_error_handler',
self.unicode_decode_error_handler),
kwargs.get('tzinfo', self.tzinfo),
kwargs.get('type_registry', self.type_registry)
)
DEFAULT_CODEC_OPTIONS = CodecOptions()
def _parse_codec_options(options):
"""Parse BSON codec options."""
return CodecOptions(
document_class=options.get(
'document_class', DEFAULT_CODEC_OPTIONS.document_class),
tz_aware=options.get(
'tz_aware', DEFAULT_CODEC_OPTIONS.tz_aware),
uuid_representation=options.get(
'uuidrepresentation', DEFAULT_CODEC_OPTIONS.uuid_representation),
unicode_decode_error_handler=options.get(
'unicode_decode_error_handler',
DEFAULT_CODEC_OPTIONS.unicode_decode_error_handler),
tzinfo=options.get('tzinfo', DEFAULT_CODEC_OPTIONS.tzinfo),
type_registry=options.get(
'type_registry', DEFAULT_CODEC_OPTIONS.type_registry))
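# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): plugging a custom
# TypeCodec into a TypeRegistry and CodecOptions. DecimalCodec and the choice
# of decimal.Decimal are assumptions for demonstration only.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import decimal
    class DecimalCodec(TypeCodec):
        # Store decimal.Decimal values as BSON doubles and decode them back.
        python_type = decimal.Decimal
        bson_type = float
        def transform_python(self, value):
            return float(value)
        def transform_bson(self, value):
            return decimal.Decimal(value)
    demo_options = CodecOptions(
        type_registry=TypeRegistry([DecimalCodec()]))
    print(demo_options)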
|
py | 1a4f09ade43da4170af6b60198e5864a4f6dd52f | # Generated by Django 3.0.4 on 2020-05-07 15:48
import autoslug.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('product', '0013_auto_20200507_1846'),
]
operations = [
migrations.AlterField(
model_name='product',
name='slug',
field=autoslug.fields.AutoSlugField(blank=True, editable=False, populate_from='q'),
),
]
|
py | 1a4f09fb2d56c051dccbda200895b5ca87452e9d | from flask import url_for
def test_ping(client):
resp = client.get(url_for('main.ping'))
assert resp.status_code == 200
resp = resp.json
assert resp == {
'addition': {'msg': "it's alive!"},
'description': {},
'result': True,
'status': 200,
}
|
py | 1a4f0a3de5a0a6f87bdfe7c03f5deda0052f2907 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# ardour2fxp.py
#
"""Convert one or more Ardour VST presets XML file to VST2 FXP preset files."""
import argparse
import os
import sys
from base64 import b64decode
from collections import namedtuple
from os.path import exists, isdir, join
from struct import calcsize, pack
from xml.etree import ElementTree as ET
FXP_HEADER_FMT = '>4si4s4i28s'
FXP_PREAMBEL_SIZE = calcsize('>4si')
FXP_HEADER_SIZE = calcsize(FXP_HEADER_FMT)
FXP_FORMAT_VERSION = 1
CHUNK_MAGIC = b'CcnK'
FX_MAGIC_PARAMS = b'FxCk'
FX_MAGIC_CHUNK = b'FPCh'
FX_DEFAULT_VERSION = 1
PRESET_BASE_FIELDS = (
'plugin_id',
'plugin_version',
'hash',
'label',
'num_params',
)
ChunkPreset = namedtuple('ChunkPreset', PRESET_BASE_FIELDS + ('chunk',))
Preset = namedtuple('Preset', PRESET_BASE_FIELDS + ('params',))
def label2fn(label):
"""Replace characters in label unsuitable for filenames with underscore."""
return label.strip().replace(' ', '_')
def parse_ardourpresets(root):
"""Parse ardour VST presets XML document.
Returns list of Preset or ChunkPreset instances.
"""
if root.tag != 'VSTPresets':
raise ValueError("Root node must be 'VSTPresets'.")
presets = []
for preset in root:
if preset.tag not in ('Preset', 'ChunkPreset'):
print("Invalid preset type: {}".format(preset.tag))
continue
try:
type, plugin_id, hash = preset.attrib['uri'].split(':', 2)
plugin_id = int(plugin_id)
version = preset.attrib.get('version')
num_params = preset.attrib.get('numParams')
label = preset.attrib['label']
if version is not None:
version = int(version)
if num_params is not None:
num_params = int(num_params)
if type != "VST":
raise ValueError
except (KeyError, ValueError):
print("Invalid preset format: {}".format(preset.attrib))
continue
if preset.tag == 'Preset':
params = {int(param.attrib['index']): param.attrib['value']
for param in preset}
params = [float(value) for _, value in sorted(params.items())]
presets.append(Preset(plugin_id, version, hash, label, num_params,
params))
elif preset.tag == 'ChunkPreset':
presets.append(ChunkPreset(plugin_id, version, hash, label,
num_params, b64decode(preset.text)))
return presets
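# For reference, the document shape this parser expects, reconstructed from
# the parsing logic above (ids, hashes and values are made up; the tag name
# of a Preset's child elements is not checked, only their index/value
# attributes are read):
#
#   <VSTPresets>
#     <Preset uri="VST:1094861636:x1a2b" label="Warm Pad" numParams="2">
#       <Parameter index="0" value="0.5"/>
#       <Parameter index="1" value="0.25"/>
#     </Preset>
#     <ChunkPreset uri="VST:1094861636:x1a2b" label="Bright Lead"
#                  numParams="16">BASE64DATA...</ChunkPreset>
#   </VSTPresets>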
def main(args=None):
argparser = argparse.ArgumentParser()
argparser.add_argument('-v', '--fx-version', type=int,
help="VST plugin version number")
argparser.add_argument('-f', '--force', action="store_true",
help="Overwrite existing destination file(s)")
argparser.add_argument('-o', '--output-dir',
help="Ardour presets output directory")
argparser.add_argument('infiles', nargs='*', metavar='XML',
help="Ardour VST presets XML (input) file(s)")
args = argparser.parse_args(args)
output_dir = args.output_dir or os.getcwd()
if not args.infiles:
argparser.print_help()
return 2
for infile in args.infiles:
try:
root_node = ET.parse(infile).getroot()
presets = parse_ardourpresets(root_node)
except Exception as exc:
return "Error reading Ardour preset file '{}': {}".format(
infile, exc)
if not presets:
return "No valid presets found in input file(s)."
for preset in presets:
plugin_id = pack('>I', preset.plugin_id).decode('ascii')
dstdir = join(output_dir, plugin_id)
if not isdir(dstdir):
os.makedirs(dstdir)
fxp_fn = join(dstdir, label2fn(preset.label)) + '.fxp'
if exists(fxp_fn) and not args.force:
print("FXP output file '{}' already exists. Skipping".format(
fxp_fn))
continue
with open(fxp_fn, 'wb') as fp:
if args.fx_version is not None:
fx_version = args.fx_version
elif preset.plugin_version is not None:
fx_version = preset.plugin_version
else:
fx_version = FX_DEFAULT_VERSION
if isinstance(preset, Preset):
if preset.num_params is None:
num_params = len(preset.params)
else:
num_params = preset.num_params
params_fmt = '>{:d}f'.format(num_params)
size = (FXP_HEADER_SIZE - FXP_PREAMBEL_SIZE +
calcsize(params_fmt))
fx_magic = FX_MAGIC_PARAMS
elif isinstance(preset, ChunkPreset):
if preset.num_params is None:
num_params = int(len(preset.chunk) / 4)
else:
num_params = preset.num_params
chunk_len = len(preset.chunk)
chunk_size = pack('>i', chunk_len)
size = (FXP_HEADER_SIZE - FXP_PREAMBEL_SIZE +
len(chunk_size) + chunk_len)
fx_magic = FX_MAGIC_CHUNK
else:
raise TypeError("Wrong preset type: {!r}".format(preset))
header = pack(
FXP_HEADER_FMT,
CHUNK_MAGIC,
size,
fx_magic,
FXP_FORMAT_VERSION,
preset.plugin_id,
fx_version,
num_params,
preset.label.encode('latin1', errors='replace')
)
fp.write(header)
if isinstance(preset, Preset):
data = pack(params_fmt, *preset.params)
fp.write(data)
elif isinstance(preset, ChunkPreset):
fp.write(chunk_size)
fp.write(preset.chunk)
if __name__ == '__main__':
sys.exit(main() or 0)
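# Example invocations (hypothetical file names), based on the argparse
# options defined in main() above:
#   python ardour2fxp.py --output-dir fxp-presets/ ardour-vst-presets.xml
#   python ardour2fxp.py -f -v 2 -o fxp-presets/ presets-a.xml presets-b.xml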
|
py | 1a4f0ab1cb6b0b498d29c0fcb7875eefd0a6eef6 | l = int(input())
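# Halve the value until it drops below 2; count grows by 2 per halving, so
# this prints 4 ** floor(log2(l)) for integer l >= 1 (and 1 for smaller inputs).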
count = 0
while l >= 2:
l = l/2
count += 2
print(2**count) |
py | 1a4f0ac700f38c8788cf5b18a50ac9e3c1156e4d | try:
import _thread
except ModuleNotFoundError:
_thread = None
class Producer:
"""
Uses a list instead of a set to ensure correct ordering of subscriptions.
Does not allow lambda functions to be used.
    :param name: name of the producer
    :param validation: a function that receives the arguments passed to emit
        and checks their values/types, raising a ValueError on invalid input
    :param as_threads: option to run handlers as separate threads
    :raises NotImplementedError: if this MicroPython build does not provide
        _thread and as_threads is set to True
"""
def __init__(self, *args, name=None, validation=None, as_threads=False):
if as_threads and not _thread:
raise NotImplementedError(
'threading is not available in this distribution')
self.__handlers = []
self.__name = name
self.__validation = validation
self.__as_threads = as_threads
# private methods
def _add_handler(self, handler_func):
if handler_func in self.__handlers:
raise ValueError('handler is already subscribed.')
self.__handlers.append(handler_func)
return self
def _remove_handler(self, handler_func):
if not handler_func in self.__handlers:
raise ValueError('handler is not subscribed to producer')
self.__handlers.remove(handler_func)
return self
# public methods
def subscribe(self, handler_func):
"""
Subscribe a function as a callback to the producer.
        :param handler_func: a callback function that will be invoked
            when a value is sent to the emit method; it cannot be a lambda.
        :raises ValueError: if the handler is a lambda or already subscribed.
"""
if handler_func.__name__ == '<lambda>':
raise ValueError('handler cannot be a lambda function')
return self._add_handler(handler_func)
def unsubscribe(self, handler_func):
"""
Unsubscribe a callback from the producer.
        :raises ValueError: if the handler is not subscribed to the producer.
"""
return self._remove_handler(handler_func)
def emit(self, *args, **kwargs):
"""
Send arguments and keyword arguments to subscribed functions.
Arguments are first passed through the validation function and then
passed sequentially to each subscribed callback.
        If as_threads is set to True, callbacks are started as separate threads.
"""
if self.__validation:
self.__validation(*args, **kwargs)
for handler in self.__handlers:
if self.__as_threads and _thread:
_thread.start_new_thread(handler, args, kwargs)
else:
handler(*args, **kwargs)
# datamodel methods
def __repr__(self):
return "Producer(%s)" % self.__name
def __len__(self):
return len(self.__handlers)
__call__ = emit
__iadd__ = subscribe
__isub__ = unsubscribe
|
py | 1a4f0b201a9e8589a7ba4284f5026c1e21d45f19 | # From https://github.com/taki0112/ResNet-Tensorflow.
import tensorflow as tf
import tensorflow.contrib as tf_contrib
weight_init = tf_contrib.layers.variance_scaling_initializer()
weight_regularizer = tf_contrib.layers.l2_regularizer(0.0001)
def conv(x, channels, kernel=4, stride=2, padding='SAME', use_bias=True, scope='conv_0'):
with tf.variable_scope(scope):
x = tf.layers.conv2d(inputs=x, filters=channels,
kernel_size=kernel, kernel_initializer=weight_init,
kernel_regularizer=weight_regularizer,
strides=stride, use_bias=use_bias, padding=padding)
return x
def fully_conneted(x, units, use_bias=True, scope='fully_0'):
with tf.variable_scope(scope):
x = flatten(x)
x = tf.layers.dense(x, units=units, kernel_initializer=weight_init,
kernel_regularizer=weight_regularizer, use_bias=use_bias)
return x
def resblock(x_init, channels, is_training=True, use_bias=True, downsample=False, scope='resblock'):
with tf.variable_scope(scope):
x = batch_norm(x_init, is_training, scope='batch_norm_0')
x = relu(x)
if downsample:
x = conv(x, channels, kernel=3, stride=2,
use_bias=use_bias, scope='conv_0')
x_init = conv(x_init, channels, kernel=1, stride=2,
use_bias=use_bias, scope='conv_init')
else:
x = conv(x, channels, kernel=3, stride=1,
use_bias=use_bias, scope='conv_0')
x = batch_norm(x, is_training, scope='batch_norm_1')
x = relu(x)
x = conv(x, channels, kernel=3, stride=1,
use_bias=use_bias, scope='conv_1')
return x + x_init
def bottle_resblock(x_init, channels, is_training=True, use_bias=True, downsample=False, scope='bottle_resblock'):
with tf.variable_scope(scope):
x = batch_norm(x_init, is_training, scope='batch_norm_1x1_front')
shortcut = relu(x)
x = conv(shortcut, channels, kernel=1, stride=1,
use_bias=use_bias, scope='conv_1x1_front')
x = batch_norm(x, is_training, scope='batch_norm_3x3')
x = relu(x)
if downsample:
x = conv(x, channels, kernel=3, stride=2,
use_bias=use_bias, scope='conv_0')
shortcut = conv(shortcut, channels*4, kernel=1,
stride=2, use_bias=use_bias, scope='conv_init')
else:
x = conv(x, channels, kernel=3, stride=1,
use_bias=use_bias, scope='conv_0')
shortcut = conv(shortcut, channels * 4, kernel=1,
stride=1, use_bias=use_bias, scope='conv_init')
x = batch_norm(x, is_training, scope='batch_norm_1x1_back')
x = relu(x)
x = conv(x, channels*4, kernel=1, stride=1,
use_bias=use_bias, scope='conv_1x1_back')
return x + shortcut
def get_residual_layer(res_n):
x = []
if res_n == 18:
x = [2, 2, 2, 2]
if res_n == 34:
x = [3, 4, 6, 3]
if res_n == 50:
x = [3, 4, 6, 3]
if res_n == 101:
x = [3, 4, 23, 3]
if res_n == 152:
x = [3, 8, 36, 3]
return x
def flatten(x):
return tf.layers.flatten(x)
def global_avg_pooling(x):
gap = tf.reduce_mean(x, axis=[1, 2], keepdims=True)
return gap
def avg_pooling(x):
return tf.layers.average_pooling2d(x, pool_size=2, strides=2, padding='SAME')
def relu(x):
return tf.nn.relu(x)
def batch_norm(x, is_training=True, scope='batch_norm'):
return tf_contrib.layers.batch_norm(x,
decay=0.9, epsilon=1e-05,
center=True, scale=True, updates_collections=None,
is_training=is_training, scope=scope)
def classification_loss(logit, label):
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits_v2(labels=label, logits=logit))
prediction = tf.equal(tf.argmax(logit, -1), tf.argmax(label, -1))
accuracy = tf.reduce_mean(tf.cast(prediction, tf.float32))
return loss, accuracy
def classification_loss_weighted(logit, label):
loss = tf.reduce_mean(tf.nn.weighted_cross_entropy_with_logits(
targets=label, logits=logit, pos_weight=2))
# cost1 = tf.reduce_mean(tf.nn.weighted_cross_entropy_with_logits(targets=y, logits=pred,pos_weight=1))
prediction = tf.equal(tf.argmax(logit, -1), tf.argmax(label, -1))
accuracy = tf.reduce_mean(tf.cast(prediction, tf.float32))
return loss, accuracy
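# ---------------------------------------------------------------------------
# Illustrative sketch (not from the original repository): assembling a small
# pre-activation ResNet classifier from the helpers above. build_resnet,
# inputs and num_classes are assumed names; for res_n >= 50 the
# bottle_resblock helper would be used instead of resblock.
# ---------------------------------------------------------------------------
def build_resnet(inputs, num_classes, res_n=18, is_training=True):
    blocks = get_residual_layer(res_n)  # e.g. [2, 2, 2, 2] for ResNet-18
    ch = 64
    x = conv(inputs, ch, kernel=3, stride=1, scope='conv_stem')
    for stage, n_blocks in enumerate(blocks):
        for i in range(n_blocks):
            # Downsample (stride 2) on the first block of every stage after the first.
            downsample = (i == 0 and stage > 0)
            x = resblock(x, ch * (2 ** stage), is_training=is_training,
                         downsample=downsample,
                         scope='resblock_%d_%d' % (stage, i))
    x = batch_norm(x, is_training, scope='batch_norm_last')
    x = relu(x)
    x = global_avg_pooling(x)
    return fully_conneted(x, num_classes, scope='logit')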
|
py | 1a4f0b2d07727f3ba621953308bca08b39a3469a | import copy
import numpy
import logging
from six.moves import xrange
import theano
from theano import tensor, scalar, gof
from theano.compile import optdb
from theano.compile.ops import shape_i
from theano.gof import (local_optimizer, EquilibriumDB,
SequenceDB, Optimizer, toolbox)
from theano.gof.optdb import LocalGroupDB
from theano.scalar.basic import Scalar, Pow, Cast
from theano.scan_module import scan_utils, scan_op, scan_opt
from theano.tensor.nnet.conv import ConvOp
from theano.tests.breakpoint import PdbBreakpoint
from .type import GpuArrayType, GpuArrayConstant, get_context
from .basic_ops import (as_gpuarray_variable, infer_context_name,
host_from_gpu, GpuToGpu,
HostFromGpu, GpuFromHost,
GpuSplit, GpuContiguous,
GpuAlloc, GpuAllocEmpty, GpuReshape,
GpuEye, gpu_join, GpuJoin)
from .blas import (gpu_dot22, GpuGemv, GpuGemm, GpuGer,
gpugemm_no_inplace)
from .conv import GpuConv
from .nnet import (GpuCrossentropySoftmaxArgmax1HotWithBias,
GpuCrossentropySoftmax1HotWithBiasDx,
GpuSoftmaxWithBias, GpuSoftmax)
from .elemwise import (GpuElemwise, GpuDimShuffle, GpuCAReduceCuda,
GpuCAReduceCPY)
from .subtensor import (GpuIncSubtensor, GpuSubtensor,
GpuAdvancedSubtensor1,
GpuAdvancedIncSubtensor1,
GpuAdvancedIncSubtensor1_dev20)
from .opt_util import alpha_merge, output_merge
_logger = logging.getLogger("theano.sandbox.gpuarray.opt")
gpu_optimizer = EquilibriumDB()
gpu_cut_copies = EquilibriumDB()
gpu_seqopt = SequenceDB()
# Don't register this right now
conv_groupopt = LocalGroupDB()
conv_groupopt.__name__ = "gpua_conv_opts"
gpu_seqopt.register('gpuarray_local_optimiziations', gpu_optimizer, 1,
'fast_compile', 'fast_run', 'inplace', 'gpuarray')
gpu_seqopt.register('gpuarray_cut_transfers', gpu_cut_copies, 2,
'fast_compile', 'fast_run', 'gpuarray')
# do not add 'fast_run' to these two as this would always enable gpuarray mode
optdb.register('gpuarray_opt', gpu_seqopt,
optdb.__position__.get('add_destroy_handler', 49.5) - 1,
'gpuarray')
def register_opt(*tags, **kwargs):
def f(local_opt):
name = (kwargs and kwargs.pop('name')) or local_opt.__name__
gpu_optimizer.register(name, local_opt, 'fast_run', 'gpuarray', *tags)
return local_opt
return f
register_opt('fast_compile')(theano.tensor.opt.local_track_shape_i)
gpu_optimizer.register('local_remove_all_assert',
theano.tensor.opt.local_remove_all_assert,
'unsafe')
def safe_to_gpu(x, ctx_name):
if isinstance(x.type, tensor.TensorType):
return GpuFromHost(ctx_name)(x)
else:
return x
def safe_to_cpu(x):
if isinstance(x.type, GpuArrayType):
return host_from_gpu(x)
else:
return x
def op_lifter(OP, cuda_only=False):
"""
OP(..., host_from_gpu(), ...) -> host_from_gpu(GpuOP(...))
gpu_from_host(OP(inp0, ...)) -> GpuOP(inp0, ...)
"""
def f(maker):
def local_opt(node):
if type(node.op) in OP:
# Either one of our inputs is on the gpu or
# all of our clients are on the gpu
replace = False
# TODO: Maybe set context_name with infer_context_name()?
context_name = None
# We replace if any input is a host_from_gpu
for i in node.inputs:
if i.owner and i.owner.op == host_from_gpu:
context_name = i.owner.inputs[0].type.context_name
replace = True
break
if not replace:
# We replace if *all* clients are on the GPU
clients = [c for o in node.outputs for c in o.clients]
replace = len(clients) != 0
for c, idx in clients:
if (c == 'output' or
not isinstance(c.op, GpuFromHost)):
replace = False
# TODO: check that the clients want the same context?
if replace:
# All clients are GpuFromHost and we have at least one
context_name = clients[0][0].op.context_name
# Check if we should replace
if (not replace or
(cuda_only and
get_context(context_name).kind != 'cuda')):
return False
new_op = maker(node, context_name)
# This is needed as sometimes new_op inherits from OP.
if new_op and new_op != node.op:
if isinstance(new_op, theano.Op):
# tag the inputs with the context in case
# the context was derived from the outputs
def tag(i, ctx):
i.tag.context_name = ctx
return i
inputs = [tag(i, context_name) for i in node.inputs]
return [safe_to_cpu(o) for o in
new_op(*inputs, return_list=True)]
elif isinstance(new_op, (tuple, list)):
return [safe_to_cpu(o) for o in new_op]
else: # suppose it is a variable on the GPU
return [host_from_gpu(new_op)]
return False
local_opt.__name__ = maker.__name__
return local_optimizer(OP)(local_opt)
return f
class InputToGpuOptimizer(Optimizer):
"""
Transfer the input to the gpu to start the rolling wave.
"""
def add_requirements(self, fgraph):
fgraph.attach_feature(toolbox.ReplaceValidate())
def apply(self, fgraph):
for input in fgraph.inputs:
if isinstance(input.type, GpuArrayType):
continue
if (len(input.clients) == 1 and
(input.clients[0][0] == 'output' or
isinstance(input.clients[0][0].op, GpuFromHost))):
continue
ctx_name = getattr(input.tag, 'context_name', None)
try:
new_input = host_from_gpu(GpuFromHost(ctx_name)(input))
fgraph.replace_validate(input, new_input,
"InputToGpuOptimizer")
except TypeError:
# This could fail if the inputs are not TensorTypes
pass
except ValueError:
# If there is no context tag and no default context
# then it stays on the CPU
if not hasattr(input.tag, 'context_name'):
raise
pass
gpu_seqopt.register('InputToGpuArrayOptimizer', InputToGpuOptimizer(),
0, 'fast_run', 'fast_compile', 'merge')
@local_optimizer([GpuFromHost, GpuToGpu, host_from_gpu])
def local_cut_gpu_transfers(node):
# gpu[ab] -> host -> gpub
if (isinstance(node.op, GpuFromHost) and
node.inputs[0].owner and
node.inputs[0].owner.op == host_from_gpu):
other = node.inputs[0].owner.inputs[0]
if node.op.context_name == other.type.context_name:
return [other]
else:
return [GpuToGpu(node.op.context_name)(other)]
# ? -> gpua -> host
elif (node.op == host_from_gpu and
node.inputs[0].owner):
n2 = node.inputs[0].owner
# host ->
if isinstance(n2.op, GpuFromHost):
return [n2.inputs[0]]
# gpub ->
if isinstance(n2.op, GpuToGpu):
return [host_from_gpu(n2.inputs[0])]
# ? -> gpua -> gpub
elif isinstance(node.op, GpuToGpu):
# Transfer within same context
if node.inputs[0].type.context_name == node.op.context_name:
return [node.inputs[0]]
if node.inputs[0].owner:
n2 = node.inputs[0].owner
# host ->
if isinstance(n2.op, GpuFromHost):
return [GpuFromHost(node.op.context_name)(n2.inputs[0])]
# gpuc ->
if isinstance(n2.op, GpuToGpu):
if node.op.context_name == n2.inputs[0].type.context_name:
return [n2.inputs[0]]
else:
return [node.op(n2.inputs[0])]
gpu_cut_copies.register('cut_gpua_host_transfers', local_cut_gpu_transfers,
'fast_compile', 'fast_run', 'inplace', 'gpuarray')
gpu_cut_copies.register('cut_gpua_constant_transfers',
tensor.opt.constant_folding,
'fast_compile', 'fast_run', 'gpuarray')
optdb['canonicalize'].register('local_cut_gpua_host_gpua',
local_cut_gpu_transfers,
'fast_compile', 'fast_run', 'gpuarray')
@register_opt('fast_compile')
@local_optimizer([tensor.Alloc])
def local_gpuaalloc2(node):
"""
Join(axis, {Alloc or HostFromGPU}, ...) -> Join(axis, GpuAlloc, Alloc, ...)
Moves an alloc that is an input to join to the gpu.
"""
try:
get_context(None)
except ValueError:
# If there is no default context then we do not perform the move here.
return
if (isinstance(node.op, tensor.Alloc) and
all(c != 'output' and
c.op == tensor.join and
all(i.owner and
i.owner.op in [host_from_gpu, tensor.alloc]
for i in c.inputs[1:])
for c, idx in node.outputs[0].clients)):
return [host_from_gpu(GpuAlloc(None)(*node.inputs))]
@register_opt('fast_compile')
@op_lifter([tensor.Alloc])
def local_gpuaalloc(node, context_name):
return GpuAlloc(context_name)(*node.inputs)
@register_opt()
@local_optimizer([GpuAlloc])
def local_gpualloc_memset_0(node):
if isinstance(node.op, GpuAlloc) and not node.op.memset_0:
inp = node.inputs[0]
if (isinstance(inp, GpuArrayConstant) and
inp.data.size == 1 and
(numpy.asarray(inp.data) == 0).all()):
new_op = GpuAlloc(node.op.context_name, memset_0=True)
return [new_op(*node.inputs)]
@register_opt()
@local_optimizer([GpuContiguous])
def local_gpu_contiguous_gpu_contiguous(node):
"""
gpu_contiguous(gpu_contiguous(x)) -> gpu_contiguous(x)
"""
if isinstance(node.op, GpuContiguous):
inp = node.inputs[0]
if inp.owner and isinstance(inp.owner.op, GpuContiguous):
return [inp]
@register_opt('fast_compile')
@op_lifter([tensor.Reshape])
def local_gpureshape(node, context_name):
op = node.op
name = op.name
if name:
name = 'Gpu' + name
res = GpuReshape(op.ndim, name)
return res
@register_opt('fast_compile')
@op_lifter([tensor.Rebroadcast])
def local_gpu_rebroadcast(node, context_name):
if isinstance(node.inputs[0].owner.op, HostFromGpu):
return node.op(node.inputs[0].owner.inputs[0])
@register_opt('fast_compile')
@op_lifter([tensor.Flatten])
def local_gpuflatten(node, context_name):
op = node.op
shp = []
if op.outdim != 1:
shp = [node.inputs[0].shape[i] for i in range(op.outdim - 1)]
shp += [-1]
res = GpuReshape(op.outdim, None)
o = res(node.inputs[0], theano.tensor.as_tensor_variable(shp))
return o
@register_opt('fast_compile')
@op_lifter([tensor.Elemwise])
def local_gpu_elemwise(node, context_name):
op = node.op
scal_op = op.scalar_op
name = op.name
if name:
name = 'Gpu' + name
if len(node.outputs) > 1:
return
res = GpuElemwise(scal_op, name=name,
inplace_pattern=copy.copy(op.inplace_pattern),
nfunc_spec=op.nfunc_spec)
# If the elemwise operation is a pow, casts might be required on the
# inputs and/or outputs because only the (float, float)->float and
# (double, double)->double cases are implemented at the moment.
if isinstance(op.scalar_op, Pow):
# Only transfer the computation on the gpu if the output dtype is
# floating point. Else, give up on the transfer to the gpu.
out_dtype = node.outputs[0].dtype
if out_dtype not in ['float16', 'float32', 'float64']:
return
# Transfer the inputs on the GPU and cast them to the right dtype.
new_inputs = []
for inp in node.inputs:
if inp.dtype != out_dtype:
gpu_cast_op = GpuElemwise(Cast(Scalar(out_dtype)))
new_inputs.append(gpu_cast_op(as_gpuarray_variable(inp)))
else:
new_inputs.append(as_gpuarray_variable(inp))
# Perform the exponent on the gpu and transfer the output back to the
# cpu.
gpu_output = res(*new_inputs)
cpu_output = host_from_gpu(gpu_output)
return [cpu_output]
else:
return res
def max_inputs_to_GpuElemwise(node):
ptr_size = 8
int_size = 4
# we take the limit from CUDA for now
argument_limit = 232
ndim = node.inputs[0].type.ndim
# number of elements and shape
size_param_mandatory = (int_size * (ndim + 1)) + \
(ptr_size + int_size * ndim) * len(node.outputs)
nb_bytes_avail = argument_limit - size_param_mandatory
nb_bytes_per_input = ptr_size + ndim * int_size
max_nb_inputs = nb_bytes_avail // nb_bytes_per_input
return max_nb_inputs
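# Worked example of the limit above (figures assumed for illustration): for a
# node with one 2-d input and one 2-d output, size_param_mandatory is
# 4 * (2 + 1) + (8 + 4 * 2) * 1 = 28 bytes, leaving 232 - 28 = 204 bytes, and
# each additional input costs 8 + 2 * 4 = 16 bytes, so at most 204 // 16 = 12
# inputs fit before the fused GpuElemwise would exceed the CUDA argument limit.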
gpu_local_elemwise_fusion = tensor.opt.local_elemwise_fusion_op(
GpuElemwise,
max_inputs_to_GpuElemwise)
optdb.register('gpua_elemwise_fusion',
tensor.opt.FusionOptimizer(gpu_local_elemwise_fusion), 71.00,
'fast_run', 'fusion', 'local_elemwise_fusion', 'gpuarray')
inplace_gpu_elemwise_opt = tensor.opt.inplace_elemwise_optimizer_op(
GpuElemwise)
optdb.register('gpua_inplace_opt', inplace_gpu_elemwise_opt, 75,
'inplace_elemwise_optimizer', 'fast_run', 'inplace', 'gpuarray')
@register_opt('fast_compile')
@op_lifter([tensor.DimShuffle])
def local_gpua_dimshuffle(node, context_name):
return GpuDimShuffle(node.op.input_broadcastable,
node.op.new_order)
@register_opt('fast_compile')
@op_lifter([tensor.SpecifyShape])
def local_gpua_specifyShape(node, context_name):
if isinstance(node.inputs[0].type, GpuArrayType):
return
inp = [GpuFromHost(context_name)(node.inputs[0])] + node.inputs[1:]
return tensor.specify_shape(*inp)
@register_opt('fast_compile')
@op_lifter([theano.compile.ops.Shape])
def local_gpua_shape(node, context_name):
# op_lifter will call this opt too frequently as the output is
# always on the CPU.
if isinstance(node.inputs[0].type, GpuArrayType):
return
return [GpuFromHost(context_name)(node.inputs[0]).shape]
def gpu_print_wrapper(op, cnda):
op.old_op.global_fn(op.old_op, numpy.asarray(cnda))
@register_opt('fast_compile')
@op_lifter([tensor.printing.Print])
def local_gpu_print_op(node, context_name):
x, = node.inputs
gpu_x, = x.owner.inputs
new_op = node.op.__class__(global_fn=gpu_print_wrapper)
new_op.old_op = node.op
return new_op(gpu_x)
@register_opt('fast_compile')
@local_optimizer([PdbBreakpoint])
def local_gpu_pdbbreakpoint_op(node):
if isinstance(node.op, PdbBreakpoint):
old_inputs = node.inputs
old_outputs = node.outputs
new_inputs = node.inputs[:1]
input_transfered = []
# Go through the monitored variables, transferring to the GPU only
# those for which the input comes from the GPU or whose output will
# be transferred to the GPU.
nb_monitored_vars = len(node.outputs)
for i in range(nb_monitored_vars):
inp = old_inputs[i + 1]
out = old_outputs[i]
input_is_from_gpu = (inp.owner and
isinstance(inp.owner.op, HostFromGpu))
output_goes_to_gpu = False
for c in out.clients:
if c == 'output':
continue
if isinstance(c[0].op, GpuFromHost):
output_goes_to_gpu = True
context_name = c[0].op.context_name
break
if input_is_from_gpu:
# The op should be applied on the GPU version of the input
new_inputs.append(inp.owner.inputs[0])
input_transfered.append(True)
elif output_goes_to_gpu:
# The input should be transferred to the gpu
new_inputs.append(GpuFromHost(context_name)(inp))
input_transfered.append(True)
else:
# No transfer is required.
new_inputs.append(inp)
input_transfered.append(False)
# Only continue the optimization if at least one input has been
# transferred to the gpu
if not any(input_transfered):
return False
# Apply the op on the new inputs
new_op_outputs = node.op(*new_inputs, return_list=True)
# Propagate the transfer to the gpu through the outputs that require
# it
new_outputs = []
for i in range(len(new_op_outputs)):
if input_transfered[i]:
new_outputs.append(host_from_gpu(new_op_outputs[i]))
else:
new_outputs.append(new_op_outputs[i])
return new_outputs
return False
@register_opt('fast_compile')
@op_lifter([tensor.Join])
def local_gpua_join(node, context_name):
return gpu_join
@register_opt('fast_compile')
@local_optimizer([GpuJoin])
def local_gpuajoin_1(node):
# join of a single element
if (isinstance(node.op, GpuJoin) and
len(node.inputs) == 2):
return [node.inputs[1]]
@register_opt('fast_compile')
@op_lifter([tensor.Split])
def local_gpua_split(node, context_name):
return GpuSplit(node.op.len_splits)
@register_opt('fast_compile')
@op_lifter([tensor.Subtensor])
def local_gpua_subtensor(node, context_name):
x = node.inputs[0]
if (x.owner and isinstance(x.owner.op, HostFromGpu)):
gpu_x = x.owner.inputs[0]
if (gpu_x.owner and
isinstance(gpu_x.owner.op, GpuFromHost) and
# And it is a shared var or an input of the graph.
not gpu_x.owner.inputs[0].owner):
if len(x.clients) == 1:
if any([n == 'output' or any([isinstance(v.type, GpuArrayType)
for v in n.inputs + n.outputs])
for n, _ in node.outputs[0].clients]):
return
else:
return [host_from_gpu(gpu_x.owner.op(node.outputs[0]))]
return GpuSubtensor(node.op.idx_list)
@register_opt('fast_compile')
@op_lifter([tensor.IncSubtensor])
def local_gpua_incsubtensor(node, context_name):
return GpuIncSubtensor(node.op.idx_list, node.op.inplace,
node.op.set_instead_of_inc,
node.op.destroyhandler_tolerate_aliased)
@register_opt('fast_compile')
@op_lifter([tensor.AdvancedSubtensor1])
def local_gpua_advanced_subtensor(node, context_name):
return GpuAdvancedSubtensor1()
@register_opt('fast_compile')
@op_lifter([tensor.AdvancedIncSubtensor1])
def local_gpua_advanced_incsubtensor(node, context_name):
# This is disabled on non-cuda contexts
if get_context(context_name).kind != 'cuda':
return None
x, y, ilist = node.inputs
# Gpu Ops needs both inputs to have the same dtype
if (x.type.dtype != y.type.dtype):
dtype = scalar.upcast(x.type.dtype, y.type.dtype)
if x.type.dtype != dtype:
x = tensor.cast(x, dtype)
if y.type.dtype != dtype:
y = tensor.cast(y, dtype)
set_instead_of_inc = node.op.set_instead_of_inc
active_device_no = theano.sandbox.cuda.active_device_number()
device_properties = theano.sandbox.cuda.device_properties
compute_capability = device_properties(active_device_no)['major']
if (compute_capability < 2 or x.ndim != 2 or y.ndim != 2):
return [GpuAdvancedIncSubtensor1(
set_instead_of_inc=set_instead_of_inc)(x, y, ilist)]
else:
return [GpuAdvancedIncSubtensor1_dev20(
set_instead_of_inc=set_instead_of_inc)(x, y, ilist)]
@register_opt('fast_compile')
@op_lifter([tensor.CAReduce, tensor.Sum, tensor.elemwise.Prod])
def local_gpua_careduce(node, context_name):
if isinstance(node.op.scalar_op, (scalar.Add, scalar.Mul,
scalar.Maximum, scalar.Minimum)):
ctx = get_context(context_name)
if ctx.kind == 'opencl':
op = GpuCAReduceCPY
if node.op.scalar_op not in [scalar.add, scalar.mul]:
# We don't yet support all reductions with the cpy code.
return
elif ctx.kind == 'cuda':
op = GpuCAReduceCuda
else:
return False
x, = node.inputs
greduce = op(
node.op.scalar_op, axis=node.op.axis,
dtype=getattr(node.op, 'dtype', None),
acc_dtype=getattr(node.op, 'acc_dtype', None))
gvar = greduce(x)
# We need make_node to have been called, otherwise the mask can
# be None
if (op is GpuCAReduceCPY or
gvar.owner.op.supports_c_code([GpuFromHost(context_name)(x)])):
return greduce
else:
# Try to make a simpler pattern based on reshaping
# The principle is that if two adjacent dimensions have
# the same value in the reduce_mask, then we can reshape
# to make them a single dimension, do the reduction, and
# then reshape to get them back.
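# For example (illustration only): reducing a 4-d input over axes (1, 2)
# gives reduce_mask [0, 1, 1, 0]; the two adjacent 1s are merged, the input
# is reshaped to [d0, d1 * d2, d3], reduced with the new mask [0, 1, 0], and
# the result is reshaped back to the expected output shape.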
if node.op.axis is None:
reduce_mask = [1] * x.type.ndim
else:
reduce_mask = [0] * x.type.ndim
for a in node.op.axis:
assert reduce_mask[a] == 0
reduce_mask[a] = 1
shape_of = node.fgraph.shape_feature.shape_of
x_shape = shape_of[x]
new_in_shp = [x_shape[0]]
new_mask = [reduce_mask[0]]
for i in xrange(1, x.type.ndim):
if reduce_mask[i] == reduce_mask[i - 1]:
new_in_shp[-1] *= x_shape[i]
else:
new_mask.append(reduce_mask[i])
new_in_shp.append(x_shape[i])
new_axis = []
for idx, m in enumerate(new_mask):
if m == 1:
new_axis.append(idx)
greduce = op(
node.op.scalar_op,
axis=new_axis, reduce_mask=new_mask,
dtype=getattr(node.op, 'dtype', None),
acc_dtype=getattr(node.op, 'acc_dtype', None))
reshaped_x = x.reshape(tensor.stack(new_in_shp))
gpu_reshaped_x = GpuFromHost(context_name)(reshaped_x)
gvar = greduce(gpu_reshaped_x)
# We need make_node to have been called, otherwise the mask can
# be None
reshaped_gpu_inputs = [gpu_reshaped_x]
if greduce.supports_c_code(reshaped_gpu_inputs):
reduce_reshaped_x = host_from_gpu(
greduce(gpu_reshaped_x))
if reduce_reshaped_x.ndim != node.outputs[0].ndim:
unreshaped_reduce = reduce_reshaped_x.reshape(
tensor.stack(shape_of[node.outputs[0]]))
else:
unreshaped_reduce = reduce_reshaped_x
return [unreshaped_reduce]
@register_opt('fast_compile')
@op_lifter([tensor.blas.Gemv, tensor.blas_c.CGemv])
def local_gpua_gemv(node, context_name):
return GpuGemv(inplace=node.op.inplace)
@register_opt('fast_compile')
@op_lifter([tensor.blas.Gemm])
def local_gpua_gemm(node, context_name):
return GpuGemm(inplace=node.op.inplace)
@register_opt('fast_compile')
@op_lifter([tensor.basic.Dot])
def local_gpua_hgemm(node, context_name):
from theano.sandbox.cuda import nvcc_compiler
if nvcc_compiler.nvcc_version < '7.5':
_logger.warning("Not performing dot of float16 on the GPU since "
"cuda 7.5 is not available. Updating could speed up "
"your code.")
return
A = node.inputs[0]
B = node.inputs[1]
if (A.ndim == 2 and B.ndim == 2 and
A.dtype == 'float16' and B.dtype == 'float16'):
fgraph = node.inputs[0].fgraph
C = GpuAllocEmpty(dtype='float16', context_name=context_name)(
shape_i(A, 0, fgraph),
shape_i(B, 1, fgraph))
return gpugemm_no_inplace(C, 1.0, A, B, 0.0)
@register_opt()
@alpha_merge(GpuGemm, alpha_in=1, beta_in=4)
def local_gpuagemm_alpha_merge(node, *inputs):
return [gpugemm_no_inplace(*inputs)]
@register_opt()
@output_merge(GpuGemm, alpha_in=1, beta_in=4, out_in=0)
def local_gpuagemm_output_merge(node, *inputs):
return [gpugemm_no_inplace(*inputs)]
@register_opt('fast_compile')
@op_lifter([tensor.blas.Ger, tensor.blas_c.CGer, tensor.blas_scipy.ScipyGer])
def local_gpua_ger(node, context_name):
return GpuGer(inplace=node.op.destructive)
@register_opt('fast_compile')
@op_lifter([tensor.blas.Dot22])
def local_gpua_dot22(node, context_name):
return gpu_dot22
@register_opt('fast_compile')
@op_lifter([tensor.basic.Eye])
def local_gpua_eye(node, context_name):
return GpuEye(dtype=node.op.dtype, context_name=context_name)
@register_opt('fast_compile')
@op_lifter([tensor.nnet.CrossentropySoftmaxArgmax1HotWithBias], cuda_only=True)
def local_gpua_crossentropysoftmaxargmax1hotwithbias(node, context_name):
return GpuCrossentropySoftmaxArgmax1HotWithBias()
@register_opt('fast_compile')
@op_lifter([tensor.nnet.CrossentropySoftmax1HotWithBiasDx], cuda_only=True)
def local_gpua_crossentropysoftmax1hotwithbiasdx(node, context_name):
return GpuCrossentropySoftmax1HotWithBiasDx()
@register_opt('fast_compile')
@op_lifter([tensor.nnet.Softmax], cuda_only=True)
def local_gpua_softmax(node, context_name):
return GpuSoftmax()
@register_opt('fast_compile')
@op_lifter([tensor.nnet.SoftmaxWithBias], cuda_only=True)
def local_gpua_softmaxwithbias(node, context_name):
return GpuSoftmaxWithBias()
@register_opt('fast_compile')
@op_lifter([theano.tensor.opt.Assert])
def local_assert(node, context_name):
if (node.inputs[0].owner and
isinstance(node.inputs[0].owner.op, HostFromGpu)):
return [host_from_gpu(node.op(node.inputs[0].owner.inputs[0],
*node.inputs[1:]))]
@register_opt('fast_compile')
@op_lifter([ConvOp])
def local_gpu_conv(node, context_name):
def GpuConvOp_from_ConvOp(op):
logical_img_hw = None
if op.kshp_logical is not None and op.kshp_logical != op.kshp:
return None
ret = GpuConv(border_mode=op.out_mode,
subsample=(op.dx, op.dy),
logical_img_hw=logical_img_hw,
logical_kern_hw=op.kshp_logical,
logical_kern_align_top=op.kshp_logical_top_aligned,
kshp=op.kshp,
version=op.version,
direction_hint=op.direction_hint,
verbose=op.verbose,
imshp=op.imshp,
nkern=op.nkern,
bsize=op.bsize,
fft_opt=op.fft_opt)
if op.imshp_logical is not None:
logical_img_hw = op.imshp_logical[1:3]
if logical_img_hw != op.imshp[1:3]:
rstride = int(numpy.ceil(op.imshp_logical[1] /
float(op.imshp[1])))
cstride = int(numpy.ceil(op.imshp_logical[2] /
float(op.imshp[2])))
def make_graph(img, kern):
buf = tensor.alloc(numpy.asarray(0, dtype=img.dtype),
img.shape[0], *op.imshp_logical)
img = tensor.set_subtensor(buf[:, :, ::rstride, ::cstride],
img)
img = GpuFromHost(context_name)(img)
return ret(img, kern)
return make_graph
return ret
def values_eq_approx(a, b):
"""
This function is needed so that DebugMode does not raise useless
errors due to rounding error.
Such errors can happen because we reduce over the two last dimensions,
so the absolute error can grow when the number of elements we
reduce over is significant.
"""
assert a.ndim == 4
atol = None
if a.shape[-1] * a.shape[-2] > 100:
# For float32 the default atol is 1e-5
atol = 3e-5
return GpuArrayType.values_eq_approx(a, b, atol=atol)
img, kern = node.inputs
gpu_conv = GpuConvOp_from_ConvOp(node.op)
if gpu_conv is None:
return
out = gpu_conv(GpuFromHost(context_name)(img),
GpuFromHost(context_name)(kern))
assert isinstance(out.type, GpuArrayType)
# Make sure to keep the broadcastable pattern of the original
# convolution even if we might gain or lose some due to different
# information at the node level.
out = tensor.patternbroadcast(out, node.outputs[0].broadcastable)
out.values_eq_approx = values_eq_approx
return [out]
# Register this here so that it goes after 'local_gpu_conv'
register_opt()(conv_groupopt)
@register_opt("low_memory")
@local_optimizer([GpuCAReduceCuda])
def local_gpu_elemwise_careduce(node):
"""
Merge some GpuCAReduceCuda and GpuElemwise nodes.
"""
if (isinstance(node.op, GpuCAReduceCuda) and
node.op.pre_scalar_op is None and
node.inputs[0].owner and
isinstance(node.inputs[0].owner.op, GpuElemwise) and
# The op supports any scalar op with one input. We don't
# automatically add more cases, as some (e.g. trigonometric
# operations combined with certain reduction patterns) would
# probably result in a slowdown.
isinstance(node.inputs[0].owner.op.scalar_op, scalar.basic.Sqr)):
op = node.op
inp = node.inputs[0].owner.inputs[0]
return [GpuCAReduceCuda(scalar_op=op.scalar_op,
reduce_mask=op.reduce_mask,
pre_scalar_op=scalar.basic.sqr)(inp)]
def tensor_to_gpu(x, context_name):
if isinstance(x.type, tensor.TensorType):
y = GpuArrayType(broadcastable=x.type.broadcastable,
context_name=context_name,
dtype=x.type.dtype)()
if x.name:
y.name = x.name + '[Gpua]'
return y
else:
return x
def gpu_safe_new(x, tag=''):
"""
Internal function that constructs a new variable from x with the same
type, but with a different name (old name + tag). This function is used
by gradient, or the R-op to construct new variables for the inputs of
the inner graph such that there is no interference between the original
graph and the newly constructed graph.
"""
if hasattr(x, 'name') and x.name is not None:
nw_name = x.name + tag
else:
nw_name = None
if isinstance(x, theano.Constant):
return x.clone()
nw_x = x.type()
nw_x.name = nw_name
return nw_x
def gpu_reconstruct_graph(inputs, outputs, tag=None):
"""
Different interface to clone, that allows you to pass inputs.
Compared to clone, this method always replaces the inputs with
new variables of the same type, and returns those (in the same
order as the original inputs).
"""
if tag is None:
tag = ''
nw_inputs = [gpu_safe_new(x, tag) for x in inputs]
givens = {}
for nw_x, x in zip(nw_inputs, inputs):
givens[x] = nw_x
nw_outputs = scan_utils.clone(outputs, replace=givens)
return (nw_inputs, nw_outputs)
@register_opt('scan', 'fast_compile')
@op_lifter([scan_op.Scan])
def local_scan_to_gpua(node, context_name):
info = copy.deepcopy(node.op.info)
if info.get('gpua', False):
return
info['gpua'] = True
nw_ins = [node.inputs[0]]
e = (1 +
node.op.n_seqs +
node.op.n_mit_mot +
node.op.n_mit_sot +
node.op.n_sit_sot +
node.op.n_shared_outs)
nw_ins += [safe_to_gpu(x, context_name) for x in node.inputs[1:e]]
b = e
e = e + node.op.n_nit_sot
nw_ins += node.inputs[b:e]
nw_ins += [safe_to_gpu(x, context_name) for x in node.inputs[e:]]
scan_ins = [tensor_to_gpu(x, context_name) for x in node.op.inputs]
# The inner output corresponding to the looping condition should not be
# moved to the gpu
if node.op.info['as_while']:
scan_outs = [safe_to_gpu(x, context_name) for x in node.op.outputs[:-1]]
scan_outs += [node.op.outputs[-1]]
else:
scan_outs = [safe_to_gpu(x, context_name) for x in node.op.outputs]
scan_outs = scan_utils.clone(
scan_outs,
replace=list(zip(node.op.inputs,
(safe_to_cpu(x) for x in scan_ins))))
# We need to construct the hash here, because scan
# __init__ does not know about the gpu and can not
# handle graphs with inputs being on the gpu
tmp_in, tmp_out = gpu_reconstruct_graph(scan_ins, scan_outs)
local_fgraph = gof.FunctionGraph(tmp_in, tmp_out, clone=True)
_cmodule_key = gof.CLinker().cmodule_key_(local_fgraph, [])
info['gpu_hash'] = hash(_cmodule_key)
def typebuild(dtype, broadcastable, context_name=context_name):
return GpuArrayType(dtype=dtype, broadcastable=broadcastable,
context_name=context_name)
nw_op = scan_op.Scan(scan_ins, scan_outs, info,
typeConstructor=typebuild).make_node(*nw_ins)
return nw_op.outputs
def _scan_type_infer(node):
context_name = infer_context_name(*node.inputs)
def typebuild(dtype, broadcastable, context_name=context_name):
return GpuArrayType(dtype=dtype, broadcastable=broadcastable,
context_name=context_name)
return typebuild
optdb.register('gpua_scanOp_make_inplace',
scan_opt.ScanInplaceOptimizer(typeInfer=_scan_type_infer,
gpua_flag=True),
75,
'gpuarray',
'fast_run',
'inplace',
'scan')
|
py | 1a4f0b9a06494cd19d5a1bb41ad1c213c32b42c9 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .test_fof_groups import *
|
py | 1a4f0bbce5fa3e0d1a8ca820cf13a531c3877567 | #! env/bin/python3.6
# -*- coding: utf8 -*-
"""Инициализация пакета API версии 0."""
|
py | 1a4f0c720375563f7b834611e03f6c3565112dda | import requests
from lxml import html
url = 'https://info.urfu.ru/ru/departures/kafedry/'
# Fetch the page source
response = requests.get(url)
# Parse the document body into an element tree
parsed_body = html.fromstring(response.text)
# Collect all elements with the 'course-box' class
course_boxes = parsed_body.find_class('course-box')
# Create an empty list to collect the department names
departments = []
for box in course_boxes:
# Get the <a> element inside the box
link = box.find('a')
# Get the <p> element inside the link
text = link.find('p')
# Append the department name to the prepared list
departments.append(text.text_content())
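# Simple sanity check of the scrape: print the collected department names.
for department in departments:
    print(department)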
|
py | 1a4f0cda88a52630967e7e9677976b618e4aa8e6 | # Test name = Settings
# Script dir = R:\Stingray\Tests\Settings\09-activation\09-activation.py
from time import sleep
from device import handler, updateTestResult
import RC
import UART
import DO
import GRAB
import MOD
import os
from DO import status
def runTest():
status("active")
TestName = "Settings"
ScriptName = "09-activation"
ScriptIndex = "9"
Grabber = DO.grab_define()
platform = DO.load_platform()
Modulation = "DVBS"
FEC = "3/4"
SR = "27500000"
Stream = "\\X_0000_00000_MUX_32000_EPG_Software_20130328a.ts"
Frequency = 1476
Modulator = "1"
COM = "COM7"
settings = [ScriptName, ScriptIndex, Grabber, Modulation, FEC, SR, Stream, Frequency, Modulator, COM]
DO.save_settings(settings)
GRAB.start_capture()
MOD.stop(Modulator)
# macros
searching_from_wizard_general_E501 = ["ok 1 3400", "ok 1 3400", "ok 1 3400", "right 1 3400", "ok 1 3400", "ok 1 22200", "ok 1 15000", "ok 1 10000", "exit 2 3000"]
searching_from_wizard_general_english_E501 = ["up 2 3400", "right 1 1000", "down 2 3400", "ok 1 3400", "ok 1 3400", "ok 1 3400", "right 1 3400", "ok 1 3400", "ok 1 22200", "ok 1 15000", "ok 1 10000", "exit 2 3000"]
searching_from_wizard_centre_E501 = ["ok 1 3400", "ok 1 3400", "ok 1 3400", "right 1 3400", "ok 1 3400", "ok 1 22200", "down", "ok 1 15000", "ok 1 10000", "exit 2 3000"]
searching_from_wizard_centre_english_E501 = ["up 3 3400", "right 1 1000", "down 3 3400", "ok 1 3400", "ok 1 3400", "ok 1 3400", "right", "ok 1 3400", "ok 1 22200", "ok 1 10000", "exit 2 3000"]
searching_from_wizard_south_E501 = ["ok 1 3400", "ok 1 3400", "ok 1 3400", "right 1 3400", "ok 1 3400", "ok 1 22200", "down", "down", "ok 1 15000", "ok 1 10000", "exit 2 3000"]
searching_from_wizard_general_ALL = ["ok 1 3400", "ok 1 3400", "right 1 3400", "ok 1 3400", "ok 1 22200", "ok 1 15000", "ok 1 10000", "exit 2 3000"]
searching_from_wizard_general_english_ALL = ["up 2 3400", "right 1 1000", "down 2 3400", "ok 1 3400", "ok 1 3400", "right", "ok 1 3400", "ok 1 22200", "ok 1 15000", "ok 1 10000", "exit 2 3000"]
searching_from_wizard_centre_ALL = ["ok 1 3400", "ok 1 3400", "right 1 3400", "ok 1 3400", "ok 1 22200", "down", "ok 1 5000", "ok 1 10000", "exit 2 3000"]
searching_from_wizard_centre_english_ALL = ["up 3 3400", "right 1 1000", "down 3 3400", "ok 1 3400", "ok 1 3400", "right", "ok 1 3400", "ok 1 22200", "down 1 1000", "ok 1 15000", "ok 1 10000", "exit 2 3000"]
searching_from_wizard_south_ALL = ["ok 1 3400", "ok 1 3400", "right 1 3400", "ok 1 3400", "ok 1 22200", "down", "down", "ok 1 15000", "ok 1 10000", "exit 2 3000"]
load_regions_E501 = ["ok 1 3400", "ok 1 3400", "ok 1 3400", "right 1 3400", "ok 1 3400", "ok 1 22200"]
load_regions_english_E501 = ["up 2 2400", "right 1 1000", "down 2 2400", "ok 1 3400", "ok 1 3400", "ok 1 3400", "right", "ok 1 3400", "ok 1 22200"]
load_regions_ALL = ["ok 1 3400", "ok 1 3400", "right 1 3400", "ok 1 3400", "ok 1 22200"]
load_regions_english_ALL = ["up 2 2400", "right 1 1000", "down 2 2400", "ok 1 3400", "ok 1 3400", "right", "ok 1 3400", "ok 1 22200"]
############################ TestCase 1 ##########################################
testcase = 1
status("active")
MOD.play_stream(Modulation, FEC, SR, Stream, Frequency, Modulator)
UART.default_settings()
if platform in ["E501", "E502", "A230"]:
RC.push(searching_from_wizard_general_E501)
else:
RC.push(searching_from_wizard_general_ALL)
UART.start_app("settings")
RC.push(["right 7 3000"])
GRAB.compare(testcase)
############################ TestCase 2 ##########################################
testcase = 2
status("active")
GRAB.compare(testcase)
############################ TestCase 3 ##########################################
testcase = 3
status("active")
GRAB.compare(testcase)
############################ TestCase 4 ##########################################
testcase = 4
status("active")
GRAB.compare(testcase)
############################ TestCase 5 ##########################################
testcase = 5
status("active")
GRAB.compare(testcase)
############################ TestCase 6 ##########################################
testcase = 6
status("active")
RC.push(["down 1 2000", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9"])
GRAB.compare(testcase)
############################ TestCase 7 ##########################################
testcase = 7
status("active")
RC.push(["left"])
GRAB.compare(testcase)
############################ TestCase 8 ##########################################
testcase = 8
status("active")
RC.push(["left 10 3000"])
RC.push(["2 4 2000", "1 16 2000"])
GRAB.compare(testcase)
############################ TestCase 9 ##########################################
testcase = 9
status("active")
GRAB.compare(testcase)
############################ TestCase 10 ##########################################
testcase = 10
status("active")
GRAB.compare(testcase)
############################ TestCase 11 ##########################################
testcase = 11
status("active")
RC.push(["1 4 2000", "2 4 2000", "3 4 2000", "4 4 2000", "5 4 2000"])
GRAB.compare(testcase)
############################ TestCase 12 ##########################################
testcase = 12
status("active")
RC.push(["1", "5", "1 18 2000"])
GRAB.compare(testcase)
############################ TestCase 13 ##########################################
testcase = 13
status("active")
GRAB.compare(testcase)
############################ TestCase 14 ##########################################
testcase = 14
status("active")
GRAB.compare(testcase)
############################ TestCase 15 ##########################################
testcase = 15
status("active")
GRAB.compare(testcase)
############################ TestCase 16 ##########################################
testcase = 16
status("active")
RC.push(["4", "3", "2", "1", "1 16 2000"])
GRAB.compare(testcase)
############################ TestCase 17 ##########################################
testcase = 17
status("active")
GRAB.compare(testcase)
############################ TestCase 18 ##########################################
testcase = 18
status("active")
RC.push(["exit 4 2000", "menu 1 3000", "ok 1 3000"])
GRAB.compare(testcase)
############################ TestCase 19 ##########################################
testcase = 19
status("active")
RC.push(["right 1 2000", "down 1 2000", "ok 1 5000"])
GRAB.compare(testcase)
###################################################################################
status("active")
MOD.stop(Modulator)
GRAB.stop_capture()
|
py | 1a4f0d6933702a5c11ed61853a6c3ca2d23e48d3 | #!/usr/bin/env python
import os
import requests
import json
import datetime
import shutil
from bs4 import BeautifulSoup
here = os.path.dirname(os.path.abspath(__file__))
hospital_id = os.path.basename(here)
url ='https://www.huntsvillehospital.org/price-transparency'
today = datetime.datetime.today().strftime('%Y-%m-%d')
outdir = os.path.join(here, today)
if not os.path.exists(outdir):
os.mkdir(outdir)
prefix = "https://www.huntsvillehospital.org"
response = requests.get(url)
soup = BeautifulSoup(response.text, 'lxml')
# Each folder will have a list of records
records = []
for entry in soup.find_all('a', href=True):
download_url = prefix + entry['href']
if '.csv' in download_url:
filename = os.path.basename(download_url.split('?')[0])
output_file = os.path.join(outdir, filename)
os.system('wget -O "%s" "%s"' % (output_file, download_url))
record = { 'hospital_id': hospital_id,
'filename': filename,
'date': today,
'uri': filename,
'name': filename,
'url': download_url }
records.append(record)
# Keep json record of all files included
records_file = os.path.join(outdir, 'records.json')
with open(records_file, 'w') as filey:
filey.write(json.dumps(records, indent=4))
# This folder is also latest.
latest = os.path.join(here, 'latest')
if os.path.exists(latest):
shutil.rmtree(latest)
shutil.copytree(outdir, latest)
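# Optional sanity check (not required for the scrape itself): re-read the
# manifest written above and report how many price files were captured.
with open(records_file) as fh:
    print('captured %d files for hospital %s' % (len(json.load(fh)), hospital_id))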
|
py | 1a4f0e290ceba2aa05eb2f1a4afe4e84409f2c42 | """Integration tests for :mod:`esmvalcore._recipe_checks`."""
from typing import Any, List
from unittest import mock
import pytest
import esmvalcore._recipe_checks as check
ERR_ALL = 'Looked for files matching%s'
ERR_D = ('Looked for files in %s, but did not find any file pattern to match '
'against')
ERR_F = ('Looked for files matching %s, but did not find any existing input '
'directory')
ERR_RANGE = 'No input data available for years {} in files {}'
VAR = {
'filename': 'a/c.nc',
'frequency': 'mon',
'short_name': 'tas',
'start_year': 2020,
'end_year': 2025,
'alias': 'alias',
}
FX_VAR = {
'filename': 'a/b.nc',
'frequency': 'fx',
'short_name': 'areacella',
}
FILES = [
'a/b/c_20200101-20201231',
'a/b/c_20210101-20211231',
'a/b/c_20220101-20221231',
'a/b/c_20230101-20231231',
'a/b/c_20240101-20241231',
'a/b/c_20250101-20251231',
]
DATA_AVAILABILITY_DATA = [
(FILES, dict(VAR), None),
(FILES, dict(FX_VAR), None),
(FILES[:-1], dict(VAR), ERR_RANGE.format('2025', FILES[:-1])),
(FILES[:-2], dict(VAR), ERR_RANGE.format('2024, 2025', FILES[:-2])),
([FILES[1]] + [FILES[3]], dict(VAR),
ERR_RANGE.format('2020, 2022, 2024, 2025', [FILES[1]] + [FILES[3]])),
]
@pytest.mark.parametrize('input_files,var,error', DATA_AVAILABILITY_DATA)
@mock.patch('esmvalcore._recipe_checks.logger', autospec=True)
def test_data_availability_data(mock_logger, input_files, var, error):
"""Test check for data when data is present."""
saved_var = dict(var)
if error is None:
check.data_availability(input_files, var, None, None)
mock_logger.error.assert_not_called()
else:
with pytest.raises(check.RecipeError) as rec_err:
check.data_availability(input_files, var, None, None)
assert str(rec_err.value) == error
assert var == saved_var
DATA_AVAILABILITY_NO_DATA: List[Any] = [
([], [], None),
([], None, None),
(None, [], None),
(None, None, None),
(['dir1'], [], (ERR_D, ['dir1'])),
(['dir1', 'dir2'], [], (ERR_D, ['dir1', 'dir2'])),
(['dir1'], None, (ERR_D, ['dir1'])),
(['dir1', 'dir2'], None, (ERR_D, ['dir1', 'dir2'])),
([], ['a*.nc'], (ERR_F, ['a*.nc'])),
([], ['a*.nc', 'b*.nc'], (ERR_F, ['a*.nc', 'b*.nc'])),
(None, ['a*.nc'], (ERR_F, ['a*.nc'])),
(None, ['a*.nc', 'b*.nc'], (ERR_F, ['a*.nc', 'b*.nc'])),
(['1'], ['a'], (ERR_ALL, ': 1/a')),
(['1'], ['a', 'b'], (ERR_ALL, '\n1/a\n1/b')),
(['1', '2'], ['a'], (ERR_ALL, '\n1/a\n2/a')),
(['1', '2'], ['a', 'b'], (ERR_ALL, '\n1/a\n1/b\n2/a\n2/b')),
]
@pytest.mark.parametrize('dirnames,filenames,error', DATA_AVAILABILITY_NO_DATA)
@mock.patch('esmvalcore._recipe_checks.logger', autospec=True)
def test_data_availability_no_data(mock_logger, dirnames, filenames, error):
"""Test check for data when no data is present."""
var = dict(VAR)
var_no_filename = {
'frequency': 'mon',
'short_name': 'tas',
'start_year': 2020,
'end_year': 2025,
'alias': 'alias',
}
error_first = ('No input files found for variable %s', var_no_filename)
error_last = ("Set 'log_level' to 'debug' to get more information", )
with pytest.raises(check.RecipeError) as rec_err:
check.data_availability([], var, dirnames, filenames)
assert str(rec_err.value) == 'Missing data for alias: tas'
if error is None:
assert mock_logger.error.call_count == 2
errors = [error_first, error_last]
else:
assert mock_logger.error.call_count == 3
errors = [error_first, error, error_last]
calls = [mock.call(*e) for e in errors]
assert mock_logger.error.call_args_list == calls
assert var == VAR
|
py | 1a4f0e36668d365caadd42ebe3fa4927b44ef61d | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow ops for directed graphs."""
import tensorflow as tf
from syntaxnet.util import check
def ArcPotentialsFromTokens(source_tokens, target_tokens, weights):
r"""Returns arc potentials computed from token activations and weights.
For each batch of source and target token activations, computes a scalar
potential for each arc as the 3-way product between the activation vectors of
the source and target of the arc and the |weights|. Specifically,
arc[b,s,t] =
\sum_{i,j} source_tokens[b,s,i] * weights[i,j] * target_tokens[b,t,j]
Note that the token activations can be extended with bias terms to implement a
"biaffine" model (Dozat and Manning, 2017).
Args:
source_tokens: [B,N,S] tensor of batched activations for the source token in
each arc.
target_tokens: [B,N,T] tensor of batched activations for the target token in
each arc.
weights: [S,T] matrix of weights.
B,N may be statically-unknown, but S,T must be statically-known. The dtype
of all arguments must be compatible.
Returns:
[B,N,N] tensor A of arc potentials where A_{b,s,t} is the potential of the
arc from s to t in batch element b. The dtype of A is the same as that of
the arguments. Note that the diagonal entries (i.e., where s==t) represent
self-loops and may not be meaningful.
"""
# All arguments must have statically-known rank.
check.Eq(source_tokens.get_shape().ndims, 3, 'source_tokens must be rank 3')
check.Eq(target_tokens.get_shape().ndims, 3, 'target_tokens must be rank 3')
check.Eq(weights.get_shape().ndims, 2, 'weights must be a matrix')
# All activation dimensions must be statically-known.
num_source_activations = weights.get_shape().as_list()[0]
num_target_activations = weights.get_shape().as_list()[1]
check.NotNone(num_source_activations, 'unknown source activation dimension')
check.NotNone(num_target_activations, 'unknown target activation dimension')
check.Eq(source_tokens.get_shape().as_list()[2], num_source_activations,
'dimension mismatch between weights and source_tokens')
check.Eq(target_tokens.get_shape().as_list()[2], num_target_activations,
'dimension mismatch between weights and target_tokens')
# All arguments must share the same type.
check.Same([weights.dtype.base_dtype,
source_tokens.dtype.base_dtype,
target_tokens.dtype.base_dtype],
'dtype mismatch')
source_tokens_shape = tf.shape(source_tokens)
target_tokens_shape = tf.shape(target_tokens)
batch_size = source_tokens_shape[0]
num_tokens = source_tokens_shape[1]
with tf.control_dependencies([
tf.assert_equal(batch_size, target_tokens_shape[0]),
tf.assert_equal(num_tokens, target_tokens_shape[1])]):
# Flatten out the batch dimension so we can use one big multiplication.
targets_bnxt = tf.reshape(target_tokens, [-1, num_target_activations])
# Matrices are row-major, so we arrange for the RHS argument of each matmul
# to have its transpose flag set. That way no copying is required to align
# the rows of the LHS with the columns of the RHS.
weights_targets_bnxs = tf.matmul(targets_bnxt, weights, transpose_b=True)
# The next computation is over pairs of tokens within each batch element, so
# restore the batch dimension.
weights_targets_bxnxs = tf.reshape(
weights_targets_bnxs, [batch_size, num_tokens, num_source_activations])
# Note that this multiplication is repeated across the batch dimension,
# instead of being one big multiplication as in the first matmul. There
# doesn't seem to be a way to arrange this as a single multiplication given
# the pairwise nature of this computation.
arcs_bxnxn = tf.matmul(source_tokens, weights_targets_bxnxs,
transpose_b=True)
return arcs_bxnxn
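# Illustrative sketch (not part of the original module): builds arc potentials
# for a toy batch with assumed shapes B=2, N=3, S=T=4; only S and T must be
# statically known. The function is defined here but never called.
def _example_arc_potentials():
  source = tf.random_normal([2, 3, 4])  # [B,N,S] source token activations
  target = tf.random_normal([2, 3, 4])  # [B,N,T] target token activations
  weights = tf.ones([4, 4])             # [S,T] arc weights
  return ArcPotentialsFromTokens(source, target, weights)  # [B,N,N] potentials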
def ArcSourcePotentialsFromTokens(tokens, weights):
r"""Returns arc source potentials computed from tokens and weights.
For each batch of token activations, computes a scalar potential for each arc
as the product between the activations of the source token and the |weights|.
Specifically,
arc[b,s,:] = \sum_{i} weights[i] * tokens[b,s,i]
Args:
tokens: [B,N,S] tensor of batched activations for source tokens.
weights: [S] vector of weights.
B,N may be statically-unknown, but S must be statically-known. The dtype of
all arguments must be compatible.
Returns:
[B,N,N] tensor A of arc potentials as defined above. The dtype of A is the
same as that of the arguments. Note that the diagonal entries (i.e., where
s==t) represent self-loops and may not be meaningful.
"""
# All arguments must have statically-known rank.
check.Eq(tokens.get_shape().ndims, 3, 'tokens must be rank 3')
check.Eq(weights.get_shape().ndims, 1, 'weights must be a vector')
# All activation dimensions must be statically-known.
num_source_activations = weights.get_shape().as_list()[0]
check.NotNone(num_source_activations, 'unknown source activation dimension')
check.Eq(tokens.get_shape().as_list()[2], num_source_activations,
'dimension mismatch between weights and tokens')
# All arguments must share the same type.
check.Same([weights.dtype.base_dtype,
tokens.dtype.base_dtype],
'dtype mismatch')
tokens_shape = tf.shape(tokens)
batch_size = tokens_shape[0]
num_tokens = tokens_shape[1]
# Flatten out the batch dimension so we can use a couple big matmuls.
tokens_bnxs = tf.reshape(tokens, [-1, num_source_activations])
weights_sx1 = tf.expand_dims(weights, 1)
sources_bnx1 = tf.matmul(tokens_bnxs, weights_sx1)
sources_bnxn = tf.tile(sources_bnx1, [1, num_tokens])
# Restore the batch dimension in the output.
sources_bxnxn = tf.reshape(sources_bnxn, [batch_size, num_tokens, num_tokens])
return sources_bxnxn
def RootPotentialsFromTokens(root, tokens, weights):
r"""Returns root selection potentials computed from tokens and weights.
For each batch of token activations, computes a scalar potential for each root
selection as the 3-way product between the activations of the artificial root
token, the token activations, and the |weights|. Specifically,
roots[b,r] = \sum_{i,j} root[i] * weights[i,j] * tokens[b,r,j]
Args:
root: [S] vector of activations for the artificial root token.
tokens: [B,N,T] tensor of batched activations for root tokens.
weights: [S,T] matrix of weights.
B,N may be statically-unknown, but S,T must be statically-known. The dtype
of all arguments must be compatible.
Returns:
[B,N] matrix R of root-selection potentials as defined above. The dtype of
R is the same as that of the arguments.
"""
# All arguments must have statically-known rank.
check.Eq(root.get_shape().ndims, 1, 'root must be a vector')
check.Eq(tokens.get_shape().ndims, 3, 'tokens must be rank 3')
check.Eq(weights.get_shape().ndims, 2, 'weights must be a matrix')
# All activation dimensions must be statically-known.
num_source_activations = weights.get_shape().as_list()[0]
num_target_activations = weights.get_shape().as_list()[1]
check.NotNone(num_source_activations, 'unknown source activation dimension')
check.NotNone(num_target_activations, 'unknown target activation dimension')
check.Eq(root.get_shape().as_list()[0], num_source_activations,
'dimension mismatch between weights and root')
check.Eq(tokens.get_shape().as_list()[2], num_target_activations,
'dimension mismatch between weights and tokens')
# All arguments must share the same type.
check.Same([weights.dtype.base_dtype,
root.dtype.base_dtype,
tokens.dtype.base_dtype],
'dtype mismatch')
root_1xs = tf.expand_dims(root, 0)
tokens_shape = tf.shape(tokens)
batch_size = tokens_shape[0]
num_tokens = tokens_shape[1]
# Flatten out the batch dimension so we can use a couple big matmuls.
tokens_bnxt = tf.reshape(tokens, [-1, num_target_activations])
weights_targets_bnxs = tf.matmul(tokens_bnxt, weights, transpose_b=True)
roots_1xbn = tf.matmul(root_1xs, weights_targets_bnxs, transpose_b=True)
# Restore the batch dimension in the output.
roots_bxn = tf.reshape(roots_1xbn, [batch_size, num_tokens])
return roots_bxn
def CombineArcAndRootPotentials(arcs, roots):
"""Combines arc and root potentials into a single set of potentials.
Args:
arcs: [B,N,N] tensor of batched arc potentials.
roots: [B,N] matrix of batched root potentials.
Returns:
[B,N,N] tensor P of combined potentials where
P_{b,s,t} = s == t ? roots[b,t] : arcs[b,s,t]
"""
# All arguments must have statically-known rank.
check.Eq(arcs.get_shape().ndims, 3, 'arcs must be rank 3')
check.Eq(roots.get_shape().ndims, 2, 'roots must be a matrix')
# All arguments must share the same type.
dtype = arcs.dtype.base_dtype
check.Same([dtype, roots.dtype.base_dtype], 'dtype mismatch')
roots_shape = tf.shape(roots)
arcs_shape = tf.shape(arcs)
batch_size = roots_shape[0]
num_tokens = roots_shape[1]
with tf.control_dependencies([
tf.assert_equal(batch_size, arcs_shape[0]),
tf.assert_equal(num_tokens, arcs_shape[1]),
tf.assert_equal(num_tokens, arcs_shape[2])]):
return tf.matrix_set_diag(arcs, roots)
def LabelPotentialsFromTokens(tokens, weights):
r"""Computes label potentials from tokens and weights.
For each batch of token activations, computes a scalar potential for each
label as the product between the activations of the source token and the
|weights|. Specifically,
labels[b,t,l] = \sum_{i} weights[l,i] * tokens[b,t,i]
Args:
tokens: [B,N,T] tensor of batched token activations.
weights: [L,T] matrix of weights.
B,N may be dynamic, but L,T must be static. The dtype of all arguments must
be compatible.
Returns:
[B,N,L] tensor of label potentials as defined above, with the same dtype as
the arguments.
"""
check.Eq(tokens.get_shape().ndims, 3, 'tokens must be rank 3')
check.Eq(weights.get_shape().ndims, 2, 'weights must be a matrix')
num_labels = weights.get_shape().as_list()[0]
num_activations = weights.get_shape().as_list()[1]
check.NotNone(num_labels, 'unknown number of labels')
check.NotNone(num_activations, 'unknown activation dimension')
check.Eq(tokens.get_shape().as_list()[2], num_activations,
'activation mismatch between weights and tokens')
tokens_shape = tf.shape(tokens)
batch_size = tokens_shape[0]
num_tokens = tokens_shape[1]
check.Same([tokens.dtype.base_dtype,
weights.dtype.base_dtype],
'dtype mismatch')
# Flatten out the batch dimension so we can use one big matmul().
tokens_bnxt = tf.reshape(tokens, [-1, num_activations])
labels_bnxl = tf.matmul(tokens_bnxt, weights, transpose_b=True)
# Restore the batch dimension in the output.
labels_bxnxl = tf.reshape(labels_bnxl, [batch_size, num_tokens, num_labels])
return labels_bxnxl
def LabelPotentialsFromTokenPairs(sources, targets, weights):
r"""Computes label potentials from source and target tokens and weights.
For each aligned pair of source and target token activations, computes a
scalar potential for each label on the arc from the source to the target.
Specifically,
labels[b,t,l] = \sum_{i,j} sources[b,t,i] * weights[l,i,j] * targets[b,t,j]
Args:
sources: [B,N,S] tensor of batched source token activations.
targets: [B,N,T] tensor of batched target token activations.
weights: [L,S,T] tensor of weights.
B,N may be dynamic, but L,S,T must be static. The dtype of all arguments
must be compatible.
Returns:
[B,N,L] tensor of label potentials as defined above, with the same dtype as
the arguments.
"""
check.Eq(sources.get_shape().ndims, 3, 'sources must be rank 3')
check.Eq(targets.get_shape().ndims, 3, 'targets must be rank 3')
check.Eq(weights.get_shape().ndims, 3, 'weights must be rank 3')
num_labels = weights.get_shape().as_list()[0]
num_source_activations = weights.get_shape().as_list()[1]
num_target_activations = weights.get_shape().as_list()[2]
check.NotNone(num_labels, 'unknown number of labels')
check.NotNone(num_source_activations, 'unknown source activation dimension')
check.NotNone(num_target_activations, 'unknown target activation dimension')
check.Eq(sources.get_shape().as_list()[2], num_source_activations,
'activation mismatch between weights and source tokens')
check.Eq(targets.get_shape().as_list()[2], num_target_activations,
'activation mismatch between weights and target tokens')
check.Same([sources.dtype.base_dtype,
targets.dtype.base_dtype,
weights.dtype.base_dtype],
'dtype mismatch')
sources_shape = tf.shape(sources)
targets_shape = tf.shape(targets)
batch_size = sources_shape[0]
num_tokens = sources_shape[1]
with tf.control_dependencies([tf.assert_equal(batch_size, targets_shape[0]),
tf.assert_equal(num_tokens, targets_shape[1])]):
# For each token, we must compute a vector-3tensor-vector product. There is
# no op for this, but we can use reshape() and matmul() to compute it.
# Reshape |weights| and |targets| so we can use a single matmul().
weights_lsxt = tf.reshape(weights, [num_labels * num_source_activations,
num_target_activations])
targets_bnxt = tf.reshape(targets, [-1, num_target_activations])
weights_targets_bnxls = tf.matmul(targets_bnxt, weights_lsxt,
transpose_b=True)
# Restore all dimensions.
weights_targets_bxnxlxs = tf.reshape(
weights_targets_bnxls,
[batch_size, num_tokens, num_labels, num_source_activations])
# Incorporate the source activations. In this case, we perform a batched
# matmul() between the trailing [L,S] matrices of the current result and the
# trailing [S] vectors of the tokens.
sources_bxnx1xs = tf.expand_dims(sources, 2)
labels_bxnxlx1 = tf.matmul(weights_targets_bxnxlxs, sources_bxnx1xs,
transpose_b=True)
labels_bxnxl = tf.squeeze(labels_bxnxlx1, [3])
return labels_bxnxl
|
py | 1a4f0e8dbaddf18d2f8c8d213a6adccb2ba54480 | """main module
"""
import argparse
import importlib.util
import os
import shutil
import tempfile
import threading
import uuid
import docker
import yaml
from . import preprocess
def __import_configurator(path):
conf_path = os.path.join(path, "configurator.py")
spec = importlib.util.spec_from_file_location("configurator", conf_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
def __start_container(client, base_path, properties, build, nocache, network,
alias=None, **container_opts):
image_path = os.path.join(base_path, properties.pop("image"))
cache = client.images.list(filters={'label':f"seed={image_path}"})
if (not build) and cache:
print(" Using Cached Image")
image = cache[0]
else:
print(" Building Image... ", end='', flush=True)
image = client.images.build(
path=image_path,
nocache=nocache,
rm=True,
pull=True,
labels={'seed':image_path}
)
print("Done")
configurator = __import_configurator(image_path)
ret = configurator.configure(properties)
if isinstance(ret, tuple):
configurator_opts, teardown = ret
else:
configurator_opts, teardown = (ret, lambda: None)
del configurator
print(" Starting Container... ", end='', flush=True)
container = client.containers.create(
image=image.id,
detach=True,
init=True,
**container_opts,
**configurator_opts
)
print("Done")
network.connect(
container,
aliases=[alias] if alias is not None else None
)
container.start()
return (container, teardown)
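# Sketch of the configurator contract assumed by __start_container above (the
# interface is inferred from this file, not documented elsewhere): each image
# directory ships a configurator.py exposing
#
#     def configure(properties):
#         opts = {"volumes": {...}, "environment": {...}}  # docker-py options
#         return opts                       # or (opts, teardown_callable)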
def __log_container(name, container):
logs = container.logs(
stdout=True,
stderr=True,
stream=True,
follow=True
)
for log in logs:
print(f"{name}:", log.decode(), end='', flush=True)
def __parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--wdir', default="",
help="path to working directory")
parser.add_argument('--config', default='config.yml',
help="path to config file relative to working directory")
parser.add_argument('--adapterdir', default='adapters',
help="adapter search path relative to working directory")
parser.add_argument('--applicationdir', default='applications',
help="application search path relative to working directory")
parser.add_argument('--bundledir', default='bundles',
help="bundle search path relative to working directory")
parser.add_argument('--controllerdir', default='controllers',
help="controller search path relative to working directory")
parser.add_argument('--build', action='store_true',
help="rebuild adapter or controller images")
parser.add_argument('--nocache', action='store_true',
help="don't use build cache on adapter or controller rebuild")
parser.add_argument('--verbose', '-v', action='count', default=0,
help="print adapter (1st level) and controller (2nd level) log")
args = parser.parse_args()
args.wdir = os.path.join(os.getcwd(), args.wdir)
return args
def main():
"""main function
"""
args = __parse_args()
adapter, controllers, network, bundledir = None, None, None, None
try:
with open(os.path.join(args.wdir, args.config)) as config_f:
config = yaml.load(config_f)["config"]
run_id = str(uuid.uuid4())
print(f"Starting With Run Id \"{run_id}\"")
#Set up directories
bundledir = tempfile.TemporaryDirectory()
resultdir = os.path.join("results", run_id)
os.makedirs(resultdir)
bundlepath = os.path.join(
args.wdir,
args.bundledir,
config["bundle"]["name"],
"bundle"
)
preprocess.preprocess_bundle(
bundlepath + ".xml",
os.path.join(bundledir.name, "bundle.xml"),
config["bundle"]['parameters']
)
shutil.copy(
bundlepath + ".controller-bindings.yml",
os.path.join(bundledir.name, "bundle.controller-bindings.yml")
)
#Set up network and containers
client = docker.from_env()
network = client.networks.create(run_id)
controllers = {}
for name, properties in config["controllers"].items():
print(f"Starting Controller \"{name}\"")
applications = {
application: os.path.join(
args.wdir,
args.applicationdir,
application)
for application in properties['applications']}
controllers[name] = __start_container(
client,
os.path.join(args.wdir, args.controllerdir),
dict(properties, **{
"applications": applications,
"bundledir": bundledir.name,
"resultdir": os.path.join(os.getcwd(), resultdir)
}),
args.build,
args.nocache,
network,
alias=name
)
print("Done")
print(f"Starting Adapter \"{config['adapter']['image']}\"")
adapter = __start_container(
client,
os.path.join(args.wdir, args.adapterdir),
dict(config["adapter"], **{
"bundledir": bundledir.name,
"resultdir": os.path.join(os.getcwd(), resultdir),
"controllers": config["controllers"]
}),
args.build,
args.nocache,
network,
privileged=True
)
print("Done")
try:
if args.verbose >= 2:
for name, (container, _) in controllers.items():
arg = (name, container)
threading.Thread(target=__log_container, args=arg).start()
if args.verbose >= 1:
__log_container("adapter", adapter[0])
import time
time.sleep(10)
adapter[0].wait()
except KeyboardInterrupt:
pass
finally:
print("Tearing Down")
if adapter is not None:
adapter[0].stop()
adapter[0].remove()
adapter[1]()
if controllers is not None:
for controller, teardown in controllers.values():
controller.stop()
controller.remove()
teardown()
if network is not None:
network.remove()
if bundledir is not None:
bundledir.cleanup()
if __name__ == "__main__":
main()
|
py | 1a4f0eb6a7b1a9c8e3f786627373a4fcd0ecf438 | from flask import render_template, redirect, url_for, flash, request
from werkzeug.urls import url_parse
from flask_login import login_user, logout_user, current_user
from flask_babel import _
from app import db
from app.auth import bp
from app.auth.forms import LoginForm, RegistrationForm, ResetPasswordRequestForm, ResetPasswordForm
from app.models import User
from app.auth.email import send_password_reset_email
@bp.route('/login', methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('main.index'))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(username=form.username.data).first()
if user is None or not user.check_password(form.password.data):
flash(_('Invalid username or password'))
return redirect(url_for('auth.login'))
login_user(user, remember=form.remember_me.data)
next_page = request.args.get('next')
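        # Only follow a relative 'next' target: an absolute URL (non-empty
        # netloc) could send the user to another site after login
        # (open-redirect guard).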
if not next_page or url_parse(next_page).netloc != '':
next_page = url_for('main.index')
return redirect(next_page)
return render_template('auth/login.html', title=_('Sign In'), form=form)
@bp.route('/logout')
def logout():
logout_user()
return redirect(url_for('main.index'))
@bp.route('/register', methods=['GET', 'POST'])
def register():
if current_user.is_authenticated:
return redirect(url_for('main.index'))
form = RegistrationForm()
if form.validate_on_submit():
user = User(username=form.username.data, email=form.email.data)
user.set_password(form.password.data)
db.session.add(user)
db.session.commit()
flash(_('Congratulations, you are now a registered user!'))
return redirect(url_for('auth.login'))
return render_template('auth/register.html', title=_('Register'),
form=form)
@bp.route('/reset_password_request', methods=['GET', 'POST'])
def reset_password_request():
if current_user.is_authenticated:
return redirect(url_for('main.index'))
form = ResetPasswordRequestForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user:
send_password_reset_email(user)
flash(
_('Check your email for the instructions to reset your password'))
return redirect(url_for('auth.login'))
return render_template('auth/reset_password_request.html',
title=_('Reset Password'), form=form)
@bp.route('/reset_password/<token>', methods=['GET', 'POST'])
def reset_password(token):
if current_user.is_authenticated:
return redirect(url_for('main.index'))
user = User.verify_reset_password_token(token)
if not user:
return redirect(url_for('main.index'))
form = ResetPasswordForm()
if form.validate_on_submit():
user.set_password(form.password.data)
db.session.commit()
flash(_('Your password has been reset.'))
return redirect(url_for('auth.login'))
return render_template('auth/reset_password.html', form=form)
|
py | 1a4f0f80df2ccb25525473837c84ecd6c90841f7 | # Authors: Alexandre Gramfort <[email protected]>
# Matti Hamalainen <[email protected]>
# Denis Engemann <[email protected]>
# Teon Brooks <[email protected]>
#
# License: BSD (3-clause)
from copy import deepcopy
from itertools import count
from math import sqrt
import numpy as np
from scipy import linalg
from .tree import dir_tree_find
from .tag import find_tag
from .constants import FIFF
from .pick import pick_types
from .write import (write_int, write_float, write_string, write_name_list,
write_float_matrix, end_block, start_block)
from ..utils import logger, verbose, warn
from ..externals.six import string_types
class Projection(dict):
"""Projection vector.
A basic class to proj a meaningful print for projection vectors.
"""
def __repr__(self): # noqa: D105
s = "%s" % self['desc']
s += ", active : %s" % self['active']
s += ", n_channels : %s" % self['data']['ncol']
return "<Projection | %s>" % s
class ProjMixin(object):
"""Mixin class for Raw, Evoked, Epochs.
Notes
-----
This mixin adds a proj attribute as a property to data containers.
It is True if at least one proj is present and all of them are active.
The projs might not be applied yet if data are not preloaded. In
this case it's the _projector attribute that does the job.
If a private _data attribute is present then the projs applied
to it are the ones marked as active.
A proj parameter passed in constructor of raw or epochs calls
apply_proj and hence after the .proj attribute is True.
As soon as you've applied the projs it will stay active in the
remaining pipeline.
The suggested pipeline is proj=True in epochs (it's cheaper than for raw).
When you use delayed SSP in Epochs, projs are applied when you call
the get_data() method. They are not applied to evoked._data unless you
call apply_proj(). The reason is that you want to be able to reject
epochs with the projs applied even though the data are not stored with
the projs applied.
"""
@property
def proj(self):
"""Whether or not projections are active."""
return (len(self.info['projs']) > 0 and
all(p['active'] for p in self.info['projs']))
@verbose
def add_proj(self, projs, remove_existing=False, verbose=None):
"""Add SSP projection vectors.
Parameters
----------
projs : list
List with projection vectors.
remove_existing : bool
Remove the projection vectors currently in the file.
verbose : bool, str, int, or None
If not None, override default verbose level (see
:func:`mne.verbose` and :ref:`Logging documentation <tut_logging>`
for more).
Returns
-------
self : instance of Raw | Epochs | Evoked
The data container.
"""
if isinstance(projs, Projection):
projs = [projs]
        if (not isinstance(projs, list) or
                not all(isinstance(p, Projection) for p in projs)):
raise ValueError('Only projs can be added. You supplied '
'something else.')
# mark proj as inactive, as they have not been applied
projs = deactivate_proj(projs, copy=True, verbose=self.verbose)
if remove_existing:
# we cannot remove the proj if they are active
if any(p['active'] for p in self.info['projs']):
raise ValueError('Cannot remove projectors that have '
'already been applied')
self.info['projs'] = projs
else:
self.info['projs'].extend(projs)
# We don't want to add projectors that are activated again.
self.info['projs'] = _uniquify_projs(self.info['projs'],
check_active=False, sort=False)
return self
def apply_proj(self):
"""Apply the signal space projection (SSP) operators to the data.
Notes
-----
Once the projectors have been applied, they can no longer be
removed. It is usually not recommended to apply the projectors at
too early stages, as they are applied automatically later on
(e.g. when computing inverse solutions).
Hint: using the copy method individual projection vectors
can be tested without affecting the original data.
With evoked data, consider the following example::
projs_a = mne.read_proj('proj_a.fif')
projs_b = mne.read_proj('proj_b.fif')
# add the first, copy, apply and see ...
            evoked.add_proj(projs_a).copy().apply_proj().plot()
            # add the second, copy, apply and see ...
            evoked.add_proj(projs_b).copy().apply_proj().plot()
# drop the first and see again
evoked.copy().del_proj(0).apply_proj().plot()
evoked.apply_proj() # finally keep both
Returns
-------
self : instance of Raw | Epochs | Evoked
The instance.
"""
from ..epochs import BaseEpochs
from ..evoked import Evoked
from .base import BaseRaw
if self.info['projs'] is None or len(self.info['projs']) == 0:
logger.info('No projector specified for this dataset. '
'Please consider the method self.add_proj.')
return self
# Exit delayed mode if you apply proj
if isinstance(self, BaseEpochs) and self._do_delayed_proj:
logger.info('Leaving delayed SSP mode.')
self._do_delayed_proj = False
if all(p['active'] for p in self.info['projs']):
logger.info('Projections have already been applied. '
'Setting proj attribute to True.')
return self
_projector, info = setup_proj(deepcopy(self.info), activate=True,
verbose=self.verbose)
# let's not raise a RuntimeError here, otherwise interactive plotting
if _projector is None: # won't be fun.
logger.info('The projections don\'t apply to these data.'
' Doing nothing.')
return self
self._projector, self.info = _projector, info
if isinstance(self, (BaseRaw, Evoked)):
if self.preload:
self._data = np.dot(self._projector, self._data)
else: # BaseEpochs
if self.preload:
for ii, e in enumerate(self._data):
self._data[ii] = self._project_epoch(e)
else:
self.load_data() # will automatically apply
logger.info('SSP projectors applied...')
return self
def del_proj(self, idx='all'):
"""Remove SSP projection vector.
Note: The projection vector can only be removed if it is inactive
(has not been applied to the data).
Parameters
----------
idx : int | list of int | str
Index of the projector to remove. Can also be "all" (default)
to remove all projectors.
Returns
-------
self : instance of Raw | Epochs | Evoked
"""
if isinstance(idx, string_types) and idx == 'all':
idx = list(range(len(self.info['projs'])))
idx = np.atleast_1d(np.array(idx, int)).ravel()
if any(self.info['projs'][ii]['active'] for ii in idx):
raise ValueError('Cannot remove projectors that have already '
'been applied')
self.info['projs'] = [p for pi, p in enumerate(self.info['projs'])
if pi not in idx]
return self
def plot_projs_topomap(self, ch_type=None, layout=None, axes=None):
"""Plot SSP vector.
Parameters
----------
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None | List
            The channel type to plot. For 'grad', the gradiometers are
            collected in pairs and the RMS for each pair is plotted. If None
(default), it will return all channel types present. If a list of
ch_types is provided, it will return multiple figures.
layout : None | Layout | List of Layouts
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct
layout file is inferred from the data; if no appropriate layout
file was found, the layout is automatically generated from the
sensor locations. Or a list of Layout if projections
are from different sensor types.
axes : instance of Axes | list | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as the number of projectors. If instance of Axes,
there must be only one projector. Defaults to None.
Returns
-------
fig : instance of matplotlib figure
Figure distributing one image per channel across sensor topography.
"""
        if self.info['projs'] is not None and len(self.info['projs']) != 0:
from ..viz.topomap import plot_projs_topomap
from ..channels.layout import find_layout
if layout is None:
layout = []
if ch_type is None:
ch_type = [ch for ch in ['meg', 'eeg'] if ch in self]
elif isinstance(ch_type, string_types):
ch_type = [ch_type]
for ch in ch_type:
if ch in self:
layout.append(find_layout(self.info, ch, exclude=[]))
else:
warn('Channel type %s is not found in info.' % ch)
fig = plot_projs_topomap(self.info['projs'], layout, axes=axes)
else:
raise ValueError("Info is missing projs. Nothing to plot.")
return fig
def _proj_equal(a, b, check_active=True):
"""Test if two projectors are equal."""
equal = ((a['active'] == b['active'] or not check_active) and
a['kind'] == b['kind'] and
a['desc'] == b['desc'] and
a['data']['col_names'] == b['data']['col_names'] and
a['data']['row_names'] == b['data']['row_names'] and
a['data']['ncol'] == b['data']['ncol'] and
a['data']['nrow'] == b['data']['nrow'] and
np.all(a['data']['data'] == b['data']['data']))
return equal
@verbose
def _read_proj(fid, node, verbose=None):
"""Read spatial projections from a FIF file.
Parameters
----------
fid : file
The file descriptor of the open file.
node : tree node
The node of the tree where to look.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
projs: dict
The list of projections.
"""
projs = list()
# Locate the projection data
nodes = dir_tree_find(node, FIFF.FIFFB_PROJ)
if len(nodes) == 0:
return projs
tag = find_tag(fid, nodes[0], FIFF.FIFF_NCHAN)
if tag is not None:
global_nchan = int(tag.data)
items = dir_tree_find(nodes[0], FIFF.FIFFB_PROJ_ITEM)
for item in items:
# Find all desired tags in one item
tag = find_tag(fid, item, FIFF.FIFF_NCHAN)
if tag is not None:
nchan = int(tag.data)
else:
nchan = global_nchan
tag = find_tag(fid, item, FIFF.FIFF_DESCRIPTION)
if tag is not None:
desc = tag.data
else:
tag = find_tag(fid, item, FIFF.FIFF_NAME)
if tag is not None:
desc = tag.data
else:
raise ValueError('Projection item description missing')
# XXX : is this useful ?
# tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST)
# if tag is not None:
# namelist = tag.data
# else:
# raise ValueError('Projection item channel list missing')
tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_KIND)
if tag is not None:
kind = int(tag.data)
else:
raise ValueError('Projection item kind missing')
tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_NVEC)
if tag is not None:
nvec = int(tag.data)
else:
raise ValueError('Number of projection vectors not specified')
tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST)
if tag is not None:
names = tag.data.split(':')
else:
raise ValueError('Projection item channel list missing')
tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_VECTORS)
if tag is not None:
data = tag.data
else:
raise ValueError('Projection item data missing')
tag = find_tag(fid, item, FIFF.FIFF_MNE_PROJ_ITEM_ACTIVE)
if tag is not None:
active = bool(tag.data)
else:
active = False
tag = find_tag(fid, item, FIFF.FIFF_MNE_ICA_PCA_EXPLAINED_VAR)
if tag is not None:
explained_var = tag.data
else:
explained_var = None
# handle the case when data is transposed for some reason
if data.shape[0] == len(names) and data.shape[1] == nvec:
data = data.T
if data.shape[1] != len(names):
raise ValueError('Number of channel names does not match the '
'size of data matrix')
# Use exactly the same fields in data as in a named matrix
one = Projection(kind=kind, active=active, desc=desc,
data=dict(nrow=nvec, ncol=nchan, row_names=None,
col_names=names, data=data),
explained_var=explained_var)
projs.append(one)
if len(projs) > 0:
logger.info(' Read a total of %d projection items:' % len(projs))
for k in range(len(projs)):
if projs[k]['active']:
misc = 'active'
else:
misc = ' idle'
logger.info(' %s (%d x %d) %s'
% (projs[k]['desc'], projs[k]['data']['nrow'],
projs[k]['data']['ncol'], misc))
return projs
###############################################################################
# Write
def _write_proj(fid, projs):
"""Write a projection operator to a file.
Parameters
----------
fid : file
The file descriptor of the open file.
projs : dict
The projection operator.
"""
if len(projs) == 0:
return
start_block(fid, FIFF.FIFFB_PROJ)
for proj in projs:
start_block(fid, FIFF.FIFFB_PROJ_ITEM)
write_int(fid, FIFF.FIFF_NCHAN, proj['data']['ncol'])
write_name_list(fid, FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST,
proj['data']['col_names'])
write_string(fid, FIFF.FIFF_NAME, proj['desc'])
write_int(fid, FIFF.FIFF_PROJ_ITEM_KIND, proj['kind'])
if proj['kind'] == FIFF.FIFFV_PROJ_ITEM_FIELD:
write_float(fid, FIFF.FIFF_PROJ_ITEM_TIME, 0.0)
write_int(fid, FIFF.FIFF_PROJ_ITEM_NVEC, proj['data']['nrow'])
write_int(fid, FIFF.FIFF_MNE_PROJ_ITEM_ACTIVE, proj['active'])
write_float_matrix(fid, FIFF.FIFF_PROJ_ITEM_VECTORS,
proj['data']['data'])
if proj['explained_var'] is not None:
write_float(fid, FIFF.FIFF_MNE_ICA_PCA_EXPLAINED_VAR,
proj['explained_var'])
end_block(fid, FIFF.FIFFB_PROJ_ITEM)
end_block(fid, FIFF.FIFFB_PROJ)
###############################################################################
# Utils
def _check_projs(projs, copy=True):
"""Check that projs is a list of Projection."""
if not isinstance(projs, (list, tuple)):
raise TypeError('projs must be a list or tuple, got %s'
% (type(projs),))
for pi, p in enumerate(projs):
if not isinstance(p, Projection):
raise TypeError('All entries in projs list must be Projection '
'instances, but projs[%d] is type %s'
% (pi, type(p)))
return deepcopy(projs) if copy else projs
def make_projector(projs, ch_names, bads=(), include_active=True):
"""Create an SSP operator from SSP projection vectors.
Parameters
----------
projs : list
List of projection vectors.
ch_names : list of str
List of channels to include in the projection matrix.
bads : list of str
Some bad channels to exclude. If bad channels were marked
in the raw file when projs were calculated using mne-python,
they should not need to be included here as they will
have been automatically omitted from the projectors.
include_active : bool
Also include projectors that are already active.
Returns
-------
proj : array of shape [n_channels, n_channels]
The projection operator to apply to the data.
nproj : int
How many items in the projector.
U : array
The orthogonal basis of the projection vectors (optional).
"""
return _make_projector(projs, ch_names, bads, include_active)
def _make_projector(projs, ch_names, bads=(), include_active=True,
inplace=False):
"""Subselect projs based on ch_names and bads.
Use inplace=True mode to modify ``projs`` inplace so that no
warning will be raised next time projectors are constructed with
the given inputs. If inplace=True, no meaningful data are returned.
"""
nchan = len(ch_names)
if nchan == 0:
raise ValueError('No channel names specified')
default_return = (np.eye(nchan, nchan), 0, [])
# Check trivial cases first
if projs is None:
return default_return
nvec = 0
nproj = 0
for p in projs:
if not p['active'] or include_active:
nproj += 1
nvec += p['data']['nrow']
if nproj == 0:
return default_return
# Pick the appropriate entries
vecs = np.zeros((nchan, nvec))
nvec = 0
nonzero = 0
for k, p in enumerate(projs):
if not p['active'] or include_active:
if (len(p['data']['col_names']) !=
len(np.unique(p['data']['col_names']))):
raise ValueError('Channel name list in projection item %d'
' contains duplicate items' % k)
# Get the two selection vectors to pick correct elements from
# the projection vectors omitting bad channels
sel = []
vecsel = []
for c, name in enumerate(ch_names):
if name in p['data']['col_names'] and name not in bads:
sel.append(c)
vecsel.append(p['data']['col_names'].index(name))
            # If there is something to pick, pick it
nrow = p['data']['nrow']
this_vecs = vecs[:, nvec:nvec + nrow]
if len(sel) > 0:
this_vecs[sel] = p['data']['data'][:, vecsel].T
# Rescale for better detection of small singular values
for v in range(p['data']['nrow']):
psize = sqrt(np.sum(this_vecs[:, v] * this_vecs[:, v]))
if psize > 0:
orig_n = p['data']['data'].any(axis=0).sum()
# Average ref still works if channels are removed
if len(vecsel) < 0.9 * orig_n and not inplace and \
(p['kind'] != FIFF.FIFFV_MNE_PROJ_ITEM_EEG_AVREF or
len(vecsel) == 1):
warn('Projection vector "%s" has magnitude %0.2f '
'(should be unity), applying projector with '
'%s/%s of the original channels available may '
'be dangerous, consider recomputing and adding '
'projection vectors for channels that are '
'eventually used. If this is intentional, '
'consider using info.normalize_proj()'
% (p['desc'], psize, len(vecsel), orig_n))
this_vecs[:, v] /= psize
nonzero += 1
# If doing "inplace" mode, "fix" the projectors to only operate
# on this subset of channels.
if inplace:
p['data']['data'] = this_vecs[sel].T
p['data']['col_names'] = [p['data']['col_names'][ii]
for ii in vecsel]
nvec += p['data']['nrow']
# Check whether all of the vectors are exactly zero
if nonzero == 0 or inplace:
return default_return
# Reorthogonalize the vectors
U, S, V = linalg.svd(vecs[:, :nvec], full_matrices=False)
# Throw away the linearly dependent guys
nproj = np.sum((S / S[0]) > 1e-2)
U = U[:, :nproj]
# Here is the celebrated result
proj = np.eye(nchan, nchan) - np.dot(U, U.T)
return proj, nproj, U
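# A minimal, numpy-only sketch of the algebra used above (the helper below is
# illustrative only and is not called anywhere in this module): the projector
# returned by make_projector is I - U U^T for an orthonormal basis U, so
# applying it removes the component of the data lying in span(U).
def _projector_math_example():
    """Toy demonstration of ``proj = I - np.dot(U, U.T)``.

    >>> import numpy as np
    >>> u = np.array([[1.], [1.], [0.]]) / np.sqrt(2)  # one unit vector
    >>> proj = np.eye(3) - np.dot(u, u.T)
    >>> x = np.array([2., 4., 5.])
    >>> np.allclose(np.dot(proj, x), [-1., 1., 5.])
    True

    The shared component of the first two channels (their mean along ``u``)
    is removed, while the part orthogonal to ``u`` is left untouched.
    """
    pass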
def _normalize_proj(info):
"""Normalize proj after subselection to avoid warnings.
This is really only useful for tests, and might not be needed
eventually if we change or improve our handling of projectors
with picks.
"""
# Here we do info.get b/c info can actually be a noise cov
_make_projector(info['projs'], info.get('ch_names', info.get('names')),
info['bads'], include_active=True, inplace=True)
def make_projector_info(info, include_active=True):
"""Make an SSP operator using the measurement info.
Calls make_projector on good channels.
Parameters
----------
info : dict
Measurement info.
include_active : bool
Also include projectors that are already active.
Returns
-------
proj : array of shape [n_channels, n_channels]
The projection operator to apply to the data.
nproj : int
How many items in the projector.
"""
proj, nproj, _ = make_projector(info['projs'], info['ch_names'],
info['bads'], include_active)
return proj, nproj
@verbose
def activate_proj(projs, copy=True, verbose=None):
"""Set all projections to active.
Useful before passing them to make_projector.
Parameters
----------
projs : list
The projectors.
copy : bool
Modify projs in place or operate on a copy.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
projs : list
The projectors.
"""
if copy:
projs = deepcopy(projs)
# Activate the projection items
for proj in projs:
proj['active'] = True
logger.info('%d projection items activated' % len(projs))
return projs
@verbose
def deactivate_proj(projs, copy=True, verbose=None):
"""Set all projections to inactive.
Useful before saving raw data without projectors applied.
Parameters
----------
projs : list
The projectors.
copy : bool
Modify projs in place or operate on a copy.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
projs : list
The projectors.
"""
if copy:
projs = deepcopy(projs)
# Deactivate the projection items
for proj in projs:
proj['active'] = False
logger.info('%d projection items deactivated' % len(projs))
return projs
@verbose
def make_eeg_average_ref_proj(info, activate=True, verbose=None):
"""Create an EEG average reference SSP projection vector.
Parameters
----------
info : dict
Measurement info.
activate : bool
If True projections are activated.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
eeg_proj: instance of Projection
The SSP/PCA projector.
"""
if info.get('custom_ref_applied', False):
raise RuntimeError('A custom reference has been applied to the '
'data earlier. Please use the '
'mne.io.set_eeg_reference function to move from '
'one EEG reference to another.')
logger.info("Adding average EEG reference projection.")
eeg_sel = pick_types(info, meg=False, eeg=True, ref_meg=False,
exclude='bads')
ch_names = info['ch_names']
eeg_names = [ch_names[k] for k in eeg_sel]
n_eeg = len(eeg_sel)
if n_eeg == 0:
raise ValueError('Cannot create EEG average reference projector '
'(no EEG data found)')
vec = np.ones((1, n_eeg))
vec /= n_eeg
explained_var = None
eeg_proj_data = dict(col_names=eeg_names, row_names=None,
data=vec, nrow=1, ncol=n_eeg)
eeg_proj = Projection(active=activate, data=eeg_proj_data,
desc='Average EEG reference',
kind=FIFF.FIFFV_MNE_PROJ_ITEM_EEG_AVREF,
explained_var=explained_var)
return eeg_proj
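# Illustrative note: the projection built above is a row of ones divided by
# n_eeg. Once make_projector normalizes it, the resulting rank-one projector
# I - v v^T simply subtracts the mean across EEG channels. A numpy-only sketch
# with toy numbers (not used by the code in this file):
#
#     v = np.ones((4, 1)) / np.sqrt(4)
#     proj = np.eye(4) - np.dot(v, v.T)
#     x = np.array([1., 2., 3., 6.])
#     np.allclose(np.dot(proj, x), x - x.mean())  # True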
def _has_eeg_average_ref_proj(projs, check_active=False):
"""Determine if a list of projectors has an average EEG ref.
Optionally, set check_active=True to additionally check if the CAR
has already been applied.
"""
for proj in projs:
if (proj['desc'] == 'Average EEG reference' or
proj['kind'] == FIFF.FIFFV_MNE_PROJ_ITEM_EEG_AVREF):
if not check_active or proj['active']:
return True
return False
def _needs_eeg_average_ref_proj(info):
"""Determine if the EEG needs an averge EEG reference.
This returns True if no custom reference has been applied and no average
reference projection is present in the list of projections.
"""
eeg_sel = pick_types(info, meg=False, eeg=True, ref_meg=False,
exclude='bads')
return (len(eeg_sel) > 0 and
not info['custom_ref_applied'] and
not _has_eeg_average_ref_proj(info['projs']))
@verbose
def setup_proj(info, add_eeg_ref=True, activate=True, verbose=None):
"""Set up projection for Raw and Epochs.
Parameters
----------
info : dict
The measurement info.
add_eeg_ref : bool
If True, an EEG average reference will be added (unless one
already exists).
activate : bool
If True projections are activated.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
projector : array of shape [n_channels, n_channels]
The projection operator to apply to the data.
info : dict
The modified measurement info (Warning: info is modified inplace).
"""
# Add EEG ref reference proj if necessary
if add_eeg_ref and _needs_eeg_average_ref_proj(info):
eeg_proj = make_eeg_average_ref_proj(info, activate=activate)
info['projs'].append(eeg_proj)
# Create the projector
projector, nproj = make_projector_info(info)
if nproj == 0:
if verbose:
logger.info('The projection vectors do not apply to these '
'channels')
projector = None
else:
logger.info('Created an SSP operator (subspace dimension = %d)'
% nproj)
# The projection items have been activated
if activate:
info['projs'] = activate_proj(info['projs'], copy=False)
return projector, info
def _uniquify_projs(projs, check_active=True, sort=True):
"""Make unique projs."""
final_projs = []
for proj in projs: # flatten
if not any(_proj_equal(p, proj, check_active) for p in final_projs):
final_projs.append(proj)
my_count = count(len(final_projs))
def sorter(x):
"""Sort in a nice way."""
digits = [s for s in x['desc'] if s.isdigit()]
if digits:
sort_idx = int(digits[-1])
else:
sort_idx = next(my_count)
return (sort_idx, x['desc'])
return sorted(final_projs, key=sorter) if sort else final_projs
|
py | 1a4f10237bde905b19abeb24233f314178eb40ad | from django.shortcuts import render, get_object_or_404, render_to_response
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from .models import Report, UploadedFile, Folder
from .forms import ReportForm, FolderForm
from django.template import RequestContext
from web.models import UserGroup
from django.contrib.auth.models import User
from Crypto import Random
from datetime import datetime
# Create your views here.
random_generator = Random.new().read
def index(request):
return render(request, 'createReport.html')
def thanks(request):
return render(request, 'form.html')
def folders(request):
reports = Report.objects.all()
folders = Folder.objects.all()
return render(request, 'reports/folders.html', {'folders': folders})
def viewReportsInFolders(request):
folders = Folder.objects.all()
reports = Report.objects.all()
return render(request, 'reports/savedReports.html',
{'folders': folders, 'reports': reports})
def create_folder(request):
reports = Report.objects.all()
username_id = request.user
if request.method == 'POST':
form = FolderForm(request.POST, request.FILES)
selected = request.POST.getlist('selected_report[]')
if form.is_valid():
folder_object = Folder.objects.create(
name=form.cleaned_data['title'], owner=username_id
)
for report_selected in selected:
re = Report.objects.get(title=report_selected)
folder_object.members.add(re)
return HttpResponse("Folder has been updated")
else:
form = FolderForm()
variables = RequestContext(request, {
'form': form, 'reports': reports
})
return render_to_response(
'reports/folderz.html',
variables,
)
def edit_folder(request, id=None):
    try:
        folder = Folder.objects.get(id=id)
        # FolderForm takes no user kwarg (see create_folder above).
        form_class = FolderForm(instance=folder)
        if request.method == 'POST':
            form = FolderForm(request.POST, request.FILES, instance=folder)
            selected = request.POST.getlist('selected_report[]')
            if form.is_valid():
                # Create a folder owned by the current user and attach the
                # selected reports to it.
                folder_object = Folder.objects.create(
                    name=form.cleaned_data['title'], owner=request.user
                )
                for report_selected in selected:
                    re = Report.objects.get(title=report_selected)
                    folder_object.members.add(re)
        return render(request, 'reports/doneEditingFolder.html', {'form_class': form_class})
    except:
        return HttpResponse("You can't update this folder")
def edit_with_delete(request, id=None):
try:
folder = Folder.objects.get(id=id)
print(folder.owner)
if folder.owner != request.user:
text = "You do not have permission to change this folder"
return HttpResponse(text)
else:
Folder.objects.filter(id=id).delete()
print("deleted")
return render(request, 'reports/redirect_to_change.html')
except:
text = "You are unable to change this folder"
return HttpResponse(text)
def folder(request):
folder_name = request.POST.get('selected')
print(folder_name)
reports = Report.objects.all()
print(reports)
return render(request, 'reports/folder.html', {'folder_name': folder_name,
'reports': reports})
@login_required
def delete_folder(request, id=None):
try:
folder = Folder.objects.get(id=id)
if folder.owner != request.user:
text = "You do not have permission to delete this folder"
return HttpResponse(text)
else:
Folder.objects.filter(id=id).delete()
return render(request, 'reports/deleteFolder.html')
except:
text = "You are unable to delete this folder"
return HttpResponse(text)
@login_required
def add_report(request):
form_class = ReportForm(user=request.user)
# if this is a POST request process the form data
if request.method == 'POST':
# create a form instance and populate it with data from the request
form = ReportForm(request.POST, request.FILES, user=request.user)
# check whether it's valid:
if form.is_valid():
report = form.save(commit=False)
report.owner = User.objects.get(username=request.user.username)
if form.cleaned_data['Share with:'] != 'all':
report.group = UserGroup.objects.get(
name=form.cleaned_data['Share with:'])
files = request.FILES.getlist('file_field')
report.save()
for f in files:
file = UploadedFile(report=report, owner=request.user)
file.file_obj = f
file.save()
# redirect to a new URL:
return render(request, 'reports/createReport.html', {'form': form_class})
else:
text = form.errors
return HttpResponse(text)
return render(request, 'reports/createReport.html', {'form': form_class})
@login_required
def edit_report(request, id=None):
try:
if id:
report = Report.objects.get(pk=id)
if report.owner != request.user:
text = "You do not have permission to edit this report"
return HttpResponse(text)
else:
report = Report()
form_class = ReportForm(user=request.user, instance=report)
if request.method == 'POST':
form = ReportForm(request.POST, request.FILES, instance=report, user=request.user)
if form.is_valid():
report = form.save(commit=False)
report.owner = User.objects.get(username=request.user.username)
if form.cleaned_data['Share with:'] != 'all':
report.group = UserGroup.objects.get(
name=form.cleaned_data['Share with:'])
files = request.FILES.getlist('file_field')
report.save()
for curr in report.file_set.all():
curr.delete()
for f in files:
file = UploadedFile(report=report, owner=request.user)
file.file_obj = f
file.save()
return render(request, 'reports/doneEditing.html', {'form': form_class})
else:
text = form.errors
return HttpResponse(text)
except:
text = "You are not able to edit this report"
return HttpResponse(text)
return render(request, 'reports/editReport.html', {'form': form_class, 'id': id})
@login_required
def see_reports(request):
initial_search = {}
reports_list = Report.objects.all().filter(group=None)
for group in UserGroup.objects.filter(members=request.user):
reports_list = reports_list | group.report_set.all()
# Filter based by min date
if request.GET.get('sincesearch', False):
date_in = request.GET['sincesearch']
initial_search['since'] = date_in
date_since = datetime(
*[int(v) for v in date_in.replace('T', '-').replace(':',
'-').split('-')])
reports_list = reports_list.filter(timestamp__gte=date_since)
# Filter based by max date
if request.GET.get('beforesearch', False):
date_in = request.GET['beforesearch']
initial_search['before'] = date_in
date_since = datetime(
*[int(v) for v in date_in.replace('T', '-').replace(':',
'-').split('-')])
reports_list = reports_list.filter(timestamp__lte=date_since)
# Filter based on creator
if request.GET.get('ownersearch', False):
owner = request.GET['ownersearch']
initial_search['owner'] = owner
reports_list = reports_list.filter(owner__username__icontains=owner)
# Filter based on title
if request.GET.get('titlesearch', False):
title = request.GET['titlesearch']
initial_search['title'] = title
reports_list = reports_list.filter(title__icontains=title)
# Filter based on descriptions
if request.GET.get('descsearch', False):
desc = request.GET['descsearch']
initial_search['desc'] = desc
short_search = reports_list.filter(short_desc__icontains=desc)
long_search = reports_list.filter(long_desc__icontains=desc)
reports_list = short_search | long_search
for report in reports_list:
report.files = report.file_set.all()
for file in report.files:
file.file_obj.name = file.file_obj.name.split('/')[-1]
return render(request, 'reports/see_reports.html', {'reports_list':
reports_list,
'search_values':
initial_search})
def add_reports(request, folder_name):
print("hi")
print(folder_name)
reports = Report.objects.all()
username_id = request.user
print(request.method)
if request.method == 'POST':
print("hi2")
form = FolderForm(request.POST)
selected = request.POST.getlist('selectedReport[]')
print(selected)
if form.is_valid():
print("hi3")
folder_object = Folder.objects.create(name=folder_name,
owner=username_id)
folder_object.save()
for report_selected in selected:
re = Report.objects.get(title=report_selected)
folder_object.members.add(re)
print(folder_object.members)
else:
form = FolderForm()
folder_object = []
if folder_name is not None:
folder_object = Folder.objects.get(name=folder_name)
print(folder_object)
print(folder_object.members)
variables = RequestContext(request, {'form': form, 'reports': reports})
return render_to_response(
'reports/folder.html',
variables,
)
def viewFolders(request):
context = {}
context['folders_list'] = Folder.objects.all()
    return render(request, 'reports/folders.html', context)
@login_required
def see_my_reports(request):
initial_search = {}
my_reports_list = Report.objects.all().filter(owner=request.user).order_by('keyword')
for group in UserGroup.objects.filter(members=request.user):
my_reports_list = my_reports_list | group.report_set.all()
# Filter based by min date
if request.GET.get('sincesearch', False):
date_in = request.GET['sincesearch']
initial_search['since'] = date_in
date_since = datetime(
*[int(v) for v in date_in.replace('T', '-').replace(':',
'-').split('-')])
my_reports_list = my_reports_list.filter(timestamp__gte=date_since)
# Filter based by max date
if request.GET.get('beforesearch', False):
date_in = request.GET['beforesearch']
initial_search['before'] = date_in
date_since = datetime(
*[int(v) for v in date_in.replace('T', '-').replace(':',
'-').split('-')])
my_reports_list = my_reports_list.filter(timestamp__lte=date_since)
# Filter based on creator
if request.GET.get('ownersearch', False):
owner = request.GET['ownersearch']
initial_search['owner'] = owner
my_reports_list = my_reports_list.filter(owner__username__icontains=owner)
# Filter based on title
if request.GET.get('titlesearch', False):
title = request.GET['titlesearch']
initial_search['title'] = title
my_reports_list = my_reports_list.filter(title__icontains=title)
# Filter based on descriptions
if request.GET.get('descsearch', False):
desc = request.GET['descsearch']
initial_search['desc'] = desc
short_search = my_reports_list.filter(short_desc__icontains=desc)
long_search = my_reports_list.filter(long_desc__icontains=desc)
my_reports_list = short_search | long_search
for report in my_reports_list:
report.files = report.file_set.all()
for file in report.files:
file.file_obj.name = file.file_obj.name.split('/')[-1]
return render(request, 'reports/see_my_reports.html',
{'my_reports_list': my_reports_list, 'search_values': initial_search})
@login_required
def delete_report(request, id=None):
try:
report = Report.objects.get(id=id)
print(report.owner)
if report.owner != request.user:
text = "You do not have permission to delete this report"
return HttpResponse(text)
else:
Report.objects.filter(id=id).delete()
return render(request, 'reports/deleteReport.html')
except:
# text = "You are not able to delete this report"
# return HttpResponse(text)
return render(request, 'reports/deleteReport.html')
@login_required
def download_file(request, pk):
file = get_object_or_404(UploadedFile, pk=pk)
if file.report.group is not None:
if file.report.group not in UserGroup.objects.filter(members=request.user):
return HttpResponse(status=404)
filename = file.file_obj.name.split('/')[-1]
response = HttpResponse(file.file_obj, content_type='text/plain')
response['Content-Disposition'] = 'attachment; filename=%s' % filename
return response
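# The views above rely on ReportForm and FolderForm from .forms, which are not
# part of this file. The commented sketch below shows roughly what those forms
# would have to expose for the cleaned_data lookups used above ('title',
# 'Share with:', 'file_field') to work; the field types, model field names and
# ModelForm Meta options are assumptions, not the project's actual forms.
#
#     from django import forms
#     from .models import Report, Folder
#     from web.models import UserGroup
#
#     class FolderForm(forms.ModelForm):
#         title = forms.CharField(max_length=100)
#
#         class Meta:
#             model = Folder
#             fields = []
#
#     class ReportForm(forms.ModelForm):
#         file_field = forms.FileField(
#             widget=forms.ClearableFileInput(attrs={'multiple': True}),
#             required=False)
#
#         class Meta:
#             model = Report
#             fields = ['title', 'short_desc', 'long_desc', 'keyword']
#
#         def __init__(self, *args, user=None, **kwargs):
#             super().__init__(*args, **kwargs)
#             groups = UserGroup.objects.filter(members=user) if user else []
#             choices = [('all', 'all')] + [(g.name, g.name) for g in groups]
#             self.fields['Share with:'] = forms.ChoiceField(choices=choices)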
|
py | 1a4f1060c59cd5298b903a96416b7fd6700baa6f | from predictionserver.futureconventions.horizonconventions import HorizonConventions
class HorizonHabits(HorizonConventions):
def __init__(self,**kwargs):
super().__init__(**kwargs)
|
py | 1a4f10a6e0a5a628ee81a7a62d9d4d54c48114ed | # -*- encoding: utf-8 -*-
from .parser import Parser
from .safe_parser import SafeParser
def parse(dictionary):
return Parser(dictionary).query
def build_parser(valid_filters, default_filter):
return lambda dictionary: SafeParser(
dictionary,
valid_filters=valid_filters,
default_filter=default_filter,
).query
|
py | 1a4f10de121f27970653dccea4975730bc2f751c | import urllib3
import json
from api.src.postgre import Postgres
class DataCrawler:
    """Scrape crime data from OndeFuiRoubado and a Fusion Tables feed and store it in Postgres."""
    def getData(self):
        # Fetch both datasets, then persist them in one Postgres session.
        jsonOndeFoiRoubado = self.getJsonFromOndeFoiRoubado()
        jsonOndeTemTiro = self.getJsonFromOnteTemTiro()
        postgre = Postgres()
        postgre.open()
        postgre.insertOndeFoiRoubado(jsonOndeFoiRoubado)
        postgre.insertOndeTemTiro(jsonOndeTemTiro)
        postgre.close()
    def getJsonFromOndeFoiRoubado(self):
        # The crime data is embedded in the page as the argument of a
        # JavaScript initializer call; locate that call in the HTML and strip
        # the surrounding script text until only the JSON payload remains.
        http = urllib3.PoolManager()
        r = http.request('GET', 'http://www.ondefuiroubado.com.br/rio-de-janeiro/RJ')
htmlData = str(r.data.decode('utf-8'))
idxStart = htmlData.find('OndeFuiRoubado.Views.CrimesIndexView.initialize')
idxEnd = htmlData.find('OndeFuiRoubado.PoliceStations')
htmlData = htmlData[idxStart:idxEnd]
htmlData = htmlData.replace('OndeFuiRoubado.Views.CrimesIndexView.initialize(','')
htmlData = htmlData.strip()
htmlData = htmlData.replace(');\\n });\\n\\n document.addEventListener(\\\'onMainMapLoad\\\', function(data) {\\n','')
htmlData = htmlData.strip()
htmlData = htmlData.replace("document.addEventListener('onMainMapLoad', function(data) {",'')
htmlData = htmlData.replace("\n","")
htmlData = htmlData.replace("); });","")
return json.loads(htmlData)
    def getJsonFromOnteTemTiro(self):
        # This feed is served as JSON by a Google Fusion Tables query
        # endpoint; literal '\n' escape sequences in the payload are replaced
        # with '#' so that json.loads() can parse it.
        http = urllib3.PoolManager()
        r = http.request('GET', 'https://www.googleapis.com/fusiontables/v1/query?sql=SELECT%20*%20FROM%201HaQhL95pS0XhFQcifZ6fzKifuCXVdFxl-caH0zDf&key=AIzaSyC1CNeSPJOm5mPzk3kTrXuHJgG5vJP9Tgo')
        htmlData = str(r.data.decode('utf-8'))
        htmlData = htmlData.replace("\\n", "#")
        return json.loads(htmlData)
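# Minimal usage sketch (assumes a reachable Postgres instance behind
# api.src.postgre and network access to both source sites):
if __name__ == '__main__':
    DataCrawler().getData()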
|
py | 1a4f11080c32af44227c01946573d4b2f2aa1b97 | from website.app import start
if __name__ == '__main__':
start() |
py | 1a4f11d2024bf5be433547afa88f846277edf3cc | name = "API"
from .modules import *
class API:
def __init__(self):
self.get = Get()
self.post = Post()
self.put = Put()
self.patch = Patch()
self.delete = Delete()
|