the-stack_0_27357
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Feature scaled kernel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import parameter_properties
from tensorflow_probability.python.internal import tensor_util
from tensorflow_probability.python.math.psd_kernels import feature_transformed
from tensorflow_probability.python.math.psd_kernels.internal import util
__all__ = ['FeatureScaled']
# TODO(b/132103412): Support more general scaling via LinearOperator, along with
# scaling all feature dimensions.
class FeatureScaled(feature_transformed.FeatureTransformed):
"""Kernel that first rescales all feature dimensions.
Given a kernel `k` and `scale_diag` and inputs `x` and `y`, this kernel
first rescales the input by computing `x / scale_diag` and
`y / scale_diag`, and passing this to `k`.
With 1 feature dimension, this is also called Automatic Relevance
Determination (ARD) [1].
#### References
[1]: Carl Edward Rasmussen and Christopher K. I. Williams. Gaussian
Processes for Machine Learning. Section 5.1 2006.
http://www.gaussianprocess.org/gpml/chapters/RW5.pdf
"""
def __init__(
self, kernel, scale_diag, validate_args=False, name='FeatureScaled'):
"""Construct an FeatureScaled kernel instance.
Args:
kernel: `PositiveSemidefiniteKernel` instance. Inputs are rescaled and
passed in to this kernel. Parameters to `kernel` must be broadcastable
with `scale_diag`.
scale_diag: Floating point `Tensor` that controls how sharp or wide the
kernel shape is. `scale_diag` must have at least `kernel.feature_ndims`
dimensions, and extra dimensions must be broadcastable with parameters
of `kernel`. This is a "diagonal" in the sense that if all the feature
dimensions were flattened, `scale_diag` acts as the inverse of a
diagonal matrix.
validate_args: If `True`, parameters are checked for validity despite
possibly degrading runtime performance.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
with tf.name_scope(name):
self._scale_diag = tensor_util.convert_nonref_to_tensor(
scale_diag, name='scale_diag')
def rescale_input(x, feature_ndims, example_ndims):
"""Computes `x / scale_diag`."""
scale_diag = tf.convert_to_tensor(self.scale_diag)
scale_diag = util.pad_shape_with_ones(
scale_diag,
example_ndims,
# Start before the first feature dimension. We assume scale_diag has
# at least as many dimensions as feature_ndims.
start=-(feature_ndims + 1))
return x / scale_diag
super(FeatureScaled, self).__init__(
kernel,
transformation_fn=rescale_input,
validate_args=validate_args,
name=name,
parameters=parameters)
@property
def scale_diag(self):
return self._scale_diag
@classmethod
def _parameter_properties(cls, dtype):
from tensorflow_probability.python.bijectors import softplus # pylint:disable=g-import-not-at-top
return dict(
kernel=parameter_properties.BatchedComponentProperties(),
scale_diag=parameter_properties.ParameterProperties(
event_ndims=lambda self: self.kernel.feature_ndims,
default_constraining_bijector_fn=(
lambda: softplus.Softplus(low=dtype_util.eps(dtype)))))
def _parameter_control_dependencies(self, is_init):
if not self.validate_args:
return []
assertions = []
scale_diag = self.scale_diag
if scale_diag is not None and is_init != tensor_util.is_ref(scale_diag):
assertions.append(assert_util.assert_positive(
scale_diag,
message='scale_diag must be positive.'))
return assertions
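if __name__ == '__main__':
  # Editor's illustrative sketch, not part of the original module: demonstrates
  # the ARD-style rescaling described in the class docstring by wrapping an
  # ExponentiatedQuadratic base kernel. Assumes the sibling TFP module
  # `exponentiated_quadratic` is importable, as in the TFP source tree.
  from tensorflow_probability.python.math.psd_kernels import exponentiated_quadratic
  base_kernel = exponentiated_quadratic.ExponentiatedQuadratic()
  ard_kernel = FeatureScaled(base_kernel, scale_diag=[1., 2., 0.5])
  x = tf.random.normal([10, 3])
  # Evaluating `ard_kernel` on `x` is equivalent to evaluating `base_kernel`
  # on `x / scale_diag`.
  print(ard_kernel.matrix(x, x).shape)  # (10, 10)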
the-stack_0_27358
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
from absl.testing import parameterized
import tensorflow.compat.v2 as tf
from tensorflow_examples.lite.model_maker.core.task.model_spec import text_spec
from official.nlp.data import classifier_data_lib
def _gen_examples():
examples = []
examples.append(
classifier_data_lib.InputExample(
guid=0, text_a='Really good.', label='pos'))
examples.append(
classifier_data_lib.InputExample(guid=1, text_a='So bad.', label='neg'))
return examples
def _get_dataset_from_tfrecord(tfrecord_file, name_to_features):
def _parse_function(example_proto):
# Parse the input `tf.Example` proto using the dictionary above.
return tf.io.parse_single_example(example_proto, name_to_features)
ds = tf.data.TFRecordDataset(tfrecord_file)
ds = ds.map(_parse_function)
return ds
class AverageWordVecModelSpecTest(tf.test.TestCase):
def setUp(self):
super(AverageWordVecModelSpecTest, self).setUp()
self.model_spec = text_spec.AverageWordVecModelSpec(seq_len=5)
self.vocab = collections.OrderedDict(
(('<PAD>', 0), ('<START>', 1), ('<UNKNOWN>', 2), ('good', 3), ('bad',
4)))
self.model_spec.vocab = self.vocab
def test_tokenize(self):
model_spec = text_spec.AverageWordVecModelSpec()
text = model_spec._tokenize('It\'s really good.')
self.assertEqual(text, ['it\'s', 'really', 'good'])
model_spec = text_spec.AverageWordVecModelSpec(lowercase=False)
text = model_spec._tokenize('That is so cool!!!')
self.assertEqual(text, ['That', 'is', 'so', 'cool'])
def test_convert_examples_to_features(self):
examples = _gen_examples()
tfrecord_file = os.path.join(self.get_temp_dir(), 'tmp.tfrecord')
self.model_spec.convert_examples_to_features(examples, tfrecord_file,
['pos', 'neg'])
ds = _get_dataset_from_tfrecord(tfrecord_file,
self.model_spec.get_name_to_features())
expected_features = [[[1, 2, 3, 0, 0], 0], [[1, 2, 4, 0, 0], 1]]
for i, sample in enumerate(ds):
self.assertTrue(
(sample['input_ids'].numpy() == expected_features[i][0]).all())
self.assertEqual(sample['label_ids'].numpy(), expected_features[i][1])
def test_preprocess(self):
token_ids = self.model_spec.preprocess('It\'s really good.')
expected_token_ids = [1, 2, 2, 3, 0]
self.assertEqual(token_ids, expected_token_ids)
def test_gen_vocab(self):
examples = _gen_examples()
self.model_spec.gen_vocab(examples)
expected_vocab = collections.OrderedDict([('<PAD>', 0), ('<START>', 1),
('<UNKNOWN>', 2), ('really', 3),
('good', 4), ('so', 5),
('bad', 6)])
self.assertEqual(self.model_spec.vocab, expected_vocab)
def test_save_load_vocab(self):
vocab_file = os.path.join(self.get_temp_dir(), 'vocab.txt')
self.model_spec.save_vocab(vocab_file)
vocab = self.model_spec.load_vocab(vocab_file)
self.assertEqual(vocab, self.vocab)
def test_run_classifier(self):
num_classes = 2
model = self.model_spec.run_classifier(
train_ds=self._gen_random_ds(num_classes),
validation_ds=self._gen_random_ds(num_classes),
epochs=1,
steps_per_epoch=1,
num_classes=num_classes)
self.assertIsInstance(model, tf.keras.Model)
def _gen_random_ds(self, num_classes, data_size=1, batch_size=4):
batched_features = tf.random.uniform(
(data_size, batch_size, self.model_spec.seq_len),
minval=0,
maxval=len(self.model_spec.vocab),
dtype=tf.dtypes.int32)
batched_labels = tf.random.uniform((data_size, batch_size),
minval=0,
maxval=num_classes,
dtype=tf.dtypes.int32)
ds = tf.data.Dataset.from_tensor_slices((batched_features, batched_labels))
return ds
class BertClassifierModelSpecTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(
('https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/1', True),
('https://tfhub.dev/google/bert_uncased_L-12_H-768_A-12/1', False),
)
def test_bert(self, uri, is_tf2):
model_spec = text_spec.BertClassifierModelSpec(
uri, is_tf2=is_tf2, distribution_strategy='off', seq_len=3)
self._test_convert_examples_to_features(model_spec)
self._test_run_classifier(model_spec)
def _test_convert_examples_to_features(self, model_spec):
examples = _gen_examples()
tfrecord_file = os.path.join(self.get_temp_dir(), 'tmp.tfrecord')
model_spec.convert_examples_to_features(examples, tfrecord_file,
['pos', 'neg'])
ds = _get_dataset_from_tfrecord(tfrecord_file,
model_spec.get_name_to_features())
expected_features = []
expected_features.append({
'input_ids': [101, 2428, 102],
'input_mask': [1, 1, 1],
'segment_ids': [0, 0, 0],
'label_ids': 0
})
expected_features.append({
'input_ids': [101, 2061, 102],
'input_mask': [1, 1, 1],
'segment_ids': [0, 0, 0],
'label_ids': 1
})
for i, sample in enumerate(ds):
for k, v in expected_features[i].items():
self.assertTrue((sample[k].numpy() == v).all())
def _test_run_classifier(self, model_spec):
num_classes = 2
model = model_spec.run_classifier(
train_ds=self._gen_random_ds(model_spec.seq_len, num_classes),
validation_ds=self._gen_random_ds(model_spec.seq_len, num_classes),
epochs=1,
steps_per_epoch=1,
num_classes=num_classes)
self.assertIsInstance(model, tf.keras.Model)
def _gen_random_ds(self, seq_len, num_classes, data_size=1, batch_size=1):
batched_input_ids = tf.random.uniform((data_size, batch_size, seq_len),
minval=0,
maxval=2,
dtype=tf.dtypes.int32)
batched_input_mask = tf.random.uniform((data_size, batch_size, seq_len),
minval=0,
maxval=2,
dtype=tf.dtypes.int32)
batched_segment_ids = tf.random.uniform((data_size, batch_size, seq_len),
minval=0,
maxval=2,
dtype=tf.dtypes.int32)
batched_labels = tf.random.uniform((data_size, batch_size),
minval=0,
maxval=num_classes,
dtype=tf.dtypes.int32)
x = {
'input_word_ids': batched_input_ids,
'input_mask': batched_input_mask,
'input_type_ids': batched_segment_ids
}
y = batched_labels
ds = tf.data.Dataset.from_tensor_slices((x, y))
return ds
if __name__ == '__main__':
# Load compressed models from tensorflow_hub
os.environ['TFHUB_MODEL_LOAD_FORMAT'] = 'COMPRESSED'
tf.test.main()
the-stack_0_27359
#!/usr/bin/env python3
# Copyright 2017 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from xi_plugin import start_plugin, Plugin, edit
MATCHES = {"{": "}", "[": "]", "(": ")"}
class BracketCloser(Plugin):
"""Naively closes opened brackets, parens, & braces."""
def update(self, view, author, rev, start, end,
new_len, edit_type, text=None):
resp = 0
close_char = MATCHES.get(text)
if close_char:
# compute a delta from params:
new_cursor = end + new_len
# we set 'after_cursor' because we want the edit to appear to the right
# of the active cursor. we set priority=HIGH because we want this edit
# applied after concurrent edits.
resp = self.new_edit(rev, (new_cursor, new_cursor), close_char,
after_cursor=True, priority=edit.EDIT_PRIORITY_HIGH)
return resp
def main():
start_plugin(BracketCloser())
if __name__ == "__main__":
main()
the-stack_0_27361
#!/usr/bin/python
# coding=utf-8
from __future__ import print_function
import sys
if (sys.version_info > (3, 0)):
from io import StringIO
else:
try:
from io import BytesIO as StringIO
except ImportError:
import StringIO
from optparse import OptionParser
import os
from os.path import isfile, join
import re
from subprocess import Popen, PIPE
import jsmin
from .jspacker import JavaScriptPacker
VERSION_TEMPLATE = """
x3dom.versionInfo = {
version: '%s',
revision: '%s',
date: '%s'
};
"""
usage = \
"""
%prog [options] <files> """
tools_path = os.path.abspath(__file__)
tools_path = os.path.dirname(tools_path)
# os.chdir(os.path.abspath(tools_path + "/../src"))
class packer(object):
VERSION_STRING = "/** X3DOM Runtime, http://www.x3dom.org */"
# Create the version.js and fill in the git revision
def generate_version_file(self, version_in):
version_out = "version.js"
print("Generating version.js")
# Read the base-version from the VERSION file
if os.path.isfile(version_in):
version_file = open(version_in, "r")
# Make sure to only use the version string without '\n' etc.
version = version_file.read()
version = version.split()[0]
version_file.close()
else:
print("FATAL: Cannot find VERSION file: " + version_in)
sys.exit(0)
# Extract the git revision
try:
git_revision = Popen(["git", "log", "-1", "--pretty=format:%H"], stdout=PIPE).communicate()[0]
git_date = Popen(["git", "log", "-1", "--pretty=format:%ad"], stdout=PIPE).communicate()[0]
except:
git_revision = 0
git_date = 0
print(" WARNING: Cannot find git executable")
print(" Input ", os.path.abspath(version_in))
print(" Output ", os.path.abspath(version_out))
print(" Version ", version)
print(" Revision ", git_revision)
print(" Date ", git_date)
print("")
# Write the version and revision to file
version_js_file = open(version_out, "w")
version_js_file.write(VERSION_TEMPLATE % (version, git_revision, git_date))
version_js_file.close()
self.VERSION_STRING = "/** X3DOM Runtime, http://www.x3dom.org/ %s - %s - %s */" % (version, git_revision, git_date)
return version_out
# File merging helper
def _mergeFile(self, concatenated_file, filename):
"""
Append content of the given file to the given buffer
@param concatenated_file: Buffer containing the already concatenated files
@type concatenated_file: String
@param filename: Path to file that shall be appended
@type filename: String
@return: A String with contents of the given file appended
@rtype: String
"""
# print "File:", filename
try:
print(" " + os.path.abspath(filename))
f = open(filename, 'r')
concatenated_file += f.read()
f.close()
except:
print("Could not open input file '%s'. Skipping" % filename)
concatenated_file += "\n"
return concatenated_file
def _prefixFilePath(self, filename, src_prefix_path):
"""
Prefix filename with path if path is not empty
@param filename: Name of the file
@type filename: String
@param src_prefix_path: Path to use
@type src_prefix_path: String
@return: filename with prefix if path is not empty
@rtype: String
"""
if src_prefix_path != "":
filename = src_prefix_path + "/" + filename
return filename
# Packaging
def build(self, input_files, output_file, packaging_module, include_version=True, src_prefix_path=""):
"""
Build distributable version of x3dom
@param src_prefix_path: Optional path that is used as prefix for all source files
@type src_prefix_path: String
"""
print("output file:", output_file)
print("input_files:", input_files)
version_out = ""
if include_version == True:
# find the VERSION file
if os.path.isfile("VERSION"):
version_file_name = "VERSION"
elif os.path.isfile("src/VERSION"):
version_file_name = "src/VERSION"
else:
print("FATAL: Cannot find any VERSION file")
sys.exit(0)
# parse file & generate version.js
version_out = self.generate_version_file(version_file_name)
# Add the version.js to the list of input files
input_files.append((version_out, [version_out]))
concatenated_file = ""
in_len = 0
out_len = 0
# Merging files
print("Packing Files")
for (_, files) in input_files:
for f in files:
if f == version_out:
concatenated_file = self._mergeFile(concatenated_file, f)
else:
concatenated_file = self._mergeFile(concatenated_file, self._prefixFilePath(f, src_prefix_path))
"""
#Single file?
if filename[-3:] == ".js":
#Merge directly
concatenated_file = self._mergeFile(concatenated_file, filename)
#Otherwise (folder)
else:
#Open all files in folder and merge individually
print "Folder: ", filename
node_files = [f for f in os.listdir(filename) if isfile(join(filename,f)) and f[-3:]==".js"]
print ";".join(node_files)
for node_file in node_files:
concatenated_file = self._mergeFile(concatenated_file, join(filename,node_file))
"""
print("")
outpath = os.path.dirname(os.path.abspath(output_file))
if not os.access(outpath, os.F_OK):
print("Create Dir ", outpath)
os.mkdir(outpath)
# Packaging
print("Packaging")
print(self.VERSION_STRING)
print(" Algo " + packaging_module)
print(" Output " + os.path.abspath(output_file))
# JSMIN
if packaging_module == "jsmin":
# Minifiy the concatenated files
out_stream = StringIO()
jsm = jsmin.JavascriptMinify()
jsm.minify(StringIO(concatenated_file), out_stream)
out_len = len(out_stream.getvalue())
# Write the minified output file
outfile = open(output_file, 'w')
outfile.write(self.VERSION_STRING)
outfile.write(out_stream.getvalue())
outfile.close()
# JSPACKER
elif packaging_module == "jspacker":
p = JavaScriptPacker()
result = p.pack(concatenated_file, compaction=True, encoding=62, fastDecode=False)
out_len = len(result)
outfile = open(output_file, 'w')
outfile.write(self.VERSION_STRING)
outfile.write(result)
outfile.close()
# ClosureCompiler
elif packaging_module == "closure":
# collect files
files = []
for (_, filesForComponent) in input_files:
for f in filesForComponent:
if f == version_out:
files += ["--js=" + f]
else:
files += ["--js=" + self._prefixFilePath(f, src_prefix_path)]
#concatenated_file = self._mergeFile(concatenated_file, _prefixFilePath(f, src_prefix_path))
Popen(["java", "-jar", "tools/compiler.jar", "--js_output_file=" + output_file, "--summary_detail_level=3", "--warning_level=VERBOSE"] + files)
# Popen(["java", "-jar", "tools/compiler.jar", "--js_output_file=" + output_file] + files)
# NONE
elif packaging_module == 'none':
outfile = open(output_file, 'w')
outfile.write(self.VERSION_STRING)
outfile.write(concatenated_file)
outfile.close()
# Output some stats
in_len = len(concatenated_file)
ratio = float(out_len) / float(in_len)
print(" Packed %s -> %s" % (in_len, out_len))
print(" Ratio %s" % ratio)
if __name__ == '__main__':
parser = OptionParser(usage)
parser.add_option("-a", "--algo", type="string", dest="algo", help='The algorithm to use. [jsmin, jspacker, closure, none]', default="jsmin")
parser.add_option("-o", "--outfile", type="string", dest="outfile", help='The name of the output file.')
(options, input_files) = parser.parse_args()
if len(input_files) == 0:
parser.print_help()
print("- No input files specified. Exiting -")
sys.exit(0)
if not options.outfile:
parser.print_help()
print("- Please specify an output file using the -o options. Exiting. -")
sys.exit(0)
x3dom_packer = packer()
x3dom_packer.build(input_files, options.outfile, options.algo)
the-stack_0_27364
import json
import time
from functools import wraps
from itertools import repeat
from typing import Any, Callable, Container, Dict, Iterable, List, Optional
from urllib.parse import quote
import gevent
import structlog
from eth_utils import to_checksum_address
from gevent.lock import Semaphore
from matrix_client.api import MatrixHttpApi
from matrix_client.client import CACHE, MatrixClient
from matrix_client.errors import MatrixHttpLibError, MatrixRequestError
from matrix_client.room import Room as MatrixRoom
from matrix_client.user import User
from requests import Response
from requests.adapters import HTTPAdapter
log = structlog.get_logger(__name__)
SHUTDOWN_TIMEOUT = 35
def node_address_from_userid(user_id: Optional[str]) -> Optional[str]:
if user_id:
return to_checksum_address(user_id.split(":", 1)[0][1:])
return None
class Room(MatrixRoom):
""" Matrix `Room` subclass that invokes listener callbacks in separate greenlets """
def __init__(self, client, room_id):
super().__init__(client, room_id)
self._members: Dict[str, User] = {}
# dict of 'type': 'content' key/value pairs
self.account_data: Dict[str, Dict[str, Any]] = dict()
def get_joined_members(self, force_resync=False) -> List[User]:
""" Return a list of members of this room. """
if force_resync:
response = self.client.api.get_room_members(self.room_id)
for event in response["chunk"]:
if event["content"]["membership"] == "join":
user_id = event["state_key"]
if user_id not in self._members:
self._mkmembers(
User(self.client.api, user_id, event["content"].get("displayname"))
)
return list(self._members.values())
def _mkmembers(self, member: User):
if member.user_id not in self._members:
self._members[member.user_id] = member
def _rmmembers(self, user_id: str):
self._members.pop(user_id, None)
def __repr__(self):
if self.canonical_alias:
return f"<Room id={self.room_id!r} alias={self.canonical_alias!r}>"
return f"<Room id={self.room_id!r} aliases={self.aliases!r}>"
def update_aliases(self):
""" Get aliases information from room state
Returns:
boolean: True if the aliases changed, False if not
"""
changed = False
try:
response = self.client.api.get_room_state(self.room_id)
except MatrixRequestError:
return False
for chunk in response:
content = chunk.get("content")
if content:
if "aliases" in content:
aliases = content["aliases"]
if aliases != self.aliases:
self.aliases = aliases
changed = True
if chunk.get("type") == "m.room.canonical_alias":
canonical_alias = content["alias"]
if self.canonical_alias != canonical_alias:
self.canonical_alias = canonical_alias
changed = True
if changed and self.aliases and not self.canonical_alias:
self.canonical_alias = self.aliases[0]
return changed
def set_account_data(self, type_: str, content: Dict[str, Any]) -> dict:
self.account_data[type_] = content
return self.client.api.set_room_account_data(
quote(self.client.user_id), quote(self.room_id), quote(type_), content
)
class GMatrixHttpApi(MatrixHttpApi):
"""
A wrapper around MatrixHttpApi to limit the number
of concurrent requests we make to the number of connections
available to us in requests.Session connection pool size.
Args:
pool_maxsize: max size of underlying/session connection pool
retry_timeout: for how long should a single request be retried if it errors
retry_delay: callable which returns an iterable of delays
"""
def __init__(
self,
*args,
pool_maxsize: int = 10,
retry_timeout: int = 60,
retry_delay: Callable[[], Iterable[float]] = None,
long_paths: Container[str] = (),
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.server_ident: Optional[str] = None
http_adapter = HTTPAdapter(pool_maxsize=pool_maxsize)
https_adapter = HTTPAdapter(pool_maxsize=pool_maxsize)
self.session.mount("http://", http_adapter)
self.session.mount("https://", https_adapter)
self.session.hooks["response"].append(self._record_server_ident)
self._long_paths = long_paths
if long_paths:
self._semaphore = Semaphore(pool_maxsize - 1)
self._priority_lock = Semaphore()
else:
self._semaphore = Semaphore(pool_maxsize)
self.retry_timeout = retry_timeout
self.retry_delay = retry_delay
if self.retry_delay is None:
self.retry_delay = lambda: repeat(1)
def _send(self, method, path, *args, **kwargs):
# we use an infinite loop + time + sleep instead of gevent.Timeout
# to be able to re-raise the last exception instead of declaring one beforehand
started = time.time()
# paths in long_paths have a reserved slot in the pool, and aren't error-handled
# to avoid them getting stuck when listener greenlet is killed
if path in self._long_paths:
with self._priority_lock:
return super()._send(method, path, *args, **kwargs)
last_ex = None
for delay in self.retry_delay():
try:
with self._semaphore:
return super()._send(method, path, *args, **kwargs)
except (MatrixRequestError, MatrixHttpLibError) as ex:
# from MatrixRequestError, retry only 5xx http errors
if isinstance(ex, MatrixRequestError) and ex.code < 500:
raise
if time.time() > started + self.retry_timeout:
raise
last_ex = ex
log.debug(
"Got http _send exception, waiting then retrying",
wait_for=delay,
_exception=ex,
)
gevent.sleep(delay)
else:
if last_ex:
raise last_ex
def send_to_device(self, event_type, messages, txn_id=None): # pylint: disable=unused-argument
started = time.time()
last_ex = None
for delay in self.retry_delay():
try:
with self._semaphore:
return super().send_to_device(event_type, messages, txn_id=None)
except (MatrixRequestError, MatrixHttpLibError) as ex:
# from MatrixRequestError, retry only 5xx http errors
if isinstance(ex, MatrixRequestError) and ex.code < 500:
raise
if time.time() > started + self.retry_timeout:
raise
last_ex = ex
log.debug(
"Got http _send exception, waiting then retrying",
wait_for=delay,
_exception=ex,
)
gevent.sleep(delay)
else:
if last_ex:
raise last_ex
def _record_server_ident(
self, response: Response, *args, **kwargs # pylint: disable=unused-argument
):
self.server_ident = response.headers.get("Server")
class GMatrixClient(MatrixClient):
""" Gevent-compliant MatrixClient subclass """
sync_filter: str
sync_thread: Optional[gevent.Greenlet] = None
_handle_thread: Optional[gevent.Greenlet] = None
def __init__(
self,
base_url: str,
token: str = None,
user_id: str = None,
valid_cert_check: bool = True,
sync_filter_limit: int = 20,
cache_level: CACHE = CACHE.ALL,
http_pool_maxsize: int = 10,
http_retry_timeout: int = 60,
http_retry_delay: Callable[[], Iterable[float]] = lambda: repeat(1),
) -> None:
# dict of 'type': 'content' key/value pairs
self.account_data: Dict[str, Dict[str, Any]] = dict()
self._post_hook_func: Optional[Callable[[str], None]] = None
self.token: Optional[str] = None
super().__init__(
base_url, token, user_id, valid_cert_check, sync_filter_limit, cache_level
)
self.api = GMatrixHttpApi(
base_url,
token,
pool_maxsize=http_pool_maxsize,
retry_timeout=http_retry_timeout,
retry_delay=http_retry_delay,
long_paths=("/sync",),
)
self.api.validate_certificate(valid_cert_check)
def listen_forever(
self,
timeout_ms: int = 20000,
exception_handler: Callable[[Exception], None] = None,
bad_sync_timeout: int = 5,
):
"""
Keep listening for events forever.
Args:
timeout_ms: How long to poll the Home Server for before retrying.
exception_handler: Optional exception handler function which can
be used to handle exceptions in the caller thread.
bad_sync_timeout: Base time to wait after an error before retrying.
Will be increased according to exponential backoff.
"""
_bad_sync_timeout = bad_sync_timeout
self.should_listen = True
while self.should_listen:
try:
# may be killed and raise exception from _handle_thread
self._sync(timeout_ms)
_bad_sync_timeout = bad_sync_timeout
except MatrixRequestError as e:
log.warning(
"A MatrixRequestError occured during sync.",
node=node_address_from_userid(self.user_id),
)
if e.code >= 500:
log.warning(
"Problem occured serverside. Waiting",
node=node_address_from_userid(self.user_id),
wait_for=_bad_sync_timeout,
)
gevent.sleep(_bad_sync_timeout)
_bad_sync_timeout = min(_bad_sync_timeout * 2, self.bad_sync_timeout_limit)
else:
raise
except MatrixHttpLibError:
log.exception(
"A MatrixHttpLibError occured during sync.",
node=node_address_from_userid(self.user_id),
)
if self.should_listen:
gevent.sleep(_bad_sync_timeout)
_bad_sync_timeout = min(_bad_sync_timeout * 2, self.bad_sync_timeout_limit)
except Exception as e:
log.exception(
"Exception thrown during sync", node=node_address_from_userid(self.user_id)
)
if exception_handler is not None:
exception_handler(e)
else:
raise
def start_listener_thread(self, timeout_ms: int = 20000, exception_handler: Callable = None):
"""
Start a listener greenlet to listen for events in the background.
Args:
timeout_ms: How long to poll the Home Server for before retrying.
exception_handler: Optional exception handler function which can
be used to handle exceptions in the caller thread.
"""
assert not self.should_listen and self.sync_thread is None, "Already running"
self.should_listen = True
self.sync_thread = gevent.spawn(self.listen_forever, timeout_ms, exception_handler)
self.sync_thread.name = f"GMatrixClient.listen_forever user_id:{self.user_id}"
def stop_listener_thread(self):
""" Kills sync_thread greenlet before joining it """
# when stopping, `kill` will cause the `self.api.sync` call in _sync
# to raise a connection error. This flag will ensure it exits gracefully then
self.should_listen = False
if self.sync_thread:
self.sync_thread.kill()
log.debug(
"Waiting on sync greenlet",
node=node_address_from_userid(self.user_id),
current_user=self.user_id,
)
exited = gevent.joinall({self.sync_thread}, timeout=SHUTDOWN_TIMEOUT, raise_error=True)
if not exited:
raise RuntimeError("Timeout waiting on sync greenlet during transport shutdown.")
self.sync_thread.get()
if self._handle_thread is not None:
log.debug(
"Waiting on handle greenlet",
node=node_address_from_userid(self.user_id),
current_user=self.user_id,
)
exited = gevent.joinall(
{self._handle_thread}, timeout=SHUTDOWN_TIMEOUT, raise_error=True
)
if not exited:
raise RuntimeError("Timeout waiting on handle greenlet during transport shutdown.")
self._handle_thread.get()
log.debug(
"Listener greenlet exited",
node=node_address_from_userid(self.user_id),
current_user=self.user_id,
)
self.sync_thread = None
self._handle_thread = None
def stop(self):
self.stop_listener_thread()
self.sync_token = None
self.should_listen = False
self.rooms: Dict[str, Room] = {}
def logout(self):
super().logout()
self.api.session.close()
def search_user_directory(self, term: str) -> List[User]:
"""
Search user directory for a given term, returning a list of users
Args:
term: term to be searched for
Returns:
user_list: list of users returned by server-side search
"""
response = self.api._send("POST", "/user_directory/search", {"search_term": term})
try:
return [
User(self.api, _user["user_id"], _user["display_name"])
for _user in response["results"]
]
except KeyError:
return []
def search_room_directory(self, filter_term: str = None, limit: int = 10) -> List[Room]:
filter_options: dict = {}
if filter_term:
filter_options = {"filter": {"generic_search_term": filter_term}}
response = self.api._send("POST", "/publicRooms", {"limit": limit, **filter_options})
rooms = []
for room_info in response["chunk"]:
room = Room(self, room_info["room_id"])
room.canonical_alias = room_info.get("canonical_alias")
rooms.append(room)
return rooms
def modify_presence_list(
self, add_user_ids: List[str] = None, remove_user_ids: List[str] = None
):
if add_user_ids is None:
add_user_ids = []
if remove_user_ids is None:
remove_user_ids = []
return self.api._send(
"POST",
f"/presence/list/{quote(self.user_id)}",
{"invite": add_user_ids, "drop": remove_user_ids},
)
def get_presence_list(self) -> List[dict]:
return self.api._send("GET", f"/presence/list/{quote(self.user_id)}")
def set_presence_state(self, state: str):
return self.api._send(
"PUT", f"/presence/{quote(self.user_id)}/status", {"presence": state}
)
def typing(self, room: Room, timeout: int = 5000):
"""
Send typing event directly to api
Args:
room: room to send typing event to
timeout: timeout for the event, in ms
"""
path = f"/rooms/{quote(room.room_id)}/typing/{quote(self.user_id)}"
return self.api._send("PUT", path, {"typing": True, "timeout": timeout})
def _mkroom(self, room_id: str) -> Room:
""" Uses a geventified Room subclass """
if room_id not in self.rooms:
self.rooms[room_id] = Room(self, room_id)
room = self.rooms[room_id]
if not room.canonical_alias:
room.update_aliases()
return room
def get_user_presence(self, user_id: str) -> str:
return self.api._send("GET", f"/presence/{quote(user_id)}/status").get("presence")
@staticmethod
def call(callback, *args, **kwargs):
return callback(*args, **kwargs)
def _sync(self, timeout_ms=30000):
""" Reimplements MatrixClient._sync, add 'account_data' support to /sync """
log.debug(
"Sync called", node=node_address_from_userid(self.user_id), current_user=self.user_id
)
response = self.api.sync(self.sync_token, timeout_ms)
prev_sync_token = self.sync_token
self.sync_token = response["next_batch"]
if self._handle_thread is not None:
# if previous _handle_thread is still running, wait for it and re-raise if needed
self._handle_thread.get()
is_first_sync = prev_sync_token is None
self._handle_thread = gevent.Greenlet(self._handle_response, response, is_first_sync)
self._handle_thread.name = (
f"GMatrixClient._sync user_id:{self.user_id} sync_token:{prev_sync_token}"
)
self._handle_thread.link_exception(lambda g: self.sync_thread.kill(g.exception))
log.debug(
"Starting handle greenlet",
node=node_address_from_userid(self.user_id),
first_sync=is_first_sync,
sync_token=prev_sync_token,
current_user=self.user_id,
)
self._handle_thread.start()
if self._post_hook_func is not None:
self._post_hook_func(self.sync_token)
def _handle_response(self, response, first_sync=False):
# We must ignore the stop flag during first_sync
if not self.should_listen and not first_sync:
log.warning(
"Aborting handle response",
node=node_address_from_userid(self.user_id),
reason="Transport stopped",
current_user=self.user_id,
)
return
# Handle presence after rooms
for presence_update in response["presence"]["events"]:
for callback in list(self.presence_listeners.values()):
self.call(callback, presence_update)
for to_device_message in response["to_device"]["events"]:
for listener in self.listeners[:]:
if listener["event_type"] == "to_device":
self.call(listener["callback"], to_device_message)
for room_id, invite_room in response["rooms"]["invite"].items():
for listener in self.invite_listeners[:]:
self.call(listener, room_id, invite_room["invite_state"])
for room_id, left_room in response["rooms"]["leave"].items():
for listener in self.left_listeners[:]:
self.call(listener, room_id, left_room)
if room_id in self.rooms:
del self.rooms[room_id]
for room_id, sync_room in response["rooms"]["join"].items():
if room_id not in self.rooms:
self._mkroom(room_id)
room = self.rooms[room_id]
# TODO: the rest of this for loop should be in room object method
room.prev_batch = sync_room["timeline"]["prev_batch"]
for event in sync_room["state"]["events"]:
event["room_id"] = room_id
self.call(room._process_state_event, event)
for event in sync_room["timeline"]["events"]:
event["room_id"] = room_id
self.call(room._put_event, event)
# TODO: global listeners can still exist but work by each
# room.listeners[uuid] having reference to global listener
# Dispatch for client (global) listeners
for listener in self.listeners:
should_call = (
listener["event_type"] is None or listener["event_type"] == event["type"]
)
if should_call:
self.call(listener["callback"], event)
for event in sync_room["ephemeral"]["events"]:
event["room_id"] = room_id
self.call(room._put_ephemeral_event, event)
for listener in self.ephemeral_listeners:
should_call = (
listener["event_type"] is None or listener["event_type"] == event["type"]
)
if should_call:
self.call(listener["callback"], event)
for event in sync_room["account_data"]["events"]:
room.account_data[event["type"]] = event["content"]
if first_sync:
# Only update the local account data on first sync to avoid races.
# We don't support running multiple raiden nodes for the same eth account,
# therefore no situation where we would need to be updated from the server
# can happen.
for event in response["account_data"]["events"]:
self.account_data[event["type"]] = event["content"]
def set_account_data(self, type_: str, content: Dict[str, Any]) -> dict:
""" Use this to set a key: value pair in account_data to keep it synced on server """
self.account_data[type_] = content
return self.api.set_account_data(quote(self.user_id), quote(type_), content)
def set_post_sync_hook(self, hook: Callable[[str], None]):
self._post_hook_func = hook
def set_access_token(self, user_id: str, token: Optional[str]) -> None:
self.user_id = user_id
self.token = self.api.token = token
def set_sync_limit(self, limit: Optional[int]) -> Optional[int]:
""" Sets the events limit per room for sync and return previous limit """
try:
prev_limit = json.loads(self.sync_filter)["room"]["timeline"]["limit"]
except (json.JSONDecodeError, KeyError):
prev_limit = None
self.sync_filter = json.dumps({"room": {"timeline": {"limit": limit}}})
return prev_limit
# Monkey patch matrix User class to provide nicer repr
@wraps(User.__repr__)
def user__repr__(self):
return f"<User id={self.user_id!r}>"
User.__repr__ = user__repr__
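if __name__ == "__main__":
    # Editor's illustrative sketch, not part of the original module: minimal
    # wiring of the gevent-friendly client defined above. The homeserver URL
    # and credentials are placeholders, and `login`/`add_listener` come from
    # the upstream matrix_client.MatrixClient base class.
    client = GMatrixClient(
        "https://matrix.example.org",
        http_retry_timeout=30,
        http_retry_delay=lambda: repeat(2),
    )
    client.login(username="alice", password="secret", sync=False)
    client.add_listener(lambda event: log.debug("matrix event", matrix_event=event))
    client.start_listener_thread()
    gevent.sleep(10)  # let the sync greenlet run for a while
    client.stop_listener_thread()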
the-stack_0_27367
from six import string_types, iteritems
from functools import partial
import itertools
import importlib
from ..core import Machine, Enum
import numbers
class MarkupMachine(Machine):
# Special attributes such as NestedState._name/_parent or Transition._condition are handled differently
state_attributes = ['on_exit', 'on_enter', 'ignore_invalid_triggers', 'timeout', 'on_timeout', 'tags']
transition_attributes = ['source', 'dest', 'prepare', 'before', 'after']
def __init__(self, *args, **kwargs):
self._markup = kwargs.pop('markup', {})
self._auto_transitions_markup = kwargs.pop('auto_transitions_markup', False)
self.skip_references = True
self._needs_update = True
if self._markup:
models_markup = self._markup.pop('models', [])
super(MarkupMachine, self).__init__(None, **self._markup)
for m in models_markup:
self._add_markup_model(m)
else:
super(MarkupMachine, self).__init__(*args, **kwargs)
self._markup['before_state_change'] = [x for x in (rep(f) for f in self.before_state_change) if x]
self._markup['after_state_change'] = [x for x in (rep(f) for f in self.after_state_change) if x]
self._markup['prepare_event'] = [x for x in (rep(f) for f in self.prepare_event) if x]
self._markup['finalize_event'] = [x for x in (rep(f) for f in self.finalize_event) if x]
self._markup['send_event'] = self.send_event
self._markup['auto_transitions'] = self.auto_transitions
self._markup['ignore_invalid_triggers'] = self.ignore_invalid_triggers
self._markup['queued'] = self.has_queue
@property
def auto_transitions_markup(self):
return self._auto_transitions_markup
@auto_transitions_markup.setter
def auto_transitions_markup(self, value):
self._auto_transitions_markup = value
self._needs_update = True
@property
def markup(self):
self._markup['models'] = self._convert_models()
return self.get_markup_config()
# the only reason why this is not part of the markup property is that pickle
# has issues with properties during __setattr__ (self.markup is not set)
def get_markup_config(self):
if self._needs_update:
self._convert_states_and_transitions(self._markup)
self._needs_update = False
return self._markup
def add_transition(self, trigger, source, dest, conditions=None,
unless=None, before=None, after=None, prepare=None, **kwargs):
super(MarkupMachine, self).add_transition(trigger, source, dest, conditions=conditions, unless=unless,
before=before, after=after, prepare=prepare, **kwargs)
self._needs_update = True
def add_states(self, states, on_enter=None, on_exit=None, ignore_invalid_triggers=None, **kwargs):
super(MarkupMachine, self).add_states(states, on_enter=on_enter, on_exit=on_exit,
ignore_invalid_triggers=ignore_invalid_triggers, **kwargs)
self._needs_update = True
def _convert_states_and_transitions(self, root):
state = getattr(self, 'scoped', self)
if state.initial:
root['initial'] = state.initial
if state == self and state.name:
root['name'] = self.name[:-2]
self._convert_transitions(root)
self._convert_states(root)
def _convert_states(self, root):
key = 'states' if getattr(self, 'scoped', self) == self else 'children'
root[key] = []
for state_name, state in self.states.items():
s_def = _convert(state, self.state_attributes, self.skip_references)
if isinstance(state_name, Enum):
s_def['name'] = state_name.name
else:
s_def['name'] = state_name
if getattr(state, 'states', []):
with self(state_name):
self._convert_states_and_transitions(s_def)
root[key].append(s_def)
def _convert_transitions(self, root):
root['transitions'] = []
for event in self.events.values():
if self._omit_auto_transitions(event):
continue
for transitions in event.transitions.items():
for trans in transitions[1]:
t_def = _convert(trans, self.transition_attributes, self.skip_references)
t_def['trigger'] = event.name
con = [x for x in (rep(f.func, self.skip_references) for f in trans.conditions
if f.target) if x]
unl = [x for x in (rep(f.func, self.skip_references) for f in trans.conditions
if not f.target) if x]
if con:
t_def['conditions'] = con
if unl:
t_def['unless'] = unl
root['transitions'].append(t_def)
def _add_markup_model(self, markup):
initial = markup.get('state', None)
if markup['class-name'] == 'self':
self.add_model(self, initial)
else:
mod_name, cls_name = markup['class-name'].rsplit('.', 1)
cls = getattr(importlib.import_module(mod_name), cls_name)
self.add_model(cls(), initial)
def _convert_models(self):
models = []
for model in self.models:
state = getattr(model, self.model_attribute)
model_def = dict(state=state.name if isinstance(state, Enum) else state)
model_def['name'] = model.name if hasattr(model, 'name') else str(id(model))
model_def['class-name'] = 'self' if model == self else model.__module__ + "." + model.__class__.__name__
models.append(model_def)
return models
def _omit_auto_transitions(self, event):
return self.auto_transitions_markup is False and self._is_auto_transition(event)
# auto transition events commonly a) start with the 'to_' prefix, followed by b) the state name
# and c) contain a transition from each state to the target state (including the target)
def _is_auto_transition(self, event):
if event.name.startswith('to_') and len(event.transitions) == len(self.states):
state_name = event.name[len('to_'):]
try:
_ = self.get_state(state_name)
return True
except ValueError:
pass
return False
@classmethod
def _identify_callback(self, name):
callback_type, target = super(MarkupMachine, self)._identify_callback(name)
if callback_type:
self._needs_update = True
return callback_type, target
def rep(func, skip_references=False):
""" Return a string representation for `func`. """
if isinstance(func, string_types):
return func
if isinstance(func, numbers.Number):
return str(func)
if skip_references:
return None
try:
return func.__name__
except AttributeError:
pass
if isinstance(func, partial):
return "%s(%s)" % (
func.func.__name__,
", ".join(itertools.chain(
(str(_) for _ in func.args),
("%s=%s" % (key, value)
for key, value in iteritems(func.keywords if func.keywords else {})))))
return str(func)
def _convert(obj, attributes, skip):
s = {}
for key in attributes:
val = getattr(obj, key, False)
if not val:
continue
if isinstance(val, string_types):
s[key] = val
else:
try:
s[key] = [rep(v, skip) for v in iter(val)]
except TypeError:
s[key] = rep(val, skip)
return s
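if __name__ == '__main__':
    # Editor's illustrative sketch, not part of the original module: round-trips
    # a small machine through its markup, which is the purpose of the class
    # above. Because of the relative import at the top of this file, run it as
    # a module (e.g. `python -m transitions.extensions.markup` in the assumed
    # package layout) rather than as a script.
    machine = MarkupMachine(states=['A', 'B'], initial='A',
                            transitions=[['go', 'A', 'B']])
    machine.go()
    blueprint = machine.markup                # plain-dict description of the machine
    rebuilt = MarkupMachine(markup=blueprint)
    print(blueprint['transitions'])           # e.g. [{'source': 'A', 'dest': 'B', 'trigger': 'go'}]
    print(rebuilt.state)                      # 'B', restored from the markup's model entry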
the-stack_0_27368
import numpy as np
import matplotlib.pyplot as plt
vertices = [(0,0), (8,0), (0,2)]
# draw the triangle
plt.plot([0, 8], [0, 0])
plt.plot([0, 0], [0, 2])
plt.plot([0, 8], [2, 0])
# take the mean of the three points
vertices = np.stack(vertices).astype(float)
mean_pos = vertices.mean(axis=0)
# draw the mean as the point on the plot
plt.scatter(mean_pos[0], mean_pos[1])
plt.show()
# subtract the mean from the vertices and redraw the triangle
vertices -= mean_pos[None]
plt.plot((vertices[0,0], vertices[1,0]), (vertices[0,1], vertices[1,1]))
plt.plot((vertices[0,0], vertices[2,0]), (vertices[0,1], vertices[2,1]))
plt.plot((vertices[2,0], vertices[1,0]), (vertices[2,1], vertices[1,1]))
zero_mean = vertices.mean(axis=0)
plt.scatter(zero_mean[0], zero_mean[1])
plt.show()
# compute the bounding box
min_v = vertices.min(axis=0)
max_v = vertices.max(axis=0)
print(np.absolute(min_v) - np.absolute(max_v))
# now I will do the same thing for an equilateral triangle
vertices = [(2,1), (7,1), (4.5, 5.33)]
vertices = np.stack(vertices)
mean_pos = vertices.mean(axis=0)
print(mean_pos)
plt.plot((vertices[0,0], vertices[1,0]), (vertices[0,1], vertices[1,1]))
plt.plot((vertices[0,0], vertices[2,0]), (vertices[0,1], vertices[2,1]))
plt.plot((vertices[2,0], vertices[1,0]), (vertices[2,1], vertices[1,1]))
plt.scatter(mean_pos[0], mean_pos[1])
plt.show()
vertices = vertices - mean_pos[None]
plt.plot((vertices[0,0], vertices[1,0]), (vertices[0,1], vertices[1,1]))
plt.plot((vertices[0,0], vertices[2,0]), (vertices[0,1], vertices[2,1]))
plt.plot((vertices[2,0], vertices[1,0]), (vertices[2,1], vertices[1,1]))
new_mean_pos = vertices.mean(axis=0)
print(new_mean_pos)
plt.scatter(new_mean_pos[0], new_mean_pos[1])
plt.show()
# now compute the max and min; |min| - |max| will be zero only in the x component
max_v = np.max(vertices, axis=0)
min_v = np.min(vertices, axis=0)
print(np.absolute(min_v) - np.absolute(max_v))
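# Editor's note (sketch, not in the original script): the mean of the vertices is
# the centroid, so for the second triangle it should sit at
# ((2 + 7 + 4.5) / 3, (1 + 1 + 5.33) / 3) = (4.5, 2.443...), matching mean_pos above.
centroid_check = np.array([(2 + 7 + 4.5) / 3.0, (1 + 1 + 5.33) / 3.0])
print(centroid_check)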
the-stack_0_27369
"""
"""
from os.path import splitext, basename
import pandas as pd
from library.metrics import DataFileStore
class BaselineMetrics(DataFileStore):
def __init__(self, filepath):
self.datafile_path = filepath
self.prefix = basename(splitext(self.datafile_path)[0])
super().__init__("%s.baseline.bin" % splitext(self.datafile_path)[0])
def init_baseline(self):
dataframe = pd.read_csv(self.datafile_path)
metrics = {
key: round(value, 2)
for key, value in dict(dataframe.mean()).items()
}
self._save_data(metrics)
def get_metric_names(self):
metrics = self._load_data()
return list(metrics.keys())
def get_metric(self, name):
metrics = self._load_data()
return metrics[name]
def get_metrics(self):
metrics = self._load_data()
return [
"%s.%s=%s" % (self.prefix, key, value)
for key, value in metrics.items()
]
def set_metric(self, name, value):
metrics = self._load_data()
if name not in metrics.keys():
return False
metrics[name] = round(float(value), 2)
self._save_data(metrics)
return True
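if __name__ == "__main__":
    # Editor's illustrative sketch, not part of the original module: exercises
    # the class above on a throwaway CSV. The file path is a placeholder; only
    # imports already used by this module are assumed.
    sample = pd.DataFrame({"cpu": [1.0, 2.0, 3.0], "mem": [10.0, 20.0, 30.0]})
    sample.to_csv("/tmp/sample_metrics.csv", index=False)
    baseline = BaselineMetrics("/tmp/sample_metrics.csv")
    baseline.init_baseline()
    print(baseline.get_metric_names())   # ['cpu', 'mem']
    print(baseline.get_metrics())        # ['sample_metrics.cpu=2.0', 'sample_metrics.mem=20.0']
    baseline.set_metric("cpu", 2.5)
    print(baseline.get_metric("cpu"))    # 2.5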
the-stack_0_27371
import os, json
with open('lstm/lstm_200d_1x32x1.json') as fp_in:
cfg = json.load(fp_in)
for num_layers in [1,2,3]:
for hidden_size in [32,64]:
for num_directions in [False, True]:
cfg['cuda'] = True
cfg['network']['hidden_size'] = hidden_size
cfg['network']['num_layers'] = num_layers
cfg['network']['bidirectional'] = num_directions
cfg['name'] = f'lstm_200d_{num_layers}x{hidden_size}x{int(num_directions)+1}'
if not os.path.exists(f'{cfg["name"]}.json'):
with open(f'lstm/{cfg["name"]}.json', 'w') as fp_out:
json.dump(cfg, fp_out, indent=2)
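# Editor's note (sketch, not in the original script): the nested loops above write
# 3 * 2 * 2 = 12 configs; this just lists the names they are given, without
# touching any files.
from itertools import product
for layers, hidden, bidirectional in product([1, 2, 3], [32, 64], [False, True]):
    print(f'lstm_200d_{layers}x{hidden}x{int(bidirectional)+1}')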
the-stack_0_27374
import string
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import TreebankWordTokenizer
from nltk.stem import WordNetLemmatizer
from nltk.corpus import wordnet
TOKENIZER = TreebankWordTokenizer()
LEMMATIZER = WordNetLemmatizer()
STOP = stopwords.words('english')
# stolen from http://stackoverflow.com/questions/15586721/wordnet-lemmatization-and-pos-tagging-in-python
def get_wordnet_pos(treebank_tag):
if treebank_tag.startswith('J'):
return wordnet.ADJ
elif treebank_tag.startswith('V'):
return wordnet.VERB
elif treebank_tag.startswith('N'):
return wordnet.NOUN
elif treebank_tag.startswith('R'):
return wordnet.ADV
else:
return ''
def tokenize(doc):
doc = " ".join("".join([" " if ch in string.punctuation else ch for ch in doc]).split())
for token in TOKENIZER.tokenize(doc):
yield token.lower()
def remove_stopwords(tokens):
res = []
for token in tokens:
if token not in STOP:
res.append(token)
return res
def lemmatize(tokens):
tagged = nltk.pos_tag(tokens)
for t in tagged:
try:
yield LEMMATIZER.lemmatize(t[0], get_wordnet_pos(t[1]))
except KeyError:
pass
def preprocess(doc):
tokens = tokenize(doc)
tokens = remove_stopwords(tokens)
return tokens
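if __name__ == '__main__':
    # Editor's illustrative sketch, not part of the original module: runs the
    # helpers above end to end. Requires the NLTK data this module already
    # depends on (stopwords, wordnet, and a POS tagger) to be downloaded.
    doc = "The quick brown foxes are jumping over the lazy dogs."
    tokens = preprocess(doc)
    print(tokens)                   # punctuation and stopwords removed, lowercased
    print(list(lemmatize(tokens)))  # e.g. 'foxes' -> 'fox', 'jumping' -> 'jump'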
the-stack_0_27377
from django.db import transaction
from Harvest.utils import get_logger
from monitoring.decorators import update_component_status
from task_queue.task_queue import TaskQueue
from torrents.alcazar_client import AlcazarClient
from torrents.alcazar_event_processor import AlcazarEventProcessor
from torrents.exceptions import AlcazarNotConfiguredException
logger = get_logger(__name__)
UPDATE_BATCH_SIZE = 5000
@TaskQueue.periodic_task(3)
@transaction.atomic
@update_component_status(
'alcazar_update',
'Alcazar update completed successfully in {time_taken:.3f} s.',
'Alcazar update crashed.',
)
def poll_alcazar():
try:
client = AlcazarClient(timeout=60)
except AlcazarNotConfiguredException:
logger.info('Skipping alcazar poll due to missing config.')
return
update_batch = client.pop_update_batch(5000)
num_added = 0
num_updated = 0
num_removed = 0
for realm_batch in update_batch.values():
num_added += len(realm_batch['added'])
num_updated += len(realm_batch['updated'])
num_removed += len(realm_batch['removed'])
logger.debug('Received {} added, {} updated and {} removed from alcazar.', num_added, num_updated, num_removed)
processor = AlcazarEventProcessor()
processor.process(update_batch)
the-stack_0_27380
# Copyright 2020 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Event resources for version 1 of the Timesketch API."""
import codecs
import datetime
import hashlib
import json
import logging
import math
import time
import six
import dateutil
from elasticsearch.exceptions import RequestError
import numpy as np
import pandas as pd
from flask import jsonify
from flask import request
from flask import abort
from flask_restful import Resource
from flask_restful import reqparse
from flask_login import login_required
from flask_login import current_user
from timesketch.api.v1 import resources
from timesketch.lib import forms
from timesketch.lib.definitions import HTTP_STATUS_CODE_OK
from timesketch.lib.definitions import HTTP_STATUS_CODE_CREATED
from timesketch.lib.definitions import HTTP_STATUS_CODE_BAD_REQUEST
from timesketch.lib.definitions import HTTP_STATUS_CODE_FORBIDDEN
from timesketch.lib.definitions import HTTP_STATUS_CODE_NOT_FOUND
from timesketch.models import db_session
from timesketch.models.sketch import Event
from timesketch.models.sketch import SearchIndex
from timesketch.models.sketch import Sketch
from timesketch.models.sketch import Timeline
logger = logging.getLogger('timesketch.event_api')
def _tag_event(row, tag_dict, tags_to_add, datastore, flush_interval):
"""Tag each event from a dataframe with tags.
Args:
row (np.Series): a single row of data with existing tags and
information about the event in order to be able to add
tags to it.
tag_dict (dict): a dict that contains information to be returned
by the API call to the user.
tags_to_add (list[str]): a list of strings of tags to add to each
event.
datastore (elastic.ElasticsearchDataStore): the datastore object.
flush_interval (int): the number of events to import before a bulk
update is done with the datastore.
"""
tag_dict['events_processed_by_api'] += 1
existing_tags = set()
if 'tag' in row:
tag = row['tag']
if isinstance(tag, (list, tuple)):
existing_tags = set(tag)
new_tags = list(set().union(existing_tags, set(tags_to_add)))
else:
new_tags = tags_to_add
if set(existing_tags) == set(new_tags):
return
datastore.import_event(
index_name=row['_index'], event_type=row['_type'],
event_id=row['_id'], event={'tag': new_tags},
flush_interval=flush_interval)
tag_dict['tags_applied'] += len(new_tags)
tag_dict['number_of_events_with_added_tags'] += 1
class EventCreateResource(resources.ResourceMixin, Resource):
"""Resource to create an annotation for an event."""
@login_required
def post(self, sketch_id):
"""Handles POST request to the resource.
Handler for /api/v1/sketches/:sketch_id/event/create/
Args:
sketch_id: Integer primary key for a sketch database model
Returns:
An annotation in JSON (instance of flask.wrappers.Response)
"""
sketch = Sketch.query.get_with_acl(sketch_id)
if not sketch:
abort(
HTTP_STATUS_CODE_NOT_FOUND, 'No sketch found with this ID.')
if not sketch.has_permission(current_user, 'write'):
abort(HTTP_STATUS_CODE_FORBIDDEN,
'User does not have write access controls on sketch.')
form = request.json
if not form:
form = request.data
timeline_name = 'sketch specific timeline'
index_name_seed = 'timesketch_{0:d}'.format(sketch_id)
event_type = 'user_created_event'
date_string = form.get('date_string')
if not date_string:
date = datetime.datetime.utcnow().isoformat()
else:
# derive a datetime from the supplied date string:
try:
date = dateutil.parser.parse(date_string)
except (dateutil.parser.ParserError, OverflowError) as e:
logger.error('Unable to convert date string', exc_info=True)
abort(
HTTP_STATUS_CODE_BAD_REQUEST,
'Unable to add event, not able to convert the date '
'string. Was it properly formatted? Error: '
'{0!s}'.format(e))
timestamp = int(
time.mktime(date.utctimetuple())) * 1000000
timestamp += date.microsecond
event = {
'datetime': date_string,
'timestamp': timestamp,
'timestamp_desc': form.get('timestamp_desc', 'Event Happened'),
'message': form.get('message', 'No message string'),
}
attributes = form.get('attributes', {})
if not isinstance(attributes, dict):
abort(
HTTP_STATUS_CODE_BAD_REQUEST,
'Unable to add an event where the attributes are not a '
'dict object.')
event.update(attributes)
tag = form.get('tag', [])
if not isinstance(tag, list):
abort(
HTTP_STATUS_CODE_BAD_REQUEST,
'Unable to add an event where the tags are not a '
'list of strings.')
if tag and any([not isinstance(x, str) for x in tag]):
abort(
HTTP_STATUS_CODE_BAD_REQUEST,
'Unable to add an event where the tags are not a '
'list of strings.')
event['tag'] = tag
# We do not need a human readable filename or datastore index name,
# so we derive one from a hash of a sketch-specific seed here.
index_name = hashlib.md5(index_name_seed.encode()).hexdigest()
if six.PY2:
index_name = codecs.decode(index_name, 'utf-8')
# Try to create index
try:
# Create the index in Elasticsearch (unless it already exists)
self.datastore.create_index(
index_name=index_name,
doc_type=event_type)
# Create the search index in the Timesketch database
searchindex = SearchIndex.get_or_create(
name=timeline_name,
description='internal timeline for user-created events',
user=current_user,
index_name=index_name)
searchindex.grant_permission(
permission='read', user=current_user)
searchindex.grant_permission(
permission='write', user=current_user)
searchindex.grant_permission(
permission='delete', user=current_user)
searchindex.set_status('ready')
db_session.add(searchindex)
db_session.commit()
timeline = None
if sketch and sketch.has_permission(current_user, 'write'):
self.datastore.import_event(
index_name,
event_type,
event,
flush_interval=1)
timeline = Timeline.get_or_create(
name=searchindex.name,
description=searchindex.description,
sketch=sketch,
user=current_user,
searchindex=searchindex)
if timeline not in sketch.timelines:
sketch.timelines.append(timeline)
timeline.set_status('ready')
db_session.add(timeline)
db_session.commit()
# Return Timeline if it was created.
# pylint: disable=no-else-return
if timeline:
return self.to_json(
timeline, status_code=HTTP_STATUS_CODE_CREATED)
else:
return self.to_json(
searchindex, status_code=HTTP_STATUS_CODE_CREATED)
# TODO: Can this be narrowed down, both in terms of the scope it
# applies to, as well as not to catch a generic exception.
except Exception as e: # pylint: disable=broad-except
abort(
HTTP_STATUS_CODE_BAD_REQUEST,
'Failed to add event ({0!s})'.format(e))
class EventResource(resources.ResourceMixin, Resource):
"""Resource to get a single event from the datastore.
HTTP Args:
searchindex_id: The datastore searchindex id as string
event_id: The datastore event id as string
"""
def __init__(self):
super().__init__()
self.parser = reqparse.RequestParser()
self.parser.add_argument(
'searchindex_id', type=six.text_type, required=True)
self.parser.add_argument('event_id', type=six.text_type, required=True)
@login_required
def get(self, sketch_id):
"""Handles GET request to the resource.
Handler for /api/v1/sketches/:sketch_id/event/
Args:
sketch_id: Integer primary key for a sketch database model
Returns:
JSON of the datastore event
"""
args = self.parser.parse_args()
sketch = Sketch.query.get_with_acl(sketch_id)
if not sketch:
abort(
HTTP_STATUS_CODE_NOT_FOUND, 'No sketch found with this ID.')
if not sketch.has_permission(current_user, 'read'):
abort(HTTP_STATUS_CODE_FORBIDDEN,
'User does not have read access controls on sketch.')
searchindex_id = args.get('searchindex_id')
searchindex = SearchIndex.query.filter_by(
index_name=searchindex_id).first()
if not searchindex:
abort(
HTTP_STATUS_CODE_BAD_REQUEST,
'Search index not found for this event.')
if searchindex.get_status.status == 'deleted':
abort(
HTTP_STATUS_CODE_BAD_REQUEST,
                'Unable to query event on a deleted search index.')
event_id = args.get('event_id')
indices = [
t.searchindex.index_name for t in sketch.timelines
if t.get_status.status.lower() == 'ready']
# Check if the requested searchindex is part of the sketch
if searchindex_id not in indices:
abort(
HTTP_STATUS_CODE_BAD_REQUEST,
'Search index ID ({0!s}) does not belong to the list '
'of indices'.format(searchindex_id))
result = self.datastore.get_event(searchindex_id, event_id)
event = Event.query.filter_by(
sketch=sketch, searchindex=searchindex,
document_id=event_id).first()
# Comments for this event
comments = []
if event:
for comment in event.comments:
if not comment.user:
username = 'System'
else:
username = comment.user.username
comment_dict = {
'user': {
'username': username,
},
'created_at': comment.created_at,
'comment': comment.comment
}
comments.append(comment_dict)
schema = {
'meta': {
'comments': comments
},
'objects': result['_source']
}
return jsonify(schema)
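def _example_fetch_event(session, host_url, sketch_id, searchindex_id, event_id):
    """Client-side sketch for the resource above; not part of the API itself.

    Assumes `session` is an already authenticated requests.Session and
    `host_url` points at a running Timesketch server. The route and query
    parameters mirror the handler docstring and RequestParser arguments.
    """
    response = session.get(
        '{0:s}/api/v1/sketches/{1:d}/event/'.format(host_url, sketch_id),
        params={'searchindex_id': searchindex_id, 'event_id': event_id})
    return response.json()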
class EventTaggingResource(resources.ResourceMixin, Resource):
"""Resource to fetch and set tags to an event."""
# The number of events to bulk together for each query.
EVENT_CHUNK_SIZE = 1000
# The maximum number of events to tag in a single request.
MAX_EVENTS_TO_TAG = 100000
# The size of the buffer before a bulk update in ES takes place.
BUFFER_SIZE_FOR_ES_BULK_UPDATES = 10000
@login_required
def post(self, sketch_id):
"""Handles POST request to the resource.
Args:
sketch_id: Integer primary key for a sketch database model
Returns:
An annotation in JSON (instance of flask.wrappers.Response)
"""
sketch = Sketch.query.get_with_acl(sketch_id)
if not sketch:
msg = 'No sketch found with this ID.'
abort(HTTP_STATUS_CODE_NOT_FOUND, msg)
if not sketch.has_permission(current_user, 'write'):
abort(
HTTP_STATUS_CODE_FORBIDDEN, (
'User does not have sufficient access rights to '
'modify a sketch.'))
form = request.json
if not form:
form = request.data
tag_dict = {
'events_processed_by_api': 0,
'number_of_events_with_added_tags': 0,
'tags_applied': 0,
}
datastore = self.datastore
try:
tags_to_add = json.loads(form.get('tag_string', ''))
except json.JSONDecodeError as e:
abort(
HTTP_STATUS_CODE_BAD_REQUEST,
'Unable to read the tags, with error: {0!s}'.format(e))
if not isinstance(tags_to_add, list):
abort(
HTTP_STATUS_CODE_BAD_REQUEST, 'Tags need to be a list')
if not all([isinstance(x, str) for x in tags_to_add]):
abort(
HTTP_STATUS_CODE_BAD_REQUEST,
'Tags need to be a list of strings')
events = form.get('events', [])
event_df = pd.DataFrame(events)
for field in ['_id', '_type', '_index']:
if field not in event_df:
abort(
HTTP_STATUS_CODE_BAD_REQUEST,
                    'Events need to have a [{0:s}] field associated '
                    'with them.'.format(field))
if any(event_df[field].isna()):
abort(
HTTP_STATUS_CODE_BAD_REQUEST,
                    'All events need to have a [{0:s}] field set; '
                    'it cannot be empty.'.format(field))
# Remove any potential extra fields from the events.
event_df = event_df[['_id', '_type', '_index']]
tag_df = pd.DataFrame()
event_size = event_df.shape[0]
if event_size > self.MAX_EVENTS_TO_TAG:
abort(
HTTP_STATUS_CODE_BAD_REQUEST,
'Cannot tag more than {0:d} events in a single '
'request'.format(self.MAX_EVENTS_TO_TAG))
tag_dict['number_of_events_passed_to_api'] = event_size
errors = []
verbose = form.get('verbose', False)
if verbose:
tag_dict['number_of_indices'] = len(event_df['_index'].unique())
time_tag_gathering_start = time.time()
for _index in event_df['_index'].unique():
index_slice = event_df[event_df['_index'] == _index]
index_size = index_slice.shape[0]
if verbose:
tag_dict.setdefault('index_count', {})
tag_dict['index_count'][_index] = index_size
if index_size <= self.EVENT_CHUNK_SIZE:
chunks = 1
else:
chunks = math.ceil(index_size / self.EVENT_CHUNK_SIZE)
tags = []
for index_chunk in np.array_split(
index_slice['_id'].unique(), chunks):
should_list = [{'match': {'_id': x}} for x in index_chunk]
query_body = {
'query': {
'bool': {
'should': should_list
}
}
}
# Adding a small buffer to make sure all results are captured.
size = len(should_list) + 100
query_body['size'] = size
query_body['terminate_after'] = size
try:
# pylint: disable=unexpected-keyword-arg
if datastore.version.startswith('6'):
search = datastore.client.search(
body=json.dumps(query_body),
index=[_index],
_source_include=['tag'],
search_type='query_then_fetch'
)
else:
search = datastore.client.search(
body=json.dumps(query_body),
index=[_index],
_source_includes=['tag'],
search_type='query_then_fetch'
)
except RequestError as e:
logger.error('Unable to query for events', exc_info=True)
errors.append(
'Unable to query for events, {0!s}'.format(e))
abort(
HTTP_STATUS_CODE_BAD_REQUEST,
'Unable to query events, {0!s}'.format(e))
for result in search['hits']['hits']:
tag = result.get('_source', {}).get('tag', [])
if not tag:
continue
tags.append({'_id': result.get('_id'), 'tag': tag})
if not tags:
continue
tag_df = pd.concat([tag_df, pd.DataFrame(tags)])
if tag_df.shape[0]:
event_df = event_df.merge(tag_df, on='_id', how='left')
if verbose:
tag_dict[
'time_to_gather_tags'] = time.time() - time_tag_gathering_start
tag_dict['number_of_events'] = len(events)
if tag_df.shape[0]:
tag_dict['number_of_events_in_tag_frame'] = tag_df.shape[0]
if 'tag' in event_df:
current_tag_events = event_df[~event_df['tag'].isna()].shape[0]
tag_dict['number_of_events_with_tags'] = current_tag_events
else:
tag_dict['number_of_events_with_tags'] = 0
tag_dict['tags_to_add'] = tags_to_add
time_tag_start = time.time()
if event_size > datastore.DEFAULT_FLUSH_INTERVAL:
flush_interval = self.BUFFER_SIZE_FOR_ES_BULK_UPDATES
else:
flush_interval = datastore.DEFAULT_FLUSH_INTERVAL
_ = event_df.apply(
_tag_event, axis=1, tag_dict=tag_dict, tags_to_add=tags_to_add,
datastore=datastore, flush_interval=flush_interval)
datastore.flush_queued_events()
if verbose:
tag_dict['time_to_tag'] = time.time() - time_tag_start
if errors:
tag_dict['errors'] = errors
schema = {
'meta': tag_dict,
'objects': []}
response = jsonify(schema)
response.status_code = HTTP_STATUS_CODE_OK
return response
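# Illustrative payload for the tagging endpoint above -- a sketch based on the
# fields the handler reads (tag_string, events, verbose), not on official API
# documentation. 'tag_string' must be a JSON-encoded list of strings and every
# event needs the _id, _type and _index fields; extra event fields are dropped.
EXAMPLE_TAG_EVENTS_FORM = {
    'tag_string': '["suspicious", "reviewed"]',
    'events': [
        {'_id': 'k8P1...', '_type': '_doc', '_index': 'abc123...'},
    ],
    'verbose': False,
}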
class EventAnnotationResource(resources.ResourceMixin, Resource):
"""Resource to create an annotation for an event."""
@login_required
def post(self, sketch_id):
"""Handles POST request to the resource.
Args:
sketch_id: Integer primary key for a sketch database model
Returns:
An annotation in JSON (instance of flask.wrappers.Response)
"""
form = forms.EventAnnotationForm.build(request)
if not form.validate_on_submit():
abort(
HTTP_STATUS_CODE_BAD_REQUEST, 'Unable to validate form data.')
annotations = []
sketch = Sketch.query.get_with_acl(sketch_id)
if not sketch:
abort(
HTTP_STATUS_CODE_NOT_FOUND, 'No sketch found with this ID.')
if not sketch.has_permission(current_user, 'write'):
abort(HTTP_STATUS_CODE_FORBIDDEN,
'User does not have write access controls on sketch.')
indices = [
t.searchindex.index_name for t in sketch.timelines
if t.get_status.status.lower() == 'ready']
annotation_type = form.annotation_type.data
events = form.events.raw_data
for _event in events:
searchindex_id = _event['_index']
searchindex = SearchIndex.query.filter_by(
index_name=searchindex_id).first()
event_id = _event['_id']
event_type = _event['_type']
if searchindex_id not in indices:
abort(
HTTP_STATUS_CODE_BAD_REQUEST,
'Search index ID ({0!s}) does not belong to the list '
'of indices'.format(searchindex_id))
# Get or create an event in the SQL database to have something
# to attach the annotation to.
event = Event.get_or_create(
sketch=sketch,
searchindex=searchindex,
document_id=event_id)
# Add the annotation to the event object.
if 'comment' in annotation_type:
annotation = Event.Comment(
comment=form.annotation.data, user=current_user)
event.comments.append(annotation)
self.datastore.set_label(
searchindex_id,
event_id,
event_type,
sketch.id,
current_user.id,
'__ts_comment',
toggle=False)
elif 'label' in annotation_type:
annotation = Event.Label.get_or_create(
label=form.annotation.data, user=current_user)
if annotation not in event.labels:
event.labels.append(annotation)
toggle = False
                if form.annotation.data in ('__ts_star', '__ts_hidden'):
toggle = True
if form.remove.data:
toggle = True
self.datastore.set_label(
searchindex_id,
event_id,
event_type,
sketch.id,
current_user.id,
form.annotation.data,
toggle=toggle)
else:
abort(
HTTP_STATUS_CODE_BAD_REQUEST,
'Annotation type needs to be either label or comment, '
'not {0!s}'.format(annotation_type))
annotations.append(annotation)
# Save the event to the database
db_session.add(event)
db_session.commit()
return self.to_json(
annotations, status_code=HTTP_STATUS_CODE_CREATED)
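# Illustrative form data for the annotation endpoint above. A sketch only: the
# authoritative field definitions live in forms.EventAnnotationForm (not shown
# here), so the names below are inferred from how the handler reads the form.
EXAMPLE_ANNOTATION_FORM = {
    'annotation_type': 'comment',  # or 'label'
    'annotation': 'This event looks suspicious',
    'events': [{'_id': 'k8P1...', '_type': '_doc', '_index': 'abc123...'}],
    'remove': False,  # only meaningful for label annotations
}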
class CountEventsResource(resources.ResourceMixin, Resource):
"""Resource to number of events for sketch timelines."""
@login_required
def get(self, sketch_id):
"""Handles GET request to the resource.
Args:
sketch_id: Integer primary key for a sketch database model
Returns:
Number of events in JSON (instance of flask.wrappers.Response)
"""
sketch = Sketch.query.get_with_acl(sketch_id)
if not sketch:
abort(
HTTP_STATUS_CODE_NOT_FOUND, 'No sketch found with this ID.')
if not sketch.has_permission(current_user, 'read'):
abort(HTTP_STATUS_CODE_FORBIDDEN,
'User does not have read access controls on sketch.')
indices = [
t.searchindex.index_name for t in sketch.active_timelines
if t.get_status.status != 'archived'
]
count, bytes_on_disk = self.datastore.count(indices)
meta = dict(count=count, bytes=bytes_on_disk)
schema = dict(meta=meta, objects=[])
return jsonify(schema)
|
the-stack_0_27383
|
from django.db import connections
from django.db.utils import OperationalError
from django.core.management.base import BaseCommand
import time
class Command(BaseCommand):
"""Django command to pause execution until database is available"""
def handle(self, *args, **options):
self.stdout.write('Waiting for database...')
db_connection = None
while not db_connection:
            try:
                # connections['default'] is lazy and never connects by itself,
                # so force a real connection attempt with cursor().
                db_connection = connections['default']
                db_connection.cursor()
            except OperationalError:
                db_connection = None
                self.stdout.write('Database unavailable, trying again...')
                time.sleep(1)
self.stdout.write(self.style.SUCCESS('Connected to database.'))
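# A minimal test sketch for this command (assumptions: the command is
# registered as 'wait_for_db' in an installed app, and Django exposes
# ConnectionHandler under django.db.utils so its __getitem__ can be patched).
# It would live in a tests module, not here:
#
# from unittest.mock import MagicMock, patch
# from django.core.management import call_command
# from django.db.utils import OperationalError
# from django.test import SimpleTestCase
#
# class WaitForDbTests(SimpleTestCase):
#     @patch('time.sleep', return_value=None)
#     @patch('django.db.utils.ConnectionHandler.__getitem__')
#     def test_waits_until_db_is_available(self, mock_getitem, _mock_sleep):
#         # Fail twice, then hand back a mock connection whose cursor() works.
#         mock_getitem.side_effect = [
#             OperationalError, OperationalError, MagicMock()]
#         call_command('wait_for_db')
#         self.assertEqual(mock_getitem.call_count, 3)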
|
the-stack_0_27384
|
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (c) 2018 by Delphix. All rights reserved.
#
# Author : Marcin Przepiorowski
# Date : April 2018
import logging
from dxm.lib.DxLogging import print_error
from dxm.lib.DxLogging import print_message
from dxm.lib.masking_api.api.domain_api import DomainApi
from dxm.lib.masking_api.rest import ApiException
from dxm.lib.masking_api.genericmodel import GenericModel
class DxDomain(object):
swagger_types = {
'domain_name': 'str',
'created_by': 'str',
'default_algorithm_code': 'str',
'default_tokenization_code': 'str'
}
swagger_map = {
'domain_name': 'domainName',
'created_by': 'createdBy',
'default_algorithm_code': 'defaultAlgorithmCode',
'default_tokenization_code': 'defaultTokenizationCode'
}
def __init__(self, engine):
"""
Constructor
:param engine: DxMaskingEngine object
"""
#Domain.__init__(self)
self.__engine = engine
self.__logger = logging.getLogger()
self.__sync = None
self.__logger.debug("creating DxDomain object")
self.__api = DomainApi
self.__apiexc = ApiException
self.__obj = None
@property
def obj(self):
if self.__obj is not None:
return self.__obj
else:
return None
def from_domain(self, dom):
"""
Set obj properties with a Domain object
:param column: Domain object
"""
self.__obj = dom
self.__obj.swagger_map = self.swagger_map
self.__obj.swagger_types = self.swagger_types
@property
def domain_name(self):
if self.obj is not None:
return self.obj.domain_name
else:
return None
@property
def classification(self):
if self.obj is not None:
return self.obj.classification
else:
return None
@property
def default_algorithm_code(self):
if self.obj is not None:
return self.obj.default_algorithm_code
else:
return None
@default_algorithm_code.setter
def default_algorithm_code(self, default_algorithm_code):
if self.__obj is not None:
self.__obj.default_algorithm_code = default_algorithm_code
else:
raise ValueError("Object needs to be initialized first")
@property
def default_tokenization_code(self):
if self.obj is not None:
return self.obj.default_tokenization_code
else:
return None
def create_domain(self, domain_name, domain_classification, default_algorithm_code):
self.__obj = GenericModel({ x:None for x in self.swagger_map.values()}, self.swagger_types, self.swagger_map)
self.obj.domain_name = domain_name
self.obj.domain_classification = domain_classification
self.obj.default_algorithm_code = default_algorithm_code
def add(self):
"""
        Add domain to Masking engine and print status message
        return 0 if no error
return 1 in case of error
"""
if (self.obj.domain_name is None):
print_error("Domain name is required")
self.__logger.error("Domain name is required")
return 1
if (self.obj.default_algorithm_code is None):
print_error("Domain default algorithm is required")
self.__logger.error("Domain default algorithm is required")
return 1
try:
self.__logger.debug("create domain input %s" % str(self))
api_instance = self.__api(self.__engine.api_client)
self.__logger.debug("API instance created")
response = api_instance.create_domain(
self.obj,
_request_timeout=self.__engine.get_timeout()
)
self.from_domain(response)
self.__logger.debug("domain response %s"
% str(response))
print_message("Domain %s added" % self.obj.domain_name)
return 0
except self.__apiexc as e:
print_error(e.body)
self.__logger.error(e)
return 1
def delete(self):
"""
Delete domain
        return 0 if no error
return 1 in case of error
"""
api_instance = self.__api(self.__engine.api_client)
try:
self.__logger.debug("delete domain name %s"
% self.obj.domain_name)
response = api_instance.delete_domain(
self.obj.domain_name,
_request_timeout=self.__engine.get_timeout()
)
self.__logger.debug("delete domain name response %s"
% str(response))
print_message("Domain %s deleted" % self.obj.domain_name)
except self.__apiexc as e:
print_error(e.body)
self.__logger.error(e)
return 1
def update(self):
"""
        Update domain
        return 0 if no error
return 1 in case of error
"""
api_instance = self.__api(self.__engine.api_client)
try:
self.__logger.debug("update domain name %s"
% self.domain_name)
response = api_instance.update_domain(self.domain_name,
self.obj,
_request_timeout=self.__engine.get_timeout()
)
self.__logger.debug("delete domain name response %s"
% str(response))
print_message("Domain %s updated" % self.domain_name)
except self.__apiexc as e:
print_error(e.body)
self.__logger.error(e)
return 1
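def example_add_domain(engine):
    """Usage sketch only; not part of dxm itself.

    Assumes `engine` is an already connected DxMaskingEngine object and that
    'ALG_SL' names an algorithm existing on that engine; the classification
    string accepted by create_domain depends on the engine version.
    """
    domain = DxDomain(engine)
    domain.create_domain('MY_DOMAIN', 'CUSTOM', 'ALG_SL')
    if domain.add() == 0:
        print_message("Domain %s is ready to use" % domain.domain_name)
        return 0
    print_error("Failed to create domain MY_DOMAIN")
    return 1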
|
the-stack_0_27385
|
# Copyright (c) 2020 IBM Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for fs module."""
import pathlib
from typing import List
import pytest
from tests import test_utils
import trestle.core.const as const
import trestle.oscal.common as common
from trestle.core.err import TrestleError
from trestle.core.models.file_content_type import FileContentType
from trestle.oscal import catalog
from trestle.utils import fs
if fs.is_windows(): # pragma: no cover
import win32api
import win32con
def test_should_ignore() -> None:
"""Test should_ignore method."""
assert fs.should_ignore('.test') is True
assert fs.should_ignore('_test') is True
assert fs.should_ignore('__test') is True
assert fs.should_ignore('test') is False
def test_oscal_dir_valid(tmp_path: pathlib.Path) -> None:
"""Test if oscal dir is valid or not."""
assert fs.check_oscal_directories(tmp_path)
create_sample_catalog_project(tmp_path)
assert fs.check_oscal_directories(tmp_path)
# add some hidden files
hidden_file = tmp_path / 'catalogs' / '.hidden.txt'
test_utils.make_hidden_file(hidden_file)
keep_file = tmp_path / 'catalogs' / '.keep'
test_utils.make_hidden_file(keep_file)
assert fs.check_oscal_directories(tmp_path)
assert not hidden_file.exists()
assert keep_file.exists()
# add some markdown readme
readme_file = tmp_path / 'catalogs' / 'README.md'
readme_file.touch()
assert fs.check_oscal_directories(tmp_path)
def test_oscal_dir_notvalid(tmp_path: pathlib.Path) -> None:
"""Test OSCAL directory not valid."""
assert fs.check_oscal_directories(tmp_path)
create_sample_catalog_project(tmp_path)
assert fs.check_oscal_directories(tmp_path)
profiles_dir = tmp_path / 'profiles'
profiles_dir.mkdir(parents=True, exist_ok=True)
invalid_file = profiles_dir / 'shouldnt_be_here.txt'
invalid_file.touch()
assert not fs.check_oscal_directories(tmp_path)
invalid_file.unlink()
assert fs.check_oscal_directories(tmp_path)
metadata_dir = tmp_path / 'catalogs' / 'mycatalog' / 'catalog' / 'metadata'
deep_invalid_file = metadata_dir / 'responsible-parties' / 'should_be_here.docx'
readme_file = tmp_path / 'catalogs' / 'readme.md'
deep_invalid_file.touch()
readme_file.touch()
assert not fs.check_oscal_directories(tmp_path)
def test_is_valid_project_root(tmp_path: pathlib.Path) -> None:
"""Test is_valid_project_root method."""
assert fs.is_valid_project_root(tmp_path) is False
test_utils.ensure_trestle_config_dir(tmp_path)
assert fs.is_valid_project_root(tmp_path) is True
def test_has_parent_path(tmp_path: pathlib.Path) -> None:
"""Test has_parent_path method."""
assert fs.has_parent_path(pathlib.Path('tests'), test_utils.BASE_TMP_DIR) is False
assert fs.has_parent_path(pathlib.Path('/invalid/path'), test_utils.BASE_TMP_DIR) is False
def test_get_trestle_project_root(tmp_path: pathlib.Path, rand_str: str) -> None:
"""Test get_trestle_project_root method."""
project_path: pathlib.Path = pathlib.Path.joinpath(tmp_path, rand_str)
sub_path: pathlib.Path = project_path.joinpath('samples2')
sub_path.mkdir(exist_ok=True, parents=True)
assert sub_path.exists() and sub_path.is_dir()
# create a file
sub_path.joinpath('readme.md').touch()
# create a data-dir and a file
sub_data_dir = pathlib.Path.joinpath(sub_path, 'data')
sub_data_dir.mkdir(exist_ok=True, parents=True)
sub_data_dir.joinpath('readme.md').touch()
assert fs.get_trestle_project_root(sub_data_dir) is None
test_utils.ensure_trestle_config_dir(project_path)
assert fs.get_trestle_project_root(sub_data_dir) == project_path
assert fs.get_trestle_project_root(sub_data_dir.joinpath('readme.md')) == project_path
assert fs.get_trestle_project_root(sub_path.joinpath('readme.md')) == project_path
assert fs.get_trestle_project_root(sub_path) == project_path
assert fs.get_trestle_project_root(project_path.parent) is None
def test_is_valid_project_model_path(tmp_path: pathlib.Path) -> None:
"""Test is_valid_project_model method."""
assert fs.is_valid_project_model_path(tmp_path) is False
test_utils.ensure_trestle_config_dir(tmp_path)
assert fs.is_valid_project_model_path(tmp_path) is False
create_sample_catalog_project(tmp_path)
catalog_dir = tmp_path / 'catalogs'
assert fs.is_valid_project_model_path(catalog_dir) is False
mycatalog_dir = catalog_dir / 'mycatalog'
assert fs.is_valid_project_model_path(mycatalog_dir) is True
metadata_dir = mycatalog_dir / 'metadata'
assert fs.is_valid_project_model_path(metadata_dir) is True
foo_dir = tmp_path / 'foo/bar'
foo_dir.mkdir(parents=True)
assert fs.is_valid_project_model_path(foo_dir) is False
def test_get_project_model_path(tmp_path: pathlib.Path) -> None:
"""Test get_project_model_path method."""
assert fs.get_project_model_path(tmp_path) is None
test_utils.ensure_trestle_config_dir(tmp_path)
assert fs.get_project_model_path(tmp_path) is None
create_sample_catalog_project(tmp_path)
catalog_dir = tmp_path / 'catalogs'
assert fs.get_project_model_path(catalog_dir) is None
mycatalog_dir = catalog_dir / 'mycatalog'
assert fs.get_project_model_path(mycatalog_dir) == mycatalog_dir
metadata_dir = mycatalog_dir / 'metadata'
assert fs.get_project_model_path(metadata_dir) == mycatalog_dir
def test_clean_project_sub_path(tmp_path: pathlib.Path, rand_str: str) -> None:
"""Test clean_project_sub_path method."""
project_path: pathlib.Path = pathlib.Path.joinpath(tmp_path, rand_str)
sub_path: pathlib.Path = project_path.joinpath('samples')
sub_path.mkdir(exist_ok=True, parents=True)
assert sub_path.exists() and sub_path.is_dir()
# create a file
sub_path.joinpath('readme.md').touch()
# create a data-dir and a file
sub_data_dir = pathlib.Path.joinpath(sub_path, 'data')
sub_data_dir_file = sub_data_dir.joinpath('readme.md')
sub_data_dir.mkdir(exist_ok=True, parents=True)
# create a file
sub_data_dir_file.touch()
try:
# not having .trestle directory at the project root or tmp_path should fail
fs.clean_project_sub_path(sub_path)
except TrestleError:
pass
test_utils.ensure_trestle_config_dir(project_path)
fs.clean_project_sub_path(sub_data_dir_file)
assert not sub_data_dir_file.exists()
# create the file again
with open(sub_data_dir_file, 'w+', encoding=const.FILE_ENCODING):
pass
# clean the sub_path in the trestle project
fs.clean_project_sub_path(sub_path)
assert not sub_path.exists()
def test_load_file(tmp_path: pathlib.Path) -> None:
"""Test load file."""
json_file_path = test_utils.NIST_SAMPLE_CD_JSON
yaml_file_path = pathlib.Path.joinpath(test_utils.YAML_TEST_DATA_PATH, 'good_component.yaml')
assert fs.load_file(json_file_path) is not None
assert fs.load_file(yaml_file_path) is not None
try:
sample_file_path = tmp_path.joinpath('sample.txt')
with open(sample_file_path, 'w', encoding=const.FILE_ENCODING):
fs.load_file(sample_file_path)
except TrestleError:
pass
def test_get_relative_model_type(tmp_path: pathlib.Path) -> None:
"""Test get model type and alias based on filesystem context."""
import trestle.core.utils as cutils
with pytest.raises(TrestleError):
fs.get_relative_model_type(pathlib.Path('invalidpath'))
with pytest.raises(TrestleError):
fs.get_relative_model_type(pathlib.Path('./'))
catalogs_dir = pathlib.Path('catalogs')
mycatalog_dir = catalogs_dir / 'mycatalog'
catalog_dir = mycatalog_dir / 'catalog'
metadata_dir = catalog_dir / 'metadata'
roles_dir = metadata_dir / 'roles'
rps_dir = metadata_dir / 'responsible-parties'
props_dir = metadata_dir / 'props'
groups_dir = catalog_dir / 'groups'
group_dir = groups_dir / f'00000{const.IDX_SEP}group'
controls_dir = group_dir / 'controls'
with pytest.raises(TrestleError):
fs.get_relative_model_type(catalogs_dir)
assert fs.get_relative_model_type(mycatalog_dir) == (catalog.Catalog, 'catalog')
assert fs.get_relative_model_type(mycatalog_dir / 'catalog.json') == (catalog.Catalog, 'catalog')
assert fs.get_relative_model_type(catalog_dir / 'back-matter.json') == (common.BackMatter, 'catalog.back-matter')
assert fs.get_relative_model_type(catalog_dir / 'metadata.yaml') == (common.Metadata, 'catalog.metadata')
assert fs.get_relative_model_type(metadata_dir) == (common.Metadata, 'catalog.metadata')
assert fs.get_relative_model_type(roles_dir) == (List[common.Role], 'catalog.metadata.roles')
(type_, element) = fs.get_relative_model_type(roles_dir)
assert cutils.get_origin(type_) == list
assert element == 'catalog.metadata.roles'
assert fs.get_relative_model_type(roles_dir / '00000__role.json') == (common.Role, 'catalog.metadata.roles.role')
model_type, full_alias = fs.get_relative_model_type(rps_dir)
assert model_type == List[common.ResponsibleParty]
assert full_alias == 'catalog.metadata.responsible-parties'
assert fs.get_relative_model_type(
rps_dir / 'creator__responsible-party.json'
) == (common.ResponsibleParty, 'catalog.metadata.responsible-parties.responsible-party')
(type_, element) = fs.get_relative_model_type(props_dir)
assert cutils.get_origin(type_) == list
assert cutils.get_inner_type(type_) == common.Property
assert element == 'catalog.metadata.props'
(expected_type, expected_json_path) = fs.get_relative_model_type(props_dir / f'00000{const.IDX_SEP}property.json')
assert expected_type == common.Property
assert expected_json_path == 'catalog.metadata.props.property'
assert cutils.get_origin(type_) == list
assert fs.get_relative_model_type(groups_dir / f'00000{const.IDX_SEP}group.json'
) == (catalog.Group, 'catalog.groups.group')
assert fs.get_relative_model_type(group_dir) == (catalog.Group, 'catalog.groups.group')
assert fs.get_relative_model_type(controls_dir / f'00000{const.IDX_SEP}control.json'
) == (catalog.Control, 'catalog.groups.group.controls.control')
def create_sample_catalog_project(trestle_base_dir: pathlib.Path) -> None:
"""Create directory structure for a sample catalog named mycatalog."""
test_utils.ensure_trestle_config_dir(trestle_base_dir)
mycatalog_dir = trestle_base_dir / 'catalogs' / 'mycatalog'
directories = [
mycatalog_dir / 'catalog' / 'metadata' / 'roles',
mycatalog_dir / 'catalog' / 'metadata' / 'responsible-parties',
mycatalog_dir / 'catalog' / 'metadata' / 'props',
mycatalog_dir / 'catalog' / 'groups' / f'00000{const.IDX_SEP}group' / 'controls'
]
for directory in directories:
directory.mkdir(parents=True, exist_ok=True)
files = [
mycatalog_dir / 'catalog.json',
mycatalog_dir / 'catalog' / 'back-matter.json',
mycatalog_dir / 'catalog' / 'metadata.json',
mycatalog_dir / 'catalog' / 'metadata' / 'roles' / f'00000{const.IDX_SEP}role.json',
mycatalog_dir / 'catalog' / 'metadata' / 'responsible-parties'
/ f'creator{const.IDX_SEP}responsible-party.json',
mycatalog_dir / 'catalog' / 'metadata' / 'props' / f'00000{const.IDX_SEP}property.json',
mycatalog_dir / 'catalog' / 'groups' / f'00000{const.IDX_SEP}group.json',
mycatalog_dir / 'catalog' / 'groups' / f'00000{const.IDX_SEP}group' / 'controls'
/ f'00000{const.IDX_SEP}control.json',
]
for file in files:
file.touch()
def test_extract_alias() -> None:
"""Test extraction of alias from filename or directory names."""
assert fs.extract_alias(pathlib.Path('catalog').name) == 'catalog'
assert fs.extract_alias(pathlib.Path('/tmp/catalog').name) == 'catalog'
assert fs.extract_alias(pathlib.Path('/catalogs/mycatalog/catalog.json').name) == 'catalog'
assert fs.extract_alias(pathlib.Path('/catalogs/mycatalog/catalog.yaml').name) == 'catalog'
assert fs.extract_alias(pathlib.Path('responsible-parties').name) == 'responsible-parties'
assert fs.extract_alias(pathlib.Path('responsible-parties.json').name) == 'responsible-parties'
assert fs.extract_alias(pathlib.Path('/roles').name) == 'roles'
assert fs.extract_alias(pathlib.Path('/roles/roles.json').name) == 'roles'
assert fs.extract_alias(pathlib.Path(f'/roles/00000{const.IDX_SEP}role.json').name) == 'role'
assert fs.extract_alias(
pathlib.Path(f'/metadata/responsible-parties/creator{const.IDX_SEP}responsible-party.json').name
) == 'responsible-party'
def test_get_stripped_model_type(tmp_path: pathlib.Path) -> None:
"""Test get stripped model type and alias based on filesystem context."""
with pytest.raises(TrestleError):
fs.get_stripped_model_type(tmp_path / 'invalidpath', tmp_path)
with pytest.raises(TrestleError):
fs.get_stripped_model_type(tmp_path, tmp_path)
create_sample_catalog_project(tmp_path)
catalogs_dir = tmp_path / 'catalogs'
with pytest.raises(TrestleError):
fs.get_stripped_model_type(catalogs_dir, tmp_path)
def check_stripped_catalog() -> None:
assert 'uuid' in alias_to_field_map
assert 'metadata' not in alias_to_field_map
assert 'back-matter' not in alias_to_field_map
assert 'groups' not in alias_to_field_map
mycatalog_dir = catalogs_dir / 'mycatalog'
stripped_catalog = fs.get_stripped_model_type(mycatalog_dir, tmp_path)
alias_to_field_map = stripped_catalog[0].alias_to_field_map()
check_stripped_catalog()
stripped_catalog = fs.get_stripped_model_type(mycatalog_dir / 'catalog.json', tmp_path)
alias_to_field_map = stripped_catalog[0].alias_to_field_map()
check_stripped_catalog()
def check_stripped_metadata(a2f_map) -> None:
assert 'title' in a2f_map
assert 'published' in a2f_map
assert 'last-modified' in a2f_map
assert 'version' in a2f_map
assert 'oscal-version' in a2f_map
assert 'revisions' in a2f_map
assert 'document-ids' in a2f_map
assert 'links' in a2f_map
assert 'locations' in a2f_map
assert 'parties' in a2f_map
assert 'remarks' in a2f_map
        assert 'roles' not in a2f_map
        assert 'responsible-parties' not in a2f_map
assert 'props' not in a2f_map
catalog_dir = mycatalog_dir / 'catalog'
metadata_dir = catalog_dir / 'metadata'
stripped_catalog = fs.get_stripped_model_type(metadata_dir, tmp_path)
alias_to_field_map = stripped_catalog[0].alias_to_field_map()
check_stripped_metadata(alias_to_field_map)
stripped_catalog = fs.get_stripped_model_type(catalog_dir / 'metadata.json', tmp_path)
alias_to_field_map = stripped_catalog[0].alias_to_field_map()
check_stripped_metadata(alias_to_field_map)
groups_dir = catalog_dir / 'groups'
stripped_catalog = fs.get_stripped_model_type(groups_dir, tmp_path)
assert stripped_catalog[0].__name__ == 'Groups'
assert stripped_catalog[1] == 'catalog.groups'
def check_stripped_group() -> None:
assert 'id' in alias_to_field_map
assert 'class' in alias_to_field_map
assert 'title' in alias_to_field_map
assert 'params' in alias_to_field_map
assert 'props' in alias_to_field_map
assert 'links' in alias_to_field_map
assert 'parts' in alias_to_field_map
assert 'groups' in alias_to_field_map
assert 'controls' not in alias_to_field_map
stripped_catalog = fs.get_stripped_model_type(groups_dir / f'00000{const.IDX_SEP}group', tmp_path)
alias_to_field_map = stripped_catalog[0].alias_to_field_map()
check_stripped_group()
stripped_catalog = fs.get_stripped_model_type(groups_dir / f'00000{const.IDX_SEP}group.json', tmp_path)
alias_to_field_map = stripped_catalog[0].alias_to_field_map()
check_stripped_group()
def test_get_singular_alias() -> None:
"""Test get_singular_alias function."""
assert fs.get_singular_alias(alias_path='catalog') == 'catalog'
# Not fullpath. It should be 'catalog.metadata' instead
with pytest.raises(TrestleError):
fs.get_singular_alias(alias_path='metadata.something')
# Invalid alias_path
with pytest.raises(TrestleError):
fs.get_singular_alias(alias_path='invalid')
# Invalid alias_path
with pytest.raises(TrestleError):
fs.get_singular_alias(alias_path='')
assert fs.get_singular_alias(alias_path='catalog.metadata.responsible-parties') == 'responsible-party'
assert fs.get_singular_alias(alias_path='catalog.metadata.responsible-parties.*.props') == 'property'
assert 'responsible-party' == fs.get_singular_alias(alias_path='catalog.metadata.responsible-parties.*')
assert 'role' == fs.get_singular_alias(alias_path='catalog.metadata.roles')
assert 'property' == fs.get_singular_alias(alias_path='catalog.metadata.props')
assert 'control-implementations' == fs.get_singular_alias(
alias_path='component-definition.components.control-implementations'
)
assert 'control-implementation' == fs.get_singular_alias(
alias_path='component-definition.components.*.control-implementations'
)
assert 'control-implementation' == fs.get_singular_alias(
alias_path='component-definition.components.0.control-implementations'
)
# FIXME ideally this should report error
assert '0' == fs.get_singular_alias(alias_path='component-definition.components.0')
assert 'control' == fs.get_singular_alias(alias_path='catalog.groups.*.controls.*.controls')
def test_contextual_get_singular_alias(tmp_path: pathlib.Path, keep_cwd: pathlib.Path) -> None:
"""Test get_singular_alias in contextual mode."""
# Contextual model tests
create_sample_catalog_project(tmp_path)
catalogs_dir = tmp_path.resolve() / 'catalogs'
mycatalog_dir = catalogs_dir / 'mycatalog'
catalog_dir = mycatalog_dir / 'catalog'
metadata_dir = catalog_dir / 'metadata'
groups_dir = catalog_dir / 'groups'
group_dir = groups_dir / f'00000{const.IDX_SEP}group'
rel_dir = mycatalog_dir.relative_to(tmp_path)
assert 'responsible-party' == fs.get_singular_alias('catalog.metadata.responsible-parties', rel_dir)
# Both should work to deal with the case back-matter is already split from the catalog in a separate file
assert 'resource' == fs.get_singular_alias('catalog.back-matter.resources', rel_dir)
assert 'resource' == fs.get_singular_alias('back-matter.resources', rel_dir)
rel_dir = metadata_dir.relative_to(tmp_path)
with pytest.raises(TrestleError):
fs.get_singular_alias('metadata.roles')
alias = fs.get_singular_alias('metadata.roles', rel_dir)
assert alias == 'role'
assert 'responsible-party' == fs.get_singular_alias('metadata.responsible-parties.*', rel_dir)
assert 'property' == fs.get_singular_alias('metadata.responsible-parties.*.props', rel_dir)
rel_dir = groups_dir.relative_to(tmp_path)
assert 'control' == fs.get_singular_alias('groups.*.controls.*.controls', rel_dir)
rel_dir = group_dir.relative_to(tmp_path)
assert 'control' == fs.get_singular_alias('group.controls.*.controls', rel_dir)
def test_get_contextual_file_type(tmp_path: pathlib.Path) -> None:
"""Test fs.get_contextual_file_type()."""
(tmp_path / 'file.json').touch()
with pytest.raises(TrestleError):
fs.get_contextual_file_type(pathlib.Path(tmp_path / 'gu.json'))
(tmp_path / 'file.json').unlink()
(tmp_path / '.trestle').mkdir()
(tmp_path / 'catalogs').mkdir()
catalogs_dir = tmp_path / 'catalogs'
(catalogs_dir / 'mycatalog').mkdir()
mycatalog_dir = catalogs_dir / 'mycatalog'
pathlib.Path(mycatalog_dir / 'file2.json').touch()
assert fs.get_contextual_file_type(mycatalog_dir) == FileContentType.JSON
(mycatalog_dir / 'file2.json').unlink()
if fs.is_windows():
hidden_file = mycatalog_dir / 'hidden.txt'
hidden_file.touch()
atts = win32api.GetFileAttributes(str(hidden_file))
win32api.SetFileAttributes(str(hidden_file), win32con.FILE_ATTRIBUTE_HIDDEN | atts)
else:
pathlib.Path(mycatalog_dir / '.DS_Store').touch()
pathlib.Path(mycatalog_dir / 'file2.json').touch()
assert fs.get_contextual_file_type(mycatalog_dir) == FileContentType.JSON
if fs.is_windows():
hidden_file.unlink()
else:
(mycatalog_dir / '.DS_Store').unlink()
(mycatalog_dir / 'file2.json').unlink()
pathlib.Path(mycatalog_dir / 'file3.yml').touch()
assert fs.get_contextual_file_type(mycatalog_dir) == FileContentType.YAML
(mycatalog_dir / 'file3.yml').unlink()
(mycatalog_dir / 'catalog').mkdir()
(mycatalog_dir / 'catalog/groups').mkdir()
(mycatalog_dir / 'catalog/groups/file4.yaml').touch()
assert fs.get_contextual_file_type(mycatalog_dir) == FileContentType.YAML
def test_get_models_of_type(tmp_trestle_dir) -> None:
"""Test fs.get_models_of_type()."""
create_sample_catalog_project(tmp_trestle_dir)
catalogs_dir = tmp_trestle_dir.resolve() / 'catalogs'
components_dir = tmp_trestle_dir.resolve() / 'component-definitions'
# mycatalog is already there
(catalogs_dir / 'mycatalog2').mkdir()
(catalogs_dir / '.myfile').touch()
(components_dir / 'my_component').mkdir()
models = fs.get_models_of_type('catalog', tmp_trestle_dir)
assert len(models) == 2
assert 'mycatalog' in models
assert 'mycatalog2' in models
all_models = fs.get_all_models(tmp_trestle_dir)
assert len(all_models) == 3
assert ('catalog', 'mycatalog') in all_models
assert ('catalog', 'mycatalog2') in all_models
assert ('component-definition', 'my_component') in all_models
with pytest.raises(TrestleError):
fs.get_models_of_type('foo', tmp_trestle_dir)
def test_get_models_of_type_bad_cwd(tmp_path) -> None:
"""Test fs.get_models_of_type() from outside trestle dir."""
with pytest.raises(TrestleError):
fs.get_models_of_type('catalog', tmp_path)
def test_is_hidden_posix(tmp_path) -> None:
"""Test is_hidden on posix systems."""
if not fs.is_windows():
hidden_file = tmp_path / '.hidden.md'
hidden_dir = tmp_path / '.hidden/'
visible_file = tmp_path / 'visible.md'
visible_dir = tmp_path / 'visible/'
assert fs.is_hidden(hidden_file)
assert fs.is_hidden(hidden_dir)
assert not fs.is_hidden(visible_file)
assert not fs.is_hidden(visible_dir)
else:
pass
def test_is_hidden_windows(tmp_path) -> None:
"""Test is_hidden on windows systems."""
if fs.is_windows():
visible_file = tmp_path / 'visible.md'
visible_dir = tmp_path / 'visible/'
visible_file.touch()
        visible_dir.mkdir()
assert not fs.is_hidden(visible_file)
assert not fs.is_hidden(visible_dir)
atts = win32api.GetFileAttributes(str(visible_file))
win32api.SetFileAttributes(str(visible_file), win32con.FILE_ATTRIBUTE_HIDDEN | atts)
atts = win32api.GetFileAttributes(str(visible_dir))
win32api.SetFileAttributes(str(visible_dir), win32con.FILE_ATTRIBUTE_HIDDEN | atts)
assert fs.is_hidden(visible_file)
assert fs.is_hidden(visible_dir)
else:
pass
@pytest.mark.parametrize(
'task_name, outcome',
[
('hello', True), ('.trestle', False), ('task/name', True), ('.bad,', False), ('catalogs', False),
('catalog', True), ('component-definitions', False), ('hello.world', False),
('component-definitions/hello', False)
]
)
def test_allowed_task_name(task_name: str, outcome: bool) -> None:
"""Test whether task names are allowed."""
assert fs.allowed_task_name(task_name) == outcome
def test_model_type_to_model_dir() -> None:
"""Test model type to model dir."""
assert fs.model_type_to_model_dir('catalog') == 'catalogs'
try:
fs.model_type_to_model_dir('foo')
except Exception:
pass
else:
        assert False, 'test failed'
def test_local_and_visible(tmp_path) -> None:
"""Test if file is local (not symlink) and visible (not hidden)."""
local_file = tmp_path / 'local.md'
local_file.touch()
if fs.is_windows():
link_file = tmp_path / 'not_local.lnk'
link_file.touch()
else:
link_file = tmp_path / 'linked.md'
link_file.symlink_to(local_file)
assert fs.local_and_visible(local_file)
assert not fs.local_and_visible(link_file)
@pytest.mark.parametrize(
'candidate, build, expect_failure',
[
(pathlib.Path('relative_file.json'), False, False),
(pathlib.Path('relative_file.json'), True, False),
(pathlib.Path('/random/absolute/path'), False, True),
(pathlib.Path('/random/absolute/path'), False, True),
(pathlib.Path('~/random/home_directory/path'), False, True),
(pathlib.Path('~/random/home_directory/path'), True, False),
(pathlib.Path('../relative_file.json'), False, True),
(pathlib.Path('../relative_file.json'), True, True),
(
pathlib.Path('./hello/../relative_file.json'),
False,
False,
),
(
pathlib.Path('./hello/../relative_file.json'),
True,
False,
),
]
)
def test_relative_resolve(tmp_path, candidate: pathlib.Path, build: bool, expect_failure: bool):
"""Test relative resolve capability."""
if build:
input_path = tmp_path / candidate
else:
input_path = candidate
if expect_failure:
with pytest.raises(TrestleError):
_ = fs.relative_resolve(input_path, tmp_path)
else:
_ = fs.relative_resolve(input_path, tmp_path)
def test_iterdir_without_hidden_files(tmp_path: pathlib.Path) -> None:
"""Test that hidden files are filtered from the path."""
pathlib.Path(tmp_path / 'visible.txt').touch()
pathlib.Path(tmp_path / 'visibleDir/').mkdir()
if fs.is_windows():
"""Windows"""
hidden_file = tmp_path / 'hidden.txt'
hidden_dir = tmp_path / 'hiddenDir/'
hidden_file.touch()
hidden_dir.mkdir()
atts = win32api.GetFileAttributes(str(hidden_file))
win32api.SetFileAttributes(str(hidden_file), win32con.FILE_ATTRIBUTE_HIDDEN | atts)
atts = win32api.GetFileAttributes(str(hidden_dir))
win32api.SetFileAttributes(str(hidden_dir), win32con.FILE_ATTRIBUTE_HIDDEN | atts)
assert len(list(fs.iterdir_without_hidden_files(tmp_path))) == 3
else:
pathlib.Path(tmp_path / '.DS_Store').touch()
pathlib.Path(tmp_path / '.hidden.txt').touch()
pathlib.Path(tmp_path / '.hiddenDir/').mkdir()
assert len(list(fs.iterdir_without_hidden_files(tmp_path))) == 3
def test_make_hidden_file(tmp_path: pathlib.Path) -> None:
"""Test make hidden files."""
file_path = tmp_path / '.keep'
fs.make_hidden_file(file_path)
file_path2 = tmp_path / 'hidden.txt'
fs.make_hidden_file(file_path2)
assert file_path.exists() and not fs.local_and_visible(file_path)
if fs.is_windows():
assert file_path2.exists() and not fs.local_and_visible(file_path2)
else:
assert (tmp_path / '.hidden.txt').exists() and not fs.local_and_visible(tmp_path / '.hidden.txt')
def test_full_path_for_top_level_model(tmp_trestle_dir: pathlib.Path, sample_catalog_minimal: catalog.Catalog) -> None:
"""Test full path for top level model."""
fs.save_top_level_model(sample_catalog_minimal, tmp_trestle_dir, 'mycat', fs.FileContentType.JSON)
cat_path = fs.full_path_for_top_level_model(tmp_trestle_dir, 'mycat', catalog.Catalog)
assert cat_path == tmp_trestle_dir / 'catalogs/mycat/catalog.json'
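# The fixtures used above (rand_str, keep_cwd, tmp_trestle_dir,
# sample_catalog_minimal) come from the project's conftest.py and are not
# defined in this module. A rough sketch of what two of them could look like
# (an approximation only, not trestle's actual implementation):
#
# from uuid import uuid4
#
# @pytest.fixture
# def rand_str() -> str:
#     return uuid4().hex
#
# @pytest.fixture
# def tmp_trestle_dir(tmp_path, monkeypatch) -> pathlib.Path:
#     test_utils.ensure_trestle_config_dir(tmp_path)
#     monkeypatch.chdir(tmp_path)
#     return tmp_path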
|
the-stack_0_27386
|
import sys, logging, os, random, math, open_color, arcade
#check to make sure we are running the right version of Python
version = (3,7)
assert sys.version_info >= version, "This script requires at least Python {0}.{1}".format(version[0],version[1])
#turn on logging, in case we have to leave ourselves debugging messages
logging.basicConfig(format='[%(filename)s:%(lineno)d] %(message)s', level=logging.DEBUG)
logger = logging.getLogger(__name__)
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
MARGIN = 30
SCREEN_TITLE = "Space Pirates"
NUM_ENEMIES = 5
STARTING_LOCATION = (400,100)
BULLET_DAMAGE = 10
ENEMY_HP = 100
PLAYER_HP = 100
HIT_SCORE = 10
KILL_SCORE = 100
class Bullet(arcade.Sprite):
def __init__(self, position, velocity, damage):
'''
initializes the ammunition
Parameters: position: (x,y) tuple
velocity: (dx, dy) tuple
damage: int (or float)
'''
super().__init__("Assets/ammunition.png", 0.5)
(self.center_x, self.center_y) = position
(self.dx, self.dy) = velocity
self.damage = damage
def update(self):
'''
Moves the ammunition
'''
self.center_x += self.dx
self.center_y += self.dy
class Enemy_Bullet(arcade.Sprite):
def __init__(self, position, velocity, damage):
'''
initializes the bullet
Parameters: position: (x,y) tuple
velocity: (dx, dy) tuple
damage: int (or float)
'''
super().__init__("Assets/enemyAmmo.png", 0.5)
(self.center_x, self.center_y) = position
(self.dx, self.dy) = velocity
self.damage = damage
def update(self):
'''
Moves the ammunition
'''
self.center_x += self.dx
self.center_y += self.dy
class Player(arcade.Sprite):
def __init__(self):
super().__init__("Assets/Space Pirates PC.png", 0.5)
(self.center_x, self.center_y) = STARTING_LOCATION
self.hp = PLAYER_HP
class Enemy(arcade.Sprite):
def __init__(self, position):
'''
initializes a pirate enemy
Parameter: position: (x,y) tuple
'''
super().__init__("Assets/spacePirateEnemy.png", 0.5)
self.hp = ENEMY_HP
(self.center_x, self.center_y) = position
class Window(arcade.Window):
def __init__(self, width, height, title):
super().__init__(width, height, title)
file_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(file_path)
self.set_mouse_visible(True)
arcade.set_background_color(open_color.black)
self.bullet_list = arcade.SpriteList()
self.enemy_list = arcade.SpriteList()
self.enemy_bullet_list = arcade.SpriteList()
self.player = Player()
self.score = 0
self.win = False
self.lose = False
def setup(self):
'''
Set up enemies
'''
for i in range(NUM_ENEMIES):
x = 120 * (i+1) + 40
y = 500
enemy = Enemy((x,y))
self.enemy_list.append(enemy)
def update(self, delta_time):
self.bullet_list.update()
self.enemy_bullet_list.update()
if (not (self.win or self.lose)):
for e in self.enemy_list:
# check for collision
# for every shot that hits, decrease the hp and then see if it dies
# increase the score
# e.kill() will remove the enemy sprite from the game
for b in self.bullet_list:
if (abs(b.center_x - e.center_x) <= e.width / 2 and abs(b.center_y - e.center_y) <= e.height / 2):
self.score += HIT_SCORE
e.hp -= b.damage
b.kill()
if (e.hp <= 0):
e.kill()
self.score += KILL_SCORE
if (len(self.enemy_list) == 0):
self.win = True
if (random.randint(1, 75) == 1):
self.enemy_bullet_list.append(Enemy_Bullet((e.center_x, e.center_y - 15), (0, -10), BULLET_DAMAGE))
for b in self.enemy_bullet_list:
if (abs(b.center_x - self.player.center_x) <= self.player.width / 2 and abs(b.center_y - self.player.center_y) <= self.player.height / 2):
self.player.hp -= b.damage
b.kill()
if (self.player.hp <= 0):
self.lose = True
def on_draw(self):
arcade.start_render()
arcade.draw_text(str(self.score), 20, SCREEN_HEIGHT - 40, open_color.white, 16)
arcade.draw_text("HP: {}".format(self.player.hp), 20, 40, open_color.white, 16)
if (self.player.hp > 0):
self.player.draw()
self.bullet_list.draw()
self.enemy_bullet_list.draw()
self.enemy_list.draw()
if (self.lose):
self.draw_game_loss()
elif (self.win):
self.draw_game_won()
def draw_game_loss(self):
arcade.draw_text(str("YOU LOSE, MATEY..."), SCREEN_WIDTH / 2 - 90, SCREEN_HEIGHT / 2 - 10, open_color.white, 30)
def draw_game_won(self):
arcade.draw_text(str("YAARRRR, YOU WIN!"), SCREEN_WIDTH / 2 - 90, SCREEN_HEIGHT / 2 - 10, open_color.white, 30)
def on_mouse_motion(self, x, y, dx, dy):
'''
The player moves left and right with the mouse
'''
self.player.center_x = x
def on_mouse_press(self, x, y, button, modifiers):
if self.player.hp > 0 and button == arcade.MOUSE_BUTTON_LEFT:
#fire a shot
self.bullet_list.append(Bullet((self.player.center_x, self.player.center_y + 15), (0, 10), BULLET_DAMAGE))
def main():
window = Window(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
window.setup()
arcade.run()
if __name__ == "__main__":
main()
|
the-stack_0_27387
|
import os
from django.conf import settings
def configure(nose_args=None):
# Helper function to extract absolute path
location = lambda x: os.path.join(
os.path.dirname(os.path.realpath(__file__)), x)
settings.configure(
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
},
TEMPLATE_DIRS=(
location('templates'),
),
NOSE_ARGS=nose_args,
)
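# Usage sketch (assumptions: this module is importable as `config` and
# django-nose is installed, since NOSE_ARGS is a django-nose setting).
# A runner script could look roughly like this:
#
# import django
# from config import configure
#
# configure(nose_args=['--with-coverage'])
# django.setup()
# from django_nose import NoseTestSuiteRunner
# failures = NoseTestSuiteRunner(verbosity=1).run_tests([])
# raise SystemExit(bool(failures))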
|
the-stack_0_27388
|
"""
Client for uploading packaged artifacts to s3
"""
# Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import hashlib
import logging
import threading
import os
import sys
from collections import abc
import botocore
import botocore.exceptions
from boto3.s3 import transfer
from samcli.commands.package.exceptions import NoSuchBucketError, BucketNotSpecifiedError
from samcli.lib.package.artifact_exporter import parse_s3_url
from samcli.lib.utils.hash import file_checksum
LOG = logging.getLogger(__name__)
class S3Uploader:
"""
Class to upload objects to S3 bucket that use versioning. If bucket
does not already use versioning, this class will turn on versioning.
"""
@property
def artifact_metadata(self):
"""
Metadata to attach to the object(s) uploaded by the uploader.
"""
return self._artifact_metadata
@artifact_metadata.setter
def artifact_metadata(self, val):
if val is not None and not isinstance(val, abc.Mapping):
raise TypeError("Artifact metadata should be in dict type")
self._artifact_metadata = val
def __init__(self, s3_client, bucket_name, prefix=None, kms_key_id=None, force_upload=False, no_progressbar=False):
self.s3 = s3_client
self.bucket_name = bucket_name
self.prefix = prefix
self.kms_key_id = kms_key_id or None
self.force_upload = force_upload
self.no_progressbar = no_progressbar
self.transfer_manager = transfer.create_transfer_manager(self.s3, transfer.TransferConfig())
self._artifact_metadata = None
def upload(self, file_name, remote_path):
"""
Uploads given file to S3
:param file_name: Path to the file that will be uploaded
        :param remote_path: Path (key) inside the S3 bucket where the file will be uploaded
        :return: S3 URL of the uploaded object
"""
if self.prefix:
remote_path = "{0}/{1}".format(self.prefix, remote_path)
# Check if a file with same data exists
if not self.force_upload and self.file_exists(remote_path):
LOG.debug("File with same data is already exists at %s. " "Skipping upload", remote_path)
return self.make_url(remote_path)
try:
# Default to regular server-side encryption unless customer has
# specified their own KMS keys
additional_args = {"ServerSideEncryption": "AES256"}
if self.kms_key_id:
additional_args["ServerSideEncryption"] = "aws:kms"
additional_args["SSEKMSKeyId"] = self.kms_key_id
if self.artifact_metadata:
additional_args["Metadata"] = self.artifact_metadata
if not self.bucket_name:
raise BucketNotSpecifiedError()
if not self.no_progressbar:
print_progress_callback = ProgressPercentage(file_name, remote_path)
future = self.transfer_manager.upload(
file_name, self.bucket_name, remote_path, additional_args, [print_progress_callback]
)
else:
future = self.transfer_manager.upload(file_name, self.bucket_name, remote_path, additional_args)
future.result()
return self.make_url(remote_path)
except botocore.exceptions.ClientError as ex:
error_code = ex.response["Error"]["Code"]
if error_code == "NoSuchBucket":
raise NoSuchBucketError(bucket_name=self.bucket_name) from ex
raise ex
def upload_with_dedup(self, file_name, extension=None, precomputed_md5=None):
"""
Makes and returns name of the S3 object based on the file's MD5 sum
:param file_name: file to upload
:param extension: String of file extension to append to the object
:param precomputed_md5: Specified md5 hash for the file to be uploaded.
:return: S3 URL of the uploaded object
"""
# This construction of remote_path is critical to preventing duplicate
# uploads of same object. Uploader will check if the file exists in S3
        # and re-upload only if necessary. So if the template points to the same
        # file in multiple places, it will be uploaded only once.
filemd5 = precomputed_md5 or file_checksum(file_name)
remote_path = filemd5
if extension:
remote_path = remote_path + "." + extension
return self.upload(file_name, remote_path)
def file_exists(self, remote_path):
"""
Check if the file we are trying to upload already exists in S3
:param remote_path:
:return: True, if file exists. False, otherwise
"""
try:
# Find the object that matches this ETag
if not self.bucket_name:
raise BucketNotSpecifiedError()
self.s3.head_object(Bucket=self.bucket_name, Key=remote_path)
return True
except botocore.exceptions.ClientError:
# Either File does not exist or we are unable to get
# this information.
return False
def make_url(self, obj_path):
if not self.bucket_name:
raise BucketNotSpecifiedError()
return "s3://{0}/{1}".format(self.bucket_name, obj_path)
def to_path_style_s3_url(self, key, version=None):
"""
This link describes the format of Path Style URLs
http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro
"""
base = self.s3.meta.endpoint_url
result = "{0}/{1}/{2}".format(base, self.bucket_name, key)
if version:
result = "{0}?versionId={1}".format(result, version)
return result
def get_version_of_artifact(self, s3_url):
"""
Returns version information of the S3 object that is given as S3 URL
"""
parsed_s3_url = parse_s3_url(s3_url)
s3_bucket = parsed_s3_url["Bucket"]
s3_key = parsed_s3_url["Key"]
s3_object_tagging = self.s3.get_object_tagging(Bucket=s3_bucket, Key=s3_key)
LOG.debug("S3 Object (%s) tagging information %s", s3_url, s3_object_tagging)
s3_object_version_id = s3_object_tagging["VersionId"]
return s3_object_version_id
class ProgressPercentage:
# This class was copied directly from S3Transfer docs
def __init__(self, filename, remote_path):
self._filename = filename
self._remote_path = remote_path
self._size = float(os.path.getsize(filename))
self._seen_so_far = 0
self._lock = threading.Lock()
def on_progress(self, bytes_transferred, **kwargs):
# To simplify we'll assume this is hooked up
# to a single filename.
with self._lock:
self._seen_so_far += bytes_transferred
percentage = (self._seen_so_far / self._size) * 100
sys.stderr.write(
"\rUploading to %s %s / %s (%.2f%%)" % (self._remote_path, self._seen_so_far, self._size, percentage)
)
sys.stderr.flush()
if int(percentage) == 100:
sys.stderr.write("\n")
|
the-stack_0_27389
|
import torch
import random
from model.utils import load_openai_weights_chinese, set_seed
from model.transformer_model_s2s_unembedding import TransformerUnembeddingModel
from model.text import myVocab
from config import get_model_config_unembedding, get_test_config_unembedding
from collections import Counter
import json
import numpy as np
import warnings
warnings.filterwarnings("ignore")
class Model:
"""
    This is an example model. It reads a predefined dictionary and predicts a fixed distribution.
    For a correct evaluation, each team should implement the following functions:
next_word_probability
gen_response
"""
def __init__(self):
"""
Init whatever you need here
with codecs.open(vocab_file, 'r', 'utf-8') as f:
vocab = [i.strip().split()[0] for i in f.readlines() if len(i.strip()) != 0]
self.vocab = vocab
self.freqs = dict(zip(self.vocab[::-1], range(len(self.vocab))))
"""
# vocab_file = 'vocab.txt'
model_config = get_model_config_unembedding()
test_config = get_test_config_unembedding()
set_seed(test_config.seed)
device = torch.device(test_config.device)
vocab = myVocab(model_config.vocab_path)
self.vocab = vocab
transformer = TransformerUnembeddingModel(n_layers=model_config.n_layers,
n_embeddings=len(vocab),
n_pos_embeddings=model_config.n_pos_embeddings,
embeddings_size=model_config.embeddings_size,
padding_idx=vocab.pad_id,
n_heads=model_config.n_heads,
dropout=model_config.dropout,
embed_dropout=model_config.embed_dropout,
attn_dropout=model_config.attn_dropout,
ff_dropout=model_config.ff_dropout,
bos_id=vocab.bos_id,
eos_id=vocab.eos_id,
max_seq_len=model_config.max_seq_len,
beam_size=model_config.beam_size,
length_penalty=model_config.length_penalty,
n_segments=model_config.n_segments,
annealing_topk=model_config.annealing_topk,
temperature=model_config.temperature,
annealing=model_config.annealing,
diversity_coef=model_config.diversity_coef,
diversity_groups=model_config.diversity_groups,
n_gender=None,
n_loc=None,
n_tag=None)
transformer = transformer.to(device)
state_dict = torch.load(test_config.last_checkpoint_path, map_location=device)
temp = dict(state_dict['model'])
keys = list(temp.keys())
for key in keys:
# new_key = '.'.join([i for i in key.split('.') if i != 'module'])
new_key = key.replace('.module', '')
temp[new_key] = temp.pop(key)
transformer.load_state_dict(temp)
transformer.eval()
self.model_config = model_config
self.test_config = test_config
self.transformer = transformer
self.device = device
print('Weights loaded from {}'.format(test_config.last_checkpoint_path))
def next_word_probability(self, context, partial_out, weight_i=None):
"""
Return probability distribution over next words given a partial true output.
This is used to calculate the per-word perplexity.
:param context: dict, contexts containing the dialogue history and personal
profile of each speaker
this dict contains following keys:
context['dialog']: a list of string, dialogue histories (tokens in each utterances
are separated using spaces).
context['uid']: a list of int, indices to the profile of each speaker
context['profile']: a list of dict, personal profiles for each speaker
context['responder_profile']: dict, the personal profile of the responder
:param partial_out: list, previous "true" words
:return: a list, the first element is a dict, where each key is a word and each value is a probability
score for that word. Unset keys assume a probability of zero.
the second element is the probability for the EOS token
e.g.
context:
{ "dialog": [ ["How are you ?"], ["I am fine , thank you . And you ?"] ],
"uid": [0, 1],
"profile":[ { "loc":"Beijing", "gender":"male", "tag":"" },
{ "loc":"Shanghai", "gender":"female", "tag":"" } ],
"responder_profile":{ "loc":"Beijing", "gender":"male", "tag":"" }
}
partial_out:
['I', 'am']
==> {'fine': 0.9}, 0.1
"""
'''
# freqs = copy.deepcopy(self.freqs)
freqs = self.freqs
for i in partial_out:
if i in freqs:
freqs[i] += 1000
'''
if 'responder_profile' in context:
responder_profile = context['responder_profile']
tag = responder_profile['tag'].replace(' ', '')
# weight_i = torch.Tensor([[1, 0]]).to('cuda')
else:
responder_profile = context['response_profile']
tag = ';'.join(responder_profile['tag']).replace(' ', '')
dialog = context['dialog']
uid = context['uid']
profile_all = context['profile']
# tag = ';'.join(responder_profile['tag']).replace(' ', '')
loc = ';'.join(responder_profile['loc'].split()).replace(' ', '')
gender = '男' if responder_profile['gender'] == 'male' else '女'
persona = '性别:' + gender + ',' + '地点:' + loc + ',' + '标签:' + tag
profile_ids = self.vocab.string2ids(' '.join(persona))
dialog_ids = [self.vocab.string2ids(' '.join(i[0].replace(' ', ''))) for i in dialog]
profile = [self.vocab.eos_id] + profile_ids + [self.vocab.eos_id]
history_cat = [self.vocab.eos_id]
for k in range(len(dialog_ids)):
temp = dialog_ids[k] + [self.vocab.spl_id]
history_cat.extend(temp)
history_cat[-1] = self.vocab.eos_id
profile = profile[:48]
history_cat = history_cat[-128:]
sample = profile, history_cat
persona, dialog = sample
contexts = [torch.tensor([c], dtype=torch.long, device=self.device) for c in [persona, dialog] if
len(c) > 0]
with torch.no_grad():
persona_enc = self.transformer.encode(contexts[0])
dialog_enc = self.transformer.encode(contexts[1], gender=None, loc=None, tag=None)
enc_contexts = [persona_enc, dialog_enc]
if weight_i is None:
weight = self.transformer.compute_weight(enc_contexts[1])
else:
weight = weight_i
partial_out_ids = self.vocab.string2ids(' '.join(''.join(partial_out)))
prediction = self.transformer.predict_next(enc_contexts, prefix=partial_out_ids, weight=weight)
eos_prob = prediction[self.vocab.eos_id]
distribute = {self.vocab.id2token[i]: max(t, 1e-8) for i, t in enumerate(prediction)}
return distribute, eos_prob
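    # Usage sketch, mirroring the docstring example above (assumes Model() can load
    # the vocab and checkpoint paths from the configs; not part of the original file):
    #   model = Model()
    #   ctx = {"dialog": [["How are you ?"], ["I am fine , thank you . And you ?"]],
    #          "uid": [0, 1],
    #          "profile": [{"loc": "Beijing", "gender": "male", "tag": ""},
    #                      {"loc": "Shanghai", "gender": "female", "tag": ""}],
    #          "responder_profile": {"loc": "Beijing", "gender": "male", "tag": ""}}
    #   dist, eos_prob = model.next_word_probability(ctx, ['I', 'am'])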
def gen_response(self, contexts, weight_i=None):
"""
Return a list of responses to each context.
        :param contexts: list, a list of contexts; each context is a dict that contains the dialogue history and personal
profile of each speaker
        this dict contains the following keys:
context['dialog']: a list of string, dialogue histories (tokens in each utterances
are separated using spaces).
context['uid']: a list of int, indices to the profile of each speaker
context['profile']: a list of dict, personal profiles for each speaker
context['responder_profile']: dict, the personal profile of the responder
:return: list, responses for each context, each response is a list of tokens.
e.g.
contexts:
[{ "dialog": [ ["How are you ?"], ["I am fine , thank you . And you ?"] ],
"uid": [0, 1],
"profile":[ { "loc":"Beijing", "gender":"male", "tag":"" },
{ "loc":"Shanghai", "gender":"female", "tag":"" } ],
"responder_profile":{ "loc":"Beijing", "gender":"male", "tag":"" }
}]
==> [['I', 'am', 'fine', 'too', '!']]
"""
res = []
for context in contexts:
if 'responder_profile' in context:
responder_profile = context['responder_profile']
tag = responder_profile['tag'].replace(' ', '')
# weight_i = torch.Tensor([[1, 0]]).to('cuda')
else:
responder_profile = context['response_profile']
tag = ';'.join(responder_profile['tag']).replace(' ', '')
dialog = context['dialog']
uid = context['uid']
profile_all = context['profile']
# tag = ';'.join(responder_profile['tag']).replace(' ', '')
loc = ';'.join(responder_profile['loc'].split()).replace(' ', '')
gender = '男' if responder_profile['gender'] == 'male' else '女'
persona = '性别:' + gender + ',' + '地点:' + loc + ',' + '标签:' + tag
profile_ids = self.vocab.string2ids(' '.join(persona))
dialog_ids = [self.vocab.string2ids(' '.join(i[0].replace(' ', ''))) for i in dialog]
profile = [self.vocab.eos_id] + profile_ids + [self.vocab.eos_id]
history_cat = [self.vocab.eos_id]
for k in range(len(dialog_ids)):
temp = dialog_ids[k] + [self.vocab.spl_id]
history_cat.extend(temp)
history_cat[-1] = self.vocab.eos_id
profile = profile[:48]
history_cat = history_cat[-128:]
sample = profile, history_cat
persona, dialog = sample
contexts = [torch.tensor([c], dtype=torch.long, device=self.device) for c in [persona, dialog] if
len(c) > 0]
with torch.no_grad():
persona_enc = self.transformer.encode(contexts[0])
dialog_enc = self.transformer.encode(contexts[1], gender=None, loc=None, tag=None)
enc_contexts = [persona_enc, dialog_enc]
if weight_i is None:
weight = self.transformer.compute_weight(enc_contexts[1])
else:
weight = weight_i
prediction = self.transformer.beam_search(enc_contexts, weight=weight)[0]
prediction_str = self.vocab.ids2string(prediction)
res.append(list(prediction_str))
return res
def test(model, input_file, output_file):
with open(output_file, 'w', encoding='utf8') as fw:
with open(input_file, 'r', encoding='utf8') as fr:
lines = fr.readlines()
for line in lines:
line = line.strip('\n')
data = json.loads(line)
dialog = data['dialog']
uid = data['uid']
profile_all = data['profile']
if 'responder_profile' in data:
responder_profile = data['responder_profile']
else:
responder_profile = data['response_profile']
golden_response = data['golden_response']
golden_response_str = ''.join(golden_response).replace(' ', '')
dialog_str = '\n\t'.join([''.join(i).replace(' ', '') for i in dialog])
profile_all_str = '\n\t'.join([json.dumps(i, ensure_ascii=False) for i in profile_all])
responder_profile_str = json.dumps(responder_profile, ensure_ascii=False)
fw.write('all profiles: \n\t' + profile_all_str + '\n')
fw.write('responder profile: \n\t' + responder_profile_str + '\n')
fw.write('history: \n\t' + dialog_str + '\n')
fw.write('golden response: \n\t' + golden_response_str + '\n')
ans_auto = model.gen_response([data])
ans_auto = ''.join(ans_auto[0])
fw.write('predict with unembedding: ' + ans_auto + '\n')
fw.write('\n')
if __name__ == '__main__':
model = Model()
# test_biased(model)
files = [['data/test_data_biased.json', 'data/test_data_biased_unembedding.txt'],
['data/test_data_random.json', 'data/test_data_random_unembedding.txt']]
test(model, files[0][0], files[0][1])
test(model, files[1][0], files[1][1])
|
the-stack_0_27394
|
"""
COL780
Assignment 3
"""
import cv2
import argparse
import os, shutil
import json
import numpy as np
from tqdm import tqdm
from utils import *
# Using OpenCV Detector
def hog_pretrained(args):
# Initialize the HOG detector
hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
if os.path.exists(str(args.inp_folder)+"/vis_hog_pretrained"):
shutil.rmtree(str(args.inp_folder)+"/vis_hog_pretrained")
os.mkdir(str(args.inp_folder)+"/vis_hog_pretrained")
coco_result = []
category_id = 1
f=open(args.val_json,'r')
data=json.load(f)
images_dict=data['images']
images=[images_dict[i]['file_name'].split('/')[2] for i in range(len(images_dict))]
annotations_dict=data["annotations"]
image_ids=[annotations_dict[i]['image_id'] for i in range(len(annotations_dict))]
image_ids=np.unique(image_ids)
f.close()
for i in tqdm(range(len(images))):
file=images[i]
image_id=image_ids[i]
image = cv2.imread(os.path.join(args.inp_folder,"PNGImages",file))
h, w = image.shape[:2]
original_image = image.copy()
# Running detector
(pred, confidence) = hog.detectMultiScale(image, winStride=(2, 2), padding=(4, 4), scale=1.05)
        # The default sliding-window size is (64, 128), as suggested in the original HOG paper
rects = []
for rect in pred:
x,y,w,h = rect
x1 = x
y1 = y
x3 = x + w
y3 = y + h
rects.append([x1,y1,x3,y3])
rects=np.array(rects)
rects,scores = NMS(rects,confidence)
for rect,score in zip(rects,scores):
x1,y1,x3,y3 = rect.tolist()
coco_result.append({"image_id":int(image_id),"category_id":int(category_id),"bbox":[float(x1),float(y1),float(x3-x1),float(y3-y1)],"score":np.round(score.item(),3)})
if args.vis:
cv2.rectangle(original_image, (x1, y1), (x3, y3), (0, 0, 255), 2)
# cv2.putText(original_image_1 , str(round(score.item(),3)), (x1, y1-5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
if args.vis:
cv2.imwrite(str(args.inp_folder)+"/vis_hog_pretrained/"+str(file),original_image)
if args.vis:
print(f"Saved images with bounding box in {args.inp_folder+'/vis_hog_pretrained.json'}")
print(f"Saved predictions at {args.inp_folder+'/pred_eval_hog_pretrained.json'}")
json.dump(coco_result, open(args.inp_folder+"/pred_eval_hog_pretrained.json", 'w'), ensure_ascii=False)
def main(args):
hog_pretrained(args)
if __name__ == "__main__":
argument_parser_object = argparse.ArgumentParser(description="Pedestrian Detection in images")
    argument_parser_object.add_argument('-i', '--inp_folder', type=str, default='PennFudanPed', help="Path for the root folder of the dataset (containing images, annotations, etc.)")
    argument_parser_object.add_argument('-v', '--vis', action='store_true', default=False, help="Visualize results (add --vis to visualize)")
argument_parser_object.add_argument('-t', '--train_json', type=str, default="PennFudanPed_train.json", help="path for the train annotation json file.")
argument_parser_object.add_argument('-val', '--val_json', type=str, default="PennFudanPed_val.json", help="path for the validation annotation json file.")
args = argument_parser_object.parse_args()
main(args)
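# Invocation sketch (the script file name is an assumption; paths are the argparse
# defaults above):
#   python hog_pretrained.py -i PennFudanPed -val PennFudanPed_val.json --vis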
|
the-stack_0_27396
|
from django.core.management.base import BaseCommand
from openpyxl import load_workbook
from directory.models import Researches, ResearchSite
class Command(BaseCommand):
def add_arguments(self, parser):
"""
        :param path - file with patient cards + dispensary registration diagnosis
"""
parser.add_argument('path', type=str)
def handle(self, *args, **kwargs):
"""
        Import consultation, treatment, dentistry and inpatient services.
        If the service (id) already exists, update its internal code; otherwise create a new one.
:param args:
:param kwargs:
:return:
"""
fp = kwargs["path"]
self.stdout.write("Path: " + fp)
wb = load_workbook(filename=fp)
ws = wb[wb.sheetnames[0]]
ws1 = wb[wb.sheetnames[1]]
starts = False
identify = 0
int_code = 0
research = ''
type_research = 0
place_research = 0
podr = ''
pay = ''
r = 0
for row in ws.rows:
cells = [str(x.value) for x in row]
if not starts:
if "код_внутренний" in cells and "услуга" in cells and "тип" in cells and "место" in cells and "id" in cells:
starts = True
identify = cells.index("id")
int_code = cells.index("код_внутренний")
research = cells.index("услуга")
type_research = cells.index("тип")
place_research = cells.index("место")
podr = cells.index("подразделение")
pay = cells.index("платно")
def insert_data(ins):
c1 = ws1.cell(row=r, column=1)
c2 = ws1.cell(row=r, column=2)
c3 = ws1.cell(row=r, column=3)
c4 = ws1.cell(row=r, column=4)
c5 = ws1.cell(row=r, column=5)
c6 = ws1.cell(row=r, column=6)
c7 = ws1.cell(row=r, column=7)
c1.value = ins
c2.value = cells[int_code]
c3.value = cells[research]
c4.value = cells[type_research]
c5.value = cells[place_research]
c6.value = cells[podr]
c7.value = cells[pay]
else:
r = r + 1
if cells[identify] == '-1':
if Researches.objects.filter(internal_code=cells[int_code]).exists():
r_o = Researches.objects.values_list('pk').get(internal_code=cells[int_code])
insert_data(int(r_o[0]))
continue
else:
                        treatment = cells[type_research] == 'is_treatment'
                        doc_refferal = cells[type_research] == 'is_doc_refferal'
                        stom = cells[type_research] == 'is_stom'
                        hospital = cells[type_research] == 'is_hospital'
if cells[place_research] == '-1':
s_t = None
else:
s_t = ResearchSite.objects.get(pk=int(cells[place_research]))
c = Researches.objects.create(
title=cells[research], site_type=s_t, internal_code=cells[int_code], is_treatment=treatment, is_doc_refferal=doc_refferal, is_hospital=hospital, is_stom=stom
)
insert_data(int(c.pk))
                        print('service added:', c.title, c.pk, c.internal_code)  # noqa: T001
else:
pk_research = int(cells[identify])
res = Researches.objects.get(pk=pk_research)
if res:
Researches.objects.filter(pk=pk_research).update(internal_code=cells[int_code])
                        print('service updated (title, pk, internal_code):', res.title, res.pk, cells[int_code])  # noqa: T001
insert_data(int(res.pk))
wb.save(fp + 'import')
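# Invocation sketch (the command name equals this module's file name under
# management/commands/, which is not shown here; the xlsx path is hypothetical):
#   python manage.py <command_name> /path/to/services.xlsx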
|
the-stack_0_27398
|
# Loss functions for the generator and the discriminator
from warnings import simplefilter
simplefilter(action = "ignore", category = FutureWarning)
import numpy as np
import tensorflow as tf
import dnnlib.tflib as tflib
from dnnlib.tflib.autosummary import autosummary
def G_loss(G, D,
dataset, # The dataset object for the real images
minibatch_size, # size of each minibatch
loss_type, # The loss type: logistic, hinge, wgan
reg_weight = 1.0, # Regularization strength
pathreg = False, # Path regularization
pl_minibatch_shrink = 2, # Minibatch shrink (for path regularization only)
pl_decay = 0.01, # Decay (for path regularization only)
pl_weight = 2.0, # Weight (for path regularization only)
**kwargs):
latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
labels = dataset.get_random_labels_tf(minibatch_size)
fake_imgs_out = G.get_output_for(latents, labels, is_training = True)[0]
fake_scores_out = D.get_output_for(fake_imgs_out, labels, is_training = True)
if loss_type == "logistic":
loss = -tf.nn.softplus(fake_scores_out)
elif loss_type == "logistic_ns":
loss = tf.nn.softplus(-fake_scores_out)
elif loss_type == "hinge":
loss = -tf.maximum(0.0, 1.0 + fake_scores_out)
elif loss_type == "wgan":
loss = -fake_scores_out
reg = None
if pathreg:
with tf.name_scope("PathReg"):
# Evaluate the regularization term using a smaller minibatch to conserve memory
if pl_minibatch_shrink > 1:
pl_minibatch = minibatch_size // pl_minibatch_shrink
pl_latents = tf.random_normal([pl_minibatch] + G.input_shapes[0][1:])
pl_labels = dataset.get_random_labels_tf(pl_minibatch)
ret = G.get_output_for(pl_latents, pl_labels, is_training = True, return_dlatents = True)
fake_imgs_out, dlatents = ret[0], ret[-1]
# Compute |J*y|
pl_noise = tf.random_normal(tf.shape(fake_imgs_out)) / np.sqrt(np.prod(G.output_shape[2:]))
pl_grads = tf.gradients(tf.reduce_sum(fake_imgs_out * pl_noise), [dlatents])[0]
pl_lengths = tf.sqrt(tf.reduce_mean(tf.reduce_sum(tf.square(pl_grads), axis = 3), axis = [1, 2]))
pl_lengths = autosummary("Loss/pl_lengths", pl_lengths)
# Track exponential moving average of |J*y|
with tf.control_dependencies(None):
pl_mean_var = tf.Variable(name = "pl_mean", trainable = False, initial_value = 0.0, dtype = tf.float32)
pl_mean = pl_mean_var + pl_decay * (tf.reduce_mean(pl_lengths) - pl_mean_var)
pl_update = tf.assign(pl_mean_var, pl_mean)
# Calculate (|J*y|-a)^2
with tf.control_dependencies([pl_update]):
pl_penalty = tf.square(pl_lengths - pl_mean)
pl_penalty = autosummary("Loss/pl_penalty", pl_penalty)
reg = pl_penalty * pl_weight
if reg is not None:
reg *= reg_weight
return loss, reg
def D_loss(G, D,
reals, # A batch of real images
labels, # A batch of labels (default 0s if no labels)
minibatch_size, # Size of each minibatch
loss_type, # Loss type: logistic, hinge, wgan
    reg_type,                       # Regularization type: r1, r2, gp (mixed)
gamma = 10.0, # Regularization strength
wgan_epsilon = 0.001, # Wasserstein epsilon (for wgan only)
wgan_target = 1.0, # Wasserstein target (for wgan only)
**kwargs):
latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
fake_imgs_out = G.get_output_for(latents, labels, is_training = True)[0]
real_scores_out = D.get_output_for(reals, labels, is_training = True)
fake_scores_out = D.get_output_for(fake_imgs_out, labels, is_training = True)
real_scores_out = autosummary("Loss/scores/real", real_scores_out)
fake_scores_out = autosummary("Loss/scores/fake", fake_scores_out)
if loss_type == "logistic":
loss = tf.nn.softplus(fake_scores_out)
loss += tf.nn.softplus(-real_scores_out)
elif loss_type == "hinge":
loss = tf.maximum(0.0, 1.0 + fake_scores_out)
loss += tf.maximum(0.0, 1.0 - real_scores_out)
elif loss_type == "wgan":
loss = fake_scores_out - real_scores_out
with tf.name_scope("EpsilonPenalty"):
epsilon_penalty = autosummary("Loss/epsilon_penalty", tf.square(real_scores_out))
loss += epsilon_penalty * wgan_epsilon
reg = None
with tf.name_scope("GradientPenalty"):
if reg_type in ["r1", "r2"]:
if reg_type == "r1":
grads = tf.gradients(tf.reduce_sum(real_scores_out), [reals])[0]
else:
grads = tf.gradients(tf.reduce_sum(fake_scores_out), [fake_imgs_out])[0]
gradient_penalty = tf.reduce_sum(tf.square(grads), axis = [1, 2, 3])
gradient_penalty = autosummary("Loss/gradient_penalty", gradient_penalty)
reg = gradient_penalty * (gamma * 0.5)
elif reg_type == "gp":
mixing_factors = tf.random_uniform([minibatch_size, 1, 1, 1], 0.0, 1.0, dtype = fake_imgs_out.dtype)
mixed_imgs_out = tflib.lerp(tf.cast(reals, fake_imgs_out.dtype), fake_imgs_out, mixing_factors)
mixed_scores_out = D.get_output_for(mixed_imgs_out, labels, is_training = True)
mixed_scores_out = autosummary("Loss/scores/mixed", mixed_scores_out)
mixed_grads = tf.gradients(tf.reduce_sum(mixed_scores_out), [mixed_imgs_out])[0]
mixed_norms = tf.sqrt(tf.reduce_sum(tf.square(mixed_grads), axis = [1, 2, 3]))
mixed_norms = autosummary("Loss/mixed_norms", mixed_norms)
gradient_penalty = tf.square(mixed_norms - wgan_target)
reg = gradient_penalty * (gamma / (wgan_target ** 2))
return loss, reg
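# Usage sketch (assumes G, D, reals, labels and dataset follow the dnnlib/tflib
# conventions used above; this is not part of the original training loop):
#   g_loss, g_reg = G_loss(G, D, dataset, minibatch_size=32,
#                          loss_type="logistic_ns", pathreg=True)
#   d_loss, d_reg = D_loss(G, D, reals, labels, minibatch_size=32,
#                          loss_type="logistic", reg_type="r1")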
|
the-stack_0_27399
|
from sqlalchemy.sql import expression
from app.models.db import BaseModel, TimedBaseModel, db
class Chat(TimedBaseModel):
__tablename__ = "chats"
id = db.Column(db.BigInteger, primary_key=True, index=True)
type = db.Column(db.String)
is_official = db.Column(db.Boolean, server_default=expression.false())
language = db.Column(db.String(12), default="ru")
join_filter = db.Column(db.Boolean, server_default=expression.false())
class ChatRelatedModel(BaseModel):
__abstract__ = True
chat_id = db.Column(
db.ForeignKey(
f"{Chat.__tablename__}.id", ondelete="CASCADE", onupdate="CASCADE"
),
nullable=False,
)
|
the-stack_0_27400
|
# parser
# author: Christophe VG <[email protected]>
# a parser for ALP commands
import struct
from d7a.alp.command import Command
from d7a.alp.operands.interface_status import InterfaceStatusOperand
from d7a.alp.operations.status import InterfaceStatus
from d7a.alp.operations.tag_response import TagResponse
from d7a.alp.status_action import StatusAction, StatusActionOperandExtensions
from d7a.alp.regular_action import RegularAction
from d7a.alp.operations.responses import ReturnFileData
from d7a.alp.operations.requests import ReadFileData
from d7a.alp.operands.file import Offset, Data, DataRequest
from d7a.alp.tag_response_action import TagResponseAction
from d7a.parse_error import ParseError
from d7a.sp.status import Status
from d7a.d7anp.addressee import Addressee
from d7a.types.ct import CT
from d7a.alp.operands.tag_id import TagId
from d7a.alp.operations.tag_request import TagRequest
from d7a.alp.tag_request_action import TagRequestAction
class Parser(object):
def parse(self, s, cmd_length):
actions = []
if cmd_length != 0:
startpos = s.bytepos
alp_bytes_parsed = 0
while alp_bytes_parsed < cmd_length:
action = self.parse_alp_action(s)
actions.append(action)
alp_bytes_parsed = alp_bytes_parsed + (s.bytepos - startpos)
cmd = Command(actions = actions, generate_tag_request_action=False)
return cmd
def parse_alp_action(self, s):
# meaning of first 2 bits depend on action opcode
b7 = s.read("bool")
b6 = s.read("bool")
op = s.read("uint:6")
try:
return{
1 : self.parse_alp_read_file_data_action,
32 : self.parse_alp_return_file_data_action,
34 : self.parse_alp_return_status_action,
35 : self.parse_tag_response_action,
52 : self.parse_tag_request_action
}[op](b7, b6, s)
except KeyError:
raise ParseError("alp_action " + str(op) + " is not implemented")
def parse_alp_read_file_data_action(self, b7, b6, s):
operand = self.parse_alp_file_data_request_operand(s)
return RegularAction(group=b7,
resp=b6,
operation=ReadFileData(operand=operand))
def parse_alp_file_data_request_operand(self, s):
offset = self.parse_offset(s)
length = s.read("uint:8")
return DataRequest(length=length, offset=offset)
def parse_alp_return_file_data_action(self, b7, b6, s):
operand = self.parse_alp_return_file_data_operand(s)
return RegularAction(group=b7,
resp=b6,
operation=ReturnFileData(operand=operand))
def parse_alp_return_file_data_operand(self, s):
offset = self.parse_offset(s)
length = s.read("uint:8")
data = s.read("bytes:" + str(length))
return Data(offset=offset, data=map(ord,data))
def parse_alp_return_status_action(self, b7, b6, s):
if b7:
raise ParseError("Status Operand extension 2 and 3 is RFU")
if b6: # interface status
interface_id = s.read("uint:8")
try:
interface_status_operation = {
0x00 : self.parse_alp_interface_status_host,
0xd7 : self.parse_alp_interface_status_d7asp,
}[interface_id](s)
return StatusAction(operation=interface_status_operation,
status_operand_extension=StatusActionOperandExtensions.INTERFACE_STATUS)
except KeyError:
raise ParseError("Received ALP Interface status for interface " + str(interface_id) + " which is not implemented")
else: # action status
pass # TODO
def parse_tag_request_action(self, b7, b6, s):
if b6:
raise ParseError("bit 6 is RFU")
tag_id = s.read("uint:8")
return TagRequestAction(respond_when_completed=b7, operation=TagRequest(operand=TagId(tag_id=tag_id)))
def parse_tag_response_action(self, b7, b6, s):
if b7:
raise ParseError("bit 7 is RFU")
tag_id = s.read("uint:8")
return TagResponseAction(error=b6, operation=TagResponse(operand=TagId(tag_id=tag_id)))
def parse_alp_interface_status_host(self, s):
pass # no interface status defined for host interface
def parse_alp_interface_status_d7asp(self, s):
channel_header = s.read("uint:8") # TODO parse
channel_index = struct.unpack(">h", s.read("bytes:2"))[0]
rx_level = s.read("int:8")
link_budget = s.read("uint:8")
target_rx_level = s.read("uint:8")
nls = s.read("bool")
missed = s.read("bool")
retry = s.read("bool")
unicast = s.read("bool" )
_ = s.read("pad:4")
fifo_token = s.read("uint:8")
seq_nr = s.read("uint:8")
response_to = CT.parse(s)
addressee = Addressee.parse(s)
status = Status(channel_header=channel_header, channel_index=channel_index,
rx_level=rx_level, link_budget=link_budget,
target_rx_level=target_rx_level, nls=nls, missed=missed,
retry=retry, unicast=unicast, fifo_token=fifo_token,
seq_nr=seq_nr, response_to=response_to, addressee=addressee)
return InterfaceStatus(
operand=InterfaceStatusOperand(interface_id=0xd7, interface_status=status)
)
def parse_offset(self, s):
id = s.read("uint:8")
size = s.read("uint:2") # + 1 = already read
offset = s.read("uint:" + str(6+(size * 8)))
return Offset(id=id, size=size+1, offset=offset)
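# Usage sketch (assumes the `bitstring` package, whose ConstBitStream provides the
# `read`/`bytepos` interface this parser expects; the payload encodes one
# "return file data" action: opcode 0x20, file id 0x40, offset 0, 4 data bytes):
#   from bitstring import ConstBitStream
#   payload = bytes(bytearray([0x20, 0x40, 0x00, 0x04, 0xde, 0xad, 0xbe, 0xef]))
#   cmd = Parser().parse(ConstBitStream(bytes=payload), cmd_length=len(payload))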
|
the-stack_0_27403
|
from ex115.lib.interface import *
def arquivoExiste(nome):
try:
a = open(nome, 'rt')
a.close()
except FileNotFoundError:
return False
else:
return True
def criarArquivo(nome):
try:
a = open(nome, 'wt+')
except:
        print('There was an error while creating the file!')
else:
        print(f'File {nome} created successfully!')
def lerArquivo(nome):
try:
a = open(nome, 'rt')
except:
        print('Error while reading the file!')
else:
        cabeçalho('Registered people')
for linha in a:
dado = linha.split(';')
dado[1] = dado[1].replace('\n', '')
print(f'{dado[0]} {dado[1]}')
finally:
a.close()
def cadastrar(arq, nome='Unknown', idade=0):
try:
a = open(arq, 'at')
except:
        print('There was an error while opening the file.')
else:
try:
            a.write(f'{nome};{idade}\n')
except:
            print('There was an error while writing this data!')
else:
            print(f'New record for {nome} added')
a.close()
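# Usage sketch (the file name below is hypothetical):
#   arq = 'people.txt'
#   if not arquivoExiste(arq):
#       criarArquivo(arq)
#   cadastrar(arq, 'Ana', 33)
#   lerArquivo(arq)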
|
the-stack_0_27406
|
# This program demonstrates the BankAccount class
# with the __str__ method added to it.
import bankaccount2
def main():
# Get the starting balance
start_bal = float(input('Enter your starting balance: '))
# Create a BankAccount object.
savings = bankaccount2.BankAccount(start_bal)
# Deposit the user's paycheck.
pay = float(input('How much were you paid this week? '))
print('I will deposit that into your account.')
savings.deposit(pay)
# Display the balance.
print(savings)
# Get the amount to withdraw.
cash = float(input('How much would you like to withdraw? '))
print('I will withdraw that from your account.')
savings.withdraw(cash)
# Display the balance
print(savings)
# Call the main function
main()
|
the-stack_0_27408
|
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Tag, Recipe
from recipe.serializers import TagSerializer
TAGS_URL = reverse('recipe:tag-list')
class PublicTagAPITests(TestCase):
"""Test the publicly available tags API"""
def setUp(self):
self.client = APIClient()
def test_login_required(self):
"""Test that login is required for retrieving tags"""
res = self.client.get(TAGS_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateTagAPITests(TestCase):
"""Test the authorizedd user tags API"""
def setUp(self):
self.user = get_user_model().objects.create_user(
'[email protected]',
'test123'
)
self.client = APIClient()
self.client.force_authenticate(self.user)
def test_retrieve_tags(self):
"""Test retrieving tags"""
Tag.objects.create(user=self.user, name='Vegan')
Tag.objects.create(user=self.user, name='Dessert')
res = self.client.get(TAGS_URL)
tags = Tag.objects.all().order_by('-name')
serializer = TagSerializer(tags, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_tags_limited_to_user(self):
"""Test that tags returned are for the authenticated user"""
user2 = get_user_model().objects.create_user(
'[email protected]',
'test123'
)
Tag.objects.create(user=user2, name='Fruity')
tag = Tag.objects.create(user=self.user, name='Comfort Food')
res = self.client.get(TAGS_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data[0]['name'], tag.name)
def test_create_tags_successful(self):
"""Test creating a new tag"""
payload = {
'name': 'Test tag'
}
self.client.post(TAGS_URL, payload)
exists = Tag.objects.filter(
user=self.user,
name=payload['name']
).exists()
self.assertTrue(exists)
def test_create_tag_invalid(self):
"""Test creating a new tag with invalid payload"""
payload = {'name': ''}
res = self.client.post(TAGS_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_retrieve_tags_assigned_to_recipe(self):
"""Test filtering tags by those assigned to recipes"""
tag1 = Tag.objects.create(user=self.user, name='Breakfast')
tag2 = Tag.objects.create(user=self.user, name='Lunch')
recipe = Recipe.objects.create(
title='Coriander eggs on toast',
time_minutes=10,
price=5.00,
user=self.user
)
recipe.tags.add(tag1)
res = self.client.get(TAGS_URL, {'assigned_only': 1})
serializer1 = TagSerializer(tag1)
serializer2 = TagSerializer(tag2)
self.assertIn(serializer1.data, res.data)
self.assertNotIn(serializer2.data, res.data)
def test_retrieve_tags_assigned_unique(self):
"""Test filtering tags by assigned returns unique items"""
tag = Tag.objects.create(user=self.user, name='Breakfast')
Tag.objects.create(user=self.user, name='Lunch')
recipe1 = Recipe.objects.create(
title='Pancakes',
time_minutes=5,
price=3.00,
user=self.user
)
recipe1.tags.add(tag)
recipe2 = Recipe.objects.create(
title='Porridge',
time_minutes=3,
price=2.00,
user=self.user
)
recipe2.tags.add(tag)
res = self.client.get(TAGS_URL, {'assigned_only': 1})
self.assertEqual(len(res.data), 1)
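# These tests run under Django's test runner, e.g. (the dotted module path is an
# assumption about where this file lives):
#   python manage.py test recipe.tests.test_tags_api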
|
the-stack_0_27414
|
from GDNet.module import *
class Feature(nn.Module):
def __init__(self):
super(Feature, self).__init__()
self.conv_start = nn.Sequential(
BasicConv(3, 32, kernel_size=3, padding=1),
BasicConv(32, 32, kernel_size=5, stride=3, padding=2),
BasicConv(32, 32, kernel_size=3, padding=1))
self.conv1a = BasicConv(32, 48, kernel_size=3, stride=2, padding=1)
self.conv2a = BasicConv(48, 64, kernel_size=3, stride=2, padding=1)
self.conv3a = BasicConv(64, 96, kernel_size=3, stride=2, padding=1)
self.conv4a = BasicConv(96, 128, kernel_size=3, stride=2, padding=1)
self.deconv4a = Conv2x(128, 96, deconv=True)
self.deconv3a = Conv2x(96, 64, deconv=True)
self.deconv2a = Conv2x(64, 48, deconv=True)
self.deconv1a = Conv2x(48, 32, deconv=True)
self.conv1b = Conv2x(32, 48)
self.conv2b = Conv2x(48, 64)
self.conv3b = Conv2x(64, 96)
self.conv4b = Conv2x(96, 128)
self.deconv4b = Conv2x(128, 96, deconv=True)
self.deconv3b = Conv2x(96, 64, deconv=True)
self.deconv2b = Conv2x(64, 48, deconv=True)
self.deconv1b = Conv2x(48, 32, deconv=True)
def forward(self, x):
x = self.conv_start(x)
rem0 = x
x = self.conv1a(x)
rem1 = x
x = self.conv2a(x)
rem2 = x
x = self.conv3a(x)
rem3 = x
x = self.conv4a(x)
rem4 = x
x = self.deconv4a(x, rem3)
rem3 = x
x = self.deconv3a(x, rem2)
rem2 = x
x = self.deconv2a(x, rem1)
rem1 = x
x = self.deconv1a(x, rem0)
rem0 = x
x = self.conv1b(x, rem1)
rem1 = x
x = self.conv2b(x, rem2)
rem2 = x
x = self.conv3b(x, rem3)
rem3 = x
x = self.conv4b(x, rem4)
x = self.deconv4b(x, rem3)
x = self.deconv3b(x, rem2)
x = self.deconv2b(x, rem1)
x = self.deconv1b(x, rem0)
return x
class Guidance(nn.Module):
def __init__(self):
super(Guidance, self).__init__()
self.conv0 = BasicConv(64, 16, kernel_size=3, padding=1)
self.conv1 = nn.Sequential(
BasicConv(16, 32, kernel_size=5, stride=3, padding=2),
BasicConv(32, 32, kernel_size=3, padding=1))
self.conv2 = BasicConv(32, 32, kernel_size=3, padding=1)
self.weight_sg1 = nn.Conv2d(32, 640, (3, 3), (1, 1), (1, 1), bias=False)
self.weight_sg2 = nn.Conv2d(32, 640, (3, 3), (1, 1), (1, 1), bias=False)
self.weight_lg1 = nn.Sequential(BasicConv(16, 16, kernel_size=3, padding=1),
nn.Conv2d(16, 75, (3, 3), (1, 1), (1, 1), bias=False))
self.weight_lg2 = nn.Sequential(BasicConv(16, 16, kernel_size=3, padding=1),
nn.Conv2d(16, 75, (3, 3), (1, 1), (1, 1), bias=False))
def forward(self, x):
x = self.conv0(x)
rem = x
x = self.conv1(x)
# sg1: 640, H/3, W/3
# 640 = 32*4*5
sg1 = self.weight_sg1(x)
x = self.conv2(x)
        # sg2: 640, H/3, W/3
sg2 = self.weight_sg2(x)
# lg1: 75, H, W
# 75 = 3*5*5
lg1 = self.weight_lg1(rem)
# lg2: 75, H, W
lg2 = self.weight_lg2(rem)
return {
'sg1': sg1,
'sg2': sg2,
'lg1': lg1,
'lg2': lg2
}
class DisparityAggregation(nn.Module):
def __init__(self, max_disparity=192):
super(DisparityAggregation, self).__init__()
self.max_disparity = max_disparity
self.conv32x1 = nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1, bias=False)
self.lga = LGA(5)
self.disparity = DisparityRegression(self.max_disparity)
def forward(self, x, lg1, lg2):
x = F.interpolate(self.conv32x1(x), [self.max_disparity, x.size()[3]*3, x.size()[4]*3], mode='trilinear', align_corners=False)
x = torch.squeeze(x, 1) # D, H, W
x = self.lga(x, lg1) # D, H, W
x = F.softmax(x, dim=1) # D, H, W
x = self.lga(x, lg2) # D, H, W
x = F.normalize(x, p=1, dim=1) # D, H, W
return self.disparity(x)
class CostAggregation(nn.Module):
def __init__(self, maxdisp=192):
super(CostAggregation, self).__init__()
self.maxdisp = maxdisp
self.conv_start = BasicConv(64, 32, is_3d=True, kernel_size=3, padding=1, relu=False)
self.conv0 = BasicConv(32, 32, is_3d=True, kernel_size=3, padding=1)
self.sga1 = SGABlock(32)
self.sga2 = SGABlock(32)
self.disp = DisparityAggregation(self.maxdisp)
def forward(self, x, g):
x = self.conv_start(x)
# x: 32, D/3, H/3, W/3
# sg1: 640, H/3, W/3
x = self.sga1(x, g['sg1'])
x = self.conv0(x)
# x: 32, D/3, H/3, W/3
# sg1: 640, H/3, W/3
x = self.sga2(x, g['sg2'])
        # x: 32, D/3, H/3, W/3
# lg1: 75, H, W
# lg2: 75, H, W
x = self.disp(x, g['lg1'], g['lg2'])
return x
class GANet(nn.Module):
def __init__(self, max_disparity=192):
super(GANet, self).__init__()
self.max_disparity = max_disparity
self.conv_start = nn.Sequential(BasicConv(3, 16, kernel_size=3, padding=1),
BasicConv(16, 32, kernel_size=3, padding=1))
self.conv_x = BasicConv(32, 32, kernel_size=3, padding=1)
self.conv_y = BasicConv(32, 32, kernel_size=3, padding=1)
self.conv_refine = nn.Conv2d(32, 32, (3, 3), (1, 1), (1, 1), bias=False)
self.bn_relu = nn.Sequential(nn.BatchNorm2d(32),
nn.ReLU(inplace=True))
self.guidance = Guidance()
self.feature = Feature()
self.cost_volume = CostVolume(max_disparity/3)
self.cost_aggregation = CostAggregation(self.max_disparity)
def forward(self, x, y):
g = self.conv_start(x) # 32, H, W
x = self.feature(x)
y = self.feature(y)
rem = x
x = self.conv_x(x) # 32, H/3, W/3
y = self.conv_y(y) # 32, H/3, W/3
x = self.cost_volume(x, y) # 64, D/3, H/3, W/3
x1 = self.conv_refine(rem) # 32, H/3, W/3
# 32, H, W
x1 = F.interpolate(x1, [x1.size()[2] * 3, x1.size()[3] * 3], mode='bilinear', align_corners=False)
x1 = self.bn_relu(x1)
g = torch.cat((g, x1), 1) # 64, H, W
# sg1: 640, H/3, W/3
# sg2: 640, H/3, W/3
# lg1: 75, H, W
# lg2: 75, H, W
g = self.guidance(g)
return self.cost_aggregation(x, g)
|
the-stack_0_27417
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019, CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('reviews_manager', '0010_auto_20170322_1505'),
]
operations = [
migrations.AddField(
model_name='clinicalannotation',
name='label',
field=models.CharField(max_length=40, unique=True, null=True),
),
migrations.AddField(
model_name='clinicalannotationstep',
name='label',
field=models.CharField(max_length=40, unique=True, null=True),
),
migrations.AddField(
model_name='roisannotation',
name='label',
field=models.CharField(max_length=40, unique=True, null=True),
),
migrations.AddField(
model_name='roisannotationstep',
name='label',
field=models.CharField(max_length=40, unique=True, null=True),
),
]
|
the-stack_0_27419
|
# Copyright 2019, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import time
from opencensus.metrics.export.gauge import DerivedDoubleGauge
if sys.version_info < (3,):
from BaseHTTPServer import HTTPServer
else:
from http.server import HTTPServer
requests_map = dict()
ORIGINAL_CONSTRUCTOR = HTTPServer.__init__
def request_patch(func):
def wrapper(self=None):
func(self)
count = requests_map.get('count', 0)
requests_map['count'] = count + 1
return wrapper
def server_patch(*args, **kwargs):
if len(args) >= 3:
handler = args[2]
if handler:
# Patch the handler methods if they exist
if "do_DELETE" in dir(handler):
handler.do_DELETE = request_patch(handler.do_DELETE)
if "do_GET" in dir(handler):
handler.do_GET = request_patch(handler.do_GET)
if "do_HEAD" in dir(handler):
handler.do_HEAD = request_patch(handler.do_HEAD)
if "do_OPTIONS" in dir(handler):
handler.do_OPTIONS = request_patch(handler.do_OPTIONS)
if "do_POST" in dir(handler):
handler.do_POST = request_patch(handler.do_POST)
if "do_PUT" in dir(handler):
handler.do_PUT = request_patch(handler.do_PUT)
result = ORIGINAL_CONSTRUCTOR(*args, **kwargs)
return result
def setup():
# Patch the HTTPServer handler to track request information
HTTPServer.__init__ = server_patch
class RequestsRateMetric(object):
NAME = "\\ASP.NET Applications(??APP_W3SVC_PROC??)\\Requests/Sec"
def __init__(self):
setup()
@staticmethod
def get_value():
current_count = requests_map.get('count', 0)
current_time = time.time()
last_count = requests_map.get('last_count', 0)
last_time = requests_map.get('last_time')
last_result = requests_map.get('last_result', 0)
try:
# last_time is None the very first time this function is called
if last_time is not None:
elapsed_seconds = current_time - last_time
interval_count = current_count - last_count
result = interval_count / elapsed_seconds
else:
result = 0
requests_map['last_time'] = current_time
requests_map['last_count'] = current_count
requests_map['last_result'] = result
return result
except ZeroDivisionError:
# If elapsed_seconds is 0, exporter call made too close to previous
# Return the previous result if this is the case
return last_result
def __call__(self):
""" Returns a derived gauge for incoming requests per second
        Calculated by taking the number of incoming requests made to an
        HTTPServer within an elapsed interval and dividing that count by the
        elapsed time.
:rtype: :class:`opencensus.metrics.export.gauge.DerivedLongGauge`
:return: The gauge representing the incoming requests metric
"""
gauge = DerivedDoubleGauge(
RequestsRateMetric.NAME,
'Incoming Requests per second',
'rps',
[])
gauge.create_default_time_series(RequestsRateMetric.get_value)
return gauge
|
the-stack_0_27426
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
from contextlib import closing
from copy import deepcopy
from typing import Iterable, Optional, Tuple, Union
import psycopg2
import psycopg2.extensions
import psycopg2.extras
from psycopg2.extensions import connection
from psycopg2.extras import DictCursor, NamedTupleCursor, RealDictCursor
from airflow.hooks.dbapi import DbApiHook
from airflow.models.connection import Connection
CursorType = Union[DictCursor, RealDictCursor, NamedTupleCursor]
class PostgresHook(DbApiHook):
"""
Interact with Postgres.
You can specify ssl parameters in the extra field of your connection
as ``{"sslmode": "require", "sslcert": "/path/to/cert.pem", etc}``.
Also you can choose cursor as ``{"cursor": "dictcursor"}``. Refer to the
psycopg2.extras for more details.
Note: For Redshift, use keepalives_idle in the extra connection parameters
and set it to less than 300 seconds.
Note: For AWS IAM authentication, use iam in the extra connection parameters
and set it to true. Leave the password field empty. This will use the
"aws_default" connection to get the temporary token unless you override
in extras.
extras example: ``{"iam":true, "aws_conn_id":"my_aws_conn"}``
For Redshift, also use redshift in the extra connection parameters and
set it to true. The cluster-identifier is extracted from the beginning of
the host field, so is optional. It can however be overridden in the extra field.
extras example: ``{"iam":true, "redshift":true, "cluster-identifier": "my_cluster_id"}``
:param postgres_conn_id: The :ref:`postgres conn id <howto/connection:postgres>`
reference to a specific postgres database.
:type postgres_conn_id: str
"""
conn_name_attr = 'postgres_conn_id'
default_conn_name = 'postgres_default'
conn_type = 'postgres'
hook_name = 'Postgres'
supports_autocommit = True
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.schema: Optional[str] = kwargs.pop("schema", None)
self.connection: Optional[Connection] = kwargs.pop("connection", None)
self.conn: connection = None
def _get_cursor(self, raw_cursor: str) -> CursorType:
_cursor = raw_cursor.lower()
if _cursor == 'dictcursor':
return psycopg2.extras.DictCursor
if _cursor == 'realdictcursor':
return psycopg2.extras.RealDictCursor
if _cursor == 'namedtuplecursor':
return psycopg2.extras.NamedTupleCursor
raise ValueError(f'Invalid cursor passed {_cursor}')
def get_conn(self) -> connection:
"""Establishes a connection to a postgres database."""
conn_id = getattr(self, self.conn_name_attr)
conn = deepcopy(self.connection or self.get_connection(conn_id))
# check for authentication via AWS IAM
if conn.extra_dejson.get('iam', False):
conn.login, conn.password, conn.port = self.get_iam_token(conn)
conn_args = dict(
host=conn.host,
user=conn.login,
password=conn.password,
dbname=self.schema or conn.schema,
port=conn.port,
)
raw_cursor = conn.extra_dejson.get('cursor', False)
if raw_cursor:
conn_args['cursor_factory'] = self._get_cursor(raw_cursor)
for arg_name, arg_val in conn.extra_dejson.items():
if arg_name not in [
'iam',
'redshift',
'cursor',
'cluster-identifier',
'aws_conn_id',
]:
conn_args[arg_name] = arg_val
self.conn = psycopg2.connect(**conn_args)
return self.conn
def copy_expert(self, sql: str, filename: str) -> None:
"""
Executes SQL using psycopg2 copy_expert method.
Necessary to execute COPY command without access to a superuser.
Note: if this method is called with a "COPY FROM" statement and
the specified input file does not exist, it creates an empty
file and no data is loaded, but the operation succeeds.
So if users want to be aware when the input file does not exist,
they have to check its existence by themselves.
"""
if not os.path.isfile(filename):
with open(filename, 'w'):
pass
with open(filename, 'r+') as file:
with closing(self.get_conn()) as conn:
with closing(conn.cursor()) as cur:
cur.copy_expert(sql, file)
file.truncate(file.tell())
conn.commit()
def bulk_load(self, table: str, tmp_file: str) -> None:
"""Loads a tab-delimited file into a database table"""
self.copy_expert(f"COPY {table} FROM STDIN", tmp_file)
def bulk_dump(self, table: str, tmp_file: str) -> None:
"""Dumps a database table into a tab-delimited file"""
self.copy_expert(f"COPY {table} TO STDOUT", tmp_file)
# pylint: disable=signature-differs
@staticmethod
def _serialize_cell(cell: object, conn: Optional[connection] = None) -> object:
"""
Postgresql will adapt all arguments to the execute() method internally,
hence we return cell without any conversion.
See http://initd.org/psycopg/docs/advanced.html#adapting-new-types for
more information.
:param cell: The cell to insert into the table
:type cell: object
:param conn: The database connection
:type conn: connection object
:return: The cell
:rtype: object
"""
return cell
def get_iam_token(self, conn: Connection) -> Tuple[str, str, int]:
"""
Uses AWSHook to retrieve a temporary password to connect to Postgres
or Redshift. Port is required. If none is provided, default is used for
each service
"""
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
redshift = conn.extra_dejson.get('redshift', False)
aws_conn_id = conn.extra_dejson.get('aws_conn_id', 'aws_default')
aws_hook = AwsBaseHook(aws_conn_id, client_type='rds')
login = conn.login
if conn.port is None:
port = 5439 if redshift else 5432
else:
port = conn.port
if redshift:
            # Pull the cluster-identifier from the beginning of the Redshift URL
# ex. my-cluster.ccdre4hpd39h.us-east-1.redshift.amazonaws.com returns my-cluster
cluster_identifier = conn.extra_dejson.get('cluster-identifier', conn.host.split('.')[0])
client = aws_hook.get_client_type('redshift')
cluster_creds = client.get_cluster_credentials(
DbUser=conn.login,
DbName=self.schema or conn.schema,
ClusterIdentifier=cluster_identifier,
AutoCreate=False,
)
token = cluster_creds['DbPassword']
login = cluster_creds['DbUser']
else:
token = aws_hook.conn.generate_db_auth_token(conn.host, port, conn.login)
return login, token, port
@staticmethod
def _generate_insert_sql(
table: str, values: Tuple[str, ...], target_fields: Iterable[str], replace: bool, **kwargs
) -> str:
"""
Static helper method that generate the INSERT SQL statement.
The REPLACE variant is specific to MySQL syntax.
:param table: Name of the target table
:type table: str
:param values: The row to insert into the table
:type values: tuple of cell values
:param target_fields: The names of the columns to fill in the table
:type target_fields: iterable of strings
:param replace: Whether to replace instead of insert
:type replace: bool
:param replace_index: the column or list of column names to act as
index for the ON CONFLICT clause
:type replace_index: str or list
:return: The generated INSERT or REPLACE SQL statement
:rtype: str
"""
placeholders = [
"%s",
] * len(values)
replace_index = kwargs.get("replace_index")
if target_fields:
target_fields_fragment = ", ".join(target_fields)
target_fields_fragment = f"({target_fields_fragment})"
else:
target_fields_fragment = ''
sql = f"INSERT INTO {table} {target_fields_fragment} VALUES ({','.join(placeholders)})"
if replace:
if target_fields is None:
raise ValueError("PostgreSQL ON CONFLICT upsert syntax requires column names")
if replace_index is None:
raise ValueError("PostgreSQL ON CONFLICT upsert syntax requires an unique index")
if isinstance(replace_index, str):
replace_index = [replace_index]
replace_index_set = set(replace_index)
replace_target = [
"{0} = excluded.{0}".format(col) for col in target_fields if col not in replace_index_set
]
sql += " ON CONFLICT ({}) DO UPDATE SET {}".format(
", ".join(replace_index),
", ".join(replace_target),
)
return sql
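# Illustrative sketch (not part of the provider): the ON CONFLICT branch above
# produces statements such as
#   PostgresHook._generate_insert_sql(
#       "ab_user", (1, "x"), ["id", "name"], replace=True, replace_index="id")
#   # -> INSERT INTO ab_user (id, name) VALUES (%s,%s)
#   #    ON CONFLICT (id) DO UPDATE SET name = excluded.name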
|
the-stack_0_27429
|
import io
import pandas as pd
import numpy as np
import json
import os
import pickle
import logging
model_name = 'model.pkl'
scaler_name = 'scaler.pkl'
"""
Inference script. This script is used for prediction by the scoring server when the schema is known.
"""
model = None
scaler = None
logging.basicConfig(format='%(name)s - %(levelname)s - %(message)s', level=logging.INFO)
logger_pred = logging.getLogger('model-prediction')
logger_pred.setLevel(logging.INFO)
logger_feat = logging.getLogger('features')
logger_feat.setLevel(logging.INFO)
def load_model():
"""
Loads model from the serialized format
Returns
-------
model: a model instance on which predict API can be invoked
"""
global model, scaler
model_dir = os.path.dirname(os.path.realpath(__file__))
contents = os.listdir(model_dir)
if model_name in contents:
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), model_name), "rb") as file:
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), scaler_name), "rb") as sfile:
model = pickle.load(file)
scaler = pickle.load(sfile)
assert model != None
assert scaler != None
logger_pred.info("Loaded model and scaler...")
else:
raise Exception('{0} is not found in model directory {1}'.format(model_name, model_dir))
return model
# added for data scaling
def preprocess_data(x):
global scaler
logger_pred.info("Scaling features...")
x = scaler.transform(x)
return x
def predict(data, model=load_model()):
"""
Returns prediction given the model and data to predict
Parameters
----------
model: Model instance returned by load_model API
data: Data format as expected by the predict API of the core estimator. For eg. in case of sckit models it could be numpy array/List of list/Panda DataFrame
Returns
-------
predictions: Output from scoring server
Format: {'prediction':output from model.predict method}
"""
logger_pred.info("In predict...")
# some check
assert model is not None, "Model is not loaded"
x = pd.read_json(io.StringIO(data)).values
logger_feat.info("Logging features before scaling")
logger_feat.info(x)
logger_feat.info("...")
# apply scaling
x = preprocess_data(x)
logger_feat.info("Logging features after scaling")
logger_feat.info(x)
logger_feat.info("...")
logger_pred.info("Invoking model......")
preds = model.predict_proba(x)
preds = np.round(preds[:, 1], 4)
preds = preds.tolist()
logger_pred.info("Logging predictions")
logger_pred.info(preds)
return { 'prediction': preds }
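# Local smoke-test sketch (assumes model.pkl / scaler.pkl sit next to this file and
# that the number of features below matches what the scaler was fitted on):
#   sample = pd.DataFrame([[0.1, 0.2, 0.3]]).to_json()
#   print(predict(sample))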
|
the-stack_0_27433
|
import logging
l = logging.getLogger("archinfo.arch_mips32")
try:
import capstone as _capstone
except ImportError:
_capstone = None
try:
import keystone as _keystone
except ImportError:
_keystone = None
try:
import unicorn as _unicorn
except ImportError:
_unicorn = None
from .arch import Arch, register_arch, Endness, Register
from .tls import TLSArchInfo
# FIXME: Tell fish to fix whatever he was storing in info['current_function']
# TODO: Only persist t9 in PIC programs
class ArchMIPS32(Arch):
def __init__(self, endness=Endness.BE):
super(ArchMIPS32, self).__init__(endness)
if endness == Endness.BE:
self.function_prologs = {
br"\x27\xbd\xff[\x00-\xff]" # addiu $sp, xxx
br"\x3c\x1c[\x00-\xff][\x00-\xff]\x9c\x27[\x00-\xff][\x00-\xff]" # lui $gp, xxx; addiu $gp, $gp, xxxx
}
self.function_epilogs = {
br"\x8f\xbf[\x00-\xff]{2}([\x00-\xff]{4}){0,4}\x03\xe0\x00\x08" # lw ra, off(sp); ... ; jr ra
}
self.qemu_name = 'mips'
self.triplet = 'mips-linux-gnu'
self.linux_name = 'mips'
bits = 32
vex_arch = "VexArchMIPS32"
name = "MIPS32"
ida_processor = 'mipsb'
qemu_name = 'mipsel'
linux_name = 'mipsel' # ???
triplet = 'mipsel-linux-gnu'
max_inst_bytes = 4
ret_offset = 16
syscall_num_offset = 16
call_pushes_ret = False
stack_change = -4
branch_delay_slot = True
sizeof = {'short': 16, 'int': 32, 'long': 32, 'long long': 64}
if _capstone:
cs_arch = _capstone.CS_ARCH_MIPS
cs_mode = _capstone.CS_MODE_32 + _capstone.CS_MODE_LITTLE_ENDIAN
if _keystone:
ks_arch = _keystone.KS_ARCH_MIPS
ks_mode = _keystone.KS_MODE_32 + _keystone.KS_MODE_LITTLE_ENDIAN
uc_arch = _unicorn.UC_ARCH_MIPS if _unicorn else None
uc_mode = (_unicorn.UC_MODE_32 + _unicorn.UC_MODE_LITTLE_ENDIAN) if _unicorn else None
uc_const = _unicorn.mips_const if _unicorn else None
uc_prefix = "UC_MIPS_" if _unicorn else None
function_prologs = {
br"[\x00-\xff]\xff\xbd\x27", # addiu $sp, xxx
br"[\x00-\xff][\x00-\xff]\x1c\x3c[\x00-\xff][\x00-\xff]\x9c\x27" # lui $gp, xxx; addiu $gp, $gp, xxxx
}
function_epilogs = {
br"[\x00-\xff]{2}\xbf\x8f([\x00-\xff]{4}){0,4}\x08\x00\xe0\x03" # lw ra, off(sp); ... ; jr ra
}
ret_instruction = b"\x08\x00\xE0\x03" + b"\x25\x08\x20\x00"
nop_instruction = b"\x00\x00\x00\x00"
instruction_alignment = 4
register_list = [
Register(name='zero', size=4, alias_names=('r0',)),
Register(name='at', size=4, alias_names=('r1',),
general_purpose=True),
Register(name='v0', size=4, alias_names=('r2',),
general_purpose=True, linux_entry_value='ld_destructor'),
Register(name='v1', size=4, alias_names=('r3',),
general_purpose=True),
Register(name='a0', size=4, alias_names=('r4',),
general_purpose=True, argument=True),
Register(name='a1', size=4, alias_names=('r5',),
general_purpose=True, argument=True),
Register(name='a2', size=4, alias_names=('r6',),
general_purpose=True, argument=True),
Register(name='a3', size=4, alias_names=('r7',),
general_purpose=True, argument=True),
Register(name='t0', size=4, alias_names=('r8',),
general_purpose=True),
Register(name='t1', size=4, alias_names=('r9',),
general_purpose=True),
Register(name='t2', size=4, alias_names=('r10',),
general_purpose=True),
Register(name='t3', size=4, alias_names=('r11',),
general_purpose=True),
Register(name='t4', size=4, alias_names=('r12',),
general_purpose=True),
Register(name='t5', size=4, alias_names=('r13',),
general_purpose=True),
Register(name='t6', size=4, alias_names=('r14',),
general_purpose=True),
Register(name='t7', size=4, alias_names=('r15',),
general_purpose=True),
Register(name='s0', size=4, alias_names=('r16',),
general_purpose=True),
Register(name='s1', size=4, alias_names=('r17',),
general_purpose=True),
Register(name='s2', size=4, alias_names=('r18',),
general_purpose=True),
Register(name='s3', size=4, alias_names=('r19',),
general_purpose=True),
Register(name='s4', size=4, alias_names=('r20',),
general_purpose=True),
Register(name='s5', size=4, alias_names=('r21',),
general_purpose=True),
Register(name='s6', size=4, alias_names=('r22',),
general_purpose=True),
Register(name='s7', size=4, alias_names=('r23',),
general_purpose=True),
Register(name='t8', size=4, alias_names=('r24',),
general_purpose=True),
Register(name='t9', size=4, alias_names=('r25',),
general_purpose=True, persistent=True),
Register(name='k0', size=4, alias_names=('r26',),
general_purpose=True),
Register(name='k1', size=4, alias_names=('r27',),
general_purpose=True),
Register(name='gp', size=4, alias_names=('r28',),
persistent=True),
Register(name='sp', size=4, alias_names=('r29',),
default_value=(Arch.initial_sp, True, 'global')),
Register(name='s8', size=4, alias_names=('r30', 'fp', 'bp'),
general_purpose=True),
Register(name='ra', size=4, alias_names=('r31', 'lr'),
general_purpose=True, persistent=True, linux_entry_value=0),
Register(name='pc', size=4, alias_names=('ip',)),
Register(name='hi', size=4, general_purpose=True),
Register(name='lo', size=4, general_purpose=True),
Register(name='f0', size=8, floating_point=True),
Register(name='f1', size=8, floating_point=True),
Register(name='f2', size=8, floating_point=True),
Register(name='f3', size=8, floating_point=True),
Register(name='f4', size=8, floating_point=True),
Register(name='f5', size=8, floating_point=True),
Register(name='f6', size=8, floating_point=True),
Register(name='f7', size=8, floating_point=True),
Register(name='f8', size=8, floating_point=True),
Register(name='f9', size=8, floating_point=True),
Register(name='f10', size=8, floating_point=True),
Register(name='f11', size=8, floating_point=True),
Register(name='f12', size=8, floating_point=True),
Register(name='f13', size=8, floating_point=True),
Register(name='f14', size=8, floating_point=True),
Register(name='f15', size=8, floating_point=True),
Register(name='f16', size=8, floating_point=True),
Register(name='f17', size=8, floating_point=True),
Register(name='f18', size=8, floating_point=True),
Register(name='f19', size=8, floating_point=True),
Register(name='f20', size=8, floating_point=True),
Register(name='f21', size=8, floating_point=True),
Register(name='f22', size=8, floating_point=True),
Register(name='f23', size=8, floating_point=True),
Register(name='f24', size=8, floating_point=True),
Register(name='f25', size=8, floating_point=True),
Register(name='f26', size=8, floating_point=True),
Register(name='f27', size=8, floating_point=True),
Register(name='f28', size=8, floating_point=True),
Register(name='f29', size=8, floating_point=True),
Register(name='f30', size=8, floating_point=True),
Register(name='f31', size=8, floating_point=True),
Register(name='fir', size=4, floating_point=True),
Register(name='fccr', size=4, floating_point=True),
Register(name='fexr', size=4, floating_point=True),
Register(name='fenr', size=4, floating_point=True),
Register(name='fcsr', size=4, floating_point=True),
Register(name='ulr', size=4),
Register(name='emnote', size=4),
Register(name='cmstart', size=4),
Register(name='cmlen', size=4),
Register(name='nraddr', size=4),
Register(name='cond', size=4),
Register(name='dspcontrol', size=4),
Register(name='ac0', size=8),
Register(name='ac1', size=8),
Register(name='ac2', size=8),
Register(name='ac3', size=8),
Register(name='cp0_status', size=4),
Register(name='ip_at_syscall', size=4),
]
# see https://github.com/radare/radare/blob/master/src/include/elf/mips.h
dynamic_tag_translation = {
0x70000001: 'DT_MIPS_RLD_VERSION',
0x70000002: 'DT_MIPS_TIME_STAMP',
0x70000003: 'DT_MIPS_ICHECKSUM',
0x70000004: 'DT_MIPS_IVERSION',
0x70000005: 'DT_MIPS_FLAGS',
0x70000006: 'DT_MIPS_BASE_ADDRESS',
0x70000007: 'DT_MIPS_MSYM',
0x70000008: 'DT_MIPS_CONFLICT',
0x70000009: 'DT_MIPS_LIBLIST',
0x7000000a: 'DT_MIPS_LOCAL_GOTNO',
0x7000000b: 'DT_MIPS_CONFLICTNO',
0x70000010: 'DT_MIPS_LIBLISTNO',
0x70000011: 'DT_MIPS_SYMTABNO',
0x70000012: 'DT_MIPS_UNREFEXTNO',
0x70000013: 'DT_MIPS_GOTSYM',
0x70000014: 'DT_MIPS_HIPAGENO',
0x70000016: 'DT_MIPS_RLD_MAP',
0x70000017: 'DT_MIPS_DELTA_CLASS',
0x70000018: 'DT_MIPS_DELTA_CLASS_NO',
0x70000019: 'DT_MIPS_DELTA_INSTANCE',
0x7000001a: 'DT_MIPS_DELTA_INSTANCE_NO',
0x7000001b: 'DT_MIPS_DELTA_RELOC',
0x7000001c: 'DT_MIPS_DELTA_RELOC_NO',
0x7000001d: 'DT_MIPS_DELTA_SYM',
0x7000001e: 'DT_MIPS_DELTA_SYM_NO',
0x70000020: 'DT_MIPS_DELTA_CLASSSYM',
0x70000021: 'DT_MIPS_DELTA_CLASSSYM_NO',
0x70000022: 'DT_MIPS_CXX_FLAGS',
0x70000023: 'DT_MIPS_PIXIE_INIT',
0x70000024: 'DT_MIPS_SYMBOL_LIB',
0x70000025: 'DT_MIPS_LOCALPAGE_GOTIDX',
0x70000026: 'DT_MIPS_LOCAL_GOTIDX',
0x70000027: 'DT_MIPS_HIDDEN_GOTIDX',
0x70000028: 'DT_MIPS_PROTECTED_GOTIDX',
0x70000029: 'DT_MIPS_OPTIONS',
0x7000002a: 'DT_MIPS_INTERFACE',
0x7000002b: 'DT_MIPS_DYNSTR_ALIGN',
0x7000002c: 'DT_MIPS_INTERFACE_SIZE',
0x7000002d: 'DT_MIPS_RLD_TEXT_RESOLVE_ADDR',
0x7000002e: 'DT_MIPS_PERF_SUFFIX',
0x7000002f: 'DT_MIPS_COMPACT_SIZE',
0x70000030: 'DT_MIPS_GP_VALUE',
0x70000031: 'DT_MIPS_AUX_DYNAMIC',
0x70000032: 'DT_MIPS_PLTGOT'
}
got_section_name = '.got'
ld_linux_name = 'ld.so.1'
elf_tls = TLSArchInfo(1, 8, [], [0], [], 0x7000, 0x8000)
register_arch([r'mipsel|mipsle'], 32, Endness.LE, ArchMIPS32)
register_arch([r'.*mips.*'], 32, 'any', ArchMIPS32)
|
the-stack_0_27434
|
# -*- coding: utf-8 -*-
from alphaware.base import (Factor,
FactorContainer)
from alphaware.enums import (FactorType,
FactorNormType,
NAStrategy)
from alphaware.utils import (fwd_return,
load_factor_data_from_csv)
from alphaware.analyzer import FactorIC
from alphaware.preprocess import (FactorNeutralizer,
FactorStandardizer,
FactorWinsorizer,
FactorImputer)
# Load the MV and PB data
data_pb = load_factor_data_from_csv('pb.csv')
data_mv = load_factor_data_from_csv('mv.csv') / 100000000
# Create Factor instances to store the data and related parameters
factor_pb = Factor(data=data_pb,
name='PB',
property_dict={'type': FactorType.ALPHA_FACTOR, 'norm_type': FactorNormType.Industry_Neutral})
factor_mv = Factor(data=data_mv,
name='MV',
property_dict={'type': FactorType.ALPHA_FACTOR_MV, 'norm_type': FactorNormType.Industry_Neutral})
# Load the monthly return data
data_return = load_factor_data_from_csv('return.csv')
# Convert the data to 1-month forward returns
data_return = fwd_return(data_return)
factor_return = Factor(data=data_return, name='1_Fwd_Return', property_dict={'type': FactorType.FWD_RETURN})
# Load industry classification data (Wind's early-year industry codes are incomplete; another data source may be better. Used here for illustration only.)
data_industry_code = load_factor_data_from_csv('sw.csv')
factor_industry_code = Factor(data=data_industry_code,
name='industry_code',
property_dict={'type': FactorType.INDUSTY_CODE})
# Create a FactorContainer instance and load all the factor information
fc = FactorContainer(start_date='2014-01-01',
end_date='2014-03-01',
factors=[factor_mv, factor_pb, factor_return, factor_industry_code])
# Step 1: handle the few N/A values, replacing them with the median
fc = FactorImputer(numerical_strategy=NAStrategy.MEDIAN,
categorical_strategy=NAStrategy.CUSTOM,
custom_value='other',
out_container=True).fit_transform(fc)
# Step 2: winsorize (clip extreme values)
fc = FactorWinsorizer(quantile_range=(5, 95),
out_container=True).fit_transform(fc)
# Step 3: standardize
fc = FactorStandardizer(out_container=True).fit_transform(fc)
# Step 4: neutralize
fc = FactorNeutralizer(out_container=True).fit_transform(fc)
# Step 5: compute the factor IC
ic = FactorIC().predict(fc)
print (ic)
|
the-stack_0_27436
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 28 12:31:05 2020
@author: grosati
"""
import pandas as pd
from imblearn.under_sampling import RandomUnderSampler
from imblearn.over_sampling import RandomOverSampler
#%%
type_proc = 'necop_pos_neg'
type_stopw = ''
type_os = ''
#%%
file_path = r'./data/proc/data_' + type_proc + '_proc' + type_os + type_stopw + '.csv'
#%%
data = pd.read_csv(file_path,lineterminator='\n')
data.dropna(subset=['texto_ed'], inplace=True)
#%% test & train data unbalanced
train_data = data.sample(frac=.75, random_state=2247)
test_data = data.drop(train_data.index)
train_data.to_csv('./data/proc/train_' + type_proc + type_os + '_' + type_stopw + '.csv')
test_data.to_csv('./data/proc/test_' + type_proc + type_os + '_' + type_stopw + '.csv')
#%% index under & oversampled
rus = RandomUnderSampler(random_state=9797)
rus.fit_resample(train_data[['texto']], train_data['etiqueta_final'])
ros = RandomOverSampler(random_state=1244)
ros.fit_resample(train_data[['texto']], train_data['etiqueta_final'])
train_data_us = train_data.iloc[rus.sample_indices_,]
train_data_os = train_data.iloc[ros.sample_indices_,]
#%% save data
train_data_us.to_csv('./data/proc/train_data_us_nostop.csv')
train_data_os.to_csv('./data/proc/train_data_os_nostop.csv')
|
the-stack_0_27437
|
class Solution:
def searchInsert(self, nums, target):
length = len(nums)
        if length == 1:
            if nums[0] < target:
                return length
            else:
                return length - 1  # i.e. 0: target goes at or before the only element
        for i in range(length):
            if nums[i] >= target:  # first index whose value is not smaller than target
                break
        if i == length - 1:
            if nums[i] == target:
                return i
            if target > nums[i]:  # target is larger than every element
                return i + 1
        return i
sol = Solution()
print(sol.searchInsert([1,3], 2))
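# For comparison only (an illustrative addition, not part of the original
# solution): the standard library's bisect module computes the same
# insertion index directly.
import bisect
print(bisect.bisect_left([1, 3], 2))  # 1, matching searchInsert([1, 3], 2)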
|
the-stack_0_27439
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 21 09:57:43 2019
@author: amandaash
"""
import numpy as np
import matplotlib.pyplot as plt
p = 2
v = 1
x = 0
m = 10
time_step = 0.0001
k = 3
t0 = 0
tf = 10
"""
x_val = []
v_val = []
time_array = np.arange(t0,tf, time_step)
for n in time_array:
v1 = v + (time_step/m)*(-k*x**(p-1))
x1 = x + time_step*v
x_val.append(x1)
v_val.append(v1)
v1 = v
x1 = x
plt.plot(time_array, x_val)
plt.show()
plt.plot(time_array, v_val)
plt.show()
"""
def harmonic_oscillator(p,k,v0,x0,m,time_step,t0,tf):
v = v0
x = x0
x_val = []
v_val = []
time_array = np.arange(t0,tf, time_step)
for n in time_array:
vf = v + (time_step/m)*(-k*x**(p-1))
xf = x + time_step*v
x_val.append(xf)
v_val.append(vf)
x = xf
v = vf
return x_val, v_val, time_array
#P_val = np.arange(2,8,2)
P_val = np.array([2,6,10])
fig1 = plt.figure()
ax1 = fig1.add_subplot(1, 1, 1)
fig2 = plt.figure()
ax2 = fig2.add_subplot(1,1,1)
for P_value in P_val:
x_P, v_P, t_P= harmonic_oscillator(P_value, 10, 0, 1, 1, 0.0001, 0, 10)
ax1.plot(x_P, t_P, label = "P = {0}".format(P_value))
ax2.plot(v_P, t_P, label = "P = {0}".format(P_value))
ax1.set_xlabel('distance')
ax1.set_ylabel('time')
ax1.legend()
fig1.savefig('spring_dt.pdf')
fig1.show()
ax2.set_xlabel('velocity')
ax2.set_ylabel('time')
ax2.legend()
fig2.savefig('spring_vt.pdf')
fig2.show()
#amplitude - frequency things:
fig3 = plt.figure()
ax3 = fig3.add_subplot(1,1,1)
x_ic = np.arange(0.5,2.0,0.5)
for amplitude in x_ic:
x_a, v_a, t_a = harmonic_oscillator(6, 10, 0, amplitude, 1, 0.0001, 0, 10)
ax3.plot(x_a, t_a, label = '$x_0$ = {0}'.format(amplitude))
ax3.set_title('P = 6, non-harmonic oscillator varying $x_0$')
ax3.set_xlabel('x')
ax3.set_ylabel('t')
ax3.legend()
fig3.savefig('non_harmonic_amplitude.pdf')
fig3.show()
#Going between the RK2 method and the Euler method from exercise 19, I see no
#difference between the two methods in either the position vs. time or velocity vs. time for the oscillator.
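# The closing comment above mentions comparing RK2 and Euler integration.
# Below is a minimal midpoint (RK2) sketch added for reference; it mirrors the
# signature of harmonic_oscillator above and is illustrative, not part of the
# original exercise code.
def harmonic_oscillator_rk2(p, k, v0, x0, m, time_step, t0, tf):
    v = v0
    x = x0
    x_val = []
    v_val = []
    time_array = np.arange(t0, tf, time_step)
    for n in time_array:
        # midpoint estimates of position and velocity
        x_mid = x + 0.5 * time_step * v
        v_mid = v + 0.5 * (time_step / m) * (-k * x ** (p - 1))
        # full step using the midpoint derivatives
        x = x + time_step * v_mid
        v = v + (time_step / m) * (-k * x_mid ** (p - 1))
        x_val.append(x)
        v_val.append(v)
    return x_val, v_val, time_array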
|
the-stack_0_27440
|
#MIT License
#Copyright (c) 2021 SUBIN
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton, CallbackQuery
from pyrogram.errors import MessageNotModified
from pyrogram import Client, emoji
from utils import mp, playlist
from config import Config
HELP = """
<b>
Use /play <song name> or use /play as a reply to an audio file or youtube link.
Use /yplay to play all the songs of a youtube playlist.
You can also use <code>/splay song name</code> to play a song from Jio Saavn or <code>/splay -a album name</code> to play all the songs from a jiosaavn album or /cplay <channel username or channel id> to play music from a telegram channel.</b>
**Common Commands**:
**/play** Reply to an audio file or YouTube link to play it or use /play <song name>.
**/splay** Play music from Jio Saavn, Use /splay <song name> or <code>/splay -a album name</code> to play all the songs from that album.
**/player** Show current playing song.
**/upload** Uploads current playing song as audio file.
**/help** Show help for commands
**/playlist** Shows the playlist.
**Admin Commands**:
**/skip** [n] ... Skip current or n where n >= 2.
**/cplay** Play music from a channel's music files.
**/yplay** Play music from a youtube playlist.
**/join** Join voice chat.
**/leave** Leave current voice chat
**/shuffle** Shuffle Playlist.
**/vc** Check which VC is joined.
**/stop** Stop playing.
**/radio** Start Radio.
**/stopradio** Stops Radio Stream.
**/clearplaylist** Clear the playlist.
**/export** Export current playlist for future use.
**/import** Import a previously exported playlist.
**/replay** Play from the beginning.
**/clean** Remove unused RAW PCM files.
**/pause** Pause playing.
**/resume** Resume playing.
**/volume** Change volume(0-200).
**/chup** Mute in VC.
**/unmute** Unmute in VC.
**/restart** Update and restarts the Bot.
"""
@Client.on_callback_query()
async def cb_handler(client: Client, query: CallbackQuery):
admins = await mp.get_admins(Config.CHAT)
if query.from_user.id not in admins and query.data != "help":
await query.answer(
"😒 Played Joji.mp3",
show_alert=True
)
return
else:
await query.answer()
if query.data == "replay":
group_call = mp.group_call
if not playlist:
return
group_call.restart_playout()
if not playlist:
pl = f"{emoji.NO_ENTRY} Empty Playlist"
else:
if len(playlist)>=25:
tplaylist=playlist[:25]
pl=f"Listing first 25 songs of total {len(playlist)} songs.\n"
pl += f"{emoji.PLAY_BUTTON} **Playlist**:\n" + "\n".join([
f"**{i}**. **🎸{x[1]}**\n 👤**Requested by:** {x[4]}"
for i, x in enumerate(tplaylist)
])
else:
pl = f"{emoji.PLAY_BUTTON} **Playlist**:\n" + "\n".join([
f"**{i}**. **🎸{x[1]}**\n 👤**Requested by:** {x[4]}\n"
for i, x in enumerate(playlist)
])
try:
await query.edit_message_text(
f"{pl}",
parse_mode="Markdown",
reply_markup=InlineKeyboardMarkup(
[
[
InlineKeyboardButton("🔄", callback_data="replay"),
InlineKeyboardButton("⏯", callback_data="pause"),
InlineKeyboardButton("⏩", callback_data="skip")
],
]
)
)
except MessageNotModified:
pass
elif query.data == "pause":
if not playlist:
return
else:
mp.group_call.pause_playout()
if len(playlist)>=25:
tplaylist=playlist[:25]
pl=f"Listing first 25 songs of total {len(playlist)} songs.\n"
pl += f"{emoji.PLAY_BUTTON} **Playlist**:\n" + "\n".join([
f"**{i}**. **🎸{x[1]}**\n 👤**Requested by:** {x[4]}"
for i, x in enumerate(tplaylist)
])
else:
pl = f"{emoji.PLAY_BUTTON} **Playlist**:\n" + "\n".join([
f"**{i}**. **🎸{x[1]}**\n 👤**Requested by:** {x[4]}\n"
for i, x in enumerate(playlist)
])
try:
await query.edit_message_text(f"{emoji.PLAY_OR_PAUSE_BUTTON} Paused\n\n{pl},",
disable_web_page_preview=True,
reply_markup=InlineKeyboardMarkup(
[
[
InlineKeyboardButton("🔄", callback_data="replay"),
InlineKeyboardButton("⏯", callback_data="resume"),
InlineKeyboardButton("⏩", callback_data="skip")
],
]
)
)
except MessageNotModified:
pass
elif query.data == "resume":
if not playlist:
return
else:
mp.group_call.resume_playout()
if len(playlist)>=25:
tplaylist=playlist[:25]
pl=f"Listing first 25 songs of total {len(playlist)} songs.\n"
pl += f"{emoji.PLAY_BUTTON} **Playlist**:\n" + "\n".join([
f"**{i}**. **🎸{x[1]}**\n 👤**Requested by:** {x[4]}"
for i, x in enumerate(tplaylist)
])
else:
pl = f"{emoji.PLAY_BUTTON} **Playlist**:\n" + "\n".join([
f"**{i}**. **🎸{x[1]}**\n 👤**Requested by:** {x[4]}\n"
for i, x in enumerate(playlist)
])
try:
await query.edit_message_text(f"{emoji.PLAY_OR_PAUSE_BUTTON} Resumed\n\n{pl}",
disable_web_page_preview=True,
reply_markup=InlineKeyboardMarkup(
[
[
InlineKeyboardButton("🔄", callback_data="replay"),
InlineKeyboardButton("⏯", callback_data="pause"),
InlineKeyboardButton("⏩", callback_data="skip")
],
]
)
)
except MessageNotModified:
pass
elif query.data=="skip":
if not playlist:
return
else:
await mp.skip_current_playing()
if len(playlist)>=25:
tplaylist=playlist[:25]
pl=f"Listing first 25 songs of total {len(playlist)} songs.\n"
pl += f"{emoji.PLAY_BUTTON} **Playlist**:\n" + "\n".join([
f"**{i}**. **🎸{x[1]}**\n 👤**Requested by:** {x[4]}"
for i, x in enumerate(tplaylist)
])
else:
pl = f"{emoji.PLAY_BUTTON} **Playlist**:\n" + "\n".join([
f"**{i}**. **🎸{x[1]}**\n 👤**Requested by:** {x[4]}\n"
for i, x in enumerate(playlist)
])
try:
await query.edit_message_text(f"{emoji.PLAY_OR_PAUSE_BUTTON} Skipped\n\n{pl}",
disable_web_page_preview=True,
reply_markup=InlineKeyboardMarkup(
[
[
InlineKeyboardButton("🔄", callback_data="replay"),
InlineKeyboardButton("⏯", callback_data="pause"),
InlineKeyboardButton("⏩", callback_data="skip")
],
]
)
)
except MessageNotModified:
pass
elif query.data=="help":
buttons = [
[
InlineKeyboardButton('⚙️ My Master', url='https://t.me/toxic_rahul'),
InlineKeyboardButton('🧩 Source', url='https://t.me/toxicrahul_musicbot'),
]
]
reply_markup = InlineKeyboardMarkup(buttons)
try:
await query.edit_message_text(
HELP,
reply_markup=reply_markup
)
except MessageNotModified:
pass
|
the-stack_0_27441
|
#!/usr/bin/env python3
"""An HTML-based URL shortener for static sites."""
import argparse
import configparser
import os
import random
import re
import string
import sys
def duplicate_check(checkstring, site_directory):
"""Given a site directory and a test string, check to see if we've already created
a .html file containing this test string"""
for root, dir, files in os.walk(site_directory): # pylint: disable=unused-variable,redefined-builtin
for name in files:
file_extension = os.path.splitext(name)
if file_extension[1] == ".html":
try:
to_check = open(site_directory + '/' + name, 'r')
except OSError as exception:
print(exception)
print("Failed to open site file for duplicate checking.")
sys.exit(1)
if re.search(checkstring, to_check.read()):
print("This target already exists as slug " + file_extension[0])
sys.exit(0)
to_check.close()
# Create our parser object, define the URL params we take, and parse them
parser = argparse.ArgumentParser(description="A Github Pages based URL shortener.")
parser.add_argument("url", help="The target URL.")
parser.add_argument("--slug", help="Define the slug manually")
args = parser.parse_args()
# This generates a minimal HTML document that performs the browser redirect.
HTML_RESULT = """<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8" />
<meta http-equiv="Refresh" content="0; url='{target}'" />
<title>Redirecting...</title>
<link rel="icon" type="image/png" href="data:image/png;base64,">
</head>
</html>""".format(target=args.url)
# We assume that if this string occurs in any HTML file within the target directory, this means
# we already have a slug for the given target URL.
DUP_CHECK = """<meta http-equiv="Refresh" content="0; url='{target}'" />""".format(target=args.url)
# Attempt to load the config file, gracefully handling the cases
# where we can't for whatever reason.
config = configparser.ConfigParser()
try:
config.read('duck.ini')
except configparser.Error as exception:
print(exception)
print("Error parsing the config file.")
sys.exit(1)
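# For reference, a duck.ini matching the keys this script reads might look like
# the following (the values are illustrative placeholders):
#
#   [config]
#   sitedir = ./site
#
#   [options]
#   sluglength = 6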
site_dir = config.get('config', 'sitedir')
# If the user provides a custom slug, use that. Otherwise, auto-generate
# a slug.
SLUG = ''
if args.slug:
SLUG = args.slug
else:
slug_length = config.get('options', 'sluglength')
for length in range(int(slug_length)):
SLUG = SLUG + random.choice(string.ascii_letters)
# Do some sanity checking
# Ensure the site directory exists
if not os.path.isdir(site_dir):
print("Configured site directory doesn't exist.")
sys.exit(1)
# Ensure the slug doesn't exist already
if os.path.isfile(site_dir + '/' + SLUG + '.html'):
if args.slug:
print("Slug already exists.")
sys.exit(1)
else:
print("Slug already exists, please re-run to regenerate the slug.")
sys.exit(1)
# Check for duplicates
duplicate_check(DUP_CHECK, site_dir)
# Write out our Refresh file
try:
writefile = open(site_dir + '/' + SLUG + '.html', 'x')
writefile.write(HTML_RESULT)
writefile.close()
except OSError as exception:
print(exception)
print("Error saving the site file.")
sys.exit(1)
# Let the user know we succeeded and give them the slug
print("Success! Slug is " + SLUG)
|
the-stack_0_27443
|
from nayuki_qrcodegen.qrcodegen import *
import os
def binary_to_bitmap(data, write_svg=False):
"""
Returns a QR Code bitmap for the binary data with the following properties:
- Pixels are referenced by bitmap[y][x] with top left corner (x=0, y=0)
- Pixel values are False = white, True = black
"""
qr_code = QrCode.encode_binary(data, QrCode.Ecc.LOW)
if write_svg:
qr_code_svg = open('qr-code.svg', 'w')
qr_code_svg.write(qr_code.to_svg_str(1))
qr_code_svg.close()
return qr_code._modules
def string_to_bitmap(string, write_svg=False):
"""
Returns a QR Code bitmap for the ASCII string with the following properties:
- Pixels are referenced by bitmap[y][x] with top left corner (x=0, y=0)
- Pixel values are False = white, True = black
"""
return binary_to_bitmap(string.encode('ascii'), write_svg)
def bitmap_to_repr(bitmap, black='[]', white=' ', eol='\n'):
"""
Returns a string representation of a QR Code bitmap.
"""
string = ''
for col in bitmap:
for val in col:
if val:
string += black
else:
string += white
string += eol
return string
def otp_bitmap(size=2953, write_svg=False):
"""
Returns a tuple including a cryptographically secure one time pad byte array
and its QR Code bitmap.
"""
otp = os.urandom(size)
return (otp, binary_to_bitmap(otp, write_svg))
def string_and_otp_bitmap(string, sepchar, size, write_svg=False):
"""
Returns a tuple including a byte array containing first a string, separating character,
and a cryptographically secure one time pad and second its QR Code bitmap.
"""
otp = os.urandom(size)
return (otp, binary_to_bitmap(bytearray().join([(string + sepchar).encode('ascii'), otp]), write_svg))
'''
def write_bitmap_to_canvas(bitmap, w):
"""
Writes a bitmap to a Tkinter canvas for scanning purposes
"""
    bitmap_size = len(bitmap)
    w_width = w.winfo_width()
    w_height = w.winfo_height()
    w.create_rectangle(0, 0, w_width, w_height, fill="white", outline="")
    x_offset = 0
    y_offset = 0
    w_size = 0
    if w_width < w_height:
        w_size = w_width
        y_offset = (w_height - w_width) // 2
    else:
        w_size = w_height
        x_offset = (w_width - w_height) // 2
    rect_size = w_size // bitmap_size
    for y in range(bitmap_size):
        for x in range(bitmap_size):
            if bitmap[y][x]:
                w.create_rectangle(x * rect_size + x_offset, y * rect_size + y_offset,
                                   (x + 1) * rect_size + x_offset, (y + 1) * rect_size + y_offset,
                                   fill="black", outline="")
            else:
                w.create_rectangle(x * rect_size + x_offset, y * rect_size + y_offset,
                                   (x + 1) * rect_size + x_offset, (y + 1) * rect_size + y_offset,
                                   fill="white", outline="")
    w.after(20, write_bitmap_to_canvas)
'''
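# Minimal illustrative demo (not part of the original module): render a short
# string as a text QR code on stdout when this file is run directly.
if __name__ == '__main__':
    demo_bitmap = string_to_bitmap('hello world')
    print(bitmap_to_repr(demo_bitmap))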
|
the-stack_0_27446
|
import argparse
import csv
from collections import defaultdict
import numpy as np
import sys
def main(args):
missinginfo_scores = defaultdict(list)
missinginfo_score_histogram = defaultdict(int)
model_scores_by_hit = {'bart': defaultdict(list), 'bart_misinfo': defaultdict(list), 'bart_misinfo_pplm': defaultdict(list), 'ref': defaultdict(list), 'gan': defaultdict(list)}
model_scores = {'bart': [], 'bart_misinfo': [], 'bart_misinfo_pplm': [], 'ref': [], 'gan': []}
worker_scores = defaultdict(list)
with open(args.mturk_batch1_results_file) as csv_results_file:
csv_reader = csv.DictReader(csv_results_file)
for row in csv_reader:
hit_id = row['HITId']
model = row['Input.model']
worker_id = row['WorkerId']
for i in range(3):
if row['Answer.missinginfo_%d.%d' % (i, i)] == 'true':
missinginfo_scores[hit_id].append(i)
missinginfo_score_histogram[i] += 1
if i == 2 or i == 1:
model_scores_by_hit[model][hit_id].append(1)
elif i == 0:
model_scores_by_hit[model][hit_id].append(0)
worker_scores[worker_id].append(i)
with open(args.mturk_batch2_results_file) as csv_results_file:
csv_reader = csv.DictReader(csv_results_file)
for row in csv_reader:
hit_id = row['HITId']
model = row['Input.model']
worker_id = row['WorkerId']
for i in range(2):
if row['Answer.missinginfo_%d.%d' % (i, i)] == 'true':
missinginfo_scores[hit_id].append(i)
missinginfo_score_histogram[i] += 1
if i == 1:
model_scores_by_hit[model][hit_id].append(1)
elif i == 0:
model_scores_by_hit[model][hit_id].append(0)
worker_scores[worker_id].append(i)
with open(args.mturk_batch3_results_file) as csv_results_file:
csv_reader = csv.DictReader(csv_results_file)
for row in csv_reader:
hit_id = row['HITId']
model = row['Input.model']
worker_id = row['WorkerId']
for i in range(2):
if row['Answer.missinginfo_%d.%d' % (i, i)] == 'true':
missinginfo_scores[hit_id].append(i)
missinginfo_score_histogram[i] += 1
if i == 1:
model_scores_by_hit[model][hit_id].append(1)
elif i == 0:
model_scores_by_hit[model][hit_id].append(0)
worker_scores[worker_id].append(i)
with open(args.mturk_batch4_results_file) as csv_results_file:
csv_reader = csv.DictReader(csv_results_file)
for row in csv_reader:
hit_id = row['HITId']
model = row['Input.model']
worker_id = row['WorkerId']
for i in range(2):
if row['Answer.missinginfo_%d.%d' % (i, i)] == 'true':
missinginfo_scores[hit_id].append(i)
missinginfo_score_histogram[i] += 1
if i == 1:
model_scores_by_hit[model][hit_id].append(1)
elif i == 0:
model_scores_by_hit[model][hit_id].append(0)
worker_scores[worker_id].append(i)
for model in model_scores_by_hit:
for hit_id in model_scores_by_hit[model]:
# print(model_scores_by_hit[model][hit_id])
if model_scores_by_hit[model][hit_id].count(1) > model_scores_by_hit[model][hit_id].count(0):
model_scores[model].append(1)
else:
model_scores[model].append(0)
# print('\n\n')
missinginfo_score_var = 0
missinginfo_agreement = 0
for hit_id in missinginfo_scores:
print(missinginfo_scores[hit_id])
missinginfo_score_var += np.var(missinginfo_scores[hit_id])
for i in range(2):
if missinginfo_scores[hit_id].count(i) > 1:
# if missinginfo_scores[hit_id].count(i) > 2 or (missinginfo_scores[hit_id].count(i) + missinginfo_scores[hit_id].count(i-1)) > 2 or (missinginfo_scores[hit_id].count(i) + missinginfo_scores[hit_id].count(i+1)) > 2:
missinginfo_agreement += 1
break
# print('missinginfo score variance %.2f' % (missinginfo_score_var*1./len(missinginfo_scores)))
# print('missinginfo agreement: %d out of %d' % (missinginfo_agreement, len(missinginfo_scores)))
for model in model_scores:
print('%s: %.2f' % (model, sum(model_scores[model])*1.0/len(model_scores[model])))
# print('missinginfo score histogram')
# print(missinginfo_score_histogram)
# for worker_id in worker_scores:
# print(worker_id)
# print(worker_scores[worker_id])
# print(worker_scores[worker_id].count(2))
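# Illustrative invocation (file names are placeholders, not from the original):
#   python <this_script>.py \
#       --mturk_batch1_results_file batch1.csv \
#       --mturk_batch2_results_file batch2.csv \
#       --mturk_batch3_results_file batch3.csv \
#       --mturk_batch4_results_file batch4.csv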
if __name__ == "__main__":
argparser = argparse.ArgumentParser(sys.argv[0])
argparser.add_argument("--mturk_batch1_results_file", type = str)
argparser.add_argument("--mturk_batch2_results_file", type = str)
argparser.add_argument("--mturk_batch3_results_file", type = str)
argparser.add_argument("--mturk_batch4_results_file", type = str)
args = argparser.parse_args()
print(args)
main(args)
|
the-stack_0_27447
|
"""FitFunction.py
Data definitions and examples of functional forms to be used in fitting.
Definitions:
fit_function:
Function that given an x value and a list of parameters and constants,
returns a y value, which is the fit.
A fit_function has the form:
f(x, params, const_list, const_dict) -> y
The FitFunction object defined below satisfies this definition
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from constants import FF_NAME_PREF, FF_NAME_SEP, FF_NAME_SUFF
from constants import FF_CODE_PREF, FF_CODE_SEP, FF_CODE_SUFF
class FitFunction:
"""A 'function' that is designed for use in fitting, particularly with
the meta-fit algorithm.
"""
def __init__(
self, func, num_fit_params, name=None, code='',
force_zero=None, force_zero_func=None,
force_k=None, force_k_func=None
):
"""Initializes a FitFunction
:param func: defines the functional form of the fit. This should
satisfy the definition of a fit_function (see top of file)
:param num_fit_params: number of fit parameters
:param name: name of the fit func
:param code: abbreviated name for use in file names
:param force_zero: if not None, the fit will be forced to zero for
this value of x.
The functional form becomes
f'(x) = f(x) - f(x0)
:param force_zero_func: if not None (and force_zero is None), this
func, f(const_dict) -> x0, is applied to the const_dict to
determine x0, the x value for which the fit should be 0
The functional form becomes
f'(x) = f(x) - f(x0)
:param force_k: if not None (and force_zero and force_zero_func are
None), this 2-tuple (x0, k) defines a point that the fit should be
forced through.
The functional form becomes
f'(x) = f(x) - f(x0) + k
:param force_k_func: if not None (and force_zero and force_zero_func
and force_k are None), this func, f(const_dict) -> (x0, k),
defines a point that the fit should be forced through.
The functional form becomes
f'(x) = f(x) - f(x0) + k
"""
self.fn = func
self.num_fit_params = num_fit_params
self.fz = force_zero
self.fzfn = force_zero_func
self.fk = force_k
self.fkfn = force_k_func
self.name = name
self.code = code
self._set_name()
self._set_code()
self.__name__ = self.name
def __call__(self, x, params, const_list, const_dict):
if self.fz is not None:
f = self.fn
return (
f(x, params, const_list, const_dict) -
f(self.fz, params, const_list, const_dict)
)
elif self.fzfn is not None:
f = self.fn
x0 = self.fzfn(const_dict)
return (
f(x, params, const_list, const_dict) -
f(x0, params, const_list, const_dict)
)
elif self.fk is not None:
f = self.fn
x0, k = self.fk
return (
f(x, params, const_list, const_dict) -
f(x0, params, const_list, const_dict) +
k
)
elif self.fkfn is not None:
f = self.fn
x0, k = self.fkfn(const_dict)
return (
f(x, params, const_list, const_dict) -
f(x0, params, const_list, const_dict) +
k
)
else:
return self.fn(x, params, const_list, const_dict)
def _set_name(self):
if self.name is None:
self.name = self.fn.__name__
        if self.fz is not None:
            self.name += ' force zero {}'.format(self.fz)
        elif self.fzfn is not None:
            self.name += ' force zero with {}'.format(self.fzfn.__name__)
        elif self.fk is not None:
            self.name += ' force point {}'.format(self.fk)
        elif self.fkfn is not None:
            self.name += ' force point with {}'.format(self.fkfn.__name__)
def _set_code(self):
        if self.code == '':
            self.code = self.name if self.name is not None else self.fn.__name__
        if self.fz is not None:
            self.code += 'fz{}'.format(self.fz)
        elif self.fzfn is not None:
            self.code += 'fz:' + str(self.fzfn.__name__[6:])
        elif self.fk is not None:
            self.code += 'fk{}'.format(self.fk)
        elif self.fkfn is not None:
            self.code += 'fk:' + str(self.fkfn.__name__[6:])
def combine_ffns(
list_of_ffn, force_zero=None,
_name_pref=FF_NAME_PREF,
_name_sep=FF_NAME_SEP,
_name_suff=FF_NAME_SUFF,
_code_pref=FF_CODE_PREF,
_code_sep=FF_CODE_SEP,
_code_suff=FF_CODE_SUFF,
**kwargs
):
"""Linearly combines multiple fit functions (and/or dependencies)
into one fit function.
:param list_of_ffn: A list of FitFunctions to combine into one
:param force_zero: (optional) force the zero of the overall result to a
specified point
:param _name_pref: prefix for the name of the combined fit function
:param _name_sep: separator to go between fit functions
:return: A combined fit function object, which may be used to optimize with
respect to all of the degrees of freedom of its sub-functions
:param _name_suff: suffix for the name of the combined fit function
:param _code_pref: prefix for the code associated with the combined fit
function
:param _code_sep: separator to go between fit function codes
:param _code_suff: suffix for the code of the combined fit function
"""
params_lengths = [ffn.num_fit_params for ffn in list_of_ffn]
params_breaks = [0]
for pl, i in zip(params_lengths, range(len(params_lengths))):
params_breaks.append(pl + params_breaks[i])
total_params_length = params_breaks[-1]
combined_name = _name_pref
combined_code = _code_pref
for ffn in list_of_ffn:
combined_name += ffn.name + _name_sep
combined_code += ffn.code + _code_sep
combined_name = combined_name[:combined_name.rfind(_name_sep)] + _name_suff
combined_code = combined_code[:combined_code.rfind(_code_sep)] + _code_suff
def combined_ffns(x, params, const_list, const_dict):
result = 0
for fitfn, ii, jj in zip(list_of_ffn, params_breaks, params_breaks[1:]):
result += fitfn(x, params[ii:jj], const_list, const_dict)
return result
return FitFunction(
func=combined_ffns, num_fit_params=total_params_length,
force_zero=force_zero, name=combined_name, code=combined_code, **kwargs
)
# INDEPENDENT
def scalar():
"""Returns a scalar fit function
y(x) = const
"""
# noinspection PyUnusedLocal
def sf(x, params, const_list, const_dict):
a = params[0]
return a
return FitFunction(func=sf, num_fit_params=1, name='scalar', code='s')
def x1(force_zero=None, **kwargs):
"""Returns a fit function of the form
y(x) = a0 * x,
where a0 is the fit parameter
"""
# noinspection PyUnusedLocal
def x1f(x, params, const_list, const_dict):
a = params[0]
return a * x
return FitFunction(func=x1f, num_fit_params=1, force_zero=force_zero,
name='x^1', code='x1', **kwargs)
def linear(force_zero=None, **kwargs):
"""Returns a fit function of the form
y(x) = a0 + a1 * x,
where a0 and a1 are fit parameters
"""
if force_zero is None and len(kwargs) == 0:
# noinspection PyUnusedLocal
def lf(x, params, const_list, const_dict):
a, b = params[0:2]
return a * x + b
return FitFunction(
func=lf, num_fit_params=2, force_zero=force_zero,
name='linear', code='p1', **kwargs
)
else:
# noinspection PyUnusedLocal
def lf(x, params, const_list, const_dict):
a = params[0]
return a * x
return FitFunction(
func=lf, num_fit_params=1, force_zero=force_zero,
name='linear', code='p1', **kwargs
)
def x2(force_zero=None, **kwargs):
"""Returns a fit function of the form
y(x) = a0 * x^2
"""
# noinspection PyUnusedLocal
def x2f(x, params, const_list, const_dict):
a = params[0]
return a * x ** 2
return FitFunction(func=x2f, num_fit_params=1, force_zero=force_zero,
name='x^2', code='x2', **kwargs)
def quadratic(force_zero=None, **kwargs):
"""Returns a fit function of the form
y(x) = a0 + a1 * x + a2 * x^2
"""
if force_zero is None and len(kwargs) == 0:
# noinspection PyUnusedLocal
def qf(x, params, const_list, const_dict):
a, b, c = params[0:3]
return np.polyval([a, b, c], x)
return FitFunction(
func=qf, num_fit_params=3, force_zero=force_zero,
name='quadratic', code='p2', **kwargs
)
else:
# noinspection PyUnusedLocal
def qf(x, params, const_list, const_dict):
a, b = params[0:2]
return np.polyval([a, b, 0], x)
return FitFunction(
func=qf, num_fit_params=2, force_zero=force_zero,
name='quadratic', code='p2', **kwargs
)
def x_power(n, force_zero=None, **kwargs):
"""Returns a fit function of the form
y(x) = x^n
"""
# noinspection PyUnusedLocal
def xnf(x, params, const_list, const_dict):
a = params[0]
return a * x ** n
return FitFunction(
func=xnf, num_fit_params=1, force_zero=force_zero,
name='x^{}'.format(n), code='x{}'.format(n), **kwargs
)
def poly(n, force_zero=None, **kwargs):
"""Returns a fit function that is a polynomial of degree n
y(x) = a0 + a1 * x + a2 * x^2 + ... + an * x^n
"""
if force_zero is None and len(kwargs) == 0:
# noinspection PyUnusedLocal
def pf(x, params, const_list, const_dict):
return np.polyval(params, x)
return FitFunction(
func=pf, num_fit_params=n + 1, force_zero=force_zero,
name='poly{}'.format(n), code='p{}'.format(n), **kwargs
)
else:
# noinspection PyUnusedLocal
def pf(x, params, const_list, const_dict):
return np.polyval(np.concatenate((params, np.zeros(1))), x)
return FitFunction(
func=pf, num_fit_params=n, force_zero=force_zero,
name='poly{}'.format(n), code='p{}'.format(n), **kwargs
)
def asymptote(n, force_zero=None, **kwargs):
"""Returns a fit function of the form
y(x) = - a0 / x^n
I do not remember why I bothered putting the minus sign in the functional
form, as that could have easily been absorbed into the constant, but
whatevs broseph
"""
# noinspection PyUnusedLocal
def af(x, params, const_list, const_dict):
a = params[0]
return - a / x ** n
return FitFunction(
func=af, num_fit_params=1, force_zero=force_zero,
name='asymptote{}'.format(n), code='a{}'.format(n), **kwargs
)
def asymptote_n(force_zero=None, **kwargs):
"""Returns a fit function of the form
y(x) = - a0 / x^(a1),
where a0 and a1 are the fit parameters
"""
# noinspection PyUnusedLocal
def anf(x, params, const_list, const_dict):
a, n = params[0:2]
return - a / x ** n
return FitFunction(
func=anf, num_fit_params=2,
force_zero=force_zero, name='asymptote_n', code='an', **kwargs
)
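# Illustrative usage of combine_ffns with the constructors defined above
# (parameter values are placeholders): combine a quadratic with a first-order
# asymptote so that y(x) = a0*x^2 + a1*x + a2 - a3/x.
#
#   fitfn = combine_ffns([quadratic(), asymptote(1)])
#   y = fitfn(2.0, [1.0, 0.5, 0.3, 2.0], const_list=[], const_dict={})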
# DEPENDENTS
# Allow a dependence on a particular constant in the const_dict, as identified
# by keys in dep_keys
def scalar_dependence(dep_keys, ctfs=list()):
"""Returns a fit function that allows scalar dependence on the constants
associated with the given dep_keys
y(x) = a0 * b0 + a1 * b1 + a2 * b2 + ...,
where the a's are the fit parameters and the b's are the constants
associated with the dep_keys in the const_dict or the constants
constructed by the constant transform functions (ctfs)
"""
return _dependence(
f=lambda p, x: p[0], n_params=1, dep_keys=dep_keys,
ctfs=ctfs, name='scalar dependence', code='s:{}'
)
def x1_dependence(dep_keys, ctfs=list(), force_zero=None, **kwargs):
"""Returns a fit function that allows x depdendence on the constants
associated with each of the dep_keys
y(x) = (a0 * b0 + a1 * b1 + a2 * b2 + ...) * x,
where each of the a's are fit parameters and each of the b's are either
a constant associated with the keys in dep_keys or a constant constructed
by a ctf (constant transform function) in ctfs
"""
return _dependence(
f=lambda p, x: p[0] * x, n_params=1,
dep_keys=dep_keys, ctfs=ctfs,
force_zero=force_zero, name='x dependence', code='x1:{}', **kwargs
)
def linear_dependence(dep_keys, ctfs=list(), force_zero=None, **kwargs):
"""Returns a fit function that allows linear depdendence on the constants
associated with each of the dep_keys
y(x) = (a0,0 * b0,0 + a1,0 * b1,0 + ...) +
(a0,1 * b0,1 + a1,1 * b1,1 + ...) * x,
where each of the a's are fit parameters and each of the b's are either
a constant associated with the keys in dep_keys or a constant constructed
by a ctf (constant transform function) in ctfs
"""
if force_zero is None and len(kwargs) == 0:
return _dependence(
f=np.polyval, n_params=2,
dep_keys=dep_keys, ctfs=ctfs, force_zero=force_zero,
name='linear dependence', code='p1:{}', **kwargs
)
else:
return _dependence(
f=lambda p, x: np.polyval(np.concatenate((p, np.zeros(1))), x),
n_params=1, dep_keys=dep_keys, ctfs=ctfs, force_zero=force_zero,
name='linear dependence', code='p1:{}', **kwargs
)
def x2_dependence(dep_keys, ctfs=list(), force_zero=None, **kwargs):
"""Returns a fit function that allows x^2 depdendence on the constants
associated with each of the dep_keys
y(x) = (a0 * b0 + a1 * b1 + ...) * x^2,
where each of the a's are fit parameters and each of the b's are either
a constant associated with the keys in dep_keys or a constant constructed
by a ctf (constant transform function) in ctfs
"""
return _dependence(
f=lambda p, x: p[0] * x ** 2, n_params=1,
dep_keys=dep_keys, ctfs=ctfs, force_zero=force_zero,
name='x^2 dependence', code='x2:{}', **kwargs
)
def quadratic_dependence(dep_keys, ctfs=list(), force_zero=None, **kwargs):
"""Returns a fit function that allows quadratic depdendence on
the constants associated with each of the dep_keys
y(x) = (a0,0 * b0,0 + a1,0 * b1,0 + ...) +
(a0,1 * b0,1 + a1,1 * b1,1 + ...) * x +
(a0,2 * b0,2 + a1,2 * b1,2 + ...) * x^2,
where each of the a's are fit parameters and each of the b's are either
a constant associated with the keys in dep_keys or a constant constructed
by a ctf (constant transform function) in ctfs
"""
if force_zero is None and len(kwargs) == 0:
return _dependence(
f=np.polyval, n_params=3,
dep_keys=dep_keys, ctfs=ctfs, force_zero=force_zero,
name='quadratic dependence', code='p2:{}', **kwargs
)
else:
return _dependence(
f=lambda p, x: np.polyval(np.concatenate((p, np.zeros(1))), x),
n_params=2, dep_keys=dep_keys, ctfs=ctfs, force_zero=force_zero,
name='quadratic dependence', code='p2:{}', **kwargs
)
def x_power_dependence(n, dep_keys, ctfs=list(), force_zero=None, **kwargs):
"""Returns a fit function that allows x^n depdendence on the constants
associated with each of the dep_keys
y(x) = (a0 * b0 + a1 * b1 + ...) * x^n
where each of the a's are fit parameters and each of the b's are either
a constant associated with the keys in dep_keys or a constant constructed
by a ctf (constant transform function) in ctfs
"""
return _dependence(
f=lambda p, x: p[0] * x ** n, n_params=1,
dep_keys=dep_keys, ctfs=ctfs, force_zero=force_zero,
name='x^{} dependence'.format(n), code='x{}'.format(n)+':{}', **kwargs
)
def poly_dependence(n, dep_keys, ctfs=list(), force_zero=None, **kwargs):
"""Returns a fit function that allows polynomial depdendence on the
constants associated with each of the dep_keys
y(x) = (a0,0 * b0,0 + a1,0 * b1,0 + ...) +
(a0,1 * b0,1 + a1,1 * b1,1 + ...) * x +
(a0,2 * b0,2 + a1,2 * b1,2 + ...) * x^2 +
(a0,2 * b0,2 + a1,2 * b1,2 + ...) * x^3 +
... +
(a0,2 * b0,2 + a1,2 * b1,2 + ...) * x^n,
where each of the a's are fit parameters and each of the b's are either
a constant associated with the keys in dep_keys or a constant constructed
by a ctf (constant transform function) in ctfs
"""
if force_zero is None and len(kwargs) == 0:
return _dependence(
f=np.polyval, n_params=n + 1, dep_keys=dep_keys, ctfs=ctfs,
force_zero=force_zero, name='poly{n} dependence'.format(n=n),
code='p{}'.format(n) + ':{}', **kwargs
)
else:
return _dependence(
f=lambda p, x: np.polyval(np.concatenate((p, np.zeros(1))), x),
n_params=n, dep_keys=dep_keys, ctfs=ctfs, force_zero=force_zero,
name='poly{n} dependence'.format(n=n),
code='p{}'.format(n) + ':{}', **kwargs
)
def asymptotic_dependence(n, dep_keys, ctfs=list(), force_zero=None, **kwargs):
"""Returns a fit function that allows asymptotic depdendence on the
constants associated with each of the dep_keys
y(x) = - (a0 * b0 + a1 * b1 + ...) / x^n,
where each of the a's are fit parameters and each of the b's are either
a constant associated with the keys in dep_keys or a constant constructed
by a ctf (constant transform function) in ctfs
"""
return _dependence(
f=lambda p, x: - p[0] / x ** n, n_params=1,
dep_keys=dep_keys, ctfs=ctfs, force_zero=force_zero,
name='asymptotic{} dependence'.format(n), code='a{}'.format(n) + ':{}',
**kwargs
)
def _dependence(f, n_params, dep_keys, name, ctfs=list(), force_zero=None,
code='', **kwargs):
"""An abstract function to determine f-dependence on constants given by
dep_keys and ctfs
:param f: f(p, x) -> y, a function that maps an array of parameters and an
x value to a y value. Example: If one wants linear dependence on x,
f(p, x) = p[0] * x + p[1], would be the correct function to use
:param n_params: the number of parameters that f requires
:param dep_keys: the keys to use with a constants dictionary to determine
constants values. Example: If one wants dependence on the values of n and
j, dep_keys=['n', 'j']
:param name: the name of the dependence function
:param ctfs: (Optional) constants transform functions are functions of the
constants dictionary that return special combinations of the constants.
Example: If one wants dependence on j^2, one would add the following
    function to ctfs: lambda cd: cd['j'] ** 2
:param force_zero: (Optional) an x value at which to force the dependence
function to be 0
:return: The dependence fit function
"""
l1 = len(dep_keys) * n_params
l2 = len(ctfs) * n_params
# noinspection PyUnusedLocal
def d(x, params, const_list, const_dict):
more_constants = _do_transforms(ctfs, const_dict)
p = np.zeros(n_params)
dep_psubs = [params[i:i + n_params] for i in range(0, l1, n_params)]
ctf_psubs = [params[i:i + n_params] for i in
range(l1, l1 + l2, n_params)]
for dep in zip(dep_keys, *dep_psubs):
k, p0 = dep[0], dep[1:]
if k not in const_dict:
continue
else:
v = const_dict[k]
for j, p0j in zip(range(n_params), p0):
p[j] = p[j] + p0j * v
for ctf in zip(more_constants, *ctf_psubs):
c, p0 = ctf[0], ctf[1:]
for j, p0j in zip(range(n_params), p0):
p[j] = p[j] + p0j * c
return f(p, x)
dep_str = _dep_str(dep_keys, ctfs)
return FitFunction(
func=d, num_fit_params=(len(dep_keys) + len(ctfs)) * n_params,
force_zero=force_zero,
name=name + ' on {}'.format(dep_str), code=code.format(dep_str),
**kwargs
)
def _dep_str(dep_keys, ctfs):
    return ('(' +
            ', '.join((dep_keys + list(map(lambda c: c.__name__, ctfs)))) +
            ')')
# FITTERS WITH DEPENDENCIES
# The following are examples of using combine_ffns() to combine regular fit
# functions with dependencies on constants
def linear_with_linear_dependence(
dep_keys, ctfs=list(), force_zero=None, **kwargs):
return combine_ffns(
list_of_ffn=[linear(force_zero=force_zero),
linear_dependence(dep_keys, ctfs, force_zero=force_zero)],
force_zero=force_zero, **kwargs
)
def poly_with_linear_dependence(
n, dep_keys, ctfs=list(), force_zero=None, **kwargs):
return combine_ffns(
list_of_ffn=[poly(n), linear_dependence(dep_keys, ctfs)],
force_zero=force_zero, **kwargs
)
def asymptote_with_linear_dependence(
n, dep_keys, ctfs=list(), force_zero=None, **kwargs
):
return combine_ffns(
list_of_ffn=[asymptote(n), linear_dependence(dep_keys, ctfs)],
force_zero=force_zero, **kwargs
)
def asymptote_with_asymptotic_dependence(
n, dep_keys, ctfs=list(), force_zero=None, **kwargs
):
return combine_ffns(
list_of_ffn=[asymptote(n),
asymptotic_dependence(n, dep_keys, ctfs)],
force_zero=force_zero, **kwargs
)
# CONSTANT TRANSFORMS
# I think these are self-explanatory
def _do_transforms(ctfs, const_dict):
r = list()
for ctf in ctfs:
r.append(ctf(const_dict))
return r
def joff2(const_dict):
j = const_dict['j']
return (j - 1) * abs(j - 1)
def jjoff(const_dict):
j = const_dict['j']
return j * (j - 1)
def ephw(const_dict):
e = const_dict['e']
hw = const_dict['hw']
return e + hw
def y0pzbt0(const_dict):
y0 = const_dict['y0']
zbt0 = const_dict['zbt0']
return y0 + zbt0
# FORCE ZERO FUNCTIONS
def fz_to_x0(const_dict):
return const_dict['x0']
# FORCE K FUNCTIONS
def fk_to_y0(const_dict):
return const_dict['x0'], const_dict['y0']
def fk_to_zbt0(const_dict):
return const_dict['x0'], const_dict['zbt0']
|
the-stack_0_27448
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
# metadata
'''
Load metadata (DocType) class
Example:
meta = frappe.get_meta('User')
if meta.has_field('first_name'):
print("DocType" table has field "first_name")
'''
from __future__ import unicode_literals, print_function
from six.moves import range
import frappe, json, os
from frappe.utils import cstr, cint
from frappe.model import default_fields, no_value_fields, optional_fields
from frappe.model.document import Document
from frappe.model.base_document import BaseDocument
from frappe.model.db_schema import type_map
from frappe.modules import load_doctype_module
from frappe import _
def get_meta(doctype, cached=True):
if cached:
if not frappe.local.meta_cache.get(doctype):
frappe.local.meta_cache[doctype] = frappe.cache().hget("meta", doctype,
lambda: Meta(doctype))
return frappe.local.meta_cache[doctype]
else:
return load_meta(doctype)
def load_meta(doctype):
return Meta(doctype)
def get_table_columns(doctype):
return frappe.cache().hget("table_columns", doctype,
lambda: frappe.db.get_table_columns(doctype))
def load_doctype_from_file(doctype):
fname = frappe.scrub(doctype)
with open(frappe.get_app_path("frappe", "core", "doctype", fname, fname + ".json"), "r") as f:
txt = json.loads(f.read())
for d in txt.get("fields", []):
d["doctype"] = "DocField"
for d in txt.get("permissions", []):
d["doctype"] = "DocPerm"
txt["fields"] = [BaseDocument(d) for d in txt["fields"]]
if "permissions" in txt:
txt["permissions"] = [BaseDocument(d) for d in txt["permissions"]]
return txt
class Meta(Document):
_metaclass = True
default_fields = list(default_fields)[1:]
special_doctypes = ("DocField", "DocPerm", "Role", "DocType", "Module Def")
def __init__(self, doctype):
self._fields = {}
if isinstance(doctype, Document):
super(Meta, self).__init__(doctype.as_dict())
else:
super(Meta, self).__init__("DocType", doctype)
self.process()
def load_from_db(self):
try:
super(Meta, self).load_from_db()
except frappe.DoesNotExistError:
if self.doctype=="DocType" and self.name in self.special_doctypes:
self.__dict__.update(load_doctype_from_file(self.name))
else:
raise
def get_link_fields(self):
return self.get("fields", {"fieldtype": "Link", "options":["!=", "[Select]"]})
def get_dynamic_link_fields(self):
if not hasattr(self, '_dynamic_link_fields'):
self._dynamic_link_fields = self.get("fields", {"fieldtype": "Dynamic Link"})
return self._dynamic_link_fields
def get_select_fields(self):
return self.get("fields", {"fieldtype": "Select", "options":["not in",
["[Select]", "Loading..."]]})
def get_image_fields(self):
return self.get("fields", {"fieldtype": "Attach Image"})
def get_set_only_once_fields(self):
'''Return fields with `set_only_once` set'''
if not hasattr(self, "_set_only_once_fields"):
self._set_only_once_fields = self.get("fields", {"set_only_once": 1})
return self._set_only_once_fields
def get_table_fields(self):
if not hasattr(self, "_table_fields"):
if self.name!="DocType":
self._table_fields = self.get('fields', {"fieldtype":"Table"})
else:
self._table_fields = doctype_table_fields
return self._table_fields
def get_global_search_fields(self):
'''Returns list of fields with `in_global_search` set and `name` if set'''
fields = self.get("fields", {"in_global_search": 1, "fieldtype": ["not in", no_value_fields]})
if getattr(self, 'show_name_in_global_search', None):
fields.append(frappe._dict(fieldtype='Data', fieldname='name', label='Name'))
return fields
def get_valid_columns(self):
if not hasattr(self, "_valid_columns"):
if self.name in ("DocType", "DocField", "DocPerm", "Property Setter"):
self._valid_columns = get_table_columns(self.name)
else:
self._valid_columns = self.default_fields + \
[df.fieldname for df in self.get("fields") if df.fieldtype in type_map]
return self._valid_columns
def get_table_field_doctype(self, fieldname):
return { "fields": "DocField", "permissions": "DocPerm"}.get(fieldname)
def get_field(self, fieldname):
'''Return docfield from meta'''
if not self._fields:
for f in self.get("fields"):
self._fields[f.fieldname] = f
return self._fields.get(fieldname)
def has_field(self, fieldname):
'''Returns True if fieldname exists'''
return True if self.get_field(fieldname) else False
def get_label(self, fieldname):
'''Get label of the given fieldname'''
df = self.get_field(fieldname)
if df:
label = df.label
else:
label = {
'name': _('ID'),
'owner': _('Created By'),
'modified_by': _('Modified By'),
'creation': _('Created On'),
'modified': _('Last Modified On')
}.get(fieldname) or _('No Label')
return label
def get_options(self, fieldname):
return self.get_field(fieldname).options
def get_link_doctype(self, fieldname):
df = self.get_field(fieldname)
if df.fieldtype == "Link":
return df.options
elif df.fieldtype == "Dynamic Link":
return self.get_options(df.options)
else:
return None
def get_search_fields(self):
search_fields = self.search_fields or "name"
search_fields = [d.strip() for d in search_fields.split(",")]
if "name" not in search_fields:
search_fields.append("name")
return search_fields
def get_fields_to_fetch(self, link_fieldname=None):
'''Returns a list of docfield objects for fields whose values
are to be fetched and updated for a particular link field
These fields are of type Data, Link, Text, Readonly and their
options property is set as `link_fieldname`.`source_fieldname`'''
out = []
if not link_fieldname:
link_fields = [df.fieldname for df in self.get_link_fields()]
for df in self.fields:
if df.fieldtype in ('Data', 'Read Only', 'Text', 'Small Text',
'Text Editor', 'Code') and df.options:
if link_fieldname:
if df.options.startswith(link_fieldname + '.'):
out.append(df)
else:
if '.' in df.options:
fieldname = df.options.split('.', 1)[0]
if fieldname in link_fields:
out.append(df)
return out
def get_list_fields(self):
list_fields = ["name"] + [d.fieldname \
for d in self.fields if (d.in_list_view and d.fieldtype in type_map)]
if self.title_field and self.title_field not in list_fields:
list_fields.append(self.title_field)
return list_fields
def get_custom_fields(self):
return [d for d in self.fields if d.get('is_custom_field')]
def get_title_field(self):
'''Return the title field of this doctype,
		explicit via `title_field`, or `title` or `name`'''
title_field = getattr(self, 'title_field', None)
if not title_field and self.has_field('title'):
title_field = 'title'
if not title_field:
title_field = 'name'
return title_field
def get_translatable_fields(self):
'''Return all fields that are translation enabled'''
return [d.fieldname for d in self.fields if d.translatable]
def is_translatable(self, fieldname):
		'''Return True or False for the given fieldname'''
field = self.get_field(fieldname)
return field and field.translatable
def process(self):
# don't process for special doctypes
		# prevents a circular dependency
if self.name in self.special_doctypes:
return
self.add_custom_fields()
self.apply_property_setters()
self.sort_fields()
self.get_valid_columns()
self.set_custom_permissions()
def add_custom_fields(self):
try:
self.extend("fields", frappe.db.sql("""SELECT * FROM `tabCustom Field`
WHERE dt = %s AND docstatus < 2""", (self.name,), as_dict=1,
update={"is_custom_field": 1}))
except Exception as e:
if e.args[0]==1146:
return
else:
raise
def apply_property_setters(self):
property_setters = frappe.db.sql("""select * from `tabProperty Setter` where
doc_type=%s""", (self.name,), as_dict=1)
if not property_setters: return
integer_docfield_properties = [d.fieldname for d in frappe.get_meta('DocField').fields
if d.fieldtype in ('Int', 'Check')]
for ps in property_setters:
if ps.doctype_or_field=='DocType':
if ps.property_type in ('Int', 'Check'):
ps.value = cint(ps.value)
self.set(ps.property, ps.value)
else:
docfield = self.get("fields", {"fieldname":ps.field_name}, limit=1)
if docfield:
docfield = docfield[0]
else:
continue
if ps.property in integer_docfield_properties:
ps.value = cint(ps.value)
docfield.set(ps.property, ps.value)
def sort_fields(self):
"""sort on basis of insert_after"""
custom_fields = sorted(self.get_custom_fields(), key=lambda df: df.idx)
if custom_fields:
newlist = []
# if custom field is at top
# insert_after is false
for c in list(custom_fields):
if not c.insert_after:
newlist.append(c)
custom_fields.pop(custom_fields.index(c))
# standard fields
newlist += [df for df in self.get('fields') if not df.get('is_custom_field')]
newlist_fieldnames = [df.fieldname for df in newlist]
for i in range(2):
for df in list(custom_fields):
if df.insert_after in newlist_fieldnames:
cf = custom_fields.pop(custom_fields.index(df))
idx = newlist_fieldnames.index(df.insert_after)
newlist.insert(idx + 1, cf)
newlist_fieldnames.insert(idx + 1, cf.fieldname)
if not custom_fields:
break
# worst case, add remaining custom fields to last
if custom_fields:
newlist += custom_fields
# renum idx
for i, f in enumerate(newlist):
f.idx = i + 1
self.fields = newlist
def set_custom_permissions(self):
'''Reset `permissions` with Custom DocPerm if exists'''
if frappe.flags.in_patch or frappe.flags.in_import or frappe.flags.in_install:
return
if not self.istable and self.name not in ('DocType', 'DocField', 'DocPerm',
'Custom DocPerm'):
custom_perms = frappe.get_all('Custom DocPerm', fields='*',
filters=dict(parent=self.name), update=dict(doctype='Custom DocPerm'))
if custom_perms:
self.permissions = [Document(d) for d in custom_perms]
def get_fieldnames_with_value(self):
return [df.fieldname for df in self.fields if df.fieldtype not in no_value_fields]
def get_fields_to_check_permissions(self, user_permission_doctypes):
fields = self.get("fields", {
"fieldtype":"Link",
"parent": self.name,
"ignore_user_permissions":("!=", 1),
"options":("in", user_permission_doctypes)
})
if self.name in user_permission_doctypes:
fields.append(frappe._dict({
"label":"Name",
"fieldname":"name",
"options": self.name
}))
return fields
def get_high_permlevel_fields(self):
"""Build list of fields with high perm level and all the higher perm levels defined."""
if not hasattr(self, "high_permlevel_fields"):
self.high_permlevel_fields = []
for df in self.fields:
if df.permlevel > 0:
self.high_permlevel_fields.append(df)
return self.high_permlevel_fields
def get_dashboard_data(self):
'''Returns dashboard setup related to this doctype.
This method will return the `data` property in the
`[doctype]_dashboard.py` file in the doctype folder'''
data = frappe._dict()
try:
module = load_doctype_module(self.name, suffix='_dashboard')
if hasattr(module, 'get_data'):
data = frappe._dict(module.get_data())
except ImportError:
pass
return data
def get_row_template(self):
return self.get_web_template(suffix='_row')
def get_web_template(self, suffix=''):
'''Returns the relative path of the row template for this doctype'''
module_name = frappe.scrub(self.module)
doctype = frappe.scrub(self.name)
template_path = frappe.get_module_path(module_name, 'doctype',
doctype, 'templates', doctype + suffix + '.html')
if os.path.exists(template_path):
return '{module_name}/doctype/{doctype_name}/templates/{doctype_name}{suffix}.html'.format(
module_name = module_name, doctype_name = doctype, suffix=suffix)
return None
doctype_table_fields = [
frappe._dict({"fieldname": "fields", "options": "DocField"}),
frappe._dict({"fieldname": "permissions", "options": "DocPerm"})
]
#######
def is_single(doctype):
try:
return frappe.db.get_value("DocType", doctype, "issingle")
except IndexError:
raise Exception('Cannot determine whether %s is single' % doctype)
def get_parent_dt(dt):
parent_dt = frappe.db.sql("""select parent from tabDocField
where fieldtype="Table" and options=%s and (parent not like "old_parent:%%")
limit 1""", dt)
return parent_dt and parent_dt[0][0] or ''
def set_fieldname(field_id, fieldname):
frappe.db.set_value('DocField', field_id, 'fieldname', fieldname)
def get_field_currency(df, doc=None):
"""get currency based on DocField options and fieldvalue in doc"""
currency = None
if not df.get("options"):
return None
if not doc:
return None
if not getattr(frappe.local, "field_currency", None):
frappe.local.field_currency = frappe._dict()
if not (frappe.local.field_currency.get((doc.doctype, doc.name), {}).get(df.fieldname) or
(doc.parent and frappe.local.field_currency.get((doc.doctype, doc.parent), {}).get(df.fieldname))):
ref_docname = doc.parent or doc.name
if ":" in cstr(df.get("options")):
split_opts = df.get("options").split(":")
if len(split_opts)==3:
currency = frappe.db.get_value(split_opts[0], doc.get(split_opts[1]), split_opts[2])
else:
currency = doc.get(df.get("options"))
if doc.parent:
if currency:
ref_docname = doc.name
else:
currency = frappe.db.get_value(doc.parenttype, doc.parent, df.get("options"))
if currency:
frappe.local.field_currency.setdefault((doc.doctype, ref_docname), frappe._dict())\
.setdefault(df.fieldname, currency)
return frappe.local.field_currency.get((doc.doctype, doc.name), {}).get(df.fieldname) or \
(doc.parent and frappe.local.field_currency.get((doc.doctype, doc.parent), {}).get(df.fieldname))
def get_field_precision(df, doc=None, currency=None):
"""get precision based on DocField options and fieldvalue in doc"""
from frappe.utils import get_number_format_info
if cint(df.precision):
precision = cint(df.precision)
elif df.fieldtype == "Currency":
precision = cint(frappe.db.get_default("currency_precision"))
if not precision:
number_format = frappe.db.get_default("number_format") or "#,###.##"
decimal_str, comma_str, precision = get_number_format_info(number_format)
else:
precision = cint(frappe.db.get_default("float_precision")) or 3
return precision
def get_default_df(fieldname):
if fieldname in default_fields:
if fieldname in ("creation", "modified"):
return frappe._dict(
fieldname = fieldname,
fieldtype = "Datetime"
)
else:
return frappe._dict(
fieldname = fieldname,
fieldtype = "Data"
)
def trim_tables(doctype=None):
"""Use this to remove columns that don't exist in meta"""
ignore_fields = default_fields + optional_fields
filters={ "issingle": 0 }
if doctype:
filters["name"] = doctype
for doctype in frappe.db.get_all("DocType", filters=filters):
doctype = doctype.name
columns = frappe.db.get_table_columns(doctype)
fields = frappe.get_meta(doctype).get_fieldnames_with_value()
columns_to_remove = [f for f in list(set(columns) - set(fields)) if f not in ignore_fields
and not f.startswith("_")]
if columns_to_remove:
print(doctype, "columns removed:", columns_to_remove)
columns_to_remove = ", ".join(["drop `{0}`".format(c) for c in columns_to_remove])
query = """alter table `tab{doctype}` {columns}""".format(
doctype=doctype, columns=columns_to_remove)
frappe.db.sql_ddl(query)
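# Usage sketch (illustrative only; typically run from a maintenance shell such
# as a bench console, and the doctype name below is hypothetical):
#
#     trim_tables()                 # check every non-single doctype
#     trim_tables("Sales Invoice")  # restrict the check to one doctype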
def clear_cache(doctype=None):
cache = frappe.cache()
	if getattr(frappe.local, 'meta_cache', None) and (doctype in frappe.local.meta_cache):
del frappe.local.meta_cache[doctype]
for key in ('is_table', 'doctype_modules'):
cache.delete_value(key)
groups = ["meta", "form_meta", "table_columns", "last_modified",
"linked_doctypes", 'email_alerts']
def clear_single(dt):
for name in groups:
cache.hdel(name, dt)
if doctype:
clear_single(doctype)
# clear all parent doctypes
for dt in frappe.db.sql("""select parent from tabDocField
where fieldtype="Table" and options=%s""", (doctype,)):
clear_single(dt[0])
# clear all notifications
from frappe.desk.notifications import delete_notification_count_for
delete_notification_count_for(doctype)
else:
# clear all
for name in groups:
cache.delete_value(name)
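# Usage sketch (illustrative only):
#
#     clear_cache("ToDo")  # drop cached meta for one doctype and its parent doctypes
#     clear_cache()        # drop all cached meta groups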
the-stack_0_27449
# -*- coding: utf-8 -*-
"""Functions to make 3D plots with M/EEG data."""
# Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
# Mainak Jas <[email protected]>
# Mark Wronkiewicz <[email protected]>
#
# License: Simplified BSD
from itertools import cycle
import os.path as op
import sys
import warnings
from collections.abc import Iterable
from functools import partial
import numpy as np
from ..defaults import DEFAULTS
from ..fixes import _crop_colorbar, _get_img_fdata, _get_args
from .._freesurfer import _read_mri_info
from ..io import _loc_to_coil_trans
from ..io.pick import pick_types, _picks_to_idx
from ..io.constants import FIFF
from ..io.meas_info import read_fiducials, create_info
from ..source_space import (_ensure_src, _create_surf_spacing, _check_spacing,
SourceSpaces, read_freesurfer_lut)
from ..surface import (get_meg_helmet_surf, _read_mri_surface, _DistanceQuery,
transform_surface_to, _project_onto_surface,
_reorder_ccw)
from ..transforms import (_find_trans, apply_trans, rot_to_quat,
combine_transforms, _get_trans, _ensure_trans,
invert_transform, Transform, rotation,
read_ras_mni_t, _print_coord_trans)
from ..utils import (get_subjects_dir, logger, _check_subject, verbose, warn,
has_nibabel, check_version, fill_doc, _pl, get_config,
_ensure_int, _validate_type, _check_option)
from .utils import (mne_analyze_colormap, _get_color_list,
plt_show, tight_layout, figure_nobar, _check_time_unit)
from .misc import _check_mri
from ..bem import (ConductorModel, _bem_find_surface,
read_bem_surfaces, _ensure_bem_surfaces)
verbose_dec = verbose
FIDUCIAL_ORDER = (FIFF.FIFFV_POINT_LPA, FIFF.FIFFV_POINT_NASION,
FIFF.FIFFV_POINT_RPA)
# XXX: to unify with digitization
def _fiducial_coords(points, coord_frame=None):
"""Generate 3x3 array of fiducial coordinates."""
points = points or [] # None -> list
if coord_frame is not None:
points = [p for p in points if p['coord_frame'] == coord_frame]
points_ = {p['ident']: p for p in points if
p['kind'] == FIFF.FIFFV_POINT_CARDINAL}
if points_:
return np.array([points_[i]['r'] for i in FIDUCIAL_ORDER])
else:
# XXX eventually this should probably live in montage.py
if coord_frame is None or coord_frame == FIFF.FIFFV_COORD_HEAD:
# Try converting CTF HPI coils to fiducials
out = np.empty((3, 3))
out.fill(np.nan)
for p in points:
if p['kind'] == FIFF.FIFFV_POINT_HPI:
if np.isclose(p['r'][1:], 0, atol=1e-6).all():
out[0 if p['r'][0] < 0 else 2] = p['r']
elif np.isclose(p['r'][::2], 0, atol=1e-6).all():
out[1] = p['r']
if np.isfinite(out).all():
return out
return np.array([])
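# Minimal sketch of the expected input/output for _fiducial_coords (the
# coordinate values are made up):
#
#     dig = [dict(kind=FIFF.FIFFV_POINT_CARDINAL, ident=FIFF.FIFFV_POINT_NASION,
#                 coord_frame=FIFF.FIFFV_COORD_HEAD, r=np.array([0., 0.09, 0.])),
#            ...]  # plus LPA and RPA cardinal points
#     _fiducial_coords(dig, FIFF.FIFFV_COORD_HEAD)
#     # -> (3, 3) array ordered LPA, nasion, RPA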
def plot_head_positions(pos, mode='traces', cmap='viridis', direction='z',
show=True, destination=None, info=None, color='k',
axes=None):
"""Plot head positions.
Parameters
----------
pos : ndarray, shape (n_pos, 10) | list of ndarray
The head position data. Can also be a list to treat as a
concatenation of runs.
mode : str
Can be 'traces' (default) to show position and quaternion traces,
or 'field' to show the position as a vector field over time.
cmap : colormap
Colormap to use for the trace plot, default is "viridis".
direction : str
Can be any combination of "x", "y", or "z" (default: "z") to show
directional axes in "field" mode.
show : bool
Show figure if True. Defaults to True.
destination : str | array-like, shape (3,) | None
The destination location for the head, assumed to be in head
coordinates. See :func:`mne.preprocessing.maxwell_filter` for
details.
.. versionadded:: 0.16
info : instance of mne.Info | None
Measurement information. If provided, will be used to show the
destination position when ``destination is None``, and for
showing the MEG sensors.
.. versionadded:: 0.16
color : color object
The color to use for lines in ``mode == 'traces'`` and quiver
arrows in ``mode == 'field'``.
.. versionadded:: 0.16
axes : array-like, shape (3, 2)
The matplotlib axes to use. Only used for ``mode == 'traces'``.
.. versionadded:: 0.16
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
"""
from ..chpi import head_pos_to_trans_rot_t
from ..preprocessing.maxwell import _check_destination
import matplotlib.pyplot as plt
_check_option('mode', mode, ['traces', 'field'])
dest_info = dict(dev_head_t=None) if info is None else info
destination = _check_destination(destination, dest_info, head_frame=True)
if destination is not None:
destination = _ensure_trans(destination, 'head', 'meg') # probably inv
destination = destination['trans'][:3].copy()
destination[:, 3] *= 1000
if not isinstance(pos, (list, tuple)):
pos = [pos]
for ii, p in enumerate(pos):
p = np.array(p, float)
if p.ndim != 2 or p.shape[1] != 10:
raise ValueError('pos (or each entry in pos if a list) must be '
'dimension (N, 10), got %s' % (p.shape,))
if ii > 0: # concatenation
p[:, 0] += pos[ii - 1][-1, 0] - p[0, 0]
pos[ii] = p
borders = np.cumsum([len(pp) for pp in pos])
pos = np.concatenate(pos, axis=0)
trans, rot, t = head_pos_to_trans_rot_t(pos) # also ensures pos is okay
# trans, rot, and t are for dev_head_t, but what we really want
# is head_dev_t (i.e., where the head origin is in device coords)
use_trans = np.einsum('ijk,ik->ij', rot[:, :3, :3].transpose([0, 2, 1]),
-trans) * 1000
use_rot = rot.transpose([0, 2, 1])
use_quats = -pos[:, 1:4] # inverse (like doing rot.T)
surf = rrs = lims = None
if info is not None:
meg_picks = pick_types(info, meg=True, ref_meg=False, exclude=())
if len(meg_picks) > 0:
rrs = 1000 * np.array([info['chs'][pick]['loc'][:3]
for pick in meg_picks], float)
if mode == 'traces':
lims = np.array((rrs.min(0), rrs.max(0))).T
else: # mode == 'field'
surf = get_meg_helmet_surf(info)
transform_surface_to(surf, 'meg', info['dev_head_t'],
copy=False)
surf['rr'] *= 1000.
helmet_color = (0.0, 0.0, 0.6)
if mode == 'traces':
if axes is None:
axes = plt.subplots(3, 2, sharex=True)[1]
else:
axes = np.array(axes)
if axes.shape != (3, 2):
raise ValueError('axes must have shape (3, 2), got %s'
% (axes.shape,))
fig = axes[0, 0].figure
labels = ['xyz', ('$q_1$', '$q_2$', '$q_3$')]
for ii, (quat, coord) in enumerate(zip(use_quats.T, use_trans.T)):
axes[ii, 0].plot(t, coord, color, lw=1., zorder=3)
axes[ii, 0].set(ylabel=labels[0][ii], xlim=t[[0, -1]])
axes[ii, 1].plot(t, quat, color, lw=1., zorder=3)
axes[ii, 1].set(ylabel=labels[1][ii], xlim=t[[0, -1]])
for b in borders[:-1]:
for jj in range(2):
axes[ii, jj].axvline(t[b], color='r')
for ii, title in enumerate(('Position (mm)', 'Rotation (quat)')):
axes[0, ii].set(title=title)
axes[-1, ii].set(xlabel='Time (s)')
if rrs is not None:
pos_bads = np.any([(use_trans[:, ii] <= lims[ii, 0]) |
(use_trans[:, ii] >= lims[ii, 1])
for ii in range(3)], axis=0)
for ii in range(3):
oidx = list(range(ii)) + list(range(ii + 1, 3))
# knowing it will generally be spherical, we can approximate
# how far away we are along the axis line by taking the
# point to the left and right with the smallest distance
from scipy.spatial.distance import cdist
dists = cdist(rrs[:, oidx], use_trans[:, oidx])
left = rrs[:, [ii]] < use_trans[:, ii]
left_dists_all = dists.copy()
left_dists_all[~left] = np.inf
# Don't show negative Z direction
if ii != 2 and np.isfinite(left_dists_all).any():
idx = np.argmin(left_dists_all, axis=0)
left_dists = rrs[idx, ii]
bads = ~np.isfinite(
left_dists_all[idx, np.arange(len(idx))]) | pos_bads
left_dists[bads] = np.nan
axes[ii, 0].plot(t, left_dists, color=helmet_color,
ls='-', lw=0.5, zorder=2)
else:
axes[ii, 0].axhline(lims[ii][0], color=helmet_color,
ls='-', lw=0.5, zorder=2)
right_dists_all = dists
right_dists_all[left] = np.inf
if np.isfinite(right_dists_all).any():
idx = np.argmin(right_dists_all, axis=0)
right_dists = rrs[idx, ii]
bads = ~np.isfinite(
right_dists_all[idx, np.arange(len(idx))]) | pos_bads
right_dists[bads] = np.nan
axes[ii, 0].plot(t, right_dists, color=helmet_color,
ls='-', lw=0.5, zorder=2)
else:
axes[ii, 0].axhline(lims[ii][1], color=helmet_color,
ls='-', lw=0.5, zorder=2)
for ii in range(3):
axes[ii, 1].set(ylim=[-1, 1])
if destination is not None:
vals = np.array([destination[:, 3],
rot_to_quat(destination[:, :3])]).T.ravel()
for ax, val in zip(fig.axes, vals):
ax.axhline(val, color='r', ls=':', zorder=2, lw=1.)
else: # mode == 'field':
from matplotlib.colors import Normalize
from mpl_toolkits.mplot3d.art3d import Line3DCollection
from mpl_toolkits.mplot3d import Axes3D # noqa: F401, analysis:ignore
fig, ax = plt.subplots(1, subplot_kw=dict(projection='3d'))
# First plot the trajectory as a colormap:
# http://matplotlib.org/examples/pylab_examples/multicolored_line.html
pts = use_trans[:, np.newaxis]
segments = np.concatenate([pts[:-1], pts[1:]], axis=1)
norm = Normalize(t[0], t[-2])
lc = Line3DCollection(segments, cmap=cmap, norm=norm)
lc.set_array(t[:-1])
ax.add_collection(lc)
# now plot the head directions as a quiver
dir_idx = dict(x=0, y=1, z=2)
kwargs = dict(pivot='tail')
for d, length in zip(direction, [5., 2.5, 1.]):
use_dir = use_rot[:, :, dir_idx[d]]
# draws stems, then heads
array = np.concatenate((t, np.repeat(t, 2)))
ax.quiver(use_trans[:, 0], use_trans[:, 1], use_trans[:, 2],
use_dir[:, 0], use_dir[:, 1], use_dir[:, 2], norm=norm,
cmap=cmap, array=array, length=length, **kwargs)
if destination is not None:
ax.quiver(destination[0, 3],
destination[1, 3],
destination[2, 3],
destination[dir_idx[d], 0],
destination[dir_idx[d], 1],
destination[dir_idx[d], 2], color=color,
length=length, **kwargs)
mins = use_trans.min(0)
maxs = use_trans.max(0)
if surf is not None:
ax.plot_trisurf(*surf['rr'].T, triangles=surf['tris'],
color=helmet_color, alpha=0.1, shade=False)
ax.scatter(*rrs.T, s=1, color=helmet_color)
mins = np.minimum(mins, rrs.min(0))
maxs = np.maximum(maxs, rrs.max(0))
scale = (maxs - mins).max() / 2.
xlim, ylim, zlim = (maxs + mins)[:, np.newaxis] / 2. + [-scale, scale]
ax.set(xlabel='x', ylabel='y', zlabel='z',
xlim=xlim, ylim=ylim, zlim=zlim)
_set_aspect_equal(ax)
ax.view_init(30, 45)
tight_layout(fig=fig)
plt_show(show)
return fig
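# Usage sketch for plot_head_positions (the file name is hypothetical; any
# (n_pos, 10) head-position array, e.g. from mne.chpi.read_head_pos, works):
#
#     import mne
#     pos = mne.chpi.read_head_pos('run01_head_pos.pos')  # hypothetical path
#     plot_head_positions(pos, mode='traces')
#     plot_head_positions(pos, mode='field', direction='z')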
def _set_aspect_equal(ax):
# XXX recent MPL throws an error for 3D axis aspect setting, not much
# we can do about it at this point
try:
ax.set_aspect('equal')
except NotImplementedError:
pass
@verbose
def plot_evoked_field(evoked, surf_maps, time=None, time_label='t = %0.0f ms',
n_jobs=1, fig=None, vmax=None, n_contours=21,
verbose=None):
"""Plot MEG/EEG fields on head surface and helmet in 3D.
Parameters
----------
evoked : instance of mne.Evoked
The evoked object.
surf_maps : list
The surface mapping information obtained with make_field_map.
time : float | None
The time point at which the field map shall be displayed. If None,
the average peak latency (across sensor types) is used.
time_label : str | None
How to print info about the time instant visualized.
%(n_jobs)s
fig : instance of mayavi.core.api.Scene | None
If None (default), a new figure will be created, otherwise it will
plot into the given figure.
.. versionadded:: 0.20
vmax : float | None
Maximum intensity. Can be None to use the max(abs(data)).
.. versionadded:: 0.21
n_contours : int
The number of contours.
.. versionadded:: 0.21
%(verbose)s
Returns
-------
fig : instance of mayavi.mlab.Figure
The mayavi figure.
"""
# Update the backend
from .backends.renderer import _get_renderer
types = [t for t in ['eeg', 'grad', 'mag'] if t in evoked]
_validate_type(vmax, (None, 'numeric'), 'vmax')
n_contours = _ensure_int(n_contours, 'n_contours')
time_idx = None
if time is None:
time = np.mean([evoked.get_peak(ch_type=t)[1] for t in types])
del types
if not evoked.times[0] <= time <= evoked.times[-1]:
raise ValueError('`time` (%0.3f) must be inside `evoked.times`' % time)
time_idx = np.argmin(np.abs(evoked.times - time))
# Plot them
alphas = [1.0, 0.5]
colors = [(0.6, 0.6, 0.6), (1.0, 1.0, 1.0)]
colormap = mne_analyze_colormap(format='mayavi')
colormap_lines = np.concatenate([np.tile([0., 0., 255., 255.], (127, 1)),
np.tile([0., 0., 0., 255.], (2, 1)),
np.tile([255., 0., 0., 255.], (127, 1))])
renderer = _get_renderer(fig, bgcolor=(0.0, 0.0, 0.0), size=(600, 600))
for ii, this_map in enumerate(surf_maps):
surf = this_map['surf']
map_data = this_map['data']
map_type = this_map['kind']
map_ch_names = this_map['ch_names']
if map_type == 'eeg':
pick = pick_types(evoked.info, meg=False, eeg=True)
else:
pick = pick_types(evoked.info, meg=True, eeg=False, ref_meg=False)
ch_names = [evoked.ch_names[k] for k in pick]
set_ch_names = set(ch_names)
set_map_ch_names = set(map_ch_names)
if set_ch_names != set_map_ch_names:
message = ['Channels in map and data do not match.']
diff = set_map_ch_names - set_ch_names
if len(diff):
message += ['%s not in data file. ' % list(diff)]
diff = set_ch_names - set_map_ch_names
if len(diff):
message += ['%s not in map file.' % list(diff)]
raise RuntimeError(' '.join(message))
data = np.dot(map_data, evoked.data[pick, time_idx])
# Make a solid surface
if vmax is None:
vmax = np.max(np.abs(data))
vmax = float(vmax)
alpha = alphas[ii]
renderer.surface(surface=surf, color=colors[ii],
opacity=alpha)
# Now show our field pattern
renderer.surface(surface=surf, vmin=-vmax, vmax=vmax,
scalars=data, colormap=colormap,
polygon_offset=-1)
# And the field lines on top
renderer.contour(surface=surf, scalars=data, contours=n_contours,
vmin=-vmax, vmax=vmax, opacity=alpha,
colormap=colormap_lines)
if time_label is not None:
if '%' in time_label:
time_label %= (1e3 * evoked.times[time_idx])
renderer.text2d(x_window=0.01, y_window=0.01, text=time_label)
renderer.set_camera(azimuth=10, elevation=60)
renderer.show()
return renderer.scene()
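# Usage sketch for plot_evoked_field (paths and the subject name are hypothetical):
#
#     import mne
#     evoked = mne.read_evokeds('sample-ave.fif', condition=0)
#     maps = mne.make_field_map(evoked, trans='sample-trans.fif',
#                               subject='sample', subjects_dir=subjects_dir)
#     plot_evoked_field(evoked, maps, time=0.1)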
@verbose
def plot_alignment(info=None, trans=None, subject=None, subjects_dir=None,
surfaces='auto', coord_frame='head',
meg=None, eeg='original', fwd=None,
dig=False, ecog=True, src=None, mri_fiducials=False,
bem=None, seeg=True, fnirs=True, show_axes=False, dbs=True,
fig=None, interaction='trackball', verbose=None):
"""Plot head, sensor, and source space alignment in 3D.
Parameters
----------
info : dict | None
The measurement info.
If None (default), no sensor information will be shown.
%(trans)s
subject : str | None
The subject name corresponding to FreeSurfer environment
variable SUBJECT. Can be omitted if ``src`` is provided.
%(subjects_dir)s
surfaces : str | list | dict
Surfaces to plot. Supported values:
* scalp: one of 'head', 'outer_skin' (alias for 'head'),
'head-dense', or 'seghead' (alias for 'head-dense')
* skull: 'outer_skull', 'inner_skull', 'brain' (alias for
'inner_skull')
* brain: one of 'pial', 'white', 'inflated', or 'brain'
(alias for 'pial').
Can be dict to specify alpha values for each surface. Use None
to specify default value. Specified values must be between 0 and 1.
        For example::
surfaces=dict(brain=0.4, outer_skull=0.6, head=None)
Defaults to 'auto', which will look for a head surface and plot
it if found.
.. note:: For single layer BEMs it is recommended to use 'brain'.
coord_frame : str
Coordinate frame to use, 'head', 'meg', or 'mri'.
meg : str | list | bool | None
Can be "helmet", "sensors" or "ref" to show the MEG helmet, sensors or
reference sensors respectively, or a combination like
``('helmet', 'sensors')`` (same as None, default). True translates to
``('helmet', 'sensors', 'ref')``.
eeg : bool | str | list
String options are:
- "original" (default; equivalent to ``True``)
Shows EEG sensors using their digitized locations (after
transformation to the chosen ``coord_frame``)
- "projected"
The EEG locations projected onto the scalp, as is done in forward
modeling
Can also be a list of these options, or an empty list (``[]``,
equivalent of ``False``).
fwd : instance of Forward
The forward solution. If present, the orientations of the dipoles
present in the forward solution are displayed.
dig : bool | 'fiducials'
If True, plot the digitization points; 'fiducials' to plot fiducial
points only.
ecog : bool
If True (default), show ECoG sensors.
src : instance of SourceSpaces | None
If not None, also plot the source space points.
mri_fiducials : bool | str
Plot MRI fiducials (default False). If ``True``, look for a file with
the canonical name (``bem/{subject}-fiducials.fif``). If ``str``,
it can be ``'estimated'`` to use :func:`mne.coreg.get_mni_fiducials`,
otherwise it should provide the full path to the fiducials file.
.. versionadded:: 0.22
Support for ``'estimated'``.
bem : list of dict | instance of ConductorModel | None
Can be either the BEM surfaces (list of dict), a BEM solution or a
sphere model. If None, we first try loading
``'$SUBJECTS_DIR/$SUBJECT/bem/$SUBJECT-$SOURCE.fif'``, and then look
for ``'$SUBJECT*$SOURCE.fif'`` in the same directory. For
        ``'outer_skin'``, the subject's bem and bem/flash folders are searched.
Defaults to None.
seeg : bool
If True (default), show sEEG electrodes.
fnirs : str | list | bool | None
Can be "channels", "pairs", "detectors", and/or "sources" to show the
fNIRS channel locations, optode locations, or line between
source-detector pairs, or a combination like ``('pairs', 'channels')``.
True translates to ``('pairs',)``.
.. versionadded:: 0.20
show_axes : bool
If True (default False), coordinate frame axis indicators will be
shown:
* head in pink.
* MRI in gray (if ``trans is not None``).
* MEG in blue (if MEG sensors are present).
.. versionadded:: 0.16
dbs : bool
If True (default), show DBS (deep brain stimulation) electrodes.
fig : mayavi.mlab.Figure | None
Mayavi Scene in which to plot the alignment.
If ``None``, creates a new 600x600 pixel figure with black background.
.. versionadded:: 0.16
interaction : str
Can be "trackball" (default) or "terrain", i.e. a turntable-style
camera.
.. versionadded:: 0.16
%(verbose)s
Returns
-------
fig : instance of mayavi.mlab.Figure
The mayavi figure.
See Also
--------
mne.viz.plot_bem
Notes
-----
This function serves the purpose of checking the validity of the many
different steps of source reconstruction:
- Transform matrix (keywords ``trans``, ``meg`` and ``mri_fiducials``),
- BEM surfaces (keywords ``bem`` and ``surfaces``),
- sphere conductor model (keywords ``bem`` and ``surfaces``) and
- source space (keywords ``surfaces`` and ``src``).
.. versionadded:: 0.15
"""
from ..forward import _create_meg_coils, Forward
from ..coreg import get_mni_fiducials
# Update the backend
from .backends.renderer import _get_renderer
if eeg is False:
eeg = list()
elif eeg is True:
eeg = 'original'
if meg is None:
meg = ('helmet', 'sensors')
# only consider warning if the value is explicit
warn_meg = False
else:
warn_meg = True
if meg is True:
meg = ('helmet', 'sensors', 'ref')
elif meg is False:
meg = list()
elif isinstance(meg, str):
meg = [meg]
if isinstance(eeg, str):
eeg = [eeg]
if fnirs is True:
fnirs = ['pairs']
elif fnirs is False:
fnirs = list()
elif isinstance(fnirs, str):
fnirs = [fnirs]
_check_option('interaction', interaction, ['trackball', 'terrain'])
for kind, var in zip(('eeg', 'meg', 'fnirs'), (eeg, meg, fnirs)):
if not isinstance(var, (list, tuple)) or \
not all(isinstance(x, str) for x in var):
raise TypeError('%s must be list or tuple of str, got %s'
% (kind, type(var)))
for xi, x in enumerate(meg):
_check_option('meg[%d]' % xi, x, ('helmet', 'sensors', 'ref'))
for xi, x in enumerate(eeg):
_check_option('eeg[%d]' % xi, x, ('original', 'projected'))
for xi, x in enumerate(fnirs):
_check_option('fnirs[%d]' % xi, x, ('channels', 'pairs',
'sources', 'detectors'))
info = create_info(1, 1000., 'misc') if info is None else info
_validate_type(info, "info")
if isinstance(surfaces, str):
surfaces = [surfaces]
if isinstance(surfaces, dict):
user_alpha = surfaces.copy()
for key, val in user_alpha.items():
_validate_type(key, "str", f"surfaces key {repr(key)}")
_validate_type(val, (None, "numeric"), f"surfaces[{repr(key)}]")
if val is not None:
user_alpha[key] = float(val)
if not 0 <= user_alpha[key] <= 1:
raise ValueError(
f'surfaces[{repr(key)}] ({val}) must be'
' between 0 and 1'
)
else:
user_alpha = {}
surfaces = list(surfaces)
for si, s in enumerate(surfaces):
_validate_type(s, "str", f"surfaces[{si}]")
brain = sorted(
set(surfaces) & set(['brain', 'pial', 'white', 'inflated']))
bem = _ensure_bem_surfaces(bem, extra_allow=(ConductorModel, None))
assert isinstance(bem, ConductorModel) or bem is None
_check_option('coord_frame', coord_frame, ['head', 'meg', 'mri'])
if src is not None:
src = _ensure_src(src)
src_subject = src._subject
subject = src_subject if subject is None else subject
if src_subject is not None and subject != src_subject:
raise ValueError('subject ("%s") did not match the subject name '
' in src ("%s")' % (subject, src_subject))
if fwd is not None:
_validate_type(fwd, [Forward])
fwd_rr = fwd['source_rr']
if fwd['source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI:
fwd_nn = fwd['source_nn'].reshape(-1, 1, 3)
else:
fwd_nn = fwd['source_nn'].reshape(-1, 3, 3)
ref_meg = 'ref' in meg
meg_picks = pick_types(info, meg=True, ref_meg=ref_meg)
eeg_picks = pick_types(info, meg=False, eeg=True, ref_meg=False)
fnirs_picks = pick_types(info, meg=False, eeg=False, ref_meg=False,
fnirs=True)
other_bools = dict(ecog=ecog, seeg=seeg, dbs=dbs,
fnirs=(('channels' in fnirs) |
('sources' in fnirs) |
('detectors' in fnirs)))
del ecog, seeg, dbs
other_keys = sorted(other_bools.keys())
other_picks = {key: pick_types(info, meg=False, ref_meg=False,
**{key: True}) for key in other_keys}
if trans == 'auto':
# let's try to do this in MRI coordinates so they're easy to plot
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
trans = _find_trans(subject, subjects_dir)
head_mri_t, _ = _get_trans(trans, 'head', 'mri')
dev_head_t, _ = _get_trans(info['dev_head_t'], 'meg', 'head')
del trans
# Figure out our transformations
if coord_frame == 'meg':
head_trans = invert_transform(dev_head_t)
meg_trans = Transform('meg', 'meg')
mri_trans = invert_transform(combine_transforms(
dev_head_t, head_mri_t, 'meg', 'mri'))
elif coord_frame == 'mri':
head_trans = head_mri_t
meg_trans = combine_transforms(dev_head_t, head_mri_t, 'meg', 'mri')
mri_trans = Transform('mri', 'mri')
else: # coord_frame == 'head'
head_trans = Transform('head', 'head')
meg_trans = dev_head_t
mri_trans = invert_transform(head_mri_t)
# both the head and helmet will be in MRI coordinates after this
surfs = dict()
# Head:
head = False
for s in surfaces:
if s in ('auto', 'head', 'outer_skin', 'head-dense', 'seghead'):
if head:
raise ValueError('Can only supply one head-like surface name')
surfaces.pop(surfaces.index(s))
head = True
head_surf = None
# Try the BEM if applicable
if s in ('auto', 'head', 'outer_skin'):
if bem is not None:
head_missing = (
'Could not find the surface for '
'head in the provided BEM model, '
'looking in the subject directory.')
try:
head_surf = _bem_find_surface(bem, 'head')
except RuntimeError:
logger.info(head_missing)
if head_surf is None:
if subject is None:
if s == 'auto':
# ignore
continue
raise ValueError('To plot the head surface, the BEM/sphere'
' model must contain a head surface '
'or "subject" must be provided (got '
'None)')
subject_dir = op.join(
get_subjects_dir(subjects_dir, raise_error=True), subject)
if s in ('head-dense', 'seghead'):
try_fnames = [
op.join(subject_dir, 'bem', '%s-head-dense.fif'
% subject),
op.join(subject_dir, 'surf', 'lh.seghead'),
]
else:
try_fnames = [
op.join(subject_dir, 'bem', 'outer_skin.surf'),
op.join(subject_dir, 'bem', 'flash',
'outer_skin.surf'),
op.join(subject_dir, 'bem', '%s-head.fif'
% subject),
]
for fname in try_fnames:
if op.exists(fname):
logger.info('Using %s for head surface.'
% (op.basename(fname),))
if op.splitext(fname)[-1] == '.fif':
head_surf = read_bem_surfaces(fname)[0]
else:
head_surf = _read_mri_surface(fname)
break
else:
raise IOError('No head surface found for subject '
'%s after trying:\n%s'
% (subject, '\n'.join(try_fnames)))
surfs['head'] = head_surf
# Skull:
skull = list()
for name in ('outer_skull', 'inner_skull'):
if name in surfaces:
surfaces.pop(surfaces.index(name))
if bem is None:
fname = op.join(
get_subjects_dir(subjects_dir, raise_error=True),
subject, 'bem', name + '.surf')
if not op.isfile(fname):
                    raise ValueError('bem is None and the %s file cannot '
'be found:\n%s' % (name, fname))
surf = _read_mri_surface(fname)
else:
surf = _bem_find_surface(bem, name).copy()
surf['name'] = name
skull.append(surf)
assert all(isinstance(s, dict) for s in skull)
if mri_fiducials:
if mri_fiducials is True:
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
if subject is None:
raise ValueError("Subject needs to be specified to "
"automatically find the fiducials file.")
mri_fiducials = op.join(subjects_dir, subject, 'bem',
subject + '-fiducials.fif')
if isinstance(mri_fiducials, str):
if mri_fiducials == 'estimated':
mri_fiducials = get_mni_fiducials(subject, subjects_dir)
else:
mri_fiducials, cf = read_fiducials(mri_fiducials)
if cf != FIFF.FIFFV_COORD_MRI:
raise ValueError("Fiducials are not in MRI space")
fid_loc = _fiducial_coords(mri_fiducials, FIFF.FIFFV_COORD_MRI)
fid_loc = apply_trans(mri_trans, fid_loc)
else:
fid_loc = []
if 'helmet' in meg and len(meg_picks) > 0:
surfs['helmet'] = get_meg_helmet_surf(info, head_mri_t)
assert surfs['helmet']['coord_frame'] == FIFF.FIFFV_COORD_MRI
# Brain:
if len(brain) > 1:
raise ValueError('Only one brain surface can be plotted. '
'Got %s.' % brain)
elif len(brain) == 0:
brain = False
else: # exactly 1
brain = brain[0]
surfaces.pop(surfaces.index(brain))
if brain in user_alpha:
user_alpha['lh'] = user_alpha['rh'] = user_alpha.pop(brain)
if bem is not None and bem['is_sphere'] and brain == 'brain':
surfs['lh'] = _bem_find_surface(bem, 'brain')
else:
brain = 'pial' if brain == 'brain' else brain
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
for hemi in ['lh', 'rh']:
fname = op.join(subjects_dir, subject, 'surf',
'%s.%s' % (hemi, brain))
surfs[hemi] = _read_mri_surface(fname)
brain = True
# we've looked through all of them, raise if some remain
if len(surfaces) > 0:
raise ValueError('Unknown surface type%s: %s'
% (_pl(surfaces), surfaces,))
skull_alpha = dict()
skull_colors = dict()
hemi_val = 0.5
no_deep = all(len(other_picks[key]) == 0 for key in ('dbs', 'seeg'))
max_alpha = 1.0 if no_deep else 0.75
if src is None or (brain and any(s['type'] == 'surf' for s in src)):
hemi_val = max_alpha
alphas = np.linspace(max_alpha / 2., 0, 5)[:len(skull) + 1]
for idx, this_skull in enumerate(skull):
name = this_skull['name']
skull_alpha[name] = alphas[idx + 1]
skull_colors[name] = (0.95 - idx * 0.2, 0.85, 0.95 - idx * 0.2)
surfs[name] = this_skull
if src is None and brain is False and len(skull) == 0 and not show_axes:
head_alpha = max_alpha
else:
head_alpha = alphas[0]
for key in surfs:
# Surfs can sometimes be in head coords (e.g., if coming from sphere)
surf = surfs[key]
assert isinstance(surf, dict), f'{key}: {type(surf)}'
surfs[key] = transform_surface_to(surfs[key], coord_frame,
[mri_trans, head_trans], copy=True)
if fwd is not None:
fwd_rr, fwd_nn = _update_coord_frame(fwd, fwd_rr, fwd_nn,
mri_trans, head_trans)
# determine points
meg_rrs, meg_tris = list(), list()
hpi_loc = list()
ext_loc = list()
car_loc = list()
eeg_loc = list()
eegp_loc = list()
other_loc = dict()
if len(eeg) > 0:
eeg_loc = np.array([info['chs'][k]['loc'][:3] for k in eeg_picks])
if len(eeg_loc) > 0:
eeg_loc = apply_trans(head_trans, eeg_loc)
# XXX do projections here if necessary
if 'projected' in eeg:
eegp_loc, eegp_nn = _project_onto_surface(
eeg_loc, surfs['head'], project_rrs=True,
return_nn=True)[2:4]
if 'original' not in eeg:
eeg_loc = list()
del eeg
if 'sensors' in meg:
coil_transs = [_loc_to_coil_trans(info['chs'][pick]['loc'])
for pick in meg_picks]
coils = _create_meg_coils([info['chs'][pick] for pick in meg_picks],
acc='normal')
offset = 0
for coil, coil_trans in zip(coils, coil_transs):
rrs, tris = _sensor_shape(coil)
rrs = apply_trans(coil_trans, rrs)
meg_rrs.append(rrs)
meg_tris.append(tris + offset)
offset += len(meg_rrs[-1])
if len(meg_rrs) == 0:
if warn_meg:
warn('MEG sensors not found. Cannot plot MEG locations.')
else:
meg_rrs = apply_trans(meg_trans, np.concatenate(meg_rrs, axis=0))
meg_tris = np.concatenate(meg_tris, axis=0)
del meg
if dig:
if dig == 'fiducials':
hpi_loc = ext_loc = []
elif dig is not True:
raise ValueError("dig needs to be True, False or 'fiducials', "
"not %s" % repr(dig))
else:
hpi_loc = np.array([
d['r'] for d in (info['dig'] or [])
if (d['kind'] == FIFF.FIFFV_POINT_HPI and
d['coord_frame'] == FIFF.FIFFV_COORD_HEAD)])
ext_loc = np.array([
d['r'] for d in (info['dig'] or [])
if (d['kind'] == FIFF.FIFFV_POINT_EXTRA and
d['coord_frame'] == FIFF.FIFFV_COORD_HEAD)])
car_loc = _fiducial_coords(info['dig'], FIFF.FIFFV_COORD_HEAD)
# Transform from head coords if necessary
if coord_frame == 'meg':
for loc in (hpi_loc, ext_loc, car_loc):
loc[:] = apply_trans(invert_transform(info['dev_head_t']), loc)
elif coord_frame == 'mri':
for loc in (hpi_loc, ext_loc, car_loc):
loc[:] = apply_trans(head_mri_t, loc)
if len(car_loc) == len(ext_loc) == len(hpi_loc) == 0:
warn('Digitization points not found. Cannot plot digitization.')
del dig
for key, picks in other_picks.items():
if other_bools[key] and len(picks):
title = DEFAULTS["titles"][key] if key != 'fnirs' else 'fNIRS'
if key != 'fnirs' or 'channels' in fnirs:
other_loc[key] = [
info['chs'][pick]['loc'][:3] for pick in picks
]
# deal with NaN
other_loc[key] = np.array([loc for loc in other_loc[key]
if np.isfinite(loc).all()], float)
logger.info(
f'Plotting {len(other_loc[key])} {title}'
f' location{_pl(other_loc[key])}')
if key == 'fnirs':
if 'sources' in fnirs:
other_loc['source'] = np.array(
[info['chs'][pick]['loc'][3:6]
for pick in picks])
logger.info('Plotting %d %s source%s'
% (len(other_loc['source']),
title, _pl(other_loc['source'])))
if 'detectors' in fnirs:
other_loc['detector'] = np.array(
[info['chs'][pick]['loc'][6:9]
for pick in picks])
logger.info('Plotting %d %s detector%s'
% (len(other_loc['detector']),
title, _pl(other_loc['detector'])))
for v in other_loc.values():
v[:] = apply_trans(head_trans, v)
other_keys = sorted(other_loc) # re-sort and only keep non-empty
del other_bools
# initialize figure
renderer = _get_renderer(fig, bgcolor=(0.5, 0.5, 0.5), size=(800, 800))
if interaction == 'terrain':
renderer.set_interaction('terrain')
# plot surfaces
alphas = dict(head=head_alpha, helmet=0.25, lh=hemi_val, rh=hemi_val)
alphas.update(skull_alpha)
# replace default alphas with specified user_alpha
for k, v in user_alpha.items():
if v is not None:
alphas[k] = v
colors = dict(head=DEFAULTS['coreg']['head_color'],
helmet=(0.0, 0.0, 0.6), lh=(0.5,) * 3,
rh=(0.5,) * 3)
colors.update(skull_colors)
for key, surf in surfs.items():
renderer.surface(surface=surf, color=colors[key],
opacity=alphas[key],
backface_culling=(key != 'helmet'))
if brain and 'lh' not in surfs: # one layer sphere
assert bem['coord_frame'] == FIFF.FIFFV_COORD_HEAD
center = bem['r0'].copy()
center = apply_trans(head_trans, center)
renderer.sphere(center, scale=0.01, color=colors['lh'],
opacity=alphas['lh'])
if show_axes:
axes = [(head_trans, (0.9, 0.3, 0.3))] # always show head
if not np.allclose(head_mri_t['trans'], np.eye(4)): # Show MRI
axes.append((mri_trans, (0.6, 0.6, 0.6)))
if len(meg_picks) > 0: # Show MEG
axes.append((meg_trans, (0., 0.6, 0.6)))
for ax in axes:
x, y, z = np.tile(ax[0]['trans'][:3, 3], 3).reshape((3, 3)).T
u, v, w = ax[0]['trans'][:3, :3]
renderer.sphere(center=np.column_stack((x[0], y[0], z[0])),
color=ax[1], scale=3e-3)
renderer.quiver3d(x=x, y=y, z=z, u=u, v=v, w=w, mode='arrow',
scale=2e-2, color=ax[1],
scale_mode='scalar', resolution=20,
scalars=[0.33, 0.66, 1.0])
# plot points
defaults = DEFAULTS['coreg']
datas = [eeg_loc,
hpi_loc,
ext_loc] + list(other_loc[key] for key in other_keys)
colors = [defaults['eeg_color'],
defaults['hpi_color'],
defaults['extra_color']
] + [defaults[key + '_color'] for key in other_keys]
alphas = [0.8,
0.5,
0.25] + [0.8] * len(other_keys)
scales = [defaults['eeg_scale'],
defaults['hpi_scale'],
defaults['extra_scale']
] + [defaults[key + '_scale'] for key in other_keys]
assert len(datas) == len(colors) == len(alphas) == len(scales)
fid_colors = tuple(
defaults[f'{key}_color'] for key in ('lpa', 'nasion', 'rpa'))
glyphs = ['sphere'] * len(datas)
for kind, loc in (('dig', car_loc), ('mri', fid_loc)):
if len(loc) > 0:
datas.extend(loc[:, np.newaxis])
colors.extend(fid_colors)
alphas.extend(3 * (defaults[f'{kind}_fid_opacity'],))
scales.extend(3 * (defaults[f'{kind}_fid_scale'],))
glyphs.extend(3 * (('oct' if kind == 'mri' else 'sphere'),))
for data, color, alpha, scale, glyph in zip(
datas, colors, alphas, scales, glyphs):
if len(data) > 0:
if glyph == 'oct':
transform = np.eye(4)
transform[:3, :3] = mri_trans['trans'][:3, :3] * scale
# rotate around Z axis 45 deg first
transform = transform @ rotation(0, 0, np.pi / 4)
renderer.quiver3d(
x=data[:, 0], y=data[:, 1], z=data[:, 2],
u=1., v=0., w=0., color=color, mode='oct',
scale=1., opacity=alpha, backface_culling=True,
solid_transform=transform)
else:
assert glyph == 'sphere'
assert data.ndim == 2 and data.shape[1] == 3, data.shape
renderer.sphere(center=data, color=color, scale=scale,
opacity=alpha, backface_culling=True)
if len(eegp_loc) > 0:
renderer.quiver3d(
x=eegp_loc[:, 0], y=eegp_loc[:, 1], z=eegp_loc[:, 2],
u=eegp_nn[:, 0], v=eegp_nn[:, 1], w=eegp_nn[:, 2],
color=defaults['eegp_color'], mode='cylinder',
scale=defaults['eegp_scale'], opacity=0.6,
glyph_height=defaults['eegp_height'],
glyph_center=(0., -defaults['eegp_height'], 0),
glyph_resolution=20,
backface_culling=True)
if len(meg_rrs) > 0:
color, alpha = (0., 0.25, 0.5), 0.25
surf = dict(rr=meg_rrs, tris=meg_tris)
renderer.surface(surface=surf, color=color,
opacity=alpha, backface_culling=True)
if src is not None:
atlas_ids, colors = read_freesurfer_lut()
for ss in src:
src_rr = ss['rr'][ss['inuse'].astype(bool)]
src_nn = ss['nn'][ss['inuse'].astype(bool)]
src_rr, src_nn = _update_coord_frame(src[0], src_rr, src_nn,
mri_trans, head_trans)
# volume sources
if ss['type'] == 'vol':
seg_name = ss.get('seg_name', None)
if seg_name is not None and seg_name in colors:
color = colors[seg_name][:3]
color = tuple(i / 256. for i in color)
else:
color = (1., 1., 0.)
# surface and discrete sources
else:
color = (1., 1., 0.)
if len(src_rr) > 0:
renderer.quiver3d(
x=src_rr[:, 0], y=src_rr[:, 1], z=src_rr[:, 2],
u=src_nn[:, 0], v=src_nn[:, 1], w=src_nn[:, 2],
color=color, mode='cylinder', scale=3e-3,
opacity=0.75, glyph_height=0.25,
glyph_center=(0., 0., 0.), glyph_resolution=20,
backface_culling=True)
if fwd is not None:
red = (1.0, 0.0, 0.0)
green = (0.0, 1.0, 0.0)
blue = (0.0, 0.0, 1.0)
for ori, color in zip(range(fwd_nn.shape[1]), (red, green, blue)):
renderer.quiver3d(fwd_rr[:, 0],
fwd_rr[:, 1],
fwd_rr[:, 2],
fwd_nn[:, ori, 0],
fwd_nn[:, ori, 1],
fwd_nn[:, ori, 2],
color=color, mode='arrow', scale=1.5e-3)
if 'pairs' in fnirs and len(fnirs_picks) > 0:
origin = apply_trans(head_trans, np.array(
[info['chs'][k]['loc'][3:6] for k in fnirs_picks]))
destination = apply_trans(head_trans, np.array(
[info['chs'][k]['loc'][6:9] for k in fnirs_picks]))
logger.info(f'Plotting {origin.shape[0]} fNIRS pair{_pl(origin)}')
renderer.tube(origin=origin, destination=destination)
renderer.set_camera(azimuth=90, elevation=90,
distance=0.6, focalpoint=(0., 0., 0.))
renderer.show()
return renderer.scene()
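# Usage sketch for plot_alignment (paths and the subject name are hypothetical):
#
#     import mne
#     info = mne.io.read_info('sample_raw.fif')
#     plot_alignment(info, trans='sample-trans.fif', subject='sample',
#                    subjects_dir=subjects_dir, surfaces=['head-dense', 'white'],
#                    meg=['helmet', 'sensors'], dig=True, show_axes=True)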
def _make_tris_fan(n_vert):
"""Make tris given a number of vertices of a circle-like obj."""
tris = np.zeros((n_vert - 2, 3), int)
tris[:, 2] = np.arange(2, n_vert)
tris[:, 1] = tris[:, 2] - 1
return tris
def _sensor_shape(coil):
"""Get the sensor shape vertices."""
from scipy.spatial import ConvexHull
id_ = coil['type'] & 0xFFFF
pad = True
# Square figure eight
if id_ in (FIFF.FIFFV_COIL_NM_122,
FIFF.FIFFV_COIL_VV_PLANAR_W,
FIFF.FIFFV_COIL_VV_PLANAR_T1,
FIFF.FIFFV_COIL_VV_PLANAR_T2,
):
# wound by right hand rule such that +x side is "up" (+z)
long_side = coil['size'] # length of long side (meters)
offset = 0.0025 # offset of the center portion of planar grad coil
rrs = np.array([
[offset, -long_side / 2.],
[long_side / 2., -long_side / 2.],
[long_side / 2., long_side / 2.],
[offset, long_side / 2.],
[-offset, -long_side / 2.],
[-long_side / 2., -long_side / 2.],
[-long_side / 2., long_side / 2.],
[-offset, long_side / 2.]])
tris = np.concatenate((_make_tris_fan(4),
_make_tris_fan(4)[:, ::-1] + 4), axis=0)
# Square
elif id_ in (FIFF.FIFFV_COIL_POINT_MAGNETOMETER,
FIFF.FIFFV_COIL_VV_MAG_T1,
FIFF.FIFFV_COIL_VV_MAG_T2,
FIFF.FIFFV_COIL_VV_MAG_T3,
FIFF.FIFFV_COIL_KIT_REF_MAG,
):
# square magnetometer (potentially point-type)
size = 0.001 if id_ == 2000 else (coil['size'] / 2.)
rrs = np.array([[-1., 1.], [1., 1.], [1., -1.], [-1., -1.]]) * size
tris = _make_tris_fan(4)
# Circle
elif id_ in (FIFF.FIFFV_COIL_MAGNES_MAG,
FIFF.FIFFV_COIL_MAGNES_REF_MAG,
FIFF.FIFFV_COIL_CTF_REF_MAG,
FIFF.FIFFV_COIL_BABY_MAG,
FIFF.FIFFV_COIL_BABY_REF_MAG,
FIFF.FIFFV_COIL_ARTEMIS123_REF_MAG,
):
n_pts = 15 # number of points for circle
circle = np.exp(2j * np.pi * np.arange(n_pts) / float(n_pts))
circle = np.concatenate(([0.], circle))
circle *= coil['size'] / 2. # radius of coil
rrs = np.array([circle.real, circle.imag]).T
tris = _make_tris_fan(n_pts + 1)
# Circle
elif id_ in (FIFF.FIFFV_COIL_MAGNES_GRAD,
FIFF.FIFFV_COIL_CTF_GRAD,
FIFF.FIFFV_COIL_CTF_REF_GRAD,
FIFF.FIFFV_COIL_CTF_OFFDIAG_REF_GRAD,
FIFF.FIFFV_COIL_MAGNES_REF_GRAD,
FIFF.FIFFV_COIL_MAGNES_OFFDIAG_REF_GRAD,
FIFF.FIFFV_COIL_KIT_GRAD,
FIFF.FIFFV_COIL_BABY_GRAD,
FIFF.FIFFV_COIL_ARTEMIS123_GRAD,
FIFF.FIFFV_COIL_ARTEMIS123_REF_GRAD,
):
# round coil 1st order (off-diagonal) gradiometer
baseline = coil['base'] if id_ in (5004, 4005) else 0.
n_pts = 16 # number of points for circle
# This time, go all the way around circle to close it fully
circle = np.exp(2j * np.pi * np.arange(-1, n_pts) / float(n_pts - 1))
circle[0] = 0 # center pt for triangulation
circle *= coil['size'] / 2.
rrs = np.array([ # first, second coil
np.concatenate([circle.real + baseline / 2.,
circle.real - baseline / 2.]),
np.concatenate([circle.imag, -circle.imag])]).T
tris = np.concatenate([_make_tris_fan(n_pts + 1),
_make_tris_fan(n_pts + 1) + n_pts + 1])
# 3D convex hull (will fail for 2D geometry, can extend later if needed)
else:
rrs = coil['rmag_orig'].copy()
pad = False
tris = _reorder_ccw(rrs, ConvexHull(rrs).simplices)
# Go from (x,y) -> (x,y,z)
if pad:
rrs = np.pad(rrs, ((0, 0), (0, 1)), mode='constant')
assert rrs.ndim == 2 and rrs.shape[1] == 3
return rrs, tris
def _get_cmap(colormap):
import matplotlib.pyplot as plt
if isinstance(colormap, str) and colormap in ('mne', 'mne_analyze'):
colormap = mne_analyze_colormap([0, 1, 2], format='matplotlib')
else:
colormap = plt.get_cmap(colormap)
return colormap
def _process_clim(clim, colormap, transparent, data=0., allow_pos_lims=True):
"""Convert colormap/clim options to dict.
This fills in any "auto" entries properly such that round-trip
calling gives the same results.
"""
# Based on type of limits specified, get cmap control points
from matplotlib.colors import Colormap
_validate_type(colormap, (str, Colormap), 'colormap')
data = np.asarray(data)
if isinstance(colormap, str):
if colormap == 'auto':
if clim == 'auto':
if allow_pos_lims and (data < 0).any():
colormap = 'mne'
else:
colormap = 'hot'
else:
if 'lims' in clim:
colormap = 'hot'
else: # 'pos_lims' in clim
colormap = 'mne'
colormap = _get_cmap(colormap)
assert isinstance(colormap, Colormap)
diverging_maps = ['PiYG', 'PRGn', 'BrBG', 'PuOr', 'RdGy', 'RdBu',
'RdYlBu', 'RdYlGn', 'Spectral', 'coolwarm', 'bwr',
'seismic']
diverging_maps += [d + '_r' for d in diverging_maps]
diverging_maps += ['mne', 'mne_analyze']
if clim == 'auto':
# this is merely a heuristic!
if allow_pos_lims and colormap.name in diverging_maps:
key = 'pos_lims'
else:
key = 'lims'
clim = {'kind': 'percent', key: [96, 97.5, 99.95]}
if not isinstance(clim, dict):
raise ValueError('"clim" must be "auto" or dict, got %s' % (clim,))
if ('lims' in clim) + ('pos_lims' in clim) != 1:
raise ValueError('Exactly one of lims and pos_lims must be specified '
'in clim, got %s' % (clim,))
if 'pos_lims' in clim and not allow_pos_lims:
raise ValueError('Cannot use "pos_lims" for clim, use "lims" '
'instead')
diverging = 'pos_lims' in clim
ctrl_pts = np.array(clim['pos_lims' if diverging else 'lims'], float)
ctrl_pts = np.array(ctrl_pts, float)
if ctrl_pts.shape != (3,):
raise ValueError('clim has shape %s, it must be (3,)'
% (ctrl_pts.shape,))
if (np.diff(ctrl_pts) < 0).any():
raise ValueError('colormap limits must be monotonically '
'increasing, got %s' % (ctrl_pts,))
clim_kind = clim.get('kind', 'percent')
_check_option("clim['kind']", clim_kind, ['value', 'values', 'percent'])
if clim_kind == 'percent':
perc_data = np.abs(data) if diverging else data
ctrl_pts = np.percentile(perc_data, ctrl_pts)
logger.info('Using control points %s' % (ctrl_pts,))
assert len(ctrl_pts) == 3
clim = dict(kind='value')
clim['pos_lims' if diverging else 'lims'] = ctrl_pts
mapdata = dict(clim=clim, colormap=colormap, transparent=transparent)
return mapdata
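# Hedged examples of clim/colormap values accepted by _process_clim (the data
# argument below is a placeholder array):
#
#     _process_clim('auto', 'auto', True, data=np.random.rand(100))
#     _process_clim(dict(kind='value', lims=[3., 6., 9.]), 'hot', True)
#     _process_clim(dict(kind='percent', pos_lims=[96, 97.5, 99.95]), 'mne', True)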
def _separate_map(mapdata):
"""Help plotters that cannot handle limit equality."""
diverging = 'pos_lims' in mapdata['clim']
key = 'pos_lims' if diverging else 'lims'
ctrl_pts = np.array(mapdata['clim'][key])
assert ctrl_pts.shape == (3,)
if len(set(ctrl_pts)) == 1: # three points match
if ctrl_pts[0] == 0: # all are zero
warn('All data were zero')
ctrl_pts = np.arange(3, dtype=float)
else:
ctrl_pts *= [0., 0.5, 1] # all nonzero pts == max
elif len(set(ctrl_pts)) == 2: # two points match
# if points one and two are identical, add a tiny bit to the
# control point two; if points two and three are identical,
# subtract a tiny bit from point two.
bump = 1e-5 if ctrl_pts[0] == ctrl_pts[1] else -1e-5
ctrl_pts[1] = ctrl_pts[0] + bump * (ctrl_pts[2] - ctrl_pts[0])
mapdata['clim'][key] = ctrl_pts
def _linearize_map(mapdata):
from matplotlib.colors import ListedColormap
diverging = 'pos_lims' in mapdata['clim']
scale_pts = mapdata['clim']['pos_lims' if diverging else 'lims']
if diverging:
lims = [-scale_pts[2], scale_pts[2]]
ctrl_norm = np.concatenate([-scale_pts[::-1] / scale_pts[2], [0],
scale_pts / scale_pts[2]]) / 2 + 0.5
linear_norm = [0, 0.25, 0.5, 0.5, 0.5, 0.75, 1]
trans_norm = [1, 1, 0, 0, 0, 1, 1]
else:
lims = [scale_pts[0], scale_pts[2]]
range_ = scale_pts[2] - scale_pts[0]
mid = (scale_pts[1] - scale_pts[0]) / range_ if range_ > 0 else 0.5
ctrl_norm = [0, mid, 1]
linear_norm = [0, 0.5, 1]
trans_norm = [0, 1, 1]
# do the piecewise linear transformation
interp_to = np.linspace(0, 1, 256)
colormap = np.array(mapdata['colormap'](
np.interp(interp_to, ctrl_norm, linear_norm)))
if mapdata['transparent']:
colormap[:, 3] = np.interp(interp_to, ctrl_norm, trans_norm)
lims = np.array([lims[0], np.mean(lims), lims[1]])
colormap = ListedColormap(colormap)
return colormap, lims
def _get_map_ticks(mapdata):
diverging = 'pos_lims' in mapdata['clim']
ticks = mapdata['clim']['pos_lims' if diverging else 'lims']
delta = 1e-2 * (ticks[2] - ticks[0])
if ticks[1] <= ticks[0] + delta: # Only two worth showing
ticks = ticks[::2]
if ticks[1] <= ticks[0] + delta: # Actually only one
ticks = ticks[::2]
if diverging:
idx = int(ticks[0] == 0)
ticks = list(-np.array(ticks[idx:])[::-1]) + [0] + list(ticks[idx:])
return np.array(ticks)
def _handle_time(time_label, time_unit, times):
"""Handle time label string and units."""
_validate_type(time_label, (None, str, 'callable'), 'time_label')
if time_label == 'auto':
if times is not None and len(times) > 1:
if time_unit == 's':
time_label = 'time=%0.3fs'
elif time_unit == 'ms':
time_label = 'time=%0.1fms'
else:
time_label = None
# convert to callable
if isinstance(time_label, str):
time_label_fmt = time_label
def time_label(x):
try:
return time_label_fmt % x
except Exception:
return time_label # in case it's static
assert time_label is None or callable(time_label)
if times is not None:
_, times = _check_time_unit(time_unit, times)
return time_label, times
def _key_pressed_slider(event, params):
"""Handle key presses for time_viewer slider."""
step = 1
if event.key.startswith('ctrl'):
step = 5
event.key = event.key.split('+')[-1]
if event.key not in ['left', 'right']:
return
time_viewer = event.canvas.figure
value = time_viewer.slider.val
times = params['stc'].times
if params['time_unit'] == 'ms':
times = times * 1000.
time_idx = np.argmin(np.abs(times - value))
if event.key == 'left':
time_idx = np.max((0, time_idx - step))
elif event.key == 'right':
time_idx = np.min((len(times) - 1, time_idx + step))
this_time = times[time_idx]
time_viewer.slider.set_val(this_time)
def _smooth_plot(this_time, params):
"""Smooth source estimate data and plot with mpl."""
from ..morph import _hemi_morph
ax = params['ax']
stc = params['stc']
ax.clear()
times = stc.times
scaler = 1000. if params['time_unit'] == 'ms' else 1.
if this_time is None:
time_idx = 0
else:
time_idx = np.argmin(np.abs(times - this_time / scaler))
if params['hemi_idx'] == 0:
data = stc.data[:len(stc.vertices[0]), time_idx:time_idx + 1]
else:
data = stc.data[len(stc.vertices[0]):, time_idx:time_idx + 1]
morph = _hemi_morph(
params['tris'], params['inuse'], params['vertices'],
params['smoothing_steps'], maps=None, warn=True)
array_plot = morph @ data
range_ = params['scale_pts'][2] - params['scale_pts'][0]
colors = (array_plot - params['scale_pts'][0]) / range_
faces = params['faces']
greymap = params['greymap']
cmap = params['cmap']
polyc = ax.plot_trisurf(*params['coords'].T, triangles=faces,
antialiased=False, vmin=0, vmax=1)
color_ave = np.mean(colors[faces], axis=1).flatten()
curv_ave = np.mean(params['curv'][faces], axis=1).flatten()
colors = cmap(color_ave)
# alpha blend
colors[:, :3] *= colors[:, [3]]
colors[:, :3] += greymap(curv_ave)[:, :3] * (1. - colors[:, [3]])
colors[:, 3] = 1.
polyc.set_facecolor(colors)
if params['time_label'] is not None:
ax.set_title(params['time_label'](times[time_idx] * scaler,),
color='w')
_set_aspect_equal(ax)
ax.axis('off')
ax.set(xlim=[-80, 80], ylim=(-80, 80), zlim=[-80, 80])
ax.figure.canvas.draw()
def _plot_mpl_stc(stc, subject=None, surface='inflated', hemi='lh',
colormap='auto', time_label='auto', smoothing_steps=10,
subjects_dir=None, views='lat', clim='auto', figure=None,
initial_time=None, time_unit='s', background='black',
spacing='oct6', time_viewer=False, colorbar=True,
transparent=True):
"""Plot source estimate using mpl."""
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.widgets import Slider
import nibabel as nib
from scipy import stats
from ..morph import _get_subject_sphere_tris
if hemi not in ['lh', 'rh']:
raise ValueError("hemi must be 'lh' or 'rh' when using matplotlib. "
"Got %s." % hemi)
lh_kwargs = {'lat': {'elev': 0, 'azim': 180},
'med': {'elev': 0, 'azim': 0},
'ros': {'elev': 0, 'azim': 90},
'cau': {'elev': 0, 'azim': -90},
'dor': {'elev': 90, 'azim': -90},
'ven': {'elev': -90, 'azim': -90},
'fro': {'elev': 0, 'azim': 106.739},
'par': {'elev': 30, 'azim': -120}}
rh_kwargs = {'lat': {'elev': 0, 'azim': 0},
'med': {'elev': 0, 'azim': 180},
'ros': {'elev': 0, 'azim': 90},
'cau': {'elev': 0, 'azim': -90},
'dor': {'elev': 90, 'azim': -90},
'ven': {'elev': -90, 'azim': -90},
'fro': {'elev': 16.739, 'azim': 60},
'par': {'elev': 30, 'azim': -60}}
time_viewer = False if time_viewer == 'auto' else time_viewer
kwargs = dict(lh=lh_kwargs, rh=rh_kwargs)
views = 'lat' if views == 'auto' else views
_check_option('views', views, sorted(lh_kwargs.keys()))
mapdata = _process_clim(clim, colormap, transparent, stc.data)
_separate_map(mapdata)
colormap, scale_pts = _linearize_map(mapdata)
del transparent, mapdata
time_label, times = _handle_time(time_label, time_unit, stc.times)
fig = plt.figure(figsize=(6, 6)) if figure is None else figure
try:
ax = Axes3D(fig, auto_add_to_figure=False)
except Exception: # old mpl
ax = Axes3D(fig)
else:
fig.add_axes(ax)
hemi_idx = 0 if hemi == 'lh' else 1
surf = op.join(subjects_dir, subject, 'surf', '%s.%s' % (hemi, surface))
if spacing == 'all':
coords, faces = nib.freesurfer.read_geometry(surf)
inuse = slice(None)
else:
stype, sval, ico_surf, src_type_str = _check_spacing(spacing)
surf = _create_surf_spacing(surf, hemi, subject, stype, ico_surf,
subjects_dir)
inuse = surf['vertno']
faces = surf['use_tris']
coords = surf['rr'][inuse]
shape = faces.shape
faces = stats.rankdata(faces, 'dense').reshape(shape) - 1
faces = np.round(faces).astype(int) # should really be int-like anyway
del surf
vertices = stc.vertices[hemi_idx]
n_verts = len(vertices)
tris = _get_subject_sphere_tris(subject, subjects_dir)[hemi_idx]
cmap = cm.get_cmap(colormap)
greymap = cm.get_cmap('Greys')
curv = nib.freesurfer.read_morph_data(
op.join(subjects_dir, subject, 'surf', '%s.curv' % hemi))[inuse]
curv = np.clip(np.array(curv > 0, np.int64), 0.33, 0.66)
params = dict(ax=ax, stc=stc, coords=coords, faces=faces,
hemi_idx=hemi_idx, vertices=vertices, tris=tris,
smoothing_steps=smoothing_steps, n_verts=n_verts,
inuse=inuse, cmap=cmap, curv=curv,
scale_pts=scale_pts, greymap=greymap, time_label=time_label,
time_unit=time_unit)
_smooth_plot(initial_time, params)
ax.view_init(**kwargs[hemi][views])
try:
ax.set_facecolor(background)
except AttributeError:
ax.set_axis_bgcolor(background)
if time_viewer:
time_viewer = figure_nobar(figsize=(4.5, .25))
fig.time_viewer = time_viewer
ax_time = plt.axes()
if initial_time is None:
initial_time = 0
slider = Slider(ax=ax_time, label='Time', valmin=times[0],
valmax=times[-1], valinit=initial_time)
time_viewer.slider = slider
callback_slider = partial(_smooth_plot, params=params)
slider.on_changed(callback_slider)
callback_key = partial(_key_pressed_slider, params=params)
time_viewer.canvas.mpl_connect('key_press_event', callback_key)
time_viewer.subplots_adjust(left=0.12, bottom=0.05, right=0.75,
top=0.95)
fig.subplots_adjust(left=0., bottom=0., right=1., top=1.)
# add colorbar
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
sm = plt.cm.ScalarMappable(cmap=cmap,
norm=plt.Normalize(scale_pts[0], scale_pts[2]))
cax = inset_axes(ax, width="80%", height="5%", loc=8, borderpad=3.)
plt.setp(plt.getp(cax, 'xticklabels'), color='w')
sm.set_array(np.linspace(scale_pts[0], scale_pts[2], 256))
if colorbar:
cb = plt.colorbar(sm, cax=cax, orientation='horizontal')
cb_yticks = plt.getp(cax, 'yticklabels')
plt.setp(cb_yticks, color='w')
cax.tick_params(labelsize=16)
cb.patch.set_facecolor('0.5')
cax.set(xlim=(scale_pts[0], scale_pts[2]))
plt_show(True)
return fig
def link_brains(brains, time=True, camera=False, colorbar=True,
picking=False):
"""Plot multiple SourceEstimate objects with PyVista.
Parameters
----------
brains : list, tuple or np.ndarray
The collection of brains to plot.
time : bool
If True, link the time controllers. Defaults to True.
camera : bool
If True, link the camera controls. Defaults to False.
colorbar : bool
If True, link the colorbar controllers. Defaults to True.
picking : bool
If True, link the vertices picked with the mouse. Defaults to False.
"""
from .backends.renderer import _get_3d_backend
if _get_3d_backend() != 'pyvista':
raise NotImplementedError("Expected 3d backend is pyvista but"
" {} was given.".format(_get_3d_backend()))
from ._brain import Brain, _LinkViewer
if not isinstance(brains, Iterable):
brains = [brains]
if len(brains) == 0:
raise ValueError("The collection of brains is empty.")
for brain in brains:
if not isinstance(brain, Brain):
raise TypeError("Expected type is Brain but"
" {} was given.".format(type(brain)))
# enable time viewer if necessary
brain.setup_time_viewer()
subjects = [brain._subject_id for brain in brains]
if subjects.count(subjects[0]) != len(subjects):
raise RuntimeError("Cannot link brains from different subjects.")
# link brains properties
_LinkViewer(
brains=brains,
time=time,
camera=camera,
colorbar=colorbar,
picking=picking,
)
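# Usage sketch for link_brains (assumes two Brain instances created with the
# PyVista backend, e.g. returned by stc.plot(); variable names are illustrative):
#
#     link_brains([brain_a, brain_b], time=True, camera=True)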
def _check_volume(stc, src, surface, backend_name):
from ..source_estimate import (
_BaseSurfaceSourceEstimate, _BaseMixedSourceEstimate)
if isinstance(stc, _BaseSurfaceSourceEstimate):
return False
else:
if backend_name == 'mayavi':
raise RuntimeError(
'Must use the PyVista 3D backend to plot a mixed or volume '
'source estimate')
_validate_type(src, SourceSpaces, 'src',
'src when stc is a mixed or volume source estimate')
if isinstance(stc, _BaseMixedSourceEstimate):
# When showing subvolumes, surfaces that preserve geometry must
# be used (i.e., no inflated)
_check_option(
'surface', surface, ('white', 'pial'),
extra='when plotting a mixed source estimate')
return True
@verbose
def plot_source_estimates(stc, subject=None, surface='inflated', hemi='lh',
colormap='auto', time_label='auto',
smoothing_steps=10, transparent=True, alpha=1.0,
time_viewer='auto', subjects_dir=None, figure=None,
views='auto', colorbar=True, clim='auto',
cortex="classic", size=800, background="black",
foreground=None, initial_time=None,
time_unit='s', backend='auto', spacing='oct6',
title=None, show_traces='auto',
src=None, volume_options=1., view_layout='vertical',
add_data_kwargs=None, brain_kwargs=None,
verbose=None):
"""Plot SourceEstimate.
Parameters
----------
stc : SourceEstimate
The source estimates to plot.
subject : str | None
The subject name corresponding to FreeSurfer environment
variable SUBJECT. If None stc.subject will be used. If that
is None, the environment will be used.
surface : str
The type of surface (inflated, white etc.).
hemi : str
Hemisphere id (ie 'lh', 'rh', 'both', or 'split'). In the case
of 'both', both hemispheres are shown in the same window.
In the case of 'split' hemispheres are displayed side-by-side
in different viewing panes.
%(colormap)s
The default ('auto') uses 'hot' for one-sided data and
'mne' for two-sided data.
%(time_label)s
smoothing_steps : int
The amount of smoothing.
%(transparent)s
alpha : float
Alpha value to apply globally to the overlay. Has no effect with mpl
backend.
time_viewer : bool | str
Display time viewer GUI. Can also be 'auto', which will mean True
for the PyVista backend and False otherwise.
.. versionchanged:: 0.20.0
"auto" mode added.
%(subjects_dir)s
figure : instance of mayavi.core.api.Scene | instance of matplotlib.figure.Figure | list | int | None
If None, a new figure will be created. If multiple views or a
split view is requested, this must be a list of the appropriate
length. If int is provided it will be used to identify the Mayavi
        figure by its id or create a new figure with the given id. If an
instance of matplotlib figure, mpl backend is used for plotting.
%(views)s
When plotting a standard SourceEstimate (not volume, mixed, or vector)
and using the PyVista backend, ``views='flat'`` is also supported to
plot cortex as a flatmap.
.. versionchanged:: 0.21.0
Support for flatmaps.
colorbar : bool
If True, display colorbar on scene.
%(clim)s
cortex : str or tuple
Specifies how binarized curvature values are rendered.
Either the name of a preset PySurfer cortex colorscheme (one of
'classic', 'bone', 'low_contrast', or 'high_contrast'), or the name of
mayavi colormap, or a tuple with values (colormap, min, max, reverse)
to fully specify the curvature colors. Has no effect with mpl backend.
size : float or tuple of float
        The size of the window, in pixels. Can be one number to specify
a square window, or the (width, height) of a rectangular window.
Has no effect with mpl backend.
background : matplotlib color
Color of the background of the display window.
foreground : matplotlib color | None
Color of the foreground of the display window. Has no effect with mpl
backend. None will choose white or black based on the background color.
initial_time : float | None
The time to display on the plot initially. ``None`` to display the
first time sample (default).
time_unit : 's' | 'ms'
Whether time is represented in seconds ("s", default) or
milliseconds ("ms").
backend : 'auto' | 'mayavi' | 'pyvista' | 'matplotlib'
Which backend to use. If ``'auto'`` (default), tries to plot with
pyvista, but resorts to matplotlib if no 3d backend is available.
.. versionadded:: 0.15.0
spacing : str
The spacing to use for the source space. Can be ``'ico#'`` for a
recursively subdivided icosahedron, ``'oct#'`` for a recursively
subdivided octahedron, or ``'all'`` for all points. In general, you can
speed up the plotting by selecting a sparser source space. Has no
effect with mayavi backend. Defaults to 'oct6'.
.. versionadded:: 0.15.0
title : str | None
Title for the figure. If None, the subject name will be used.
.. versionadded:: 0.17.0
%(show_traces)s
%(src_volume_options)s
%(view_layout)s
%(add_data_kwargs)s
%(brain_kwargs)s
%(verbose)s
Returns
-------
figure : instance of mne.viz.Brain | matplotlib.figure.Figure
An instance of :class:`mne.viz.Brain` or matplotlib figure.
Notes
-----
Flatmaps are available by default for ``fsaverage`` but not for other
subjects reconstructed by FreeSurfer. We recommend using
:func:`mne.compute_source_morph` to morph source estimates to ``fsaverage``
for flatmap plotting. If you want to construct your own flatmap for a given
subject, these links might help:
- https://surfer.nmr.mgh.harvard.edu/fswiki/FreeSurferOccipitalFlattenedPatch
- https://openwetware.org/wiki/Beauchamp:FreeSurfer
""" # noqa: E501
from .backends.renderer import _get_3d_backend, use_3d_backend
from ..source_estimate import _BaseSourceEstimate, _check_stc_src
_check_stc_src(stc, src)
_validate_type(stc, _BaseSourceEstimate, 'stc', 'source estimate')
subjects_dir = get_subjects_dir(subjects_dir=subjects_dir,
raise_error=True)
subject = _check_subject(stc.subject, subject)
_check_option('backend', backend,
['auto', 'matplotlib', 'mayavi', 'pyvista', 'notebook'])
plot_mpl = backend == 'matplotlib'
if not plot_mpl:
if backend == 'auto':
try:
backend = _get_3d_backend()
except (ImportError, ModuleNotFoundError):
warn('No 3D backend found. Resorting to matplotlib 3d.')
plot_mpl = True
kwargs = dict(
subject=subject, surface=surface, hemi=hemi, colormap=colormap,
time_label=time_label, smoothing_steps=smoothing_steps,
subjects_dir=subjects_dir, views=views, clim=clim,
figure=figure, initial_time=initial_time, time_unit=time_unit,
background=background, time_viewer=time_viewer, colorbar=colorbar,
transparent=transparent)
if plot_mpl:
return _plot_mpl_stc(stc, spacing=spacing, **kwargs)
else:
with use_3d_backend(backend):
return _plot_stc(
stc, overlay_alpha=alpha, brain_alpha=alpha,
vector_alpha=alpha, cortex=cortex, foreground=foreground,
size=size, scale_factor=None, show_traces=show_traces,
src=src, volume_options=volume_options,
view_layout=view_layout, add_data_kwargs=add_data_kwargs,
brain_kwargs=brain_kwargs, **kwargs)
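# Illustrative usage sketch (not part of the module API). The subjects_dir and
# the STC file stem below are hypothetical placeholders; any surface
# SourceEstimate can be plotted the same way.
def _example_plot_source_estimates():
    import mne
    subjects_dir = '/path/to/subjects'  # hypothetical FreeSurfer SUBJECTS_DIR
    stc = mne.read_source_estimate('audvis-meg')  # hypothetical file stem
    # Open an interactive 3D view of both hemispheres with the time viewer
    brain = plot_source_estimates(
        stc, subject='sample', subjects_dir=subjects_dir, hemi='both',
        time_viewer='auto', smoothing_steps=10)
    return brain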
def _plot_stc(stc, subject, surface, hemi, colormap, time_label,
smoothing_steps, subjects_dir, views, clim, figure, initial_time,
time_unit, background, time_viewer, colorbar, transparent,
brain_alpha, overlay_alpha, vector_alpha, cortex, foreground,
size, scale_factor, show_traces, src, volume_options,
view_layout, add_data_kwargs, brain_kwargs):
from .backends.renderer import _get_3d_backend, get_brain_class
from ..source_estimate import _BaseVolSourceEstimate
vec = stc._data_ndim == 3
subjects_dir = get_subjects_dir(subjects_dir=subjects_dir,
raise_error=True)
subject = _check_subject(stc.subject, subject)
backend = _get_3d_backend()
del _get_3d_backend
using_mayavi = backend == "mayavi"
Brain = get_brain_class()
views = _check_views(surface, views, hemi, stc, backend)
_check_option('hemi', hemi, ['lh', 'rh', 'split', 'both'])
_check_option('view_layout', view_layout, ('vertical', 'horizontal'))
time_label, times = _handle_time(time_label, time_unit, stc.times)
show_traces, time_viewer = _check_st_tv(
show_traces, time_viewer, using_mayavi, times)
# convert control points to locations in colormap
use = stc.magnitude().data if vec else stc.data
mapdata = _process_clim(clim, colormap, transparent, use,
allow_pos_lims=not vec)
volume = _check_volume(stc, src, surface, backend)
# XXX we should only need to do this for PySurfer/Mayavi, the PyVista
# plotter should be smart enough to do this separation in the cmap-to-ctab
# conversion. But this will need to be another refactoring that will
# hopefully restore this line:
#
# if using_mayavi:
_separate_map(mapdata)
colormap = mapdata['colormap']
diverging = 'pos_lims' in mapdata['clim']
scale_pts = mapdata['clim']['pos_lims' if diverging else 'lims']
transparent = mapdata['transparent']
del mapdata
if hemi in ['both', 'split']:
hemis = ['lh', 'rh']
else:
hemis = [hemi]
if overlay_alpha is None:
overlay_alpha = brain_alpha
if overlay_alpha == 0:
smoothing_steps = 1 # Disable smoothing to save time.
title = subject if len(hemis) > 1 else '%s - %s' % (subject, hemis[0])
kwargs = {
"subject_id": subject, "hemi": hemi, "surf": surface,
"title": title, "cortex": cortex, "size": size,
"background": background, "foreground": foreground,
"figure": figure, "subjects_dir": subjects_dir,
"views": views, "alpha": brain_alpha,
}
if brain_kwargs is not None:
kwargs.update(brain_kwargs)
if backend in ['pyvista', 'notebook']:
kwargs["show"] = False
kwargs["view_layout"] = view_layout
else:
kwargs.update(_check_pysurfer_antialias(Brain))
if view_layout != 'vertical':
raise ValueError('view_layout must be "vertical" when using the '
'mayavi backend')
with warnings.catch_warnings(record=True): # traits warnings
brain = Brain(**kwargs)
del kwargs
if using_mayavi:
# Here we patch to avoid segfault:
# https://github.com/mne-tools/mne-python/pull/8828
brain.close = lambda *args, **kwargs: brain._close(False)
if scale_factor is None:
# Configure the glyphs scale directly
width = np.mean([np.ptp(brain.geo[hemi].coords[:, 1])
for hemi in hemis if hemi in brain.geo])
scale_factor = 0.025 * width / scale_pts[-1]
if transparent is None:
transparent = True
center = 0. if diverging else None
sd_kwargs = dict(transparent=transparent, center=center, verbose=False)
kwargs = {
"array": stc,
"colormap": colormap,
"smoothing_steps": smoothing_steps,
"time": times, "time_label": time_label,
"alpha": overlay_alpha,
"colorbar": colorbar,
"vector_alpha": vector_alpha,
"scale_factor": scale_factor,
"verbose": False,
"initial_time": initial_time,
"transparent": transparent,
"center": center,
"fmin": scale_pts[0],
"fmid": scale_pts[1],
"fmax": scale_pts[2],
"clim": clim,
"src": src,
"volume_options": volume_options,
"verbose": False,
}
if add_data_kwargs is not None:
kwargs.update(add_data_kwargs)
for hemi in hemis:
if isinstance(stc, _BaseVolSourceEstimate): # no surf data
break
vertices = stc.vertices[0 if hemi == 'lh' else 1]
if len(vertices) == 0: # no surf data for the given hemi
continue # no data
use_kwargs = kwargs.copy()
use_kwargs.update(hemi=hemi)
if using_mayavi:
del use_kwargs['clim'], use_kwargs['src']
del use_kwargs['volume_options']
use_kwargs.update(
min=use_kwargs.pop('fmin'), mid=use_kwargs.pop('fmid'),
max=use_kwargs.pop('fmax'), array=getattr(stc, hemi + '_data'),
vertices=vertices)
with warnings.catch_warnings(record=True): # traits warnings
brain.add_data(**use_kwargs)
if using_mayavi:
brain.scale_data_colormap(fmin=scale_pts[0], fmid=scale_pts[1],
fmax=scale_pts[2], **sd_kwargs)
if volume:
use_kwargs = kwargs.copy()
use_kwargs.update(hemi='vol')
brain.add_data(**use_kwargs)
del kwargs
need_peeling = (brain_alpha < 1.0 and
sys.platform != 'darwin' and
vec)
if using_mayavi:
for hemi in hemis:
for b in brain._brain_list:
for layer in b['brain'].data.values():
glyphs = layer['glyphs']
if glyphs is None:
continue
glyphs.glyph.glyph.scale_factor = scale_factor
glyphs.glyph.glyph.clamping = False
glyphs.glyph.glyph.range = (0., 1.)
# depth peeling patch
if need_peeling:
for ff in brain._figures:
for f in ff:
if f.scene is not None and sys.platform != 'darwin':
f.scene.renderer.use_depth_peeling = True
elif need_peeling:
brain.enable_depth_peeling()
if time_viewer:
if using_mayavi:
from surfer import TimeViewer
TimeViewer(brain)
else: # PyVista
brain.setup_time_viewer(time_viewer=time_viewer,
show_traces=show_traces)
else:
if not using_mayavi:
brain.show()
return brain
def _check_st_tv(show_traces, time_viewer, using_mayavi, times):
# time_viewer and show_traces
_check_option('time_viewer', time_viewer, (True, False, 'auto'))
_validate_type(show_traces, (str, bool, 'numeric'), 'show_traces')
if isinstance(show_traces, str):
_check_option('show_traces', show_traces,
('auto', 'separate', 'vertex', 'label'),
extra='when a string')
if time_viewer == 'auto':
time_viewer = not using_mayavi
if show_traces == 'auto':
show_traces = (
not using_mayavi and
time_viewer and
times is not None and
len(times) > 1
)
if show_traces and not time_viewer:
raise ValueError('show_traces cannot be used when time_viewer=False')
if using_mayavi and show_traces:
raise NotImplementedError("show_traces=True is not available "
"for the mayavi 3d backend.")
return show_traces, time_viewer
def _glass_brain_crosshairs(params, x, y, z):
for ax, a, b in ((params['ax_y'], x, z),
(params['ax_x'], y, z),
(params['ax_z'], x, y)):
ax.axvline(a, color='0.75')
ax.axhline(b, color='0.75')
def _cut_coords_to_ijk(cut_coords, img):
ijk = apply_trans(np.linalg.inv(img.affine), cut_coords)
ijk = np.clip(np.round(ijk).astype(int), 0, np.array(img.shape[:3]) - 1)
return ijk
def _ijk_to_cut_coords(ijk, img):
return apply_trans(img.affine, ijk)
def _load_subject_mri(mri, stc, subject, subjects_dir, name):
import nibabel as nib
from nibabel.spatialimages import SpatialImage
_validate_type(mri, ('path-like', SpatialImage), name)
if isinstance(mri, str):
subject = _check_subject(stc.subject, subject)
mri = nib.load(_check_mri(mri, subject, subjects_dir))
return mri
@verbose
def plot_volume_source_estimates(stc, src, subject=None, subjects_dir=None,
mode='stat_map', bg_img='T1.mgz',
colorbar=True, colormap='auto', clim='auto',
transparent=None, show=True,
initial_time=None, initial_pos=None,
verbose=None):
"""Plot Nutmeg style volumetric source estimates using nilearn.
Parameters
----------
    stc : VolSourceEstimate
        The volume source estimate to plot.
src : instance of SourceSpaces | instance of SourceMorph
The source space. Can also be a SourceMorph to morph the STC to
a new subject (see Examples).
.. versionchanged:: 0.18
Support for :class:`~nibabel.spatialimages.SpatialImage`.
subject : str | None
The subject name corresponding to FreeSurfer environment
variable SUBJECT. If None stc.subject will be used. If that
is None, the environment will be used.
%(subjects_dir)s
mode : str
The plotting mode to use. Either 'stat_map' (default) or 'glass_brain'.
For "glass_brain", activation absolute values are displayed
after being transformed to a standard MNI brain.
bg_img : instance of SpatialImage | str
The background image used in the nilearn plotting function.
Can also be a string to use the ``bg_img`` file in the subject's
MRI directory (default is ``'T1.mgz'``).
Not used in "glass brain" plotting.
colorbar : bool, optional
If True, display a colorbar on the right of the plots.
%(colormap)s
%(clim)s
%(transparent)s
show : bool
Show figures if True. Defaults to True.
initial_time : float | None
The initial time to plot. Can be None (default) to use the time point
with the maximal absolute value activation across all voxels
or the ``initial_pos`` voxel (if ``initial_pos is None`` or not,
respectively).
.. versionadded:: 0.19
initial_pos : ndarray, shape (3,) | None
The initial position to use (in m). Can be None (default) to use the
voxel with the maximum absolute value activation across all time points
or at ``initial_time`` (if ``initial_time is None`` or not,
respectively).
.. versionadded:: 0.19
%(verbose)s
Returns
-------
fig : instance of Figure
The figure.
Notes
-----
Click on any of the anatomical slices to explore the time series.
Clicking on any time point will bring up the corresponding anatomical map.
The left and right arrow keys can be used to navigate in time.
To move in time by larger steps, use shift+left and shift+right.
In ``'glass_brain'`` mode, values are transformed to the standard MNI
brain using the FreeSurfer Talairach transformation
``$SUBJECTS_DIR/$SUBJECT/mri/transforms/talairach.xfm``.
.. versionadded:: 0.17
.. versionchanged:: 0.19
MRI volumes are automatically transformed to MNI space in
``'glass_brain'`` mode.
Examples
--------
Passing a :class:`mne.SourceMorph` as the ``src``
parameter can be useful for plotting in a different subject's space
(here, a ``'sample'`` STC in ``'fsaverage'``'s space)::
>>> morph = mne.compute_source_morph(src_sample, subject_to='fsaverage') # doctest: +SKIP
>>> fig = stc_vol_sample.plot(morph) # doctest: +SKIP
""" # noqa: E501
from matplotlib import pyplot as plt, colors
from matplotlib.cbook import mplDeprecation
import nibabel as nib
from ..source_estimate import VolSourceEstimate
from ..morph import SourceMorph
if not check_version('nilearn', '0.4'):
raise RuntimeError('This function requires nilearn >= 0.4')
from nilearn.plotting import plot_stat_map, plot_glass_brain
from nilearn.image import index_img
_check_option('mode', mode, ('stat_map', 'glass_brain'))
plot_func = dict(stat_map=plot_stat_map,
glass_brain=plot_glass_brain)[mode]
_validate_type(stc, VolSourceEstimate, 'stc')
if isinstance(src, SourceMorph):
img = src.apply(stc, 'nifti1', mri_resolution=False, mri_space=False)
stc = src.apply(stc, mri_resolution=False, mri_space=False)
kind, src_subject = 'morph.subject_to', src.subject_to
else:
src = _ensure_src(src, kind='volume', extra=' or SourceMorph')
img = stc.as_volume(src, mri_resolution=False)
kind, src_subject = 'src subject', src._subject
del src
_print_coord_trans(Transform('mri_voxel', 'ras', img.affine),
prefix='Image affine ', units='mm', level='debug')
subject = _check_subject(src_subject, subject, first_kind=kind)
stc_ijk = np.array(
np.unravel_index(stc.vertices[0], img.shape[:3], order='F')).T
assert stc_ijk.shape == (len(stc.vertices[0]), 3)
del kind
# XXX this assumes zooms are uniform, should probably mult by zooms...
dist_to_verts = _DistanceQuery(stc_ijk, allow_kdtree=True)
def _cut_coords_to_idx(cut_coords, img):
"""Convert voxel coordinates to index in stc.data."""
ijk = _cut_coords_to_ijk(cut_coords, img)
del cut_coords
logger.debug(' Affine remapped cut coords to [%d, %d, %d] idx'
% tuple(ijk))
dist, loc_idx = dist_to_verts.query(ijk[np.newaxis])
dist, loc_idx = dist[0], loc_idx[0]
logger.debug(' Using vertex %d at a distance of %d voxels'
% (stc.vertices[0][loc_idx], dist))
return loc_idx
    ax_name = dict(x='X (sagittal)', y='Y (coronal)', z='Z (axial)')
def _click_to_cut_coords(event, params):
"""Get voxel coordinates from mouse click."""
if event.inaxes is params['ax_x']:
ax = 'x'
x = params['ax_z'].lines[0].get_xdata()[0]
y, z = event.xdata, event.ydata
elif event.inaxes is params['ax_y']:
ax = 'y'
y = params['ax_x'].lines[0].get_xdata()[0]
x, z = event.xdata, event.ydata
elif event.inaxes is params['ax_z']:
ax = 'z'
x, y = event.xdata, event.ydata
z = params['ax_x'].lines[1].get_ydata()[0]
else:
logger.debug(' Click outside axes')
return None
cut_coords = np.array((x, y, z))
logger.debug('')
if params['mode'] == 'glass_brain': # find idx for MIP
# Figure out what XYZ in world coordinates is in our voxel data
codes = ''.join(nib.aff2axcodes(params['img_idx'].affine))
assert len(codes) == 3
# We don't care about directionality, just which is which dim
codes = codes.replace('L', 'R').replace('P', 'A').replace('I', 'S')
idx = codes.index(dict(x='R', y='A', z='S')[ax])
img_data = np.abs(_get_img_fdata(params['img_idx']))
ijk = _cut_coords_to_ijk(cut_coords, params['img_idx'])
if idx == 0:
ijk[0] = np.argmax(img_data[:, ijk[1], ijk[2]])
logger.debug(' MIP: i = %d idx' % (ijk[0],))
elif idx == 1:
ijk[1] = np.argmax(img_data[ijk[0], :, ijk[2]])
logger.debug(' MIP: j = %d idx' % (ijk[1],))
else:
ijk[2] = np.argmax(img_data[ijk[0], ijk[1], :])
logger.debug(' MIP: k = %d idx' % (ijk[2],))
cut_coords = _ijk_to_cut_coords(ijk, params['img_idx'])
logger.debug(' Cut coords for %s: (%0.1f, %0.1f, %0.1f) mm'
% ((ax_name[ax],) + tuple(cut_coords)))
return cut_coords
def _press(event, params):
"""Manage keypress on the plot."""
pos = params['lx'].get_xdata()
idx = params['stc'].time_as_index(pos)[0]
if event.key == 'left':
idx = max(0, idx - 2)
elif event.key == 'shift+left':
idx = max(0, idx - 10)
elif event.key == 'right':
idx = min(params['stc'].shape[1] - 1, idx + 2)
elif event.key == 'shift+right':
idx = min(params['stc'].shape[1] - 1, idx + 10)
_update_timeslice(idx, params)
params['fig'].canvas.draw()
def _update_timeslice(idx, params):
params['lx'].set_xdata(idx / params['stc'].sfreq +
params['stc'].tmin)
ax_x, ax_y, ax_z = params['ax_x'], params['ax_y'], params['ax_z']
plot_map_callback = params['plot_func']
# Crosshairs are the first thing plotted in stat_map, and the last
# in glass_brain
idxs = [0, 0, 1] if mode == 'stat_map' else [-2, -2, -1]
cut_coords = (
ax_y.lines[idxs[0]].get_xdata()[0],
ax_x.lines[idxs[1]].get_xdata()[0],
ax_x.lines[idxs[2]].get_ydata()[0])
ax_x.clear()
ax_y.clear()
ax_z.clear()
params.update({'img_idx': index_img(img, idx)})
params.update({'title': 'Activation (t=%.3f s.)'
% params['stc'].times[idx]})
plot_map_callback(
params['img_idx'], title='', cut_coords=cut_coords)
@verbose_dec
def _onclick(event, params, verbose=None):
"""Manage clicks on the plot."""
ax_x, ax_y, ax_z = params['ax_x'], params['ax_y'], params['ax_z']
plot_map_callback = params['plot_func']
if event.inaxes is params['ax_time']:
idx = params['stc'].time_as_index(
event.xdata, use_rounding=True)[0]
_update_timeslice(idx, params)
cut_coords = _click_to_cut_coords(event, params)
if cut_coords is None:
return # not in any axes
ax_x.clear()
ax_y.clear()
ax_z.clear()
plot_map_callback(params['img_idx'], title='',
cut_coords=cut_coords)
loc_idx = _cut_coords_to_idx(cut_coords, params['img_idx'])
ydata = stc.data[loc_idx]
if loc_idx is not None:
ax_time.lines[0].set_ydata(ydata)
else:
ax_time.lines[0].set_ydata(0.)
params['fig'].canvas.draw()
if mode == 'glass_brain':
subject = _check_subject(stc.subject, subject)
ras_mni_t = read_ras_mni_t(subject, subjects_dir)
if not np.allclose(ras_mni_t['trans'], np.eye(4)):
_print_coord_trans(
ras_mni_t, prefix='Transforming subject ', units='mm')
logger.info('')
# To get from voxel coords to world coords (i.e., define affine)
# we would apply img.affine, then also apply ras_mni_t, which
# transforms from the subject's RAS to MNI RAS. So we left-multiply
# these.
img = nib.Nifti1Image(
img.dataobj, np.dot(ras_mni_t['trans'], img.affine))
bg_img = None # not used
else: # stat_map
if bg_img is None:
bg_img = 'T1.mgz'
bg_img = _load_subject_mri(
bg_img, stc, subject, subjects_dir, 'bg_img')
if initial_time is None:
time_sl = slice(0, None)
else:
initial_time = float(initial_time)
logger.info('Fixing initial time: %s sec' % (initial_time,))
initial_time = np.argmin(np.abs(stc.times - initial_time))
time_sl = slice(initial_time, initial_time + 1)
if initial_pos is None: # find max pos and (maybe) time
loc_idx, time_idx = np.unravel_index(
np.abs(stc.data[:, time_sl]).argmax(), stc.data[:, time_sl].shape)
time_idx += time_sl.start
else: # position specified
initial_pos = np.array(initial_pos, float)
if initial_pos.shape != (3,):
raise ValueError('initial_pos must be float ndarray with shape '
'(3,), got shape %s' % (initial_pos.shape,))
initial_pos *= 1000
logger.info('Fixing initial position: %s mm'
% (initial_pos.tolist(),))
loc_idx = _cut_coords_to_idx(initial_pos, img)
if initial_time is not None: # time also specified
time_idx = time_sl.start
else: # find the max
time_idx = np.argmax(np.abs(stc.data[loc_idx]))
img_idx = index_img(img, time_idx)
assert img_idx.shape == img.shape[:3]
del initial_time, initial_pos
ijk = stc_ijk[loc_idx]
cut_coords = _ijk_to_cut_coords(ijk, img_idx)
np.testing.assert_allclose(_cut_coords_to_ijk(cut_coords, img_idx), ijk)
logger.info('Showing: t = %0.3f s, (%0.1f, %0.1f, %0.1f) mm, '
'[%d, %d, %d] vox, %d vertex'
% ((stc.times[time_idx],) + tuple(cut_coords) + tuple(ijk) +
(stc.vertices[0][loc_idx],)))
del ijk
# Plot initial figure
fig, (axes, ax_time) = plt.subplots(2)
axes.set(xticks=[], yticks=[])
marker = 'o' if len(stc.times) == 1 else None
ydata = stc.data[loc_idx]
ax_time.plot(stc.times, ydata, color='k', marker=marker)
if len(stc.times) > 1:
ax_time.set(xlim=stc.times[[0, -1]])
ax_time.set(xlabel='Time (s)', ylabel='Activation')
lx = ax_time.axvline(stc.times[time_idx], color='g')
fig.tight_layout()
allow_pos_lims = (mode != 'glass_brain')
mapdata = _process_clim(clim, colormap, transparent, stc.data,
allow_pos_lims)
_separate_map(mapdata)
diverging = 'pos_lims' in mapdata['clim']
ticks = _get_map_ticks(mapdata)
colormap, scale_pts = _linearize_map(mapdata)
del mapdata
ylim = [min((scale_pts[0], ydata.min())),
max((scale_pts[-1], ydata.max()))]
ylim = np.array(ylim) + np.array([-1, 1]) * 0.05 * np.diff(ylim)[0]
dup_neg = False
if stc.data.min() < 0:
ax_time.axhline(0., color='0.5', ls='-', lw=0.5, zorder=2)
dup_neg = not diverging # glass brain with signed data
yticks = list(ticks)
if dup_neg:
yticks += [0] + list(-np.array(ticks))
yticks = np.unique(yticks)
ax_time.set(yticks=yticks)
ax_time.set(ylim=ylim)
del yticks
if not diverging: # set eq above iff one-sided
# there is a bug in nilearn where this messes w/transparency
# Need to double the colormap
if (scale_pts < 0).any():
# XXX We should fix this, but it's hard to get nilearn to
# use arbitrary bounds :(
# Should get them to support non-mirrored colorbars, or
# at least a proper `vmin` for one-sided things.
# Hopefully this is a sufficiently rare use case!
raise ValueError('Negative colormap limits for sequential '
'control points clim["lims"] not supported '
'currently, consider shifting or flipping the '
'sign of your data for visualization purposes')
# due to nilearn plotting weirdness, extend this to go
# -scale_pts[2]->scale_pts[2] instead of scale_pts[0]->scale_pts[2]
colormap = plt.get_cmap(colormap)
colormap = colormap(
np.interp(np.linspace(-1, 1, 256),
scale_pts / scale_pts[2],
[0, 0.5, 1]))
colormap = colors.ListedColormap(colormap)
vmax = scale_pts[-1]
# black_bg = True is needed because of some matplotlib
# peculiarity. See: https://stackoverflow.com/a/34730204
# Otherwise, event.inaxes does not work for ax_x and ax_z
plot_kwargs = dict(
threshold=None, axes=axes,
resampling_interpolation='nearest', vmax=vmax, figure=fig,
colorbar=colorbar, bg_img=bg_img, cmap=colormap, black_bg=True,
symmetric_cbar=True)
def plot_and_correct(*args, **kwargs):
axes.clear()
if params.get('fig_anat') is not None and plot_kwargs['colorbar']:
params['fig_anat']._cbar.ax.clear()
with warnings.catch_warnings(record=True): # nilearn bug; ax recreated
warnings.simplefilter('ignore', mplDeprecation)
params['fig_anat'] = partial(
plot_func, **plot_kwargs)(*args, **kwargs)
params['fig_anat']._cbar.outline.set_visible(False)
for key in 'xyz':
params.update({'ax_' + key: params['fig_anat'].axes[key].ax})
# Fix nilearn bug w/cbar background being white
if plot_kwargs['colorbar']:
params['fig_anat']._cbar.patch.set_facecolor('0.5')
# adjust one-sided colorbars
if not diverging:
_crop_colorbar(params['fig_anat']._cbar, *scale_pts[[0, -1]])
params['fig_anat']._cbar.set_ticks(params['cbar_ticks'])
if mode == 'glass_brain':
_glass_brain_crosshairs(params, *kwargs['cut_coords'])
params = dict(stc=stc, ax_time=ax_time, plot_func=plot_and_correct,
img_idx=img_idx, fig=fig, lx=lx, mode=mode, cbar_ticks=ticks)
plot_and_correct(stat_map_img=params['img_idx'], title='',
cut_coords=cut_coords)
if show:
plt.show()
fig.canvas.mpl_connect('button_press_event',
partial(_onclick, params=params, verbose=verbose))
fig.canvas.mpl_connect('key_press_event',
partial(_press, params=params))
return fig
def _check_pysurfer_antialias(Brain):
antialias = _get_3d_option('antialias')
kwargs = dict()
if not antialias:
if 'antialias' not in _get_args(Brain):
raise ValueError('To turn off antialiasing, PySurfer needs to be '
'updated to version 0.11+')
kwargs['antialias'] = antialias
return kwargs
def _check_views(surf, views, hemi, stc=None, backend=None):
from ._brain.view import views_dicts
from ..source_estimate import SourceEstimate
_validate_type(views, (list, tuple, str), 'views')
views = [views] if isinstance(views, str) else list(views)
if surf == 'flat':
_check_option('views', views, (['auto'], ['flat']))
views = ['flat']
elif len(views) == 1 and views[0] == 'auto':
views = ['lateral']
if views == ['flat']:
if stc is not None:
_validate_type(stc, SourceEstimate, 'stc',
'SourceEstimate when a flatmap is used')
if backend is not None:
if backend not in ('pyvista', 'notebook'):
raise RuntimeError('The PyVista 3D backend must be used to '
'plot a flatmap')
if (views == ['flat']) ^ (surf == 'flat'): # exactly only one of the two
raise ValueError('surface="flat" must be used with views="flat", got '
f'surface={repr(surf)} and views={repr(views)}')
_check_option('hemi', hemi, ('split', 'both', 'lh', 'rh', 'vol'))
use_hemi = 'lh' if hemi == 'split' else hemi
for vi, v in enumerate(views):
_check_option(f'views[{vi}]', v, sorted(views_dicts[use_hemi]))
return views
@verbose
def plot_vector_source_estimates(stc, subject=None, hemi='lh', colormap='hot',
time_label='auto', smoothing_steps=10,
transparent=None, brain_alpha=0.4,
overlay_alpha=None, vector_alpha=1.0,
scale_factor=None, time_viewer='auto',
subjects_dir=None, figure=None,
views='lateral',
colorbar=True, clim='auto', cortex='classic',
size=800, background='black',
foreground=None, initial_time=None,
time_unit='s', show_traces='auto',
src=None, volume_options=1.,
view_layout='vertical',
add_data_kwargs=None, brain_kwargs=None,
verbose=None):
"""Plot VectorSourceEstimate with PySurfer.
A "glass brain" is drawn and all dipoles defined in the source estimate
are shown using arrows, depicting the direction and magnitude of the
current moment at the dipole. Additionally, an overlay is plotted on top of
the cortex with the magnitude of the current.
Parameters
----------
stc : VectorSourceEstimate | MixedVectorSourceEstimate
The vector source estimate to plot.
subject : str | None
The subject name corresponding to FreeSurfer environment
variable SUBJECT. If None stc.subject will be used. If that
is None, the environment will be used.
hemi : str, 'lh' | 'rh' | 'split' | 'both'
The hemisphere to display.
%(colormap)s
This should be a sequential colormap.
%(time_label)s
smoothing_steps : int
The amount of smoothing.
%(transparent)s
brain_alpha : float
Alpha value to apply globally to the surface meshes. Defaults to 0.4.
overlay_alpha : float
Alpha value to apply globally to the overlay. Defaults to
``brain_alpha``.
vector_alpha : float
Alpha value to apply globally to the vector glyphs. Defaults to 1.
scale_factor : float | None
Scaling factor for the vector glyphs. By default, an attempt is made to
automatically determine a sane value.
time_viewer : bool | str
Display time viewer GUI. Can be "auto", which is True for the PyVista
backend and False otherwise.
.. versionchanged:: 0.20
Added "auto" option and default.
subjects_dir : str
The path to the freesurfer subjects reconstructions.
It corresponds to Freesurfer environment variable SUBJECTS_DIR.
figure : instance of mayavi.core.api.Scene | list | int | None
If None, a new figure will be created. If multiple views or a
split view is requested, this must be a list of the appropriate
length. If int is provided it will be used to identify the Mayavi
        figure by its id or create a new figure with the given id.
%(views)s
colorbar : bool
If True, display colorbar on scene.
%(clim_onesided)s
cortex : str or tuple
Specifies how binarized curvature values are rendered.
        Either the name of a preset PySurfer cortex colorscheme (one of
        'classic', 'bone', 'low_contrast', or 'high_contrast'), or the
        name of a mayavi colormap, or a tuple with values (colormap, min,
max, reverse) to fully specify the curvature colors.
size : float or tuple of float
        The size of the window, in pixels. Can be one number to specify
a square window, or the (width, height) of a rectangular window.
background : matplotlib color
Color of the background of the display window.
foreground : matplotlib color | None
Color of the foreground of the display window.
None will choose black or white based on the background color.
initial_time : float | None
The time to display on the plot initially. ``None`` to display the
first time sample (default).
time_unit : 's' | 'ms'
Whether time is represented in seconds ("s", default) or
milliseconds ("ms").
%(show_traces)s
%(src_volume_options)s
%(view_layout)s
%(add_data_kwargs)s
%(brain_kwargs)s
%(verbose)s
Returns
-------
brain : mne.viz.Brain
        An instance of :class:`mne.viz.Brain`.
Notes
-----
.. versionadded:: 0.15
If the current magnitude overlay is not desired, set ``overlay_alpha=0``
and ``smoothing_steps=1``.
"""
from ..source_estimate import _BaseVectorSourceEstimate
_validate_type(
stc, _BaseVectorSourceEstimate, 'stc', 'vector source estimate')
return _plot_stc(
stc, subject=subject, surface='white', hemi=hemi, colormap=colormap,
time_label=time_label, smoothing_steps=smoothing_steps,
subjects_dir=subjects_dir, views=views, clim=clim, figure=figure,
initial_time=initial_time, time_unit=time_unit, background=background,
time_viewer=time_viewer, colorbar=colorbar, transparent=transparent,
brain_alpha=brain_alpha, overlay_alpha=overlay_alpha,
vector_alpha=vector_alpha, cortex=cortex, foreground=foreground,
size=size, scale_factor=scale_factor, show_traces=show_traces,
src=src, volume_options=volume_options, view_layout=view_layout,
add_data_kwargs=add_data_kwargs, brain_kwargs=brain_kwargs)
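# Illustrative usage sketch (not part of the module API). It assumes an
# evoked response and an inverse operator are already available; the variable
# names and the regularization value are hypothetical.
def _example_plot_vector_source_estimates(evoked, inverse_operator,
                                          subjects_dir):
    from mne.minimum_norm import apply_inverse
    # pick_ori='vector' returns a VectorSourceEstimate with xyz components
    stc_vec = apply_inverse(evoked, inverse_operator, lambda2=1. / 9.,
                            pick_ori='vector')
    # Hide the magnitude overlay as suggested in the Notes above
    brain = plot_vector_source_estimates(
        stc_vec, subject='sample', hemi='lh', subjects_dir=subjects_dir,
        brain_alpha=0.4, overlay_alpha=0., smoothing_steps=1)
    return brain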
@verbose
def plot_sparse_source_estimates(src, stcs, colors=None, linewidth=2,
fontsize=18, bgcolor=(.05, 0, .1),
opacity=0.2, brain_color=(0.7,) * 3,
show=True, high_resolution=False,
fig_name=None, fig_number=None, labels=None,
modes=('cone', 'sphere'),
scale_factors=(1, 0.6),
verbose=None, **kwargs):
"""Plot source estimates obtained with sparse solver.
Active dipoles are represented in a "Glass" brain.
    If the same source is active in multiple source estimates, it is
    displayed with a sphere, otherwise with a cone in 3D.
Parameters
----------
src : dict
The source space.
stcs : instance of SourceEstimate or list of instances of SourceEstimate
The source estimates (up to 3).
colors : list
List of colors.
linewidth : int
Line width in 2D plot.
fontsize : int
Font size.
bgcolor : tuple of length 3
Background color in 3D.
opacity : float in [0, 1]
Opacity of brain mesh.
brain_color : tuple of length 3
Brain color.
show : bool
Show figures if True.
high_resolution : bool
If True, plot on the original (non-downsampled) cortical mesh.
fig_name : str
Mayavi figure name.
fig_number : int
Matplotlib figure number.
labels : ndarray or list of ndarray
Labels to show sources in clusters. Sources with the same
label and the waveforms within each cluster are presented in
the same color. labels should be a list of ndarrays when
        stcs is a list, i.e. one label for each stc.
modes : list
Should be a list, with each entry being ``'cone'`` or ``'sphere'``
to specify how the dipoles should be shown.
The pivot for the glyphs in ``'cone'`` mode is always the tail
whereas the pivot in ``'sphere'`` mode is the center.
scale_factors : list
List of floating point scale factors for the markers.
%(verbose)s
**kwargs : kwargs
Keyword arguments to pass to mlab.triangular_mesh.
Returns
-------
surface : instance of mayavi.mlab.pipeline.surface
The triangular mesh surface.
"""
import matplotlib.pyplot as plt
from matplotlib.colors import ColorConverter
# Update the backend
from .backends.renderer import _get_renderer
known_modes = ['cone', 'sphere']
if not isinstance(modes, (list, tuple)) or \
not all(mode in known_modes for mode in modes):
raise ValueError('mode must be a list containing only '
'"cone" or "sphere"')
if not isinstance(stcs, list):
stcs = [stcs]
if labels is not None and not isinstance(labels, list):
labels = [labels]
if colors is None:
colors = _get_color_list()
linestyles = ['-', '--', ':']
# Show 3D
lh_points = src[0]['rr']
rh_points = src[1]['rr']
points = np.r_[lh_points, rh_points]
lh_normals = src[0]['nn']
rh_normals = src[1]['nn']
normals = np.r_[lh_normals, rh_normals]
if high_resolution:
use_lh_faces = src[0]['tris']
use_rh_faces = src[1]['tris']
else:
use_lh_faces = src[0]['use_tris']
use_rh_faces = src[1]['use_tris']
use_faces = np.r_[use_lh_faces, lh_points.shape[0] + use_rh_faces]
points *= 170
vertnos = [np.r_[stc.lh_vertno, lh_points.shape[0] + stc.rh_vertno]
for stc in stcs]
unique_vertnos = np.unique(np.concatenate(vertnos).ravel())
color_converter = ColorConverter()
renderer = _get_renderer(bgcolor=bgcolor, size=(600, 600), name=fig_name)
surface = renderer.mesh(x=points[:, 0], y=points[:, 1],
z=points[:, 2], triangles=use_faces,
color=brain_color, opacity=opacity,
backface_culling=True, shading=True,
normals=normals, **kwargs)
# Show time courses
fig = plt.figure(fig_number)
fig.clf()
ax = fig.add_subplot(111)
colors = cycle(colors)
logger.info("Total number of active sources: %d" % len(unique_vertnos))
if labels is not None:
colors = [next(colors) for _ in
range(np.unique(np.concatenate(labels).ravel()).size)]
for idx, v in enumerate(unique_vertnos):
# get indices of stcs it belongs to
ind = [k for k, vertno in enumerate(vertnos) if v in vertno]
is_common = len(ind) > 1
if labels is None:
c = next(colors)
else:
# if vertex is in different stcs than take label from first one
c = colors[labels[ind[0]][vertnos[ind[0]] == v]]
mode = modes[1] if is_common else modes[0]
scale_factor = scale_factors[1] if is_common else scale_factors[0]
if (isinstance(scale_factor, (np.ndarray, list, tuple)) and
len(unique_vertnos) == len(scale_factor)):
scale_factor = scale_factor[idx]
x, y, z = points[v]
nx, ny, nz = normals[v]
renderer.quiver3d(x=x, y=y, z=z, u=nx, v=ny, w=nz,
color=color_converter.to_rgb(c),
mode=mode, scale=scale_factor)
for k in ind:
vertno = vertnos[k]
mask = (vertno == v)
assert np.sum(mask) == 1
linestyle = linestyles[k]
ax.plot(1e3 * stcs[k].times, 1e9 * stcs[k].data[mask].ravel(),
c=c, linewidth=linewidth, linestyle=linestyle)
ax.set_xlabel('Time (ms)', fontsize=18)
ax.set_ylabel('Source amplitude (nAm)', fontsize=18)
if fig_name is not None:
ax.set_title(fig_name)
plt_show(show)
renderer.show()
return surface
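# Illustrative usage sketch (not part of the module API). It assumes a
# forward solution and one or more sparse source estimates (e.g. from a
# mixed-norm solver) are already available; the variable names are
# hypothetical.
def _example_plot_sparse_source_estimates(forward, stc_sparse):
    # forward['src'] holds the surface source space used for the fit
    surface = plot_sparse_source_estimates(
        forward['src'], stc_sparse, bgcolor=(1, 1, 1), opacity=0.1,
        fig_name='Sparse solution')
    return surface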
@verbose
def plot_dipole_locations(dipoles, trans=None, subject=None, subjects_dir=None,
mode='orthoview', coord_frame='mri', idx='gof',
show_all=True, ax=None, block=False, show=True,
scale=5e-3, color=None, highlight_color='r',
fig=None, verbose=None, title=None):
"""Plot dipole locations.
If mode is set to 'arrow' or 'sphere', only the location of the first
    time point of each dipole is shown; otherwise, use the show_all parameter.
The option mode='orthoview' was added in version 0.14.
Parameters
----------
dipoles : list of instances of Dipole | Dipole
The dipoles to plot.
trans : dict | None
The mri to head trans.
Can be None with mode set to '3d'.
subject : str | None
The subject name corresponding to FreeSurfer environment
variable SUBJECT.
Can be None with mode set to '3d'.
subjects_dir : None | str
The path to the freesurfer subjects reconstructions.
It corresponds to Freesurfer environment variable SUBJECTS_DIR.
The default is None.
mode : str
Can be ``'arrow'``, ``'sphere'`` or ``'orthoview'``.
.. versionadded:: 0.19.0
coord_frame : str
Coordinate frame to use, 'head' or 'mri'. Defaults to 'mri'.
.. versionadded:: 0.14.0
idx : int | 'gof' | 'amplitude'
Index of the initially plotted dipole. Can also be 'gof' to plot the
dipole with highest goodness of fit value or 'amplitude' to plot the
dipole with the highest amplitude. The dipoles can also be browsed
through using up/down arrow keys or mouse scroll. Defaults to 'gof'.
Only used if mode equals 'orthoview'.
.. versionadded:: 0.14.0
show_all : bool
Whether to always plot all the dipoles. If ``True`` (default), the
active dipole is plotted as a red dot and its location determines the
shown MRI slices. The non-active dipoles are plotted as small blue
dots. If ``False``, only the active dipole is plotted.
Only used if ``mode='orthoview'``.
.. versionadded:: 0.14.0
ax : instance of matplotlib Axes3D | None
Axes to plot into. If None (default), axes will be created.
Only used if mode equals 'orthoview'.
.. versionadded:: 0.14.0
block : bool
Whether to halt program execution until the figure is closed. Defaults
to False.
Only used if mode equals 'orthoview'.
.. versionadded:: 0.14.0
show : bool
Show figure if True. Defaults to True.
Only used if mode equals 'orthoview'.
scale : float
The scale of the dipoles if ``mode`` is 'arrow' or 'sphere'.
color : tuple
The color of the dipoles.
The default (None) will use ``'y'`` if mode is ``'orthoview'`` and
``show_all`` is True, else 'r'.
.. versionchanged:: 0.19.0
Color is now passed in orthoview mode.
highlight_color : color
The highlight color. Only used in orthoview mode with
``show_all=True``.
.. versionadded:: 0.19.0
fig : mayavi.mlab.Figure | None
3D Scene in which to plot the alignment.
If ``None``, creates a new 600x600 pixel figure with black background.
.. versionadded:: 0.19.0
%(verbose)s
%(dipole_locs_fig_title)s
.. versionadded:: 0.21.0
Returns
-------
fig : instance of mayavi.mlab.Figure or matplotlib.figure.Figure
The mayavi figure or matplotlib Figure.
Notes
-----
.. versionadded:: 0.9.0
"""
if mode == 'orthoview':
fig = _plot_dipole_mri_orthoview(
dipoles, trans=trans, subject=subject, subjects_dir=subjects_dir,
coord_frame=coord_frame, idx=idx, show_all=show_all,
ax=ax, block=block, show=show, color=color,
highlight_color=highlight_color, title=title)
elif mode in ['arrow', 'sphere']:
from .backends.renderer import _get_renderer
color = (1., 0., 0.) if color is None else color
renderer = _get_renderer(fig=fig, size=(600, 600))
pos = dipoles.pos
ori = dipoles.ori
if coord_frame != 'head':
trans = _get_trans(trans, fro='head', to=coord_frame)[0]
pos = apply_trans(trans, pos)
ori = apply_trans(trans, ori)
renderer.sphere(center=pos, color=color, scale=scale)
if mode == 'arrow':
x, y, z = pos.T
u, v, w = ori.T
renderer.quiver3d(x, y, z, u, v, w, scale=3 * scale,
color=color, mode='arrow')
renderer.show()
fig = renderer.scene()
else:
        raise ValueError('Mode must be "sphere", "arrow" or "orthoview", '
'got %s.' % (mode,))
return fig
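# Illustrative usage sketch (not part of the module API). The file paths below
# are hypothetical placeholders.
def _example_plot_dipole_locations():
    import mne
    subjects_dir = '/path/to/subjects'            # hypothetical SUBJECTS_DIR
    dip = mne.read_dipole('audvis_set1.dip')      # hypothetical dipole file
    trans = mne.read_trans('audvis-trans.fif')    # hypothetical head<->MRI trans
    # Browse dipoles on orthogonal MRI slices (up/down keys switch dipoles)
    fig = plot_dipole_locations(dip, trans=trans, subject='sample',
                                subjects_dir=subjects_dir, mode='orthoview')
    return fig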
def snapshot_brain_montage(fig, montage, hide_sensors=True):
"""Take a snapshot of a Mayavi Scene and project channels onto 2d coords.
Note that this will take the raw values for 3d coordinates of each channel,
    without applying any transforms. If brain images are flipped up/down upon
using `~matplotlib.pyplot.imshow`, check your matplotlib backend as this
behavior changes.
Parameters
----------
fig : instance of ~mayavi.core.api.Scene
The figure on which you've plotted electrodes using
:func:`mne.viz.plot_alignment`.
montage : instance of DigMontage or Info | dict
The digital montage for the electrodes plotted in the scene. If
:class:`~mne.Info`, channel positions will be pulled from the ``loc``
field of ``chs``. dict should have ch:xyz mappings.
hide_sensors : bool
Whether to remove the spheres in the scene before taking a snapshot.
Returns
-------
xy : array, shape (n_channels, 2)
The 2d location of each channel on the image of the current scene view.
im : array, shape (m, n, 3)
The screenshot of the current scene view.
"""
from ..channels import DigMontage
from .. import Info
# Update the backend
from .backends.renderer import _get_renderer
if fig is None:
raise ValueError('The figure must have a scene')
if isinstance(montage, DigMontage):
chs = montage._get_ch_pos()
ch_names, xyz = zip(*[(ich, ixyz) for ich, ixyz in chs.items()])
elif isinstance(montage, Info):
xyz = [ich['loc'][:3] for ich in montage['chs']]
ch_names = [ich['ch_name'] for ich in montage['chs']]
elif isinstance(montage, dict):
if not all(len(ii) == 3 for ii in montage.values()):
raise ValueError('All electrode positions must be length 3')
ch_names, xyz = zip(*[(ich, ixyz) for ich, ixyz in montage.items()])
else:
raise TypeError('montage must be an instance of `DigMontage`, `Info`,'
' or `dict`')
# initialize figure
renderer = _get_renderer(fig, show=True)
xyz = np.vstack(xyz)
proj = renderer.project(xyz=xyz, ch_names=ch_names)
if hide_sensors is True:
proj.visible(False)
im = renderer.screenshot()
proj.visible(True)
return proj.xy, im
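# Illustrative usage sketch (not part of the module API). It assumes a 3D
# alignment figure with plotted electrodes and a matching Info are already
# available; the variable names are hypothetical.
def _example_snapshot_brain_montage(fig_3d, info):
    import numpy as np
    import matplotlib.pyplot as plt
    xy, im = snapshot_brain_montage(fig_3d, info)
    # Normalize the 2D positions to an array (xy may be a dict keyed by
    # channel name or an (n_channels, 2) array depending on the version)
    if isinstance(xy, dict):
        xy = np.vstack([xy[ch] for ch in info['ch_names'] if ch in xy])
    xy = np.asarray(xy)
    fig, ax = plt.subplots()
    ax.imshow(im)  # screenshot of the current 3D scene
    ax.scatter(xy[:, 0], xy[:, 1], s=20, color='r')
    ax.set_axis_off()
    return fig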
@fill_doc
def plot_sensors_connectivity(info, con, picks=None,
cbar_label='Connectivity'):
"""Visualize the sensor connectivity in 3D.
Parameters
----------
info : dict | None
The measurement info.
con : array, shape (n_channels, n_channels)
The computed connectivity measure(s).
%(picks_good_data)s
Indices of selected channels.
cbar_label : str
Label for the colorbar.
Returns
-------
fig : instance of mayavi.mlab.Figure
The mayavi figure.
"""
_validate_type(info, "info")
from .backends.renderer import _get_renderer
renderer = _get_renderer(size=(600, 600), bgcolor=(0.5, 0.5, 0.5))
picks = _picks_to_idx(info, picks)
if len(picks) != len(con):
raise ValueError('The number of channels picked (%s) does not '
'correspond to the size of the connectivity data '
'(%s)' % (len(picks), len(con)))
# Plot the sensor locations
sens_loc = [info['chs'][k]['loc'][:3] for k in picks]
sens_loc = np.array(sens_loc)
renderer.sphere(np.c_[sens_loc[:, 0], sens_loc[:, 1], sens_loc[:, 2]],
color=(1, 1, 1), opacity=1, scale=0.005)
# Get the strongest connections
n_con = 20 # show up to 20 connections
min_dist = 0.05 # exclude sensors that are less than 5cm apart
threshold = np.sort(con, axis=None)[-n_con]
ii, jj = np.where(con >= threshold)
# Remove close connections
con_nodes = list()
con_val = list()
for i, j in zip(ii, jj):
if np.linalg.norm(sens_loc[i] - sens_loc[j]) > min_dist:
con_nodes.append((i, j))
con_val.append(con[i, j])
con_val = np.array(con_val)
# Show the connections as tubes between sensors
vmax = np.max(con_val)
vmin = np.min(con_val)
for val, nodes in zip(con_val, con_nodes):
x1, y1, z1 = sens_loc[nodes[0]]
x2, y2, z2 = sens_loc[nodes[1]]
tube = renderer.tube(origin=np.c_[x1, y1, z1],
destination=np.c_[x2, y2, z2],
scalars=np.c_[val, val],
vmin=vmin, vmax=vmax,
reverse_lut=True)
renderer.scalarbar(source=tube, title=cbar_label)
# Add the sensor names for the connections shown
nodes_shown = list(set([n[0] for n in con_nodes] +
[n[1] for n in con_nodes]))
for node in nodes_shown:
x, y, z = sens_loc[node]
renderer.text3d(x, y, z, text=info['ch_names'][picks[node]],
scale=0.005,
color=(0, 0, 0))
renderer.set_camera(azimuth=-88.7, elevation=40.8,
distance=0.76,
focalpoint=np.array([-3.9e-4, -8.5e-3, -1e-2]))
renderer.show()
return renderer.scene()
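# Illustrative usage sketch (not part of the module API). A real connectivity
# matrix would come from a connectivity estimator; random symmetric values are
# used here purely for illustration.
def _example_plot_sensors_connectivity(info):
    import numpy as np
    picks = _picks_to_idx(info, None)  # indices of the channels to plot
    rng = np.random.RandomState(0)
    con = rng.rand(len(picks), len(picks))
    con = (con + con.T) / 2.  # symmetric, purely illustrative values
    return plot_sensors_connectivity(info, con, picks=picks,
                                     cbar_label='Connectivity')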
def _plot_dipole_mri_orthoview(dipole, trans, subject, subjects_dir=None,
coord_frame='head', idx='gof', show_all=True,
ax=None, block=False, show=True, color=None,
highlight_color='r', title=None):
"""Plot dipoles on top of MRI slices in 3-D."""
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from .. import Dipole
if not has_nibabel():
raise ImportError('This function requires nibabel.')
_check_option('coord_frame', coord_frame, ['head', 'mri'])
if not isinstance(dipole, Dipole):
from ..dipole import _concatenate_dipoles
dipole = _concatenate_dipoles(dipole)
if idx == 'gof':
idx = np.argmax(dipole.gof)
elif idx == 'amplitude':
idx = np.argmax(np.abs(dipole.amplitude))
else:
idx = _ensure_int(idx, 'idx', 'an int or one of ["gof", "amplitude"]')
vox, ori, pos, data = _get_dipole_loc(
dipole, trans, subject, subjects_dir, coord_frame)
dims = len(data) # Symmetric size assumed.
dd = dims // 2
if ax is None:
fig, ax = plt.subplots(1, subplot_kw=dict(projection='3d'))
else:
_validate_type(ax, Axes3D, "ax", "Axes3D")
fig = ax.get_figure()
gridx, gridy = np.meshgrid(np.linspace(-dd, dd, dims),
np.linspace(-dd, dd, dims), indexing='ij')
params = {'ax': ax, 'data': data, 'idx': idx, 'dipole': dipole,
'vox': vox, 'gridx': gridx, 'gridy': gridy,
'ori': ori, 'coord_frame': coord_frame,
'show_all': show_all, 'pos': pos,
'color': color, 'highlight_color': highlight_color,
'title': title}
_plot_dipole(**params)
ax.view_init(elev=30, azim=-140)
callback_func = partial(_dipole_changed, params=params)
fig.canvas.mpl_connect('scroll_event', callback_func)
fig.canvas.mpl_connect('key_press_event', callback_func)
plt_show(show, block=block)
return fig
RAS_AFFINE = np.eye(4)
RAS_AFFINE[:3, 3] = [-128] * 3
RAS_SHAPE = (256, 256, 256)
def _get_dipole_loc(dipole, trans, subject, subjects_dir, coord_frame):
"""Get the dipole locations and orientations."""
import nibabel as nib
from nibabel.processing import resample_from_to
_check_option('coord_frame', coord_frame, ['head', 'mri'])
subjects_dir = get_subjects_dir(subjects_dir=subjects_dir,
raise_error=True)
t1_fname = op.join(subjects_dir, subject, 'mri', 'T1.mgz')
t1 = nib.load(t1_fname)
# Do everything in mm here to make life slightly easier
vox_ras_t, _, mri_ras_t, _, _ = _read_mri_info(
t1_fname, units='mm')
head_mri_t = _get_trans(trans, fro='head', to='mri')[0].copy()
head_mri_t['trans'][:3, 3] *= 1000 # m→mm
del trans
pos = dipole.pos * 1e3 # m→mm
ori = dipole.ori
# Figure out how to always resample to an identity, 256x256x256 RAS:
#
# 1. Resample to head or MRI surface RAS (the conditional), but also
# 2. Resample to what will work for the standard 1mm** RAS_AFFINE (resamp)
#
# We could do this with two resample_from_to calls, but it's cleaner,
# faster, and we get fewer boundary artifacts if we do it in one shot.
    # So first solve upsamp s.t. ``upsamp @ vox_ras_t == RAS_AFFINE`` (2):
upsamp = np.linalg.solve(vox_ras_t['trans'].T, RAS_AFFINE.T).T
# Now figure out how we would resample from RAS to head or MRI coords:
if coord_frame == 'head':
dest_ras_t = combine_transforms(
head_mri_t, mri_ras_t, 'head', 'ras')['trans']
else:
pos = apply_trans(head_mri_t, pos)
ori = apply_trans(head_mri_t, dipole.ori, move=False)
dest_ras_t = mri_ras_t['trans']
# The order here is wacky because we need `resample_from_to` to operate
# in a reverse order
affine = np.dot(np.dot(dest_ras_t, upsamp), vox_ras_t['trans'])
t1 = resample_from_to(t1, (RAS_SHAPE, affine), order=0)
# Now we could do:
#
# t1 = SpatialImage(t1.dataobj, RAS_AFFINE)
#
# And t1 would be in our destination (mri or head) space. But we don't
# need to construct the image -- let's just get our voxel coords and data:
vox = apply_trans(np.linalg.inv(RAS_AFFINE), pos)
t1_data = _get_img_fdata(t1)
return vox, ori, pos, t1_data
def _plot_dipole(ax, data, vox, idx, dipole, gridx, gridy, ori, coord_frame,
show_all, pos, color, highlight_color, title):
"""Plot dipoles."""
import matplotlib.pyplot as plt
from matplotlib.colors import ColorConverter
color_converter = ColorConverter()
xidx, yidx, zidx = np.round(vox[idx]).astype(int)
xslice = data[xidx]
yslice = data[:, yidx]
zslice = data[:, :, zidx]
ori = ori[idx]
if color is None:
color = 'y' if show_all else 'r'
color = np.array(color_converter.to_rgba(color))
highlight_color = np.array(color_converter.to_rgba(highlight_color))
if show_all:
colors = np.repeat(color[np.newaxis], len(vox), axis=0)
colors[idx] = highlight_color
size = np.repeat(5, len(vox))
size[idx] = 20
visible = np.arange(len(vox))
else:
colors = color
size = 20
visible = idx
offset = np.min(gridx)
xyz = pos
ax.scatter(xs=xyz[visible, 0], ys=xyz[visible, 1],
zs=xyz[visible, 2], zorder=2, s=size, facecolor=colors)
xx = np.linspace(offset, xyz[idx, 0], xidx)
yy = np.linspace(offset, xyz[idx, 1], yidx)
zz = np.linspace(offset, xyz[idx, 2], zidx)
ax.plot(xx, np.repeat(xyz[idx, 1], len(xx)), zs=xyz[idx, 2], zorder=1,
linestyle='-', color=highlight_color)
ax.plot(np.repeat(xyz[idx, 0], len(yy)), yy, zs=xyz[idx, 2], zorder=1,
linestyle='-', color=highlight_color)
ax.plot(np.repeat(xyz[idx, 0], len(zz)),
np.repeat(xyz[idx, 1], len(zz)), zs=zz, zorder=1,
linestyle='-', color=highlight_color)
q_kwargs = dict(length=50, color=highlight_color, pivot='tail')
ax.quiver(xyz[idx, 0], xyz[idx, 1], xyz[idx, 2], ori[0], ori[1], ori[2],
**q_kwargs)
dims = np.array([(len(data) / -2.), (len(data) / 2.)])
ax.set(xlim=-dims, ylim=-dims, zlim=dims)
# Plot slices
ax.contourf(xslice, gridx, gridy, offset=offset, zdir='x',
cmap='gray', zorder=0, alpha=.5)
ax.contourf(gridx, yslice, gridy, offset=offset, zdir='y',
cmap='gray', zorder=0, alpha=.5)
ax.contourf(gridx, gridy, zslice, offset=offset, zdir='z',
cmap='gray', zorder=0, alpha=.5)
# Plot orientations
args = np.array([list(xyz[idx]) + list(ori)] * 3)
for ii in range(3):
args[ii, [ii, ii + 3]] = [offset + 0.5, 0] # half a mm inward (z ord)
ax.quiver(*args.T, alpha=.75, **q_kwargs)
# These are the only two options
coord_frame_name = 'Head' if coord_frame == 'head' else 'MRI'
if title is None:
title = ('Dipole #%s / %s @ %.3fs, GOF: %.1f%%, %.1fnAm\n%s: ' % (
idx + 1, len(dipole.times), dipole.times[idx], dipole.gof[idx],
dipole.amplitude[idx] * 1e9, coord_frame_name) +
'(%0.1f, %0.1f, %0.1f) mm' % tuple(xyz[idx]))
ax.get_figure().suptitle(title)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
plt.draw()
def _dipole_changed(event, params):
"""Handle dipole plotter scroll/key event."""
if event.key is not None:
if event.key == 'up':
params['idx'] += 1
elif event.key == 'down':
params['idx'] -= 1
else: # some other key
return
elif event.step > 0: # scroll event
params['idx'] += 1
else:
params['idx'] -= 1
params['idx'] = min(max(0, params['idx']), len(params['dipole'].pos) - 1)
params['ax'].clear()
_plot_dipole(**params)
def _update_coord_frame(obj, rr, nn, mri_trans, head_trans):
if obj['coord_frame'] == FIFF.FIFFV_COORD_MRI:
rr = apply_trans(mri_trans, rr)
nn = apply_trans(mri_trans, nn, move=False)
elif obj['coord_frame'] == FIFF.FIFFV_COORD_HEAD:
rr = apply_trans(head_trans, rr)
nn = apply_trans(head_trans, nn, move=False)
return rr, nn
@fill_doc
def plot_brain_colorbar(ax, clim, colormap='auto', transparent=True,
orientation='vertical', label='Activation',
bgcolor='0.5'):
"""Plot a colorbar that corresponds to a brain activation map.
Parameters
----------
ax : instance of Axes
The Axes to plot into.
%(clim)s
%(colormap)s
%(transparent)s
orientation : str
Orientation of the colorbar, can be "vertical" or "horizontal".
label : str
The colorbar label.
bgcolor : color
The color behind the colorbar (for alpha blending).
Returns
-------
cbar : instance of ColorbarBase
The colorbar.
Notes
-----
.. versionadded:: 0.19
"""
from matplotlib.colorbar import ColorbarBase
from matplotlib.colors import Normalize
mapdata = _process_clim(clim, colormap, transparent)
ticks = _get_map_ticks(mapdata)
colormap, lims = _linearize_map(mapdata)
del mapdata
norm = Normalize(vmin=lims[0], vmax=lims[2])
cbar = ColorbarBase(ax, cmap=colormap, norm=norm, ticks=ticks,
label=label, orientation=orientation)
# make the colorbar background match the brain color
cbar.patch.set(facecolor=bgcolor)
# remove the colorbar frame except for the line containing the ticks
cbar.outline.set_visible(False)
cbar.ax.set_frame_on(True)
for key in ('left', 'top',
'bottom' if orientation == 'vertical' else 'right'):
ax.spines[key].set_visible(False)
return cbar
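# Illustrative usage sketch (not part of the module API): draw a standalone
# vertical colorbar for an activation map with value limits 3/6/9.
def _example_plot_brain_colorbar():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots(figsize=(1, 4))
    clim = dict(kind='value', lims=[3, 6, 9])
    cbar = plot_brain_colorbar(ax, clim, colormap='viridis',
                               orientation='vertical', label='Activation')
    fig.tight_layout()
    return cbar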
_3d_options = dict()
_3d_default = dict(antialias='true')
def set_3d_options(antialias=None):
"""Set 3D rendering options.
Parameters
----------
antialias : bool | None
If not None, set the default full-screen anti-aliasing setting.
False is useful when renderers have problems (such as software
MESA renderers). This option can also be controlled using an
environment variable, e.g., ``MNE_3D_OPTION_ANTIALIAS=false``.
Notes
-----
.. versionadded:: 0.21.0
"""
if antialias is not None:
_3d_options['antialias'] = str(bool(antialias)).lower()
def _get_3d_option(key):
try:
opt = _3d_options[key]
except KeyError:
opt = get_config(f'MNE_3D_OPTION_{key.upper()}', _3d_default[key])
opt = opt.lower()
_check_option(f'3D option {key}', opt, ('true', 'false'))
return opt == 'true'
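# Illustrative usage sketch: turn off full-screen anti-aliasing, which can
# help with software (MESA) renderers; setting the environment variable
# MNE_3D_OPTION_ANTIALIAS=false has the same effect.
def _example_set_3d_options():
    set_3d_options(antialias=False)
    assert _get_3d_option('antialias') is False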
|
the-stack_0_27450
|
import logging
import logging.config
import os
from logging.config import fileConfig
import threading
import copy
from dagon.TPS_api import API as TPSapi
from requests.exceptions import ConnectionError
from dagon.config import read_config
from time import time, sleep
class DAG_TPS(object):
def __init__(self, name,config=None, config_file='dagon.ini', max_threads=10):
"""
Create a meta-workflow
:param name: Workflow name
:type name: str
"""
if config is not None:
self.cfg = config
else:
self.cfg = read_config(config_file)
fileConfig(config_file)
self.workflows = []
self.name = name
self.logger = logging.getLogger()
self.workflow_id = 0
self.tasks = []
self.T_tasks_needed = []
self.T_tasks_needy = []
self.is_api_available = False
self.running = False
self.TPP=dict() #list of transversal processing points
self.tpp_counter = 0
self.is_tpsapi_available = False
try:
self.TPSapi = TPSapi(self.cfg['transversal_service']['route'],self.name)
self.is_tpsapi_available = True
self.logger.debug("TPS API is alive")
except KeyError:
self.logger.error("No transversal service URL in config file")
except ConnectionError as e:
self.logger.error(e)
def set_dry(self, dry):
for wf in self.workflows:
wf.dry = dry
def add_workflow(self, workflow):
"""
Add a workflow to this meta-workflow
:param workflow: :class:`dagon.workflow` instance
:type workflow: :class:`dagon.workflow`
"""
self.workflows.append(workflow)
workflow.set_dag_tps(self)
#if self.is_api_available:
# self.api.add_task(self.workflow_id, task)
def find_task_by_name(self, workflow_name, task_name):
"""
        Search for a task of a specific workflow
:param workflow_name: Name of the workflow
:type workflow_name: str
:param task_name: Name of the task
:type task_name: str
:return: task instance
:rtype: :class:`dagon.task.Task` instance if it is found, None in other case
"""
# Check if the workflow is the current one
for wf in self.workflows:
if workflow_name == wf.name:
                return wf.find_task_by_name(wf.name, task_name)
        return None
def find_workflow_task(self,task_name):
"""
        Search for the workflow name of a specific task
:param task_name: Name of the task
:type task_name: str
:return: workflow name
:rtype: string
"""
# Check if the workflow is the current one
for wf in self.workflows:
if wf.find_task_by_name(wf.name,task_name) is not None:
return wf.name
return None
def make_dependencies(self):
"""
Looks for all the dependencies between workflows
"""
for wf in self.workflows:
#check the dependencies for each workflow
wf.make_dependencies()
#add the workflow's tasks to the DAGtps
#self.Validate_WF()
# Return a json representation of the workflow
def as_json(self,json_format="mw" ):
"""
Return a json representation of the meta-workflow
        :param json_format: format of the json returned ('mw' or 'wf')
:type json_format: str
:return: JSON representation
:rtype: dict(str, object) with data class
"""
if json_format == "mw":
jsonWorkflow = {"tasks": {}, "name": self.name, "id": self.workflow_id}
for wf in self.workflows:
for task in wf.tasks:
jsonWorkflow['tasks'][task.name] = task.as_json()
return jsonWorkflow
if json_format == "wf":
jsonWorkflow = {"tasks": {}, "name": self.name, "id": self.workflow_id}
for wf in self.workflows:
for task in wf.tasks:
temp = task.remove_from_workflow()
jsonWorkflow['tasks'][task.name] = task.as_json()
jsonWorkflow['tasks'][task.name]['command'] = temp
return jsonWorkflow
def run(self):
self.logger.debug("Running meta-workflow: %s", self.name)
self.running=True
start_time = time()
#print self.tasks
workflows= []
for wf in self.workflows:
workflows.append(threading.Thread(target=wf.run))
for wf in workflows: wf.start()
for wf in workflows: wf.join()
completed_in = (time() - start_time)
self.logger.info("Meta-Workflow '"+self.name+ "' completed in %s seconds ---" % completed_in)
def Validate_WF(self):
"""
        Validate the transversality points to avoid any kind of cycle in the graph.
        Uses self.workflows, the list of declared workflows
        (list(class: dagon.workflow,...n)).
        Raises an Exception when a cycle is found.
        self.tasks is filled with all the tasks of each workflow.
"""
for workflow in self.workflows:
for task in workflow.tasks:
for prev in task.prevs:
if workflow.find_task_by_name(workflow.name,prev) is None: #if is a task from another workflow - 'do you need someone?'
needed = False; needy = False
self.T_tasks_needed.append(prev) #dependency task is added to the transversal ones
#if the actual task is founded in the transversal, there exist a cycle.
if task in self.T_tasks_needed or task.nexts in self.T_tasks_needed: needed=True #are you or your decendents needed?
if prev in self.T_tasks_needy: needy=True #who you need is also needed?
if needy and needed:
logging.warning('A cycle have been found')
raise Exception("A cycle have been found from %s to %s" % (prev.name, task.name))
else:
self.T_tasks_needy.append(task) #add the task and its descendants to the needy list
for t in task.nexts:
self.T_tasks_needy.append(t)
temp = task.remove_from_workflow() #the command is changed, deleting the workflow reference
self.tasks.append(temp)
def Create_TPP_Double(self, DSA, DSB, keygroups, Apath= None, Bpath= None , name=None ):
"""
Create 2 CSV files based on the working directories of 2 tasks and a keygroup list
:param DSA,DSB: names of declared tasks
:type DSA: string
:param keygroups: keys (column names) linking the two tasks (datasources), in the form 'DSA_att1-DSB_att1,DSA_att2-DSB_att2'
:type keygroups: string
:param Apath,Bpath: additional path to append to each task if necessary, e.g. /output/
:type Apath: string
Returns the tpp_name
Raises an Exception when a task doesn't exist or the keygroups are not correct
"""
if Apath is None: Apath = DSA
else: Apath = DSA + ":/" + Apath
if Bpath is None: Bpath = DSB
else: Bpath = DSB + ":/" + Bpath
if name is not None:
tpp_name = name
else:
tppid= str(self.tpp_counter)
tpp_name = "TP_"+tppid
self.tpp_counter+=1
tpp = { "NAME": tpp_name, "TYPE": "Double", "TPP_DS": {
DSA: {
"TYPE": "ID",
"ID": Apath,
"WF_name": self.find_workflow_task(DSA)
},
DSB: {
"TYPE": "ID",
"ID": Bpath,
"WF_name": self.find_workflow_task(DSB)
}
},"KEYGROUP": keygroups }
self.TPP[tpp_name] = tpp
return tpp_name
def Create_TPP_Single(self, DS, path= None, name=None ):
"""
Create a CSV file based on a task's working directory
:param DS: name of a declared task
:type DS: string
:param path: additional path to append to the task if necessary, e.g. /output/
:type path: string
Returns the tpp_name
"""
if path is None: path = DS
else: path = DS + ":/" + path
if name is not None:
tpp_name = name
else:
tppid= str(self.tpp_counter)
tpp_name = "TP_"+tppid
self.tpp_counter+=1
tpp = { "NAME": tpp_name, "TYPE": "Single", "TPP_DS": {
DS: {
"TYPE": "ID",
"ID": path,
"WF_name": self.find_workflow_task(DS)
}
},"KEYGROUP": "None" }
self.TPP[tpp_name] = tpp
return tpp_name
def prepare_tps(self):
"""
Initialize the TPS manager with the TPS created
:raises Exception: when there is an error with the call
"""
tppset_name= "DagonTPP_%s" % self.name
TPPset = {tppset_name: self.TPP }
if self.is_tpsapi_available == True:
dag = self.as_json()
if self.running ==False:
self.TPSapi.LoadDAGtps("dagon",TPPset,tppset_name, mode="online") #load DAGtps and tpp points to service
else:
self.TPSapi.LoadDAGtps(dag,TPPset,tppset_name) #load DAGtps and tpp points to service
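# Hedged usage sketch (comments only, not part of the original source; object
# construction details are assumptions about the dagon API). After registering
# each workflow with the add method at the top of this class, a typical session
# would look like:
#
#   meta.make_dependencies()
#   tpp = meta.Create_TPP_Double("taskA", "taskB", "taskA_att1-taskB_att1")
#   meta.prepare_tps()
#   meta.run()
#
# where "taskA"/"taskB" are hypothetical task names and the keygroup string
# follows the 'DSA_att1-DSB_att1' form documented in Create_TPP_Double.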
|
the-stack_0_27454
|
"""
"""
import tkinter as tk
import tkinter.filedialog
import os
import re
import shutil
def undo(location,msg_text):
"""
This function moves all files from the episode folders back to the root directory and removes the emptied folders.
:param location(str): Folder location where tv series episode folders are located.
:param msg_text(StringVar) : notification frame message variable
:return: None
"""
msg_text.set("")
try:
os.chdir(location)
except FileNotFoundError as e:
msg_text.set(e)
return
except Exception as e:
msg_text.set("Location error")
return
cwd = os.getcwd()
for path,dir,fi in os.walk(cwd):
for f in fi:
target = os.path.join(path, f)
shutil.move(target,os.path.join(cwd,f))
for dir in os.scandir(cwd):
if(dir.is_dir()):
shutil.rmtree(os.path.join(cwd,dir))
msg_text.set("Successful...")
def placer(location,msg_text,season_flag, sub_flag,sub_folder):
"""
This function creates a folder for each episode and moves the relevant video file into it. It can also create a subtitle sub-folder and place subtitles in the correct folder.
:param location(str): Folder location where tv series episodes are located.
:param msg_text(str): notification frame message variable
:param season_flag(bool): folder contains multiple seasons indicator
:param sub_flag(bool): create a sub folder for each episode indicator
:param sub_folder(string): sub folder name
:return: None
"""
current_dir = os.getcwd()
msg_text.set("")
try:
os.chdir(location)
except FileNotFoundError as e:
msg_text.set(e)
return
except Exception as e:
msg_text.set("Location error")
return
ch_dir = os.getcwd()
d = os.listdir()
dirs = list()
for x in d:
if not os.path.isdir(x):
dirs.append(x)
len_dirs = len(dirs)
if len_dirs == 0:
msg_text.set(f"Nothing to do with \n {current_dir} directory.\n(Current working directory.)")
return
try:
x=0
pattern1 = re.compile(r"[sS](\d\d|\d)(\.|x|-|_|\s)?[eE](\d\d|\d)")
pattern2 = re.compile(r"[sS]?(\d\d|\d)(x|-|_|\s)[eE]?(\d\d|\d)")
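# pattern1 matches explicit season/episode markers such as "S01E02", "s1.e2" or
# "S01 E02"; pattern2 also catches looser forms such as "1x02", "01-02" or "s01 02".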
while(x < len(dirs)):
x+=1
result = pattern1.findall(dirs[x-1])
if len(result) == 0:
result = pattern2.findall(dirs[x-1])
if len(result) > 0:
season = f"Season {str(result[0][0]).zfill(2)}"
new_dir = f"S{str(result[0][0]).zfill(2)}E{str(int(result[0][len(result[0])-1])).zfill(2)}"
if(sub_flag):
sub_dir = os.path.join(new_dir, sub_folder)
else:
sub_dir = new_dir
if(season_flag):
new_dir = os.path.join(season,new_dir)
sub_dir = os.path.join(season,sub_dir)
try:
if(sub_flag):
os.makedirs(sub_dir)
else:
os.makedirs(new_dir)
except Exception:
pass
finally:
if(os.path.splitext(dirs[x-1])[1] != ".srt"):
os.rename(os.path.join(ch_dir,dirs[x-1]),os.path.join(os.path.join(ch_dir,new_dir),dirs[x-1]))
else:
os.rename(os.path.join(ch_dir,dirs[x-1]),os.path.join(os.path.join(ch_dir,sub_dir),dirs[x-1]))
dirs.remove(dirs[x-1])
x-=1
if(len(dirs) > 0):
msg_text.set(f"{len(dirs)} file(s) not like tv series file(s).")
msg_text.set(f"{len_dirs-len(dirs)} file(s) successfully placed...\n{msg_text.get()}")
except Exception as e:
msg_text.set(f"{e} in \n {current_dir} directory.\n(Current working directory.)")
def main():
root = tk.Tk(className=" Tv Series Manipulator")
canvas = tk.Canvas(root,width=800,height=600,bg="#99ccff")
canvas.pack()
#frame for user inputs
l_frame = tk.Frame(canvas,bg="#999",bd=20)
l_frame.place(relx=0.05,rely=0.05,relwidth=0.9,relheight=0.45)
# for file location
l_label = tk.Label(l_frame,bg="#eee",text="File location",fg="black")
l_label.place(relwidth=0.2,relheight=0.15)
# for show selected location
l_label_show_text = tk.StringVar()
l_label_show_text.set("");
l_label_show = tk.Label(l_frame, textvariable=l_label_show_text, bg="#eee",text="",fg="black")
l_label_show.place(relx =0.215,relwidth=0.4,relheight=0.15)
# get tv searies file location
def open_file():
filename = tk.filedialog.askdirectory()
l_label_show_text.set(filename)
l_button = tk.Button(l_frame, text="Select folder", bg="#426367", fg="white", command=open_file)
l_button.place(relx =0.630,relwidth=0.15,relheight=0.15)
# for season checkbox
seasonChkVar = tk.BooleanVar()
seasonChkVar.set(False)
l_season_check = tk.Checkbutton(l_frame, text="This contains\n multiple seasons.",var=seasonChkVar)
l_season_check.place( relx= 0.8,relwidth=0.2)
#for subtitle
l_sub_label = tk.Label(l_frame, bg="#eee", text="Subtitle folder", fg="black")
l_sub_label.place( rely=0.415, relwidth=0.2, relheight=0.15)
chkVar = tk.BooleanVar()
chkVar.set(False)
l_sub_check = tk.Checkbutton(l_frame, text="want sub folder.", var=chkVar)
l_sub_check.place( rely=0.44, relx= 0.215, relwidth=0.2)
l_sub_box = tk.Entry(l_frame, bg="#D1EEEE", fg="black", bd=5,justify="center")
l_sub_box.insert(0,"example:- sinhala_sub or eng_sub")
l_sub_box.place( relx=0.45, rely=0.415, relwidth=0.45, relheight=0.15)
# buttons for operations
l_button_undo = tk.Button(l_frame,text="Undo", command=lambda: undo(l_label_show_text.get(),msg_text))
l_button_undo.place(rely= 0.75 ,relx=0.25,relwidth=0.2,relheight=0.15)
l_button_place = tk.Button(l_frame,text="Place", command=lambda: placer(l_label_show_text.get(),msg_text, seasonChkVar.get(), chkVar.get(), l_sub_box.get()))
l_button_place.place(rely= 0.75 ,relx=0.5,relwidth=0.2,relheight=0.15)
#end input frame
#message frame begin
msg_text = tk.StringVar()
# text variable for notifications
msg_text.set("""
This application can create a folder for each episode and put videos and subtitles into the matching folder.
OR
Undo this process.
File location : Select where the tv series is located.
This contains multiple seasons : check this if the file location contains more than one season.
Want sub folder : Check this if you want a subtitle folder for each episode.
If this is checked, then you must specify the sub folder name.
UNDO : move all files back to the root (given file location) folder.
Place : Create and place tv series episodes in the correct folder.
""")
msg_frame = tk.Frame(canvas,bg="red",bd=10)
msg_frame.place(relx=0.05,rely=0.5,relwidth=0.9,relheight=0.45)
msg_label = tk.Label(msg_frame,textvariable=msg_text)
msg_label.place(relwidth=1,relheight=1)
#message frame begin
root.mainloop()
if __name__ == '__main__':
main()
|
the-stack_0_27457
|
def agent(obs, config):
# Your code here: Amend the agent!
import numpy as np
import random
# Helper function for score_move: gets board at next step if agent drops piece in selected column
def drop_piece(grid, col, mark, config):
next_grid = grid.copy()
for row in range(config.rows-1, -1, -1):
if next_grid[row][col] == 0:
break
next_grid[row][col] = mark
return next_grid
# Helper function for minimax: calculates value of heuristic for grid
def get_heuristic(grid, mark, config):
num_threes = count_windows(grid, 3, mark, config)
num_fours = count_windows(grid, 4, mark, config)
num_threes_opp = count_windows(grid, 3, mark%2+1, config)
num_fours_opp = count_windows(grid, 4, mark%2+1, config)
score = num_threes - 1e2*num_threes_opp - 1e4*num_fours_opp + 1e6*num_fours
return score
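# Note on the weights above: completing our own four in a row (+1e6) dominates,
# blocking an opponent four (-1e4) comes next, and opponent threes (-1e2) are
# penalized far more heavily than our own threes (+1) are rewarded.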
# Helper function for get_heuristic: checks if window satisfies heuristic conditions
def check_window(window, num_discs, piece, config):
return (window.count(piece) == num_discs and window.count(0) == config.inarow-num_discs)
# Helper function for get_heuristic: counts number of windows satisfying specified heuristic conditions
def count_windows(grid, num_discs, piece, config):
num_windows = 0
# horizontal
for row in range(config.rows):
for col in range(config.columns-(config.inarow-1)):
window = list(grid[row, col:col+config.inarow])
if check_window(window, num_discs, piece, config):
num_windows += 1
# vertical
for row in range(config.rows-(config.inarow-1)):
for col in range(config.columns):
window = list(grid[row:row+config.inarow, col])
if check_window(window, num_discs, piece, config):
num_windows += 1
# positive diagonal
for row in range(config.rows-(config.inarow-1)):
for col in range(config.columns-(config.inarow-1)):
window = list(grid[range(row, row+config.inarow), range(col, col+config.inarow)])
if check_window(window, num_discs, piece, config):
num_windows += 1
# negative diagonal
for row in range(config.inarow-1, config.rows):
for col in range(config.columns-(config.inarow-1)):
window = list(grid[range(row, row-config.inarow, -1), range(col, col+config.inarow)])
if check_window(window, num_discs, piece, config):
num_windows += 1
return num_windows
# Uses minimax to calculate value of dropping piece in selected column
def score_move(grid, col, mark, config, nsteps):
next_grid = drop_piece(grid, col, mark, config)
# score = negamax(next_grid, nsteps-1, False, mark, config)
score = negamax_ab(next_grid, nsteps-1, -np.Inf, np.Inf, False, mark, config)
# print(score, score2)
return score
# Helper function for minimax: checks if agent or opponent has four in a row in the window
def is_terminal_window(window, config):
return window.count(1) == config.inarow or window.count(2) == config.inarow
# Helper function for minimax: checks if game has ended
def is_terminal_node(grid, config):
# Check for draw
if list(grid[0, :]).count(0) == 0:
return True
# Check for win: horizontal, vertical, or diagonal
# horizontal
for row in range(config.rows):
for col in range(config.columns-(config.inarow-1)):
window = list(grid[row, col:col+config.inarow])
if is_terminal_window(window, config):
return True
# vertical
for row in range(config.rows-(config.inarow-1)):
for col in range(config.columns):
window = list(grid[row:row+config.inarow, col])
if is_terminal_window(window, config):
return True
# positive diagonal
for row in range(config.rows-(config.inarow-1)):
for col in range(config.columns-(config.inarow-1)):
window = list(grid[range(row, row+config.inarow), range(col, col+config.inarow)])
if is_terminal_window(window, config):
return True
# negative diagonal
for row in range(config.inarow-1, config.rows):
for col in range(config.columns-(config.inarow-1)):
window = list(grid[range(row, row-config.inarow, -1), range(col, col+config.inarow)])
if is_terminal_window(window, config):
return True
return False
# Negamax implementation
def negamax(node, depth, maximizingPlayer, mark, config):
is_terminal = is_terminal_node(node, config)
valid_moves = [c for c in range(config.columns) if node[0][c] == 0]
if depth == 0 or is_terminal:
return get_heuristic(node, mark, config)
value = -np.Inf
for col in valid_moves:
child = drop_piece(node, col, mark if maximizingPlayer else mark%2+1, config)
value = max(value, -negamax(child, depth-1, not maximizingPlayer, mark, config))
return -value
# Negamax implementation with alpha/beta pruning
def negamax_ab(node, depth, a, b, maximizingPlayer, mark, config):
is_terminal = is_terminal_node(node, config)
valid_moves = [c for c in range(config.columns) if node[0][c] == 0]
if depth == 0 or is_terminal:
return get_heuristic(node, mark, config)
value = -np.Inf
for col in valid_moves:
child = drop_piece(node, col, mark if maximizingPlayer else mark%2+1, config)
value = max(value, -negamax_ab(child, depth-1, -b, -a, not maximizingPlayer, mark, config))
a = max(a, value)
if a >= b:
break
return -value
#########################
# Agent makes selection #
#########################
# import time
# start = time.clock()
N_STEPS = 3
# Get list of valid moves
valid_moves = [c for c in range(config.columns) if obs.board[c] == 0]
# Convert the board to a 2D grid
grid = np.asarray(obs.board).reshape(config.rows, config.columns)
# Use the heuristic to assign a score to each possible board in the next step
scores = dict(zip(valid_moves, [score_move(grid, col, obs.mark, config, N_STEPS) for col in valid_moves]))
# Get a list of columns (moves) that maximize the heuristic
max_cols = [key for key in scores.keys() if scores[key] == max(scores.values())]
# Select at random from the maximizing columns
# print(f"Time: {time.clock() - start}")
return random.choice(max_cols)
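# --- Hedged usage sketch (not part of the original submission) ---
# Assumes the `kaggle-environments` package is installed and that this module
# exposes `agent` at top level as defined above; it simply pits the agent against
# the built-in "random" agent and prints the final board.
if __name__ == "__main__":
    from kaggle_environments import make
    env = make("connectx", debug=True)
    env.run([agent, "random"])
    print(env.render(mode="ansi"))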
|
the-stack_0_27458
|
"""During the development of the package I realised that there is a typing
inconsistency.
The input components of a Wide and Deep model are of type nn.Module. These change type internally to nn.Sequential. While nn.Sequential is an instance of
nn.Module the opposite is, of course, not true. This does not affect any functionality of the package, but it is something that needs fixing. However, while
fixing is simple (simply define new attributes that are the nn.Sequential objects), its implications are quite wide within the package (involves changing a
number of tests and tutorials). Therefore, I will introduce that fix when I do a major release. For now, we live with it.
"""
import sys
import warnings
from typing import Dict, List, Optional
import torch
import torch.nn as nn
from torch import Tensor
from transformers import BertModel, BertPreTrainedModel
from .tabular.tab_mlp import MLP, TabMlp
from .vision import ImageEncoder
sys.path.append('../../')
use_cuda = torch.cuda.is_available()
device = torch.device('cuda' if use_cuda else 'cpu')
class MultiModalBert(BertPreTrainedModel):
"""Bert Model transformer with a sequence classification/regression head as
well as a TabularFeatCombiner module to combine categorical and numerical
features with the Bert pooled output.
Parameters:
hf_model_config (:class:`~transformers.BertConfig`):
Model configuration class with all the parameters of the model.
This object must also have a tabular_config member variable that is a
:obj:`TabularConfig` instance specifying the configs for :obj:`TabularFeatCombiner`
"""
def __init__(self, hf_model_config):
super().__init__(hf_model_config)
text_config = hf_model_config.text_config
tabular_config = hf_model_config.tabular_config
deephead_config = hf_model_config.head_config
if type(tabular_config) is dict: # when loading from saved model
tabular_config = TabularConfig(**tabular_config)
else:
self.config.tabular_config = tabular_config.__dict__
self.text_encoder = BertModel(text_config)
self.image_encoder = ImageEncoder(is_require_grad=True)
self.tabular_encoder = TabMlp(tabular_config)
self.head_encoder = self._build_deephead(deephead_config)
if self.tabular_encoder is not None:
self.is_tabnet = self.tabular_encoder.__class__.__name__ == 'TabNet'
else:
self.is_tabnet = False
self._check_model_components(
wide=None,
deeptabular=self.tabular_encoder,
deeptext=self.text_encoder,
deepimage=self.image_encoder,
deephead=self.head_encoder,
head_hidden_dims=None,
pred_dim=None,
)
def forward(self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
class_weights=None,
output_attentions=None,
output_hidden_states=None,
cat_feats=None,
numerical_feats=None,
tabular_feature=None,
image_feature=None):
text_output = self._forward_deeptext(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
class_weights=class_weights,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states)
image_output = self._forward_deepimage(image_feature)
tabular_output = self._forward_deeptabular(tabular_feature)
outputs = torch.cat([text_output, image_output, tabular_output],
axis=1)
deephead_out = self._forward_deephead(deep_side=outputs)
return deephead_out
def _forward_deeptabular(self,
cat_feats=None,
numerical_feats=None,
tabular_feature=None):
if self.tabular_encoder is not None:
tabular_output = self.tabular_encoder(tabular_feature)
else:
tabular_output = torch.FloatTensor()
return tabular_output
def _forward_deepimage(self, image_feature=None):
if self.image_encoder is not None:
image_output = self.image_encoder(image_feature)
else:
image_output = torch.FloatTensor()
return image_output
def _forward_deephead(self, deep_side):
deephead_out = self.head_encoder(deep_side)
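# NOTE: building an nn.Linear inside the forward pass means its weights are
# re-initialized on every call and are never registered as parameters of the
# module, so this projection is effectively untrained; a head layer created in
# __init__ would normally be used instead.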
fc_layer = nn.Linear(deephead_out.size(1), self.pred_dim)
output = fc_layer(deephead_out)
return output
def _forward_deeptext(self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
class_weights=None,
output_attentions=None,
output_hidden_states=None):
outputs = self.text_encoder(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
pooled_output = outputs[1]
# no dropout module is defined on this class, so the pooled output is used directly
return pooled_output
def _build_deephead(
self,
head_hidden_dims,
head_activation,
head_dropout,
head_batchnorm,
head_batchnorm_last,
head_linear_first,
):
deep_dim = 0
if self.tabular_encoder is not None:
deep_dim += self.tabular_encoder.output_dim
if self.text_encoder is not None:
deep_dim += self.text_encoder.output_dim
if self.image_encoder is not None:
deep_dim += self.image_encoder.output_dim
head_hidden_dims = [deep_dim] + head_hidden_dims
deephead = MLP(
head_hidden_dims,
head_activation,
head_dropout,
head_batchnorm,
head_batchnorm_last,
head_linear_first,
)
deephead.add_module('head_out',
nn.Linear(head_hidden_dims[-1], self.pred_dim))
return deephead
@staticmethod # noqa: C901
def _check_model_components(
wide,
deeptabular,
deeptext,
deepimage,
deephead,
head_hidden_dims,
pred_dim,
):
if wide is not None:
assert wide.wide_linear.weight.size(1) == pred_dim, (
"the 'pred_dim' of the wide component ({}) must be equal to the 'pred_dim' "
'of the deep component and the overall model itself ({})'.
format(wide.wide_linear.weight.size(1), pred_dim))
if deeptabular is not None and not hasattr(deeptabular, 'output_dim'):
raise AttributeError(
"deeptabular model must have an 'output_dim' attribute. "
'See pytorch-widedeep.models.deep_text.DeepText')
if deeptabular is not None:
is_tabnet = deeptabular.__class__.__name__ == 'TabNet'
has_wide_text_or_image = (
wide is not None or deeptext is not None
or deepimage is not None)
if is_tabnet and has_wide_text_or_image:
warnings.warn(
"'WideDeep' is a model comprised by multiple components and the 'deeptabular'"
" component is 'TabNet'. We recommend using 'TabNet' in isolation."
" The reasons are: i)'TabNet' uses sparse regularization which partially losses"
' its purpose when used in combination with other components.'
" If you still want to use a multiple component model with 'TabNet',"
" consider setting 'lambda_sparse' to 0 during training. ii) The feature"
' importances will be computed only for TabNet but the model will comprise multiple'
" components. Therefore, such importances will partially lose their 'meaning'.",
UserWarning,
)
if deeptext is not None and not hasattr(deeptext, 'output_dim'):
raise AttributeError(
"deeptext model must have an 'output_dim' attribute. "
'See pytorch-widedeep.models.deep_text.DeepText')
if deepimage is not None and not hasattr(deepimage, 'output_dim'):
raise AttributeError(
"deepimage model must have an 'output_dim' attribute. "
'See pytorch-widedeep.models.deep_text.DeepText')
if deephead is not None and head_hidden_dims is not None:
raise ValueError(
"both 'deephead' and 'head_hidden_dims' are not None. Use one of the other, but not both"
)
if (head_hidden_dims is not None and not deeptabular and not deeptext
and not deepimage):
raise ValueError(
"if 'head_hidden_dims' is not None, at least one deep component must be used"
)
if deephead is not None:
deephead_inp_feat = next(deephead.parameters()).size(1)
output_dim = 0
if deeptabular is not None:
output_dim += deeptabular.output_dim
if deeptext is not None:
output_dim += deeptext.output_dim
if deepimage is not None:
output_dim += deepimage.output_dim
assert deephead_inp_feat == output_dim, (
"if a custom 'deephead' is used its input features ({}) must be equal to "
'the output features of the deep component ({})'.format(
deephead_inp_feat, output_dim))
class MultiModalModel(nn.Module):
r"""Main collector class that combines all ``wide``, ``deeptabular``
(which can be a number of architectures), ``deeptext`` and
``deepimage`` models.
There are two options to combine these models that correspond to the
two main architectures that ``pytorch-widedeep`` can build.
- Directly connecting the output of the model components to an output neuron(s).
- Adding a `Fully-Connected Head` (FC-Head) on top of the deep models.
This FC-Head will combine the output from the ``deeptabular``, ``deeptext`` and
``deepimage`` and will be then connected to the output neuron(s).
Parameters
----------
wide: ``nn.Module``, Optional, default = None
``Wide`` model. I recommend using the ``Wide`` class in this
package. However, it is possible to use a custom model as long as
it is consistent with the required architecture, see
:class:`pytorch_widedeep.models.wide.Wide`
deeptabular: ``nn.Module``, Optional, default = None
currently ``pytorch-widedeep`` implements a number of possible
architectures for the ``deeptabular`` component. See the documentation
of the package. I recommend using the ``deeptabular`` components in
this package. However, it is possible to use a custom model as long
as it is consistent with the required architecture.
deeptext: ``nn.Module``, Optional, default = None
Model for the text input. Must be an object of class ``DeepText``
or a custom model as long as it is consistent with the required
architecture. See
:class:`pytorch_widedeep.models.deep_text.DeepText`
deepimage: ``nn.Module``, Optional, default = None
Model for the images input. Must be an object of class
``DeepImage`` or a custom model as long as it is consistent with the
required architecture. See
:class:`pytorch_widedeep.models.deep_image.DeepImage`
deephead: ``nn.Module``, Optional, default = None
Custom model by the user that will receive the output of the deep
component. Typically a FC-Head (MLP)
head_hidden_dims: List, Optional, default = None
Alternatively, the ``head_hidden_dims`` param can be used to
specify the sizes of the stacked dense layers in the fc-head e.g:
``[128, 64]``. Use ``deephead`` or ``head_hidden_dims``, but not
both.
head_dropout: float, default = 0.1
If ``head_hidden_dims`` is not None, dropout between the layers in
``head_hidden_dims``
head_activation: str, default = "relu"
If ``head_hidden_dims`` is not None, activation function of the head
layers. One of ``tanh``, ``relu``, ``gelu`` or ``leaky_relu``
head_batchnorm: bool, default = False
If ``head_hidden_dims`` is not None, specifies if batch
normalization should be included in the head layers
head_batchnorm_last: bool, default = False
If ``head_hidden_dims`` is not None, boolean indicating whether or
not to apply batch normalization to the last of the dense layers
head_linear_first: bool, default = False
If ``head_hidden_dims`` is not None, boolean indicating the order of
the operations in the dense layer. If ``True``:
``[LIN -> ACT -> BN -> DP]``. If ``False``: ``[BN -> DP -> LIN ->
ACT]``
pred_dim: int, default = 1
Size of the final wide and deep output layer containing the
predictions. `1` for regression and binary classification or number
of classes for multiclass classification.
Examples
--------
>>> from pytorch_widedeep.models import TabResnet, DeepImage, DeepText, Wide, WideDeep
>>> embed_input = [(u, i, j) for u, i, j in zip(["a", "b", "c"][:4], [4] * 3, [8] * 3)]
>>> column_idx = {k: v for v, k in enumerate(["a", "b", "c"])}
>>> wide = Wide(10, 1)
>>> deeptabular = TabResnet(blocks_dims=[8, 4], column_idx=column_idx, embed_input=embed_input)
>>> deeptext = DeepText(vocab_size=10, embed_dim=4, padding_idx=0)
>>> deepimage = DeepImage(pretrained=False)
>>> model = WideDeep(wide=wide, deeptabular=deeptabular, deeptext=deeptext, deepimage=deepimage)
.. note:: While I recommend using the ``wide`` and ``deeptabular`` components
within this package when building the corresponding model components,
it is very likely that the user will want to use custom text and image
models. That is perfectly possible. Simply, build them and pass them
as the corresponding parameters. Note that the custom models MUST
return a last layer of activations (i.e. not the final prediction) so
that these activations are collected by ``WideDeep`` and combined
accordingly. In addition, the models MUST also contain an attribute
``output_dim`` with the size of these last layers of activations. See
for example :class:`pytorch_widedeep.models.tab_mlp.TabMlp`
"""
def __init__(
self,
wide: Optional[nn.Module] = None,
deeptabular: Optional[nn.Module] = None,
deeptext: Optional[nn.Module] = None,
deepimage: Optional[nn.Module] = None,
deephead: Optional[nn.Module] = None,
head_hidden_dims: Optional[List[int]] = [256, 128],
head_activation: str = 'relu',
head_dropout: float = 0.1,
head_batchnorm: bool = False,
head_batchnorm_last: bool = False,
head_linear_first: bool = False,
pred_dim: int = 2,
):
super(MultiModalModel, self).__init__()
self._check_model_components(
wide,
deeptabular,
deeptext,
deepimage,
deephead,
head_hidden_dims,
pred_dim,
)
# required as attribute just in case we pass a deephead
self.pred_dim = pred_dim
# The main 5 components of the wide and deep assemble
self.wide = wide
self.deeptabular = deeptabular
self.deeptext = deeptext
self.deepimage = deepimage
self.deephead = deephead
if self.deeptabular is not None:
self.is_tabnet = deeptabular.__class__.__name__ == 'TabNet'
else:
self.is_tabnet = False
if self.deephead is None:
self.deephead = self._build_deephead(
head_hidden_dims,
head_activation,
head_dropout,
head_batchnorm,
head_batchnorm_last,
head_linear_first,
)
def forward(self, X: Dict[str, Tensor]):
wide_out = self._forward_wide(X)
out = self._forward_deephead(X, wide_out)
return out
def _build_deephead(
self,
head_hidden_dims,
head_activation,
head_dropout,
head_batchnorm,
head_batchnorm_last,
head_linear_first,
):
deep_dim = 0
if self.deeptabular is not None:
deep_dim += self.deeptabular.output_dim
if self.deeptext is not None:
deep_dim += self.deeptext.output_dim
if self.deepimage is not None:
deep_dim += self.deepimage.output_dim
head_hidden_dims = [deep_dim] + head_hidden_dims
deephead = MLP(
head_hidden_dims,
head_activation,
head_dropout,
head_batchnorm,
head_batchnorm_last,
head_linear_first,
)
deephead.add_module('head_out',
nn.Linear(head_hidden_dims[-1], self.pred_dim))
return deephead
def _forward_wide(self, X):
if self.wide is not None:
out = self.wide(X['wide'])
else:
batch_size = X[list(X.keys())[1]].size(0)
out = torch.zeros(batch_size, self.pred_dim).to(device)
return out
def _forward_deephead(self, X, wide_out):
if self.deeptabular is not None:
if self.is_tabnet:
tab_out = self.deeptabular(X['deeptabular'])
deepside, M_loss = tab_out[0], tab_out[1]
else:
deepside = self.deeptabular(X['deeptabular'])
else:
deepside = torch.FloatTensor()
if self.deeptext is not None:
deepside = torch.cat(
[deepside, self.deeptext(**X['deeptext'])], axis=1)
if self.deepimage is not None:
deepside = torch.cat(
[deepside, self.deepimage(X['deepimage'])], axis=1)
deephead_out = self.deephead(deepside)
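# NOTE: same caveat as in MultiModalBert._forward_deephead above - this nn.Linear
# is re-created (and re-initialized) on every forward call, so its weights are
# never trained.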
deepside_out = nn.Linear(deephead_out.size(1),
self.pred_dim).to(device)
if self.is_tabnet:
res = (wide_out.add_(deepside_out(deephead_out)), M_loss)
else:
res = wide_out.add_(deepside_out(deephead_out))
return res
@staticmethod # noqa: C901
def _check_model_components(
wide,
deeptabular,
deeptext,
deepimage,
deephead,
head_hidden_dims,
pred_dim,
):
if wide is not None:
assert wide.wide_linear.weight.size(1) == pred_dim, (
"the 'pred_dim' of the wide component ({}) must be equal to the 'pred_dim' "
'of the deep component and the overall model itself ({})'.
format(wide.wide_linear.weight.size(1), pred_dim))
if deeptabular is not None and not hasattr(deeptabular, 'output_dim'):
raise AttributeError(
"deeptabular model must have an 'output_dim' attribute. "
'See pytorch-widedeep.models.deep_text.DeepText')
if deeptabular is not None:
is_tabnet = deeptabular.__class__.__name__ == 'TabNet'
has_wide_text_or_image = (
wide is not None or deeptext is not None
or deepimage is not None)
if is_tabnet and has_wide_text_or_image:
warnings.warn(
"'WideDeep' is a model comprised by multiple components and the 'deeptabular'"
" component is 'TabNet'. We recommend using 'TabNet' in isolation."
" The reasons are: i)'TabNet' uses sparse regularization which partially losses"
' its purpose when used in combination with other components.'
" If you still want to use a multiple component model with 'TabNet',"
" consider setting 'lambda_sparse' to 0 during training. ii) The feature"
' importances will be computed only for TabNet but the model will comprise multiple'
" components. Therefore, such importances will partially lose their 'meaning'.",
UserWarning,
)
if deeptext is not None and not hasattr(deeptext, 'output_dim'):
raise AttributeError(
"deeptext model must have an 'output_dim' attribute. "
'See pytorch-widedeep.models.deep_text.DeepText')
if deepimage is not None and not hasattr(deepimage, 'output_dim'):
raise AttributeError(
"deepimage model must have an 'output_dim' attribute. "
'See pytorch-widedeep.models.deep_text.DeepText')
if deephead is not None and head_hidden_dims is not None:
raise ValueError(
"both 'deephead' and 'head_hidden_dims' are not None. Use one of the other, but not both"
)
if (head_hidden_dims is not None and not deeptabular and not deeptext
and not deepimage):
raise ValueError(
"if 'head_hidden_dims' is not None, at least one deep component must be used"
)
if deephead is not None:
deephead_inp_feat = next(deephead.parameters()).size(1)
output_dim = 0
if deeptabular is not None:
output_dim += deeptabular.output_dim
if deeptext is not None:
output_dim += deeptext.output_dim
if deepimage is not None:
output_dim += deepimage.output_dim
assert deephead_inp_feat == output_dim, (
"if a custom 'deephead' is used its input features ({}) must be equal to "
'the output features of the deep component ({})'.format(
deephead_inp_feat, output_dim))
if __name__ == '__main__':
import pandas as pd
import sys
from text.deeptext import BertWithTabular
from transformers import AutoConfig
from config import TabularConfig
sys.path.append('../')
from data.preprocessor.tab_preprocessor import TabPreprocessor
df = pd.read_csv(
'/media/robin/DATA/datatsets/structure_data/titanic/Titanic.csv')
cat_cols = ['Sex', 'Embarked']
con_cols = ['Fare', 'Age']
print(df[cat_cols + con_cols])
tabpreprocessor = TabPreprocessor(
categroical_cols=cat_cols,
continuous_cols=con_cols,
continuous_transform_method='standard_scaler')
full_data_transformed = tabpreprocessor.fit_transform(df)
tabmlp = TabMlp(
mlp_hidden_dims=[8, 4],
column_idx=tabpreprocessor.column_idx,
embed_input=tabpreprocessor.embeddings_input)
tabular_config = TabularConfig(num_labels=1)
model_name = 'bert-base-uncased'
config = AutoConfig.from_pretrained(model_name)
config.tabular_config = tabular_config
deeptext = BertWithTabular(config=config)
print(deeptext)
model = MultiModalModel(deeptabular=tabmlp, deeptext=deeptext)
print(model)
print(model.deephead)
|
the-stack_0_27461
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import os
from datafaker.compat import safe_encode
from datafaker.constant import JSON_FORMAT, TEXT_FORMAT
from datafaker.dbs.basedb import BaseDB
from datafaker.utils import save2file, json_item
class FileDB(BaseDB):
def construct_self_rows(self):
return []
def save_data(self, lines):
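"""Write the formatted rows, one per line, to the file <connect>/<table>."""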
filepath = os.path.join(self.args.connect, self.args.table)
items = []
for line in lines:
item = self.format_data(line)
items.append(item+os.linesep)
save2file(items, filepath)
def format_data(self, columns):
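"""Render a single row according to the configured output format: metaj template, JSON, or delimited text."""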
data = columns
if self.args.metaj:
data = self.metaj_content % tuple(columns)
elif self.args.format == JSON_FORMAT:
data = json_item(self.column_names, columns)
elif self.args.format == TEXT_FORMAT:
data = self.args.outspliter.join([str(safe_encode(word)) for word in columns])
return data
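# Illustrative note (not in the original source): with the text format and an
# outspliter of ",", a row such as [1, "jack", 20] becomes the line "1,jack,20";
# with the json format the row is emitted as a JSON object built from the column
# names; when metaj is set, the row values are interpolated into the user-supplied
# metaj template.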
|
the-stack_0_27462
|
import hashlib
import PyInquirer as inquirer
import json
import os
from pyfiglet import Figlet
from aergo_wallet.wallet import (
AergoWallet,
)
from aergo_wallet.exceptions import (
InvalidArgumentsError,
TxError,
InsufficientBalanceError,
)
from aergo_cli.utils import (
confirm_transfer,
prompt_amount,
prompt_deposit_height,
prompt_new_bridge,
prompt_new_network,
prompt_aergo_keystore,
prompt_new_asset,
prompt_new_validators,
prompt_number,
aergo_style,
promptYN,
print_balance_table_header,
print_balance_table_lines,
)
class MerkleBridgeCli():
"""CLI tool for interacting with the AergoWallet.
First choose an existing config file or create one from scratch.
Once a config file is chosen, the CLI provides an interface to the
AergoWallet and has the following features:
- edit config file settings
- transfer assets between networks
- check status of transfers
- check balances for each asset on each network
"""
def __init__(self, root_path: str = './'):
"""Load the pending transfers."""
# root_path is the path from which files are tracked
with open(root_path
+ 'aergo_cli/pending_transfers.json', 'r') as file:
self.pending_transfers = json.load(file)
self.root_path = root_path
def start(self):
"""Entry point of cli : load a wallet configuration file of create a
new one
"""
f = Figlet(font='speed')
print(f.renderText('Merkle Bridge Cli'))
print("Welcome to the Merkle Bridge Interactive CLI.\n"
"This is a tool to transfer assets across the "
"Aergo Merkle bridge and manage wallet "
"settings (config.json)\n")
while 1:
questions = [
{
'type': 'list',
'name': 'YesNo',
'message': "Do you have a config.json? ",
'choices': [
{
'name': 'Yes, find it with the path',
'value': 'Y'
},
{
'name': 'No, create one from scratch',
'value': 'N'
},
'Quit']
}
]
answers = inquirer.prompt(questions, style=aergo_style)
try:
if answers['YesNo'] == 'Y':
self.load_config()
self.menu()
elif answers['YesNo'] == 'N':
self.create_config()
else:
return
except KeyError:
return
def load_config(self):
"""Load the configuration file from path and create a wallet object."""
while 1:
questions = [
{
'type': 'input',
'name': 'config_file_path',
'message': 'Path to config.json (path/to/config.json)'
}
]
config_file_path = inquirer.prompt(
questions, style=aergo_style)['config_file_path']
try:
self.wallet = AergoWallet(config_file_path)
break
except (IsADirectoryError, FileNotFoundError):
print("Invalid path/to/config.json")
except KeyError:
return
def menu(self):
"""Menu for interacting with network.
Users can change settings, query balances, check pending transfers,
execute cross chain transactions
"""
while 1:
questions = [
{
'type': 'list',
'name': 'action',
'message': "What would you like to do ? ",
'choices': [
{
'name': 'Check pending transfer',
'value': 'P'
},
{
'name': 'Check balances',
'value': 'B'
},
{
'name': 'Initiate transfer (Lock/Burn)',
'value': 'I'
},
{
'name': 'Finalize transfer (Mint/Unlock)',
'value': 'F'
},
{
'name': 'Settings (Register Assets and Networks)',
'value': 'S'
},
'Back'
]
}
]
answers = inquirer.prompt(questions, style=aergo_style)
try:
if answers['action'] == 'Back':
return
elif answers['action'] == 'P':
self.check_withdrawable_balance()
elif answers['action'] == 'B':
self.check_balances()
elif answers['action'] == 'I':
self.initiate_transfer()
elif answers['action'] == 'F':
self.finalize_transfer()
elif answers['action'] == 'S':
self.edit_settings()
except (TypeError, KeyboardInterrupt, InvalidArgumentsError,
TxError, InsufficientBalanceError, KeyError) as e:
print('Something went wrong, check the status of your pending '
'transfers\nError msg: {}'.format(e))
def check_balances(self):
"""Iterate every registered wallet, network and asset and query
balances.
"""
col_widths = [24, 55, 23]
for wallet, info in self.wallet.config_data('wallet').items():
print('\n' + wallet + ': ' + info['addr'])
print_balance_table_header()
for net_name, net in self.wallet.config_data('networks').items():
for token_name, token in net['tokens'].items():
lines = []
balance, addr = self.wallet.get_balance(
token_name, net_name, account_name=wallet
)
if balance != 0:
line = [net_name, addr,
str(balance / 10**18) + ' \U0001f4b0']
lines.append(line)
for peg in token['pegs']:
balance, addr = self.wallet.get_balance(
token_name, peg, net_name, wallet
)
if balance != 0:
line = [peg, addr,
str(balance / 10**18) + ' \U0001f4b0']
lines.append(line)
print_balance_table_lines(lines, token_name,
col_widths)
aer_balance, _ = self.wallet.get_balance('aergo', net_name,
account_name=wallet)
if aer_balance != 0:
line = [net_name, 'aergo',
str(aer_balance / 10**18) + ' \U0001f4b0']
print_balance_table_lines([line], 'aergo', col_widths)
print(' ' + '‾' * 120)
def edit_settings(self):
"""Menu for editing the config file of the currently loaded wallet"""
while 1:
questions = [
{
'type': 'list',
'name': 'action',
'message': 'What would you like to do ? ',
'choices': [
{
'name': 'Register new asset',
'value': 'A'
},
{
'name': 'Register new network',
'value': 'N'
},
{
'name': 'Register new bridge',
'value': 'B'
},
{
'name': 'Register new keystore account',
'value': 'K'
},
{
'name': 'Update validators set',
'value': 'V'
},
{
'name': 'Update anchoring period',
'value': 'UA'
},
{
'name': 'Update finality',
'value': 'UF'
},
'Back',
]
}
]
answers = inquirer.prompt(questions, style=aergo_style)
try:
if answers['action'] == 'Back':
return
elif answers['action'] == 'A':
self.register_asset()
elif answers['action'] == 'N':
self.register_network()
elif answers['action'] == 'V':
self.register_new_validators()
elif answers['action'] == 'B':
self.register_bridge()
elif answers['action'] == 'K':
self.register_key()
elif answers['action'] == 'UA':
self.update_t_anchor()
elif answers['action'] == 'UF':
self.update_t_final()
except (TypeError, KeyboardInterrupt, InvalidArgumentsError) as e:
print('Something went wrong, check the status of your pending '
'transfers\nError msg: {}'.format(e))
def create_config(self):
"""Create a new configuration file from scratch.
This tool registers 2 networks, bridge contracts,
a private key for each network and bridge validators
"""
new_config = {}
print("Let's register 2 networks, "
"validators(optional) and a private key for interacting with "
"each network.")
# Register 2 networks
answers = prompt_new_network()
net1 = answers['name']
new_config['networks'] = {net1: {
'ip': answers['ip'],
'tokens': {},
'bridges': {}
}}
answers = prompt_new_network()
net2 = answers['name']
new_config['networks'][net2] = {
'ip': answers['ip'],
'tokens': {},
'bridges': {}
}
# Register bridge contracts on each network
if promptYN('Would you like to register a bridge ?',
'Yes', 'No'):
answers = prompt_new_bridge(net1, net2)
new_config['networks'][net1]['bridges'] = {
net2: {'addr': answers['bridge1'],
't_anchor': int(answers['t_anchor1']),
't_final': int(answers['t_final1']),
'oracle': answers['oracle1']
}
}
new_config['networks'][net2]['bridges'] = {
net1: {'addr': answers['bridge2'],
't_anchor': int(answers['t_anchor2']),
't_final': int(answers['t_final2']),
'oracle': answers['oracle2']
}
}
# Register bridge validators
if promptYN('Would you like to register validators ? '
'(not needed for bridge users)', 'Yes', 'No'):
validators = prompt_new_validators()
new_config['validators'] = validators
else:
new_config['validators'] = {}
# Register a new private key
new_config['wallet'] = {}
print("Register a keystore for transacting on Aergo")
name, addr, keystore_path = prompt_aergo_keystore()
new_config['wallet'][name] = {"addr": addr,
"keystore": keystore_path}
questions = [
{
'type': 'input',
'name': 'path',
'message': 'Path to save new config file'
}
]
path = inquirer.prompt(questions, style=aergo_style)['path']
with open(path, "w") as f:
json.dump(new_config, f, indent=4, sort_keys=True)
print("Config file stored in: {}".format(os.path.abspath(path)))
def register_bridge(self):
"""Register bridge contracts between 2 already defined networks."""
net1, net2 = self.prompt_bridge_networks()
answers = prompt_new_bridge(net1, net2)
self.wallet.config_data(
'networks', net1, 'bridges', net2,
value={'addr': answers['bridge1'],
't_anchor': int(answers['t_anchor1']),
't_final': int(answers['t_final1'])
}
)
self.wallet.config_data(
'networks', net2, 'bridges', net1,
value={'addr': answers['bridge2'],
't_anchor': int(answers['t_anchor2']),
't_final': int(answers['t_final2'])
}
)
self.wallet.save_config()
def register_asset(self):
"""Register a new asset and it's pegs on other networks in the
wallet's config.
"""
networks = self.get_registered_networks()
name, origin, origin_addr, pegs, peg_addrs = prompt_new_asset(
networks.copy())
if name == 'aergo':
print('Not allowed : aergo is reserved for aer native asset')
return
for net in networks:
try:
self.wallet.config_data('networks', net, 'tokens', name)
print("Asset name already used")
return
except KeyError:
pass
self.wallet.config_data('networks', origin, 'tokens', name,
value={'addr': {}, 'pegs': {}})
self.wallet.config_data(
'networks', origin, 'tokens', name, 'addr', value=origin_addr)
for i, peg_net in enumerate(pegs):
self.wallet.config_data(
'networks', origin, 'tokens', name, 'pegs', peg_net,
value=peg_addrs[i])
self.wallet.save_config()
def register_network(self):
"""Register a new network in the wallet's config."""
answers = prompt_new_network()
net = answers['name']
ip = answers['ip']
self.wallet.config_data(
'networks', net, value={'ip': ip, 'tokens': {}, 'bridges': {}}
)
self.wallet.save_config()
def register_key(self):
"""Register new key in wallet's config."""
name, addr, keystore_path = prompt_aergo_keystore()
keystore_path = os.path.relpath(keystore_path, self.root_path)
try:
self.wallet.config_data('wallet', name)
print("Account name already used")
return
except KeyError:
pass
self.wallet.config_data(
'wallet', name, value={'addr': addr, 'keystore': keystore_path})
self.wallet.save_config()
def register_new_validators(self):
"""Register new validators in the wallet's config."""
print("WARNING: current validators will be overridden in the config "
"file")
validators = prompt_new_validators()
self.wallet.config_data('validators', value=validators)
self.wallet.save_config()
def update_t_anchor(self):
from_chain, to_chain = self.prompt_transfer_networks()
t_anchor = prompt_number("New anchoring period (nb of blocks) of {} "
"onto {}".format(from_chain, to_chain))
self.wallet.config_data('networks', to_chain, 'bridges', from_chain,
't_anchor', value=t_anchor)
self.wallet.save_config()
def update_t_final(self):
from_chain, to_chain = self.prompt_transfer_networks()
t_final = prompt_number("New finality (nb of blocks) of {}"
.format(from_chain))
self.wallet.config_data('networks', to_chain, 'bridges', from_chain,
't_final', value=t_final)
self.wallet.save_config()
def get_asset_address(self, asset_name, from_chain, to_chain):
try:
addr = self.wallet.config_data(
'networks', from_chain, 'tokens', asset_name, 'addr')
return addr
except KeyError:
pass
try:
addr = self.wallet.config_data(
'networks', to_chain, 'tokens', asset_name, 'pegs', from_chain)
return addr
except KeyError:
pass
raise InvalidArgumentsError(
'asset not properly registered in config.json')
def initiate_transfer(self):
"""Initiate a new transfer of tokens between 2 networks."""
from_chain, to_chain, from_assets, to_assets, asset_name, \
receiver = self.prompt_commun_transfer_params()
amount = prompt_amount()
bridge_from = self.wallet.config_data(
'networks', from_chain, 'bridges', to_chain, 'addr')
bridge_to = self.wallet.config_data(
'networks', to_chain, 'bridges', from_chain, 'addr')
asset_addr = self.get_asset_address(asset_name, from_chain, to_chain)
summary = "Departure chain: {} ({})\n" \
"Destination chain: {} ({})\n" \
"Asset name: {} ({})\n" \
"Receiver at destination: {}\n" \
"Amount: {}\n".format(from_chain, bridge_from, to_chain,
bridge_to, asset_name, asset_addr,
receiver, amount)
deposit_height, tx_hash = 0, ""
privkey_name = self.prompt_signing_key('wallet')
if asset_name in from_assets:
# if transfering a native asset Lock
print("Lock transfer summary:\n{}".format(summary))
if not confirm_transfer():
print('Initialize transfer canceled')
return
deposit_height, tx_hash = self.wallet.initiate_transfer_lock(
from_chain, to_chain, asset_name, amount, receiver,
privkey_name
)
elif (asset_name in to_assets
and from_chain in self.wallet.config_data(
'networks', to_chain, 'tokens', asset_name, 'pegs')):
# if transfering a pegged asset Burn
print("Burn transfer summary:\n{}".format(summary))
if not confirm_transfer():
print('Initialize transfer canceled')
return
deposit_height, tx_hash = self.wallet.initiate_transfer_burn(
from_chain, to_chain, asset_name, amount, receiver,
privkey_name
)
else:
print('asset not properly registered in config.json')
return
print("Transaction Hash : {}\nBlock Height : {}\n"
.format(tx_hash, deposit_height))
pending_id = hashlib.sha256(
(from_chain + to_chain + asset_name + receiver).encode('utf-8')
).digest().hex()
self.pending_transfers[pending_id] = \
[from_chain, to_chain, asset_name, receiver, deposit_height]
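# Note: the pending id is the hex sha256 of from_chain + to_chain + asset_name +
# receiver, so re-initiating a transfer with the same parameters overwrites the
# previous pending entry instead of adding a second one.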
self.store_pending_transfers()
def finalize_transfer_arguments(self, prompt_last_deposit=True):
"""Prompt the arguments needed to finalize a transfer.
The arguments can be taken from the pending transfers or
entered manually by the user.
Returns:
List of transfer arguments
"""
choices = [
{
'name': '{}'.format(val),
'value': val
} for _, val in self.pending_transfers.items()
]
choices.extend(["Custom transfer", "Back"])
questions = [
{
'type': 'list',
'name': 'transfer',
'message': 'Choose a pending transfer',
'choices': choices
}
]
answers = inquirer.prompt(questions, style=aergo_style)
if answers['transfer'] == 'Custom transfer':
from_chain, to_chain, from_assets, to_assets, asset_name, \
receiver = self.prompt_commun_transfer_params()
deposit_height = 0
if prompt_last_deposit:
deposit_height = prompt_deposit_height()
elif answers['transfer'] == 'Back':
return None
else:
from_chain, to_chain, asset_name, receiver, deposit_height = \
answers['transfer']
from_assets, to_assets = self.get_registered_assets(from_chain,
to_chain)
return (from_chain, to_chain, from_assets, to_assets, asset_name,
receiver, deposit_height)
def finalize_transfer(self):
"""Finalize a token transfer between 2 chains."""
arguments = self.finalize_transfer_arguments()
if arguments is None:
return
from_chain, to_chain, from_assets, to_assets, asset_name, receiver, \
deposit_height = arguments
bridge_from = self.wallet.config_data(
'networks', from_chain, 'bridges', to_chain, 'addr')
bridge_to = self.wallet.config_data(
'networks', to_chain, 'bridges', from_chain, 'addr')
asset_addr = self.get_asset_address(asset_name, from_chain, to_chain)
summary = "Departure chain: {} ({})\n" \
"Destination chain: {} ({})\n" \
"Asset name: {} ({})\n" \
"Receiver at destination: {}\n" \
"Block height of lock/burn/freeze: {}\n"\
.format(from_chain, bridge_from, to_chain, bridge_to,
asset_name, asset_addr, receiver, deposit_height)
privkey_name = self.prompt_signing_key('wallet')
if asset_name in from_assets:
# if transfering a native asset mint
print("Mint transfer summary:\n{}".format(summary))
if not confirm_transfer():
print('Finalize transfer canceled')
return
self.wallet.finalize_transfer_mint(
from_chain, to_chain, asset_name, receiver, deposit_height,
privkey_name
)
elif (asset_name in to_assets
and from_chain in self.wallet.config_data(
'networks', to_chain, 'tokens', asset_name, 'pegs')
):
# if transfering a pegged asset unlock
print("Unlock transfer summary:\n{}".format(summary))
if not confirm_transfer():
print('Finalize transfer canceled')
return
self.wallet.finalize_transfer_unlock(
from_chain, to_chain, asset_name, receiver, deposit_height,
privkey_name
)
else:
print('asset not properly registered in config.json')
return
# remove pending id from pending transfers
pending_id = hashlib.sha256(
(from_chain + to_chain + asset_name + receiver).encode('utf-8')
).digest().hex()
self.pending_transfers.pop(pending_id, None)
self.store_pending_transfers()
def check_withdrawable_balance(self):
"""Check the status of cross chain transfers."""
arguments = self.finalize_transfer_arguments(
prompt_last_deposit=False)
if arguments is None:
return
from_chain, to_chain, from_assets, to_assets, asset_name, receiver, \
_ = arguments
if asset_name in from_assets:
# if native asset check mintable
withdrawable, pending = self.wallet.get_mintable_balance(
from_chain, to_chain, asset_name, account_addr=receiver
)
elif (asset_name in to_assets
and from_chain in self.wallet.config_data(
'networks', to_chain, 'tokens', asset_name, 'pegs')
):
# if pegged asset check unlockable
withdrawable, pending = self.wallet.get_unlockable_balance(
from_chain, to_chain, asset_name, account_addr=receiver
)
else:
print('asset not properly registered in config.json')
return
print("Withdrawable: {} Pending: {}"
.format(withdrawable / 10**18, pending / 10**18))
def prompt_commun_transfer_params(self):
"""Prompt the common parameters necessary for all transfers.
Returns:
List of transfer parameters : from_chain, to_chain, from_assets,
to_assets, asset_name, receiver
"""
from_chain, to_chain = self.prompt_transfer_networks()
from_assets, to_assets = self.get_registered_assets(from_chain,
to_chain)
questions = [
{
'type': 'list',
'name': 'asset_name',
'message': 'Name of asset to transfer',
'choices': from_assets + to_assets
},
{
'type': 'input',
'name': 'receiver',
'message': 'Receiver of assets on other side of bridge'
}
]
answers = inquirer.prompt(questions, style=aergo_style)
receiver = answers['receiver']
asset_name = answers['asset_name']
return from_chain, to_chain, from_assets, to_assets, asset_name, \
receiver
def prompt_signing_key(self, wallet_name):
"""Prompt user to select a private key.
Note:
Keys are displayed by name and should have been registered in
wallet config.
"""
accounts = self.wallet.config_data(wallet_name)
questions = [
{
'type': 'list',
'name': 'privkey_name',
'message': 'Choose account to sign transaction : ',
'choices': [name for name in accounts]
}
]
answers = inquirer.prompt(questions, style=aergo_style)
return answers['privkey_name']
def prompt_bridge_networks(self):
"""Prompt user to choose 2 networks between registered networks."""
networks = self.get_registered_networks()
questions = [
{
'type': 'list',
'name': 'from_chain',
'message': 'Departure network',
'choices': networks
}
]
answers = inquirer.prompt(questions, style=aergo_style)
from_chain = answers['from_chain']
networks.remove(from_chain)
questions = [
{
'type': 'list',
'name': 'to_chain',
'message': 'Destination network',
'choices': networks
}
]
answers = inquirer.prompt(questions, style=aergo_style)
to_chain = answers['to_chain']
return from_chain, to_chain
def prompt_transfer_networks(self):
"""Prompt user to choose 2 networks between registered bridged
networks.
"""
networks = self.get_registered_networks()
questions = [
{
'type': 'list',
'name': 'from_chain',
'message': 'Departure network',
'choices': networks
}
]
answers = inquirer.prompt(questions, style=aergo_style)
from_chain = answers['from_chain']
networks = [net for net in
self.wallet.config_data('networks', from_chain, 'bridges')]
if len(networks) == 0:
raise InvalidArgumentsError('No bridge registered to this network')
questions = [
{
'type': 'list',
'name': 'to_chain',
'message': 'Destination network',
'choices': networks
}
]
answers = inquirer.prompt(questions, style=aergo_style)
to_chain = answers['to_chain']
return from_chain, to_chain
def get_registered_networks(self):
"""Get the list of networks registered in the wallet config."""
return [net for net in self.wallet.config_data('networks')]
def get_registered_assets(self, from_chain, to_chain):
"""Get the list of registered assets on each network."""
from_assets = [
asset for asset in self.wallet.config_data(
'networks', from_chain, 'tokens')
]
to_assets = [
asset for asset in self.wallet.config_data(
'networks', to_chain, 'tokens')
]
return from_assets, to_assets
def store_pending_transfers(self):
"""Record pending transfers in json file so they can be finalized
later.
"""
with open(self.root_path
+ 'aergo_cli/pending_transfers.json', 'w') as file:
json.dump(self.pending_transfers, file, indent=4)
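# The pending_transfers.json file written above maps each pending id to
# [from_chain, to_chain, asset_name, receiver, deposit_height], matching the
# entries recorded in initiate_transfer().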
if __name__ == '__main__':
app = MerkleBridgeCli()
app.start()
|
the-stack_0_27463
|
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
This checks if all command line args are documented.
Return value is 0 to indicate no error.
Author: @MarcoFalke
'''
from subprocess import check_output
import re
import sys
FOLDER_GREP = 'src'
FOLDER_TEST = 'src/test/'
CMD_ROOT_DIR = '`git rev-parse --show-toplevel`/{}'.format(FOLDER_GREP)
CMD_GREP_ARGS = r"egrep -r -I '(map(Multi)?Args(\.count\(|\[)|Get(Bool)?Arg\()\"\-[^\"]+?\"' {} | grep -v '{}'".format(CMD_ROOT_DIR, FOLDER_TEST)
CMD_GREP_DOCS = r"egrep -r -I 'HelpMessageOpt\(\"\-[^\"=]+?(=|\")' {}".format(CMD_ROOT_DIR)
REGEX_ARG = re.compile(r'(?:map(?:Multi)?Args(?:\.count\(|\[)|Get(?:Bool)?Arg\()\"(\-[^\"]+?)\"')
REGEX_DOC = re.compile(r'HelpMessageOpt\(\"(\-[^\"=]+?)(?:=|\")')
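# For reference: REGEX_ARG captures "-datadir" from a call such as
# GetArg("-datadir", ...), and REGEX_DOC captures "-datadir" from
# HelpMessageOpt("-datadir=<dir>", ...).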
# list unsupported, deprecated and duplicate args as they need no documentation
SET_DOC_OPTIONAL = set(['-rpcssl', '-benchmark', '-h', '-help', '-socks', '-tor', '-debugnet', '-whitelistalwaysrelay', '-prematurewitness', '-walletprematurewitness', '-promiscuousmempoolflags', '-blockminsize', '-sendfreetransactions', '-checklevel', '-liquidityprovider', '-anonymizeunoamount', '-dbcrashratio'])
def main():
used = check_output(CMD_GREP_ARGS, shell=True, universal_newlines=True)
docd = check_output(CMD_GREP_DOCS, shell=True, universal_newlines=True)
args_used = set(re.findall(re.compile(REGEX_ARG), used))
args_docd = set(re.findall(re.compile(REGEX_DOC), docd)).union(SET_DOC_OPTIONAL)
args_need_doc = args_used.difference(args_docd)
args_unknown = args_docd.difference(args_used)
print("Args used : {}".format(len(args_used)))
print("Args documented : {}".format(len(args_docd)))
print("Args undocumented: {}".format(len(args_need_doc)))
print(args_need_doc)
print("Args unknown : {}".format(len(args_unknown)))
print(args_unknown)
sys.exit(len(args_need_doc))
if __name__ == "__main__":
main()
|
the-stack_0_27464
|
#!/usr/bin/env python
#
# rioxarray documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
import rioxarray
sys.path.insert(0, os.path.abspath(".."))
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.viewcode",
"sphinx.ext.napoleon",
"sphinx_click.ext",
"nbsphinx",
]
intersphinx_mapping = {"pyproj": ("https://pyproj4.github.io/pyproj/stable/", None)}
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "rioxarray"
copyright = "2019, rioxarray Contributors"
author = "rioxarray Contributors"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = rioxarray.__version__
# The full version, including alpha/beta/rc tags.
release = rioxarray.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "examples/.ipynb_checkpoints"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "rioxarraydoc"
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"rioxarray.tex",
"rioxarray Documentation",
"rioxarray Contributors",
"manual",
)
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "rioxarray", "rioxarray Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"rioxarray",
"rioxarray Documentation",
author,
"rioxarray",
"One line description of project.",
"Miscellaneous",
)
]
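# A typical local build (assumption: run from this docs directory, using either
# the Makefile generated by sphinx-quickstart or sphinx-build directly):
#   make html
#   sphinx-build -b html . _build/html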
|
the-stack_0_27466
|
import numpy as np
import numpy.random
import time
class Ransac:
"""
A Ransac Algorithm Class
Args:
x_data (np.ndarray): a list of points' x coordinates
y_data (np.ndarray): a list of points' y coordinates
n (int): maximum number of iterations to run
        threshold (float): the threshold to determine if a point is an inlier
is_baysac (bool): true if use BAYSAC, otherwise use RANSAC
min_dist (int): the minimum distance learned by the algorithm
best_model (Tuple[float]): the best model so far
likelihoods (np.ndarray): a list of likelihoods that the point at
index i is an inlier
inliers (Set[Tuple[float]]): the coordinates that are inliers
runtime (float): the runtime for execute_ransac
"""
def __init__(self, x_data, y_data, n, threshold, is_baysac=False):
        assert x_data.shape == y_data.shape, "x and y shapes must match. " + \
"x_data: {0}, y_data: {1}.".format(x_data.shape, y_data.shape)
assert x_data.shape[0] >= 3, "there must be at least 3 data " + \
"points to fit a circle. Given {0}.".format(x_data.shape[0])
# TODO: we may want to calculate n instead of defining it
self.x_data = x_data
self.y_data = y_data
self.n = n
self.threshold = threshold
self.is_baysac = is_baysac
self.min_dist = float("inf")
self.best_model = (0., 0., 0.)
self.likelihoods = np.repeat(0.5, x_data.shape[0])
self.inliers = set()
self.runtime = 0.
def sample_indices(self):
"""
Sample 3 points' indices.
Returns:
List[int]: 3 points' indices
"""
if self.is_baysac:
return self._likelihood_sampling()
else:
return self._random_sampling()
def _random_sampling(self):
"""
Sample 3 points' indices using random sampling.
Used when self.is_baysac is false.
Returns:
            List[int]: 3 points' indices chosen by random sampling
"""
indices = np.indices(self.x_data.shape)[0]
sample_indices = np.random.choice(indices, 3, replace=False)
return sample_indices
def _likelihood_sampling(self):
"""
Sample 3 points' indices using highest likelihoods.
Used when self.is_baysac is True.
Returns:
            List[int]: 3 points' indices with the highest likelihoods
"""
# sort in decreasing order
indices = np.argsort(self.likelihoods)[::-1]
sample_indices = indices[:3]
return sample_indices
def make_model(self, sample_indices):
"""
Fit a circle using the 3 sample points
Args:
sample_indices (List[int]): the 3 sample points' indices
Returns:
Tuple[float]: the centre points' x, y coord and the radius
"""
first_three_indices = sample_indices[:3]
pt1, pt2, pt3 = zip(self.x_data[first_three_indices],
self.y_data[first_three_indices])
A = np.array([[pt2[0] - pt1[0], pt2[1] - pt1[1]],
[pt3[0] - pt2[0], pt3[1] - pt2[1]]])
B = np.array([[pt2[0]**2 - pt1[0]**2 + pt2[1]**2 - pt1[1]**2],
[pt3[0]**2 - pt2[0]**2 + pt3[1]**2 - pt2[1]**2]])
inv_A = np.linalg.inv(A)
c_x, c_y = np.dot(inv_A, B) / 2
c_x, c_y = c_x[0], c_y[0]
r = np.sqrt((c_x - pt1[0])**2 + (c_y - pt1[1])**2)
return c_x, c_y, r
def eval_model(self, model):
"""
Evaluates the model and calculates the total difference of each point
being away from the data
Args:
model (Tuple[float]): the centre points' x, y coord and the radius
Returns:
float: the total difference of each point being away from the data
"""
c_x, c_y, r = model
inliers = set()
curr_dist_total = 0
for i in range(len(self.x_data)):
curr_x = self.x_data[i]
curr_y = self.y_data[i]
dist = abs(np.sqrt((curr_x - c_x) ** 2 + (curr_y - c_y) ** 2) - r)
if dist < self.threshold:
inliers.add((curr_x, curr_y))
curr_dist_total += dist
if len(inliers) > len(self.inliers):
self.inliers = inliers
self.best_model = model
self.min_dist = curr_dist_total
return
def update_likelihoods(self, sample_indices):
"""
Update the likelihoods given the current sample indices
Args:
sample_indices (List[int]): the 3 sample points' indices
"""
curr_likelihoods = self.likelihoods[sample_indices]
p_sample_subset_inlier = np.prod(curr_likelihoods)
self.likelihoods[sample_indices] = (curr_likelihoods - p_sample_subset_inlier) / \
(1 - p_sample_subset_inlier)
return
def execute_ransac(self):
"""
The top level method for executing ransac/baysac algorithm
"""
start_time = time.time()
for i in range(self.n):
curr_sample_indices = self.sample_indices()
model = self.make_model(curr_sample_indices)
self.eval_model(model)
if self.is_baysac:
self.update_likelihoods(curr_sample_indices)
end_time = time.time()
self.runtime = end_time - start_time
return
def get_best_model(self):
"""
Get the best model
Returns:
Tuple[float]: the best model
"""
return self.best_model
def get_inliers(self):
"""
Get the set of inlier points
Returns:
Set[Tuple[Float]]: the set of inlier points
"""
return self.inliers
def get_total_dist(self):
"""
Get the total distance from all points to the circle
Returns:
float: the total distance from all points to the circle
"""
return self.min_dist
def get_inlier_dist(self):
"""
Get the total distance from all inlier points to the circle
Returns:
float: the total distance from all inliers to the circle
"""
c_x, c_y, r = self.best_model
x_inlier, y_inlier = zip(*list(self.inliers))
total_dist = 0
for i in range(len(self.inliers)):
curr_x = x_inlier[i]
curr_y = y_inlier[i]
dist = abs(np.sqrt((curr_x - c_x) ** 2 + (curr_y - c_y) ** 2) - r)
total_dist += dist
return total_dist
def get_runtime(self):
"""
Get the runtime for execute_ransac method
Returns:
float: the runtime for execute_ransac method
"""
return self.runtime
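# Illustrative usage sketch (hypothetical data): fit a circle of radius 2
# centred at (3, -1) to noisy samples with plain RANSAC.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    theta = rng.uniform(0., 2. * np.pi, 200)
    x = 3. + 2. * np.cos(theta) + rng.normal(0., 0.1, 200)
    y = -1. + 2. * np.sin(theta) + rng.normal(0., 0.1, 200)
    ransac = Ransac(x, y, n=100, threshold=0.2, is_baysac=False)
    ransac.execute_ransac()
    print("best model (c_x, c_y, r):", ransac.get_best_model())
    print("inliers:", len(ransac.get_inliers()),
          "runtime (s):", ransac.get_runtime())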
|
the-stack_0_27468
|
# Definition for singly-linked list. LeetCode normally supplies ListNode; it is
# defined here so the snippet also runs outside the LeetCode judge.
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next
class Solution:
def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
if l1 is None and l2 is None: return l1
if l1 is not None and l2 is None: return l1
if l1 is None and l2 is not None: return l2
l = ListNode(0)
t1, t2, t = l1, l2, l
carry = 0
while t1 or t2 or carry:
val1 = (t1.val if t1 else 0)
val2 = (t2.val if t2 else 0)
t1 = (t1.next if t1 else None)
t2 = (t2.next if t2 else None)
summ = val1 + val2 + carry
carry, s = divmod(summ, 10) # summ//10, summ%10
t.next = ListNode(s)
t = t.next
return l.next
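# Illustrative usage (digits are stored in reverse order): 342 + 465 = 807,
# so 2->4->3 plus 5->6->4 yields 7->0->8.
if __name__ == "__main__":
    def build(digits):
        head = tail = ListNode(digits[0])
        for d in digits[1:]:
            tail.next = ListNode(d)
            tail = tail.next
        return head

    node = Solution().addTwoNumbers(build([2, 4, 3]), build([5, 6, 4]))
    out = []
    while node:
        out.append(node.val)
        node = node.next
    print(out)  # expected: [7, 0, 8]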
|
the-stack_0_27471
|
from collections import OrderedDict
import numpy as np
import torch
import torch.optim as optim
from torch import nn as nn
import rorlkit.torch.pytorch_util as ptu
from rorlkit.core.eval_util import create_stats_ordered_dict
from rorlkit.torch.torch_rl_algorithm import TorchTrainer
class TD3Trainer(TorchTrainer):
"""
Twin Delayed Deep Deterministic policy gradients
"""
def __init__(
self,
policy,
qf1,
qf2,
target_qf1,
target_qf2,
target_policy,
target_policy_noise=0.2,
target_policy_noise_clip=0.5,
discount=0.99,
reward_scale=1.0,
policy_learning_rate=1e-3,
qf_learning_rate=1e-3,
policy_and_target_update_period=2,
tau=0.005,
qf_criterion=None,
optimizer_class=optim.Adam,
):
super().__init__()
if qf_criterion is None:
qf_criterion = nn.MSELoss()
self.qf1 = qf1
self.qf2 = qf2
self.policy = policy
self.target_policy = target_policy
self.target_qf1 = target_qf1
self.target_qf2 = target_qf2
self.target_policy_noise = target_policy_noise
self.target_policy_noise_clip = target_policy_noise_clip
self.discount = discount
self.reward_scale = reward_scale
self.policy_and_target_update_period = policy_and_target_update_period
self.tau = tau
self.qf_criterion = qf_criterion
self.qf1_optimizer = optimizer_class(
self.qf1.parameters(),
lr=qf_learning_rate,
)
self.qf2_optimizer = optimizer_class(
self.qf2.parameters(),
lr=qf_learning_rate,
)
self.policy_optimizer = optimizer_class(
self.policy.parameters(),
lr=policy_learning_rate,
)
self.eval_statistics = OrderedDict()
self._n_train_steps_total = 0
self._need_to_update_eval_statistics = True
def train_from_torch(self, batch):
rewards = batch['rewards']
terminals = batch['terminals']
obs = batch['observations']
actions = batch['actions']
next_obs = batch['next_observations']
"""
Critic operations.
"""
next_actions = self.target_policy(next_obs)
noise = ptu.randn(next_actions.shape) * self.target_policy_noise
noise = torch.clamp(
noise,
-self.target_policy_noise_clip,
self.target_policy_noise_clip
)
noisy_next_actions = next_actions + noise
target_q1_values = self.target_qf1(next_obs, noisy_next_actions)
target_q2_values = self.target_qf2(next_obs, noisy_next_actions)
target_q_values = torch.min(target_q1_values, target_q2_values)
q_target = self.reward_scale * rewards + (1. - terminals) * self.discount * target_q_values
q_target = q_target.detach()
q1_pred = self.qf1(obs, actions)
bellman_errors_1 = (q1_pred - q_target) ** 2
qf1_loss = bellman_errors_1.mean()
q2_pred = self.qf2(obs, actions)
bellman_errors_2 = (q2_pred - q_target) ** 2
qf2_loss = bellman_errors_2.mean()
"""
Update Networks
"""
self.qf1_optimizer.zero_grad()
qf1_loss.backward()
self.qf1_optimizer.step()
self.qf2_optimizer.zero_grad()
qf2_loss.backward()
self.qf2_optimizer.step()
policy_actions = policy_loss = None
if self._n_train_steps_total % self.policy_and_target_update_period == 0:
policy_actions = self.policy(obs)
q_output = self.qf1(obs, policy_actions)
policy_loss = - q_output.mean()
self.policy_optimizer.zero_grad()
policy_loss.backward()
self.policy_optimizer.step()
ptu.soft_update_from_to(self.policy, self.target_policy, self.tau)
ptu.soft_update_from_to(self.qf1, self.target_qf1, self.tau)
ptu.soft_update_from_to(self.qf2, self.target_qf2, self.tau)
if self._need_to_update_eval_statistics:
self._need_to_update_eval_statistics = False
if policy_loss is None:
policy_actions = self.policy(obs)
q_output = self.qf1(obs, policy_actions)
policy_loss = - q_output.mean()
self.eval_statistics['QF1 Loss'] = np.mean(ptu.get_numpy(qf1_loss))
self.eval_statistics['QF2 Loss'] = np.mean(ptu.get_numpy(qf2_loss))
self.eval_statistics['Policy Loss'] = np.mean(ptu.get_numpy(
policy_loss
))
self.eval_statistics.update(create_stats_ordered_dict(
'Q1 Predictions',
ptu.get_numpy(q1_pred),
))
self.eval_statistics.update(create_stats_ordered_dict(
'Q2 Predictions',
ptu.get_numpy(q2_pred),
))
self.eval_statistics.update(create_stats_ordered_dict(
'Q Targets',
ptu.get_numpy(q_target),
))
self.eval_statistics.update(create_stats_ordered_dict(
'Bellman Errors 1',
ptu.get_numpy(bellman_errors_1),
))
self.eval_statistics.update(create_stats_ordered_dict(
'Bellman Errors 2',
ptu.get_numpy(bellman_errors_2),
))
self.eval_statistics.update(create_stats_ordered_dict(
'Policy Action',
ptu.get_numpy(policy_actions),
))
self._n_train_steps_total += 1
def get_diagnostics(self):
return self.eval_statistics
def end_epoch(self, epoch):
self._need_to_update_eval_statistics = True
@property
def networks(self):
return [
self.policy,
self.qf1,
self.qf2,
self.target_policy,
self.target_qf1,
self.target_qf2,
]
def get_snapshot(self):
return dict(
qf1=self.qf1,
qf2=self.qf2,
trained_policy=self.policy,
target_policy=self.target_policy,
)
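# A rough smoke-test sketch with hypothetical dimensions. It assumes the
# rorlkit utilities imported above behave like their rlkit counterparts
# (CPU tensors by default) and substitutes throwaway MLPs for real networks.
if __name__ == "__main__":
    import copy

    obs_dim, action_dim, batch_size = 4, 2, 8

    class ConcatQ(nn.Module):
        """Toy Q(s, a): an MLP over the concatenated state-action vector."""

        def __init__(self):
            super().__init__()
            self.net = nn.Sequential(nn.Linear(obs_dim + action_dim, 32),
                                     nn.ReLU(), nn.Linear(32, 1))

        def forward(self, obs, action):
            return self.net(torch.cat([obs, action], dim=1))

    policy = nn.Sequential(nn.Linear(obs_dim, 32), nn.ReLU(),
                           nn.Linear(32, action_dim), nn.Tanh())
    qf1, qf2 = ConcatQ(), ConcatQ()
    trainer = TD3Trainer(
        policy=policy,
        qf1=qf1,
        qf2=qf2,
        target_qf1=copy.deepcopy(qf1),
        target_qf2=copy.deepcopy(qf2),
        target_policy=copy.deepcopy(policy),
    )
    batch = dict(
        rewards=torch.zeros(batch_size, 1),
        terminals=torch.zeros(batch_size, 1),
        observations=torch.randn(batch_size, obs_dim),
        actions=torch.rand(batch_size, action_dim) * 2 - 1,
        next_observations=torch.randn(batch_size, obs_dim),
    )
    trainer.train_from_torch(batch)  # one gradient step on the toy batch
    print(trainer.get_diagnostics())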
|
the-stack_0_27473
|
# Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import shutil
import tempfile
import unittest
from py_utils import cloud_storage # pylint: disable=import-error
from telemetry.page import page
from telemetry.testing import system_stub
from telemetry.wpr import archive_info
class MockPage(page.Page):
def __init__(self, url, name=None):
super(MockPage, self).__init__(url, None, name=name)
page1 = MockPage('http://www.foo.com/', 'Foo')
page2 = MockPage('http://www.bar.com/', 'Bar')
page3 = MockPage('http://www.baz.com/')
recording1 = 'data_001.wpr'
recording2 = 'data_002.wpr'
archive_info_contents = ("""
{
"archives": {
"%s": ["%s", "%s"],
"%s": ["%s"]
}
}
""" % (recording1, page1.display_name, page2.display_name, recording2,
page3.display_name))
class WprArchiveInfoTest(unittest.TestCase):
def setUp(self):
self.tmp_dir = tempfile.mkdtemp()
# Write the metadata.
self.story_set_archive_info_file = os.path.join(
self.tmp_dir, 'info.json')
with open(self.story_set_archive_info_file, 'w') as f:
f.write(archive_info_contents)
# Write the existing .wpr files.
for i in [1, 2]:
with open(os.path.join(self.tmp_dir, ('data_00%d.wpr' % i)), 'w') as f:
f.write(archive_info_contents)
# Create the PageSetArchiveInfo object to be tested.
self.archive_info = archive_info.WprArchiveInfo.FromFile(
self.story_set_archive_info_file, cloud_storage.PUBLIC_BUCKET)
# Use cloud_storage system stub.
self.overrides = system_stub.Override(archive_info, ['cloud_storage'])
def tearDown(self):
shutil.rmtree(self.tmp_dir)
self.overrides.Restore()
def assertCorrectHashFile(self, file_path):
old_ch = cloud_storage.CalculateHash
cloud_storage.CalculateHash = self.overrides.cloud_storage.CalculateHash
try:
self.assertTrue(os.path.exists(file_path + '.sha1'))
with open(file_path + '.sha1', 'rb') as f:
self.assertEquals(cloud_storage.CalculateHash(file_path), f.read())
finally:
cloud_storage.CalculateHash = old_ch
def testDownloadArchivesIfNeeded(self):
cloud_storage_stub = self.overrides.cloud_storage
# Second hash doesn't match, need to fetch it.
cloud_storage_stub.SetRemotePathsForTesting(
{cloud_storage.PUBLIC_BUCKET: {recording1: "dummyhash",
recording2: "dummyhash22"}})
cloud_storage_stub.SetCalculatedHashesForTesting(
{os.path.join(self.tmp_dir, recording1): "dummyhash",
os.path.join(self.tmp_dir, recording2): "dummyhash2",})
self.archive_info.DownloadArchivesIfNeeded()
self.assertEquals(len(cloud_storage_stub.downloaded_files), 1)
self.assertEquals(cloud_storage_stub.downloaded_files[0], recording2)
def testReadingArchiveInfo(self):
self.assertIsNotNone(self.archive_info.WprFilePathForStory(page1))
self.assertEquals(recording1, os.path.basename(
self.archive_info.WprFilePathForStory(page1)))
self.assertIsNotNone(self.archive_info.WprFilePathForStory(page2))
self.assertEquals(recording1, os.path.basename(
self.archive_info.WprFilePathForStory(page2)))
self.assertIsNotNone(self.archive_info.WprFilePathForStory(page3))
self.assertEquals(recording2, os.path.basename(
self.archive_info.WprFilePathForStory(page3)))
def testArchiveInfoFileGetsUpdated(self):
"""Ensures that the archive info file is updated correctly."""
expected_archive_file_contents = {
u'description': (u'Describes the Web Page Replay archives for a'
u' story set. Don\'t edit by hand! Use record_wpr for'
u' updating.'),
u'archives': {
u'data_003.wpr': [u'Bar', u'http://www.baz.com/'],
u'data_001.wpr': [u'Foo']
}
}
new_temp_recording = os.path.join(self.tmp_dir, 'recording.wpr')
expected_archive_file_path = os.path.join(self.tmp_dir, 'data_003.wpr')
hash_dictionary = {expected_archive_file_path:'filehash'}
cloud_storage_stub = self.overrides.cloud_storage
cloud_storage_stub.SetCalculatedHashesForTesting(hash_dictionary)
with open(new_temp_recording, 'w') as f:
f.write('wpr data')
self.archive_info.AddNewTemporaryRecording(new_temp_recording)
self.archive_info.AddRecordedStories([page2, page3])
with open(self.story_set_archive_info_file, 'r') as f:
archive_file_contents = json.load(f)
self.assertEquals(expected_archive_file_contents, archive_file_contents)
    # Nit: Ensure the saved json does not contain trailing spaces.
with open(self.story_set_archive_info_file, 'rU') as f:
for line in f:
self.assertFalse(line.rstrip('\n').endswith(' '))
def testModifications(self):
recording1_path = os.path.join(self.tmp_dir, recording1)
recording2_path = os.path.join(self.tmp_dir, recording2)
new_recording1 = os.path.join(self.tmp_dir, 'data_003.wpr')
new_recording2 = os.path.join(self.tmp_dir, 'data_004.wpr')
hash_dictionary = {new_recording1:'file_hash1',
new_recording2:'file_hash2'}
cloud_storage_stub = self.overrides.cloud_storage
cloud_storage_stub.SetCalculatedHashesForTesting(hash_dictionary)
new_temp_recording = os.path.join(self.tmp_dir, 'recording.wpr')
with open(new_temp_recording, 'w') as f:
f.write('wpr data')
self.archive_info.AddNewTemporaryRecording(new_temp_recording)
self.assertEquals(new_temp_recording,
self.archive_info.WprFilePathForStory(page1))
self.assertEquals(new_temp_recording,
self.archive_info.WprFilePathForStory(page2))
self.assertEquals(new_temp_recording,
self.archive_info.WprFilePathForStory(page3))
self.archive_info.AddRecordedStories([page2])
self.assertTrue(os.path.exists(new_recording1))
self.assertFalse(os.path.exists(new_temp_recording))
self.assertTrue(os.path.exists(recording1_path))
self.assertTrue(os.path.exists(recording2_path))
self.assertCorrectHashFile(new_recording1)
with open(new_temp_recording, 'w') as f:
f.write('wpr data')
self.archive_info.AddNewTemporaryRecording(new_temp_recording)
self.archive_info.AddRecordedStories([page3])
self.assertTrue(os.path.exists(new_recording2))
self.assertCorrectHashFile(new_recording2)
self.assertFalse(os.path.exists(new_temp_recording))
self.assertTrue(os.path.exists(recording1_path))
# recording2 is no longer needed, so it was deleted.
self.assertFalse(os.path.exists(recording2_path))
def testCreatingNewArchiveInfo(self):
# Write only the page set without the corresponding metadata file.
story_set_contents = ("""
{
        "archive_data_file": "new_archive_info.json",
"pages": [
{
"url": "%s",
}
]
}""" % page1.url)
story_set_file = os.path.join(self.tmp_dir, 'new_story_set.json')
with open(story_set_file, 'w') as f:
f.write(story_set_contents)
self.story_set_archive_info_file = os.path.join(self.tmp_dir,
'new_archive_info.json')
expected_archive_file_path = os.path.join(self.tmp_dir,
'new_archive_info_000.wpr')
hash_dictionary = {expected_archive_file_path:'filehash'}
self.overrides.cloud_storage.SetCalculatedHashesForTesting(hash_dictionary)
# Create the WprArchiveInfo object to be tested.
self.archive_info = archive_info.WprArchiveInfo.FromFile(
self.story_set_archive_info_file, cloud_storage.PUBLIC_BUCKET)
# Add a recording for all the pages.
new_temp_recording = os.path.join(self.tmp_dir, 'recording.wpr')
with open(new_temp_recording, 'w') as f:
f.write('wpr data')
self.archive_info.AddNewTemporaryRecording(new_temp_recording)
self.assertEquals(new_temp_recording,
self.archive_info.WprFilePathForStory(page1))
self.archive_info.AddRecordedStories([page1])
# Expected name for the recording (decided by WprArchiveInfo).
new_recording = os.path.join(self.tmp_dir, 'new_archive_info_000.wpr')
self.assertTrue(os.path.exists(new_recording))
self.assertFalse(os.path.exists(new_temp_recording))
self.assertCorrectHashFile(new_recording)
# Check that the archive info was written correctly.
self.assertTrue(os.path.exists(self.story_set_archive_info_file))
read_archive_info = archive_info.WprArchiveInfo.FromFile(
self.story_set_archive_info_file, cloud_storage.PUBLIC_BUCKET)
self.assertEquals(new_recording,
read_archive_info.WprFilePathForStory(page1))
|
the-stack_0_27474
|
'''
Import model from source_path and export it to target_path.
This script is used for testing the import and export operators.
'''
import sys
import bpy
# Collect arguments after "--"
argv = sys.argv
argv = argv[argv.index("--") + 1:]
def main(source_path: str, target_path: str):
# Remove all starting objects
bpy.ops.object.select_all(action='SELECT')
bpy.ops.object.delete(use_global=False)
# Load model from source file
if "use_empties" in argv:
bpy.ops.object.nusiq_mcblend_import_operator(
filepath=source_path, replace_bones_with_empties=True)
else:
bpy.ops.object.nusiq_mcblend_import_operator(
filepath=source_path, replace_bones_with_empties=False)
# Save model to target file
bpy.ops.object.select_all(action='SELECT')
bpy.ops.object.nusiq_mcblend_export_operator(filepath=target_path)
if __name__ == "__main__":
main(argv[0], argv[1])
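# Example headless invocation (hypothetical file names; assumes Blender has the
# Mcblend add-on installed and enabled so the operators used above exist):
#   blender --background --python this_script.py -- source.json target.json use_empties
# Append "use_empties" only if bones should be replaced with empties on import.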
|
the-stack_0_27475
|
import colorsys
from typing import List
import numpy as np
from matplotlib import patches
from matplotlib import pyplot as plt
from matplotlib.path import Path
from tqdm import tqdm
from jmetal.core.solution import FloatSolution
def polar_to_cartesian(r, theta):
return np.array([r * np.cos(theta), r * np.sin(theta)])
def draw_sector(start_angle=0, end_angle=60, radius=1.0, width=0.2, lw=2, ls='-', ax=None, fc=(1, 0, 0), ec=(0, 0, 0),
z_order=1):
if start_angle > end_angle:
start_angle, end_angle = end_angle, start_angle
start_angle *= np.pi / 180.
end_angle *= np.pi / 180.
# https://stackoverflow.com/questions/1734745/how-to-create-circle-with-b%C3%A9zier-curves
opt = 4. / 3. * np.tan((end_angle - start_angle) / 4.) * radius
inner = radius * (1 - width)
vertsPath = [polar_to_cartesian(radius, start_angle),
polar_to_cartesian(radius, start_angle) + polar_to_cartesian(opt, start_angle + 0.5 * np.pi),
polar_to_cartesian(radius, end_angle) + polar_to_cartesian(opt, end_angle - 0.5 * np.pi),
polar_to_cartesian(radius, end_angle),
polar_to_cartesian(inner, end_angle),
polar_to_cartesian(inner, end_angle) + polar_to_cartesian(opt * (1 - width), end_angle - 0.5 * np.pi),
polar_to_cartesian(inner, start_angle) + polar_to_cartesian(opt * (1 - width),
start_angle + 0.5 * np.pi),
polar_to_cartesian(inner, start_angle),
polar_to_cartesian(radius, start_angle)]
codesPaths = [Path.MOVETO, Path.CURVE4, Path.CURVE4, Path.CURVE4, Path.LINETO, Path.CURVE4, Path.CURVE4,
Path.CURVE4, Path.CLOSEPOLY]
if ax is None:
return vertsPath, codesPaths
else:
path = Path(vertsPath, codesPaths)
patch = patches.PathPatch(path, facecolor=fc, edgecolor=ec, lw=lw, linestyle=ls, zorder=z_order)
ax.add_patch(patch)
return (patch)
def draw_chord(start_angle1=0, end_angle1=60, start_angle2=180, end_angle2=240, radius=1.0, chord_width=0.7, ax=None,
color=(1, 0, 0), z_order=1):
if start_angle1 > end_angle1:
start_angle1, end_angle1 = end_angle1, start_angle1
if start_angle2 > end_angle2:
start_angle2, end_angle2 = end_angle2, start_angle2
start_angle1 *= np.pi / 180.
end_angle1 *= np.pi / 180.
start_angle2 *= np.pi / 180.
end_angle2 *= np.pi / 180.
optAngle1 = 4. / 3. * np.tan((end_angle1 - start_angle1) / 4.) * radius
optAngle2 = 4. / 3. * np.tan((end_angle2 - start_angle2) / 4.) * radius
rchord = radius * (1 - chord_width)
vertsPath = [polar_to_cartesian(radius, start_angle1),
polar_to_cartesian(radius, start_angle1) + polar_to_cartesian(optAngle1, start_angle1 + 0.5 * np.pi),
polar_to_cartesian(radius, end_angle1) + polar_to_cartesian(optAngle1, end_angle1 - 0.5 * np.pi),
polar_to_cartesian(radius, end_angle1),
polar_to_cartesian(rchord, end_angle1), polar_to_cartesian(rchord, start_angle2),
polar_to_cartesian(radius, start_angle2),
polar_to_cartesian(radius, start_angle2) + polar_to_cartesian(optAngle2, start_angle2 + 0.5 * np.pi),
polar_to_cartesian(radius, end_angle2) + polar_to_cartesian(optAngle2, end_angle2 - 0.5 * np.pi),
polar_to_cartesian(radius, end_angle2),
polar_to_cartesian(rchord, end_angle2), polar_to_cartesian(rchord, start_angle1),
polar_to_cartesian(radius, start_angle1)]
codesPath = [Path.MOVETO, Path.CURVE4, Path.CURVE4, Path.CURVE4, Path.CURVE4, Path.CURVE4, Path.CURVE4,
Path.CURVE4, Path.CURVE4, Path.CURVE4, Path.CURVE4, Path.CURVE4, Path.CURVE4]
    if ax is None:
return vertsPath, codesPath
else:
path = Path(vertsPath, codesPath)
patch = patches.PathPatch(path, facecolor=color + (0.5,), edgecolor=color + (0.4,), lw=2, alpha=0.5)
ax.add_patch(patch)
return (patch)
def hover_over_bin(event, handle_tickers, handle_plots, colors, fig):
is_found = False
for iobj in range(len(handle_tickers)):
for ibin in range(len(handle_tickers[iobj])):
cont = False
if not is_found:
cont, ind = handle_tickers[iobj][ibin].contains(event)
if cont:
is_found = True
if cont:
plt.setp(handle_tickers[iobj][ibin], facecolor=colors[iobj])
[h.set_visible(True) for h in handle_plots[iobj][ibin]]
is_found = True
fig.canvas.draw_idle()
else:
plt.setp(handle_tickers[iobj][ibin], facecolor=(1, 1, 1))
for h in handle_plots[iobj][ibin]:
h.set_visible(False)
fig.canvas.draw_idle()
# To draw a chord diagram, just import this function.
def chord_diagram(solutions: List[FloatSolution], nbins='auto', ax=None, obj_labels=None,
prop_labels=dict(fontsize=13, ha='center', va='center'), pad=6):
points_matrix = np.array([s.objectives for s in solutions])
(NPOINTS, NOBJ) = np.shape(points_matrix)
HSV_tuples = [(x * 1.0 / NOBJ, 0.5, 0.5) for x in range(NOBJ)]
colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples))
if ax is None:
fig = plt.figure(figsize=(6, 6))
ax = plt.axes([0, 0, 1, 1], aspect='equal')
ax.set_xlim(-2.3, 2.3)
ax.set_ylim(-2.3, 2.3)
ax.axis('off')
y = np.array([1. / NOBJ] * NOBJ) * (360 - pad * NOBJ)
sector_angles = []
labels_pos_and_ros = []
start_angle = 0
for i in range(NOBJ):
end_angle = start_angle + y[i]
sector_angles.append((start_angle, end_angle))
angle_diff = 0.5 * (start_angle + end_angle)
if -30 <= angle_diff <= 210:
angle_diff -= 90
else:
angle_diff -= 270
angleText = start_angle - 2.5
if -30 <= angleText <= 210:
angleText -= 90
else:
angleText -= 270
labels_pos_and_ros.append(
tuple(polar_to_cartesian(1.0, 0.5 * (start_angle + end_angle) * np.pi / 180.)) + (angle_diff,) +
tuple(polar_to_cartesian(0.725, (start_angle - 2.5) * np.pi / 180.)) + (angleText,) +
tuple(polar_to_cartesian(0.85, (start_angle - 2.5) * np.pi / 180.)) + (angleText,))
start_angle = end_angle + pad
arc_points = []
for point in points_matrix:
arc_points.append([])
idim = 0
for _ in point:
anglePoint = sector_angles[idim][0] + (sector_angles[idim][1] - sector_angles[idim][0]) * point[idim]
arc_points[-1].append((anglePoint, anglePoint))
idim = idim + 1
max_hist_values = []
handle_tickers = []
handle_plots = []
for iobj in tqdm(range(NOBJ), ascii=True, desc='Chord diagram'):
draw_sector(start_angle=sector_angles[iobj][0], end_angle=sector_angles[iobj][1], radius=0.925, width=0.225,
ax=ax,
fc=(1, 1, 1, 0.0), ec=(0, 0, 0), lw=2, z_order=10)
draw_sector(start_angle=sector_angles[iobj][0], end_angle=sector_angles[iobj][1], radius=0.925, width=0.05,
ax=ax,
fc=colors[iobj], ec=(0, 0, 0), lw=2, z_order=10)
draw_sector(start_angle=sector_angles[iobj][0], end_angle=sector_angles[iobj][1], radius=0.7 + 0.15, width=0.0,
ax=ax, fc=colors[iobj], ec=colors[iobj], lw=2, ls=':', z_order=5)
histValues, binsDim = np.histogram(points_matrix[:, iobj], bins=nbins)
relativeHeightBinPre = 0.025
max_hist_values.append(max(histValues))
handle_tickers.append([])
handle_plots.append([])
for indexBin in range(len(histValues)):
startAngleBin = sector_angles[iobj][0] + (sector_angles[iobj][1] - sector_angles[iobj][0]) * binsDim[
indexBin]
endAngleBin = sector_angles[iobj][0] + (sector_angles[iobj][1] - sector_angles[iobj][0]) * binsDim[
indexBin + 1]
relativeHeightBin = 0.15 * histValues[indexBin] / max(histValues)
handle_tickers[-1].append(
draw_sector(start_angle=startAngleBin, end_angle=endAngleBin, radius=0.69, width=0.08, ax=ax, lw=1,
fc=(1, 1, 1), ec=(0, 0, 0)))
handle_plots[-1].append([])
if histValues[indexBin] > 0:
draw_sector(start_angle=startAngleBin, end_angle=endAngleBin, radius=0.7 + relativeHeightBin, width=0,
ax=ax, lw=1, fc=colors[iobj], ec=colors[iobj])
plotPoint1 = polar_to_cartesian(0.7 + relativeHeightBinPre, startAngleBin * np.pi / 180.)
plotPoint2 = polar_to_cartesian(0.7 + relativeHeightBin, startAngleBin * np.pi / 180.)
plt.plot([plotPoint1[0], plotPoint2[0]], [plotPoint1[1], plotPoint2[1]], c=colors[iobj], lw=1)
relativeHeightBinPre = relativeHeightBin
else:
plotPoint1 = polar_to_cartesian(0.7 + relativeHeightBinPre, startAngleBin * np.pi / 180.)
plotPoint2 = polar_to_cartesian(0.725 + relativeHeightBin, startAngleBin * np.pi / 180.)
plt.plot([plotPoint1[0], plotPoint2[0]], [plotPoint1[1], plotPoint2[1]], c=colors[iobj], lw=1)
relativeHeightBinPre = 0.025
if indexBin == len(histValues) - 1:
plotPoint1 = polar_to_cartesian(0.7 + relativeHeightBin, endAngleBin * np.pi / 180.)
plotPoint2 = polar_to_cartesian(0.725, endAngleBin * np.pi / 180.)
plt.plot([plotPoint1[0], plotPoint2[0]], [plotPoint1[1], plotPoint2[1]], c=colors[iobj], lw=1)
for ipoint in range(len(points_matrix)):
plotPoint1 = polar_to_cartesian(0.6, arc_points[ipoint][iobj][0] * np.pi / 180.)
plotPoint2 = polar_to_cartesian(0.6, arc_points[ipoint][iobj][0] * np.pi / 180.)
plt.plot([plotPoint1[0], plotPoint2[0]], [plotPoint1[1], plotPoint2[1]], marker='o', markersize=3,
c=colors[iobj], lw=2)
if binsDim[indexBin] < points_matrix[ipoint, iobj] <= binsDim[
indexBin + 1]:
for jdim in range(NOBJ):
if jdim >= 1:
handle_plots[iobj][indexBin].append(
draw_chord(arc_points[ipoint][jdim - 1][0], arc_points[ipoint][jdim - 1][1],
arc_points[ipoint][jdim][0], arc_points[ipoint][jdim][1], radius=0.55,
color=colors[iobj], chord_width=1, ax=ax))
handle_plots[iobj][indexBin][-1].set_visible(False)
handle_plots[iobj][indexBin].append(
draw_chord(arc_points[ipoint][-1][0], arc_points[ipoint][-1][1], arc_points[ipoint][0][0],
arc_points[ipoint][0][1], radius=0.55, color=colors[iobj], chord_width=1, ax=ax))
handle_plots[iobj][indexBin][-1].set_visible(False)
if obj_labels is None:
        obj_labels = [r'$f_{' + str(i) + r'}(\mathbf{x})$' for i in range(NOBJ)]
prop_legend_bins = dict(fontsize=9, ha='center', va='center')
for i in range(NOBJ):
p0, p1 = polar_to_cartesian(0.975, sector_angles[i][0] * np.pi / 180.)
ax.text(p0, p1, '0', **prop_legend_bins)
p0, p1 = polar_to_cartesian(0.975, sector_angles[i][1] * np.pi / 180.)
ax.text(p0, p1, '1', **prop_legend_bins)
ax.text(labels_pos_and_ros[i][0], labels_pos_and_ros[i][1], obj_labels[i], rotation=labels_pos_and_ros[i][2],
**prop_labels)
ax.text(labels_pos_and_ros[i][3], labels_pos_and_ros[i][4], '0', **prop_legend_bins, color=colors[i])
ax.text(labels_pos_and_ros[i][6], labels_pos_and_ros[i][7], str(max_hist_values[i]), **prop_legend_bins,
color=colors[i])
plt.axis([-1.2, 1.2, -1.2, 1.2])
fig.canvas.mpl_connect("motion_notify_event",
lambda event: hover_over_bin(event, handle_tickers, handle_plots, colors, fig))
plt.show()
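# A minimal usage sketch with random, hypothetical data. The FloatSolution
# constructor signature is assumed to match jMetalPy's
# FloatSolution(lower_bound, upper_bound, number_of_objectives); chord_diagram
# only reads each solution's `objectives`, which should be scaled to [0, 1].
if __name__ == '__main__':
    rng = np.random.default_rng(seed=1)
    demo_solutions = []
    for _ in range(30):
        sol = FloatSolution([0.0], [1.0], 4)
        sol.objectives = rng.random(4).tolist()
        demo_solutions.append(sol)
    chord_diagram(demo_solutions, nbins=5,
                  obj_labels=['f1', 'f2', 'f3', 'f4'])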
|
the-stack_0_27478
|
# This script calculates and saves the correlation matrix of the reference
# subject for later use (embedding).
# Input: fMRI time series of the reference subject
# Output: correlation matrix of the fMRI time series
import numpy as np
import nibabel as nib
import pandas as pd
from glob import glob
import os, sys
currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
import settings as s
from myfuns import calc_vertex_correlations_combined_runs
from myfuns import calc_vertex_correlations
outfileLR=s.projectfolder + 'long_corr_matricesLR/reference'# output folder
outfileRL=s.projectfolder +'long_corr_matricesRL/reference'# output folder
reference_subjectpath=s.HCProot+'HCP_3T_RESTA_fmri/101915/' # folder containing fMRI runs
files=glob(os.path.join(reference_subjectpath, "*.mgh"))
if not os.path.exists(s.projectfolder +'long_corr_matricesLR/'):
os.mkdir(s.projectfolder +'long_corr_matricesLR/')
if not os.path.exists(s.projectfolder +'long_corr_matricesRL/'):
os.mkdir(s.projectfolder +'long_corr_matricesRL/')
fmri_LH_LR_R1=reference_subjectpath+'/lh.rfMRI_REST1_LR_Atlas_hp2000_clean_bpss_gsr_fs4.mgh'
fmri_RH_LR_R1=reference_subjectpath+'/rh.rfMRI_REST1_LR_Atlas_hp2000_clean_bpss_gsr_fs4.mgh'
fmri_LH_RL_R1=reference_subjectpath+'/lh.rfMRI_REST1_RL_Atlas_hp2000_clean_bpss_gsr_fs4.mgh'
fmri_RH_RL_R1=reference_subjectpath+'/rh.rfMRI_REST1_RL_Atlas_hp2000_clean_bpss_gsr_fs4.mgh'
fmri_LH_LR_R2=reference_subjectpath+'/lh.rfMRI_REST2_LR_Atlas_hp2000_clean_bpss_gsr_fs4.mgh'
fmri_RH_LR_R2=reference_subjectpath+'/rh.rfMRI_REST2_LR_Atlas_hp2000_clean_bpss_gsr_fs4.mgh'
fmri_LH_RL_R2=reference_subjectpath+'/lh.rfMRI_REST2_RL_Atlas_hp2000_clean_bpss_gsr_fs4.mgh'
fmri_RH_RL_R2=reference_subjectpath+'/rh.rfMRI_REST2_RL_Atlas_hp2000_clean_bpss_gsr_fs4.mgh'
cor_LR=calc_vertex_correlations_combined_runs(fmri_LH_LR_R1,fmri_RH_LR_R1,fmri_LH_LR_R2,fmri_RH_LR_R2)
cor_RL=calc_vertex_correlations_combined_runs(fmri_LH_RL_R1,fmri_RH_RL_R1,fmri_LH_RL_R2,fmri_RH_RL_R2)
np.save(outfileLR,cor_LR['fullcor'])
np.save(outfileRL,cor_RL['fullcor'])
|
the-stack_0_27479
|
from corehq.apps.sms.api import (
MessageMetadata,
add_msg_tags,
send_sms_to_verified_number,
log_sms_exception,
)
from corehq.apps.sms.messages import *
from corehq.apps.sms.util import format_message_list
from touchforms.formplayer.api import current_question
from corehq.apps.smsforms.app import (
_get_responses,
_responses_to_text,
)
from dateutil.parser import parse
from corehq.apps.smsforms.models import SQLXFormsSession
def form_session_handler(v, text, msg):
"""
The form session handler will use the inbound text to answer the next question
in the open SQLXformsSession for the associated contact. If no session is open,
the handler passes. If multiple sessions are open, they are all closed and an
error message is displayed to the user.
"""
multiple, session = get_single_open_session_or_close_multiple(v.domain, v.owner_id)
if multiple:
send_sms_to_verified_number(v, get_message(MSG_MULTIPLE_SESSIONS, v))
return True
if session:
# Metadata to be applied to the inbound message
inbound_metadata = MessageMetadata(
workflow=session.workflow,
reminder_id=session.reminder_id,
xforms_session_couch_id=session._id,
)
add_msg_tags(msg, inbound_metadata)
try:
answer_next_question(v, text, msg, session)
except Exception:
# Catch any touchforms errors
log_sms_exception(msg)
send_sms_to_verified_number(v, get_message(MSG_TOUCHFORMS_DOWN, v))
return True
else:
return False
def get_single_open_session_or_close_multiple(domain, contact_id):
"""
Retrieves the current open SQLXFormsSession for the given contact.
If multiple sessions are open, it closes all of them and returns
None for the session.
The return value is a tuple of (multiple, session), where multiple
is True if there were multiple sessions, and session is the session if
there was a single open session available.
"""
sessions = SQLXFormsSession.get_all_open_sms_sessions(domain, contact_id)
count = sessions.count()
if count > 1:
for session in sessions:
session.end(False)
session.save()
return (True, None)
session = sessions[0] if count == 1 else None
return (False, session)
def answer_next_question(v, text, msg, session):
resp = current_question(session.session_id)
event = resp.event
valid, text, error_msg = validate_answer(event, text, v)
# metadata to be applied to the reply message
outbound_metadata = MessageMetadata(
workflow=session.workflow,
reminder_id=session.reminder_id,
xforms_session_couch_id=session._id,
)
if valid:
responses = _get_responses(v.domain, v.owner_id, text,
yield_responses=True)
if has_invalid_response(responses):
mark_as_invalid_response(msg)
text_responses = _responses_to_text(responses)
if len(text_responses) > 0:
response_text = format_message_list(text_responses)
send_sms_to_verified_number(v, response_text,
metadata=outbound_metadata)
else:
mark_as_invalid_response(msg)
response_text = "%s %s" % (error_msg, event.text_prompt)
send_sms_to_verified_number(v, response_text,
metadata=outbound_metadata)
def validate_answer(event, text, v):
text = text.strip()
upper_text = text.upper()
valid = False
error_msg = ""
if text == "" and event._dict.get("required", False):
return (False, text, get_message(MSG_FIELD_REQUIRED, v))
# Validate select
if event.datatype == "select":
# Try to match on phrase (i.e., "Yes" or "No")
choices = format_choices(event._dict["choices"])
if upper_text in choices:
text = str(choices[upper_text])
valid = True
else:
try:
answer = int(text)
if answer >= 1 and answer <= len(event._dict["choices"]):
valid = True
else:
error_msg = get_message(MSG_CHOICE_OUT_OF_RANGE, v)
except ValueError:
error_msg = get_message(MSG_INVALID_CHOICE, v)
# Validate multiselect
elif event.datatype == "multiselect":
choices = format_choices(event._dict["choices"])
max_index = len(event._dict["choices"])
proposed_answers = text.split()
final_answers = {}
try:
for answer in proposed_answers:
upper_answer = answer.upper()
if upper_answer in choices:
final_answers[str(choices[upper_answer])] = ""
else:
int_answer = int(answer)
assert int_answer >= 1 and int_answer <= max_index
final_answers[str(int_answer)] = ""
text = " ".join(final_answers.keys())
valid = True
except Exception:
error_msg = get_message(MSG_INVALID_CHOICE, v)
# Validate int
elif event.datatype == "int":
try:
int(text)
valid = True
except ValueError:
error_msg = get_message(MSG_INVALID_INT, v)
# Validate float
elif event.datatype == "float":
try:
float(text)
valid = True
except ValueError:
error_msg = get_message(MSG_INVALID_FLOAT, v)
# Validate longint
elif event.datatype == "longint":
try:
long(text)
valid = True
except ValueError:
error_msg = get_message(MSG_INVALID_LONG, v)
# Validate date (Format: YYYYMMDD)
elif event.datatype == "date":
try:
assert len(text) == 8
int(text)
text = "%s-%s-%s" % (text[0:4], text[4:6], text[6:])
parse(text)
valid = True
except Exception:
error_msg = get_message(MSG_INVALID_DATE, v)
# Validate time (Format: HHMM, 24-hour)
elif event.datatype == "time":
try:
assert len(text) == 4
hour = int(text[0:2])
minute = int(text[2:])
assert hour >= 0 and hour <= 23
assert minute >= 0 and minute <= 59
text = "%s:%s" % (hour, str(minute).zfill(2))
valid = True
except Exception:
error_msg = get_message(MSG_INVALID_TIME, v)
# Other question types pass
else:
valid = True
return (valid, text, error_msg)
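# Worked examples (comments only): for a "date" question the reply "20240131"
# is normalised to "2024-01-31" before being parsed, and for a "time" question
# "0905" becomes "9:05"; replies outside the expected format or range are
# rejected with the matching error message.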
def format_choices(choices_list):
choices = {}
for idx, choice in enumerate(choices_list):
choices[choice.strip().upper()] = idx + 1
return choices
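# For example (illustration only): format_choices(["Yes ", "no"]) returns
# {"YES": 1, "NO": 2}, so a reply can be matched either by phrase or by the
# 1-based index shown to the user.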
def has_invalid_response(responses):
for r in responses:
if r.status == "validation-error":
return True
return False
def mark_as_invalid_response(msg):
msg.invalid_survey_response = True
msg.save()
|
the-stack_0_27480
|
""" Module for flexure routines
.. include common links, assuming primary doc root is up one directory
.. include:: ../include/links.rst
"""
import inspect
from pkg_resources import resource_filename
import numpy as np
import copy, os
from matplotlib import pyplot as plt
from matplotlib import gridspec
import matplotlib
from astropy import stats
from astropy import units
from astropy.io import ascii
import scipy.signal
import scipy.optimize as opt
from scipy import interpolate
from linetools.spectra import xspectrum1d
from pypeit import msgs
from pypeit import utils
from pypeit.display import display
from pypeit.core import arc
from pypeit.core import qa
from pypeit.core import fitting
from pypeit.datamodel import DataContainer
from pypeit import specobjs
from IPython import embed
def spat_flexure_shift(sciimg, slits, debug=False, maxlag=20):
"""
Calculate a rigid flexure shift in the spatial dimension
between the slitmask and the science image.
It is *important* to use original=True when defining the
slitmask as everything should be relative to the initial slits
Otherwise, the WaveTilts could get out of sync with science images
Args:
sciimg (`numpy.ndarray`_):
slits (:class:`pypeit.slittrace.SlitTraceSet`):
maxlag (:obj:`int`, optional):
Maximum flexure searched for
Returns:
float: The spatial flexure shift relative to the initial slits
"""
# Mask -- Includes short slits and those excluded by the user (e.g. ['rdx']['slitspatnum'])
slitmask = slits.slit_img(initial=True, exclude_flag=slits.bitmask.exclude_for_flexure)
_sciimg = sciimg if slitmask.shape == sciimg.shape \
else arc.resize_mask2arc(slitmask.shape, sciimg)
onslits = slitmask > -1
corr_slits = onslits.astype(float).flatten()
# Compute
mean_sci, med_sci, stddev_sci = stats.sigma_clipped_stats(_sciimg[onslits])
thresh = med_sci + 5.0*stddev_sci
corr_sci = np.fmin(_sciimg.flatten(), thresh)
lags, xcorr = utils.cross_correlate(corr_sci, corr_slits, maxlag)
xcorr_denom = np.sqrt(np.sum(corr_sci*corr_sci)*np.sum(corr_slits*corr_slits))
xcorr_norm = xcorr / xcorr_denom
# TODO -- Generate a QA plot
tampl_true, tampl, pix_max, twid, centerr, ww, arc_cont, nsig \
= arc.detect_lines(xcorr_norm, sigdetect=3.0, fit_frac_fwhm=1.5, fwhm=5.0,
cont_frac_fwhm=1.0, cont_samp=30, nfind=1, debug=debug)
# No peak? -- e.g. data fills the entire detector
if len(tampl) == 0:
msgs.warn('No peak found in spatial flexure. Assuming there is none..')
if debug:
embed(header='68 of flexure')
return 0.
# Find the peak
xcorr_max = np.interp(pix_max, np.arange(lags.shape[0]), xcorr_norm)
lag_max = np.interp(pix_max, np.arange(lags.shape[0]), lags)
msgs.info('Spatial flexure measured: {}'.format(lag_max[0]))
if debug:
plt.figure(figsize=(14, 6))
plt.plot(lags, xcorr_norm, color='black', drawstyle='steps-mid', lw=3, label='x-corr', linewidth=1.0)
plt.plot(lag_max[0], xcorr_max[0], 'g+', markersize=6.0, label='peak')
plt.title('Best shift = {:5.3f}'.format(lag_max[0]) + ', corr_max = {:5.3f}'.format(xcorr_max[0]))
plt.legend()
plt.show()
#tslits_shift = trace_slits.shift_slits(tslits_dict, lag_max)
# Now translate the tilts
#slitmask_shift = pixels.tslits2mask(tslits_shift)
#slitmask_shift = slits.slit_img(flexure=lag_max[0])
if debug:
# Now translate the slits in the tslits_dict
all_left_flexure, all_right_flexure, mask = slits.select_edges(flexure=lag_max[0])
gpm = mask == 0
viewer, ch = display.show_image(_sciimg)
#display.show_slits(viewer, ch, left_flexure[:,gpm], right_flexure)[:,gpm]#, slits.id) #, args.det)
embed(header='83 of flexure.py')
return lag_max[0]
def load_sky_spectrum(sky_file):
"""
Load a sky spectrum into an XSpectrum1D object
.. todo::
Try to eliminate the XSpectrum1D dependancy
Args:
sky_file: str
Returns:
sky_spec: XSpectrum1D
spectrum
"""
return xspectrum1d.XSpectrum1D.from_file(sky_file)
def spec_flex_shift(obj_skyspec, arx_skyspec, arx_lines, mxshft=20):
""" Calculate shift between object sky spectrum and archive sky spectrum
Args:
obj_skyspec (:class:`linetools.spectra.xspectrum1d.XSpectrum1d`):
Spectrum of the sky related to our object
arx_skyspec (:class:`linetools.spectra.xspectrum1d.XSpectrum1d`):
Archived sky spectrum
arx_lines (tuple): Line information returned by arc.detect_lines for
the Archived sky spectrum
mxshft (float, optional):
Maximum allowed shift from flexure; note there are cases that
have been known to exceed even 30 pixels..
Returns:
dict: Contains flexure info
"""
# TODO None of these routines should have dependencies on XSpectrum1d!
# Determine the brightest emission lines
msgs.warn("If we use Paranal, cut down on wavelength early on")
arx_amp, arx_amp_cont, arx_cent, arx_wid, _, arx_w, arx_yprep, nsig \
= arx_lines
obj_amp, obj_amp_cont, obj_cent, obj_wid, _, obj_w, obj_yprep, nsig_obj \
= arc.detect_lines(obj_skyspec.flux.value)
# Keep only 5 brightest amplitude lines (xxx_keep is array of
# indices within arx_w of the 5 brightest)
arx_keep = np.argsort(arx_amp[arx_w])[-5:]
obj_keep = np.argsort(obj_amp[obj_w])[-5:]
# Calculate wavelength (Angstrom per pixel)
arx_disp = np.append(arx_skyspec.wavelength.value[1]-arx_skyspec.wavelength.value[0],
arx_skyspec.wavelength.value[1:]-arx_skyspec.wavelength.value[:-1])
obj_disp = np.append(obj_skyspec.wavelength.value[1]-obj_skyspec.wavelength.value[0],
obj_skyspec.wavelength.value[1:]-obj_skyspec.wavelength.value[:-1])
# Calculate resolution (lambda/delta lambda_FWHM)..maybe don't need
# this? can just use sigmas
arx_idx = (arx_cent+0.5).astype(np.int)[arx_w][arx_keep] # The +0.5 is for rounding
arx_res = arx_skyspec.wavelength.value[arx_idx]/\
(arx_disp[arx_idx]*(2*np.sqrt(2*np.log(2)))*arx_wid[arx_w][arx_keep])
obj_idx = (obj_cent+0.5).astype(np.int)[obj_w][obj_keep] # The +0.5 is for rounding
obj_res = obj_skyspec.wavelength.value[obj_idx]/ \
(obj_disp[obj_idx]*(2*np.sqrt(2*np.log(2)))*obj_wid[obj_w][obj_keep])
if not np.all(np.isfinite(obj_res)):
msgs.warn('Failed to measure the resolution of the object spectrum, likely due to error '
'in the wavelength image.')
return None
msgs.info("Resolution of Archive={0} and Observation={1}".format(np.median(arx_res),
np.median(obj_res)))
# Determine sigma of gaussian for smoothing
arx_sig2 = np.power(arx_disp[arx_idx]*arx_wid[arx_w][arx_keep], 2)
obj_sig2 = np.power(obj_disp[obj_idx]*obj_wid[obj_w][obj_keep], 2)
arx_med_sig2 = np.median(arx_sig2)
obj_med_sig2 = np.median(obj_sig2)
if obj_med_sig2 >= arx_med_sig2:
smooth_sig = np.sqrt(obj_med_sig2-arx_med_sig2) # Ang
smooth_sig_pix = smooth_sig / np.median(arx_disp[arx_idx])
arx_skyspec = arx_skyspec.gauss_smooth(smooth_sig_pix*2*np.sqrt(2*np.log(2)))
else:
msgs.warn("Prefer archival sky spectrum to have higher resolution")
smooth_sig_pix = 0.
msgs.warn("New Sky has higher resolution than Archive. Not smoothing")
#smooth_sig = np.sqrt(arx_med_sig**2-obj_med_sig**2)
#Determine region of wavelength overlap
min_wave = max(np.amin(arx_skyspec.wavelength.value), np.amin(obj_skyspec.wavelength.value))
max_wave = min(np.amax(arx_skyspec.wavelength.value), np.amax(obj_skyspec.wavelength.value))
#Smooth higher resolution spectrum by smooth_sig (flux is conserved!)
# if np.median(obj_res) >= np.median(arx_res):
# msgs.warn("New Sky has higher resolution than Archive. Not smoothing")
#obj_sky_newflux = ndimage.gaussian_filter(obj_sky.flux, smooth_sig)
# else:
#tmp = ndimage.gaussian_filter(arx_sky.flux, smooth_sig)
# arx_skyspec = arx_skyspec.gauss_smooth(smooth_sig_pix*2*np.sqrt(2*np.log(2)))
#arx_sky.flux = ndimage.gaussian_filter(arx_sky.flux, smooth_sig)
# Define wavelengths of overlapping spectra
keep_idx = np.where((obj_skyspec.wavelength.value>=min_wave) &
(obj_skyspec.wavelength.value<=max_wave))[0]
#keep_wave = [i for i in obj_sky.wavelength.value if i>=min_wave if i<=max_wave]
#Rebin both spectra onto overlapped wavelength range
if len(keep_idx) <= 50:
msgs.warn("Not enough overlap between sky spectra")
return None
# rebin onto object ALWAYS
keep_wave = obj_skyspec.wavelength[keep_idx]
arx_skyspec = arx_skyspec.rebin(keep_wave)
obj_skyspec = obj_skyspec.rebin(keep_wave)
# Trim edges (rebinning is junk there)
arx_skyspec.data['flux'][0,:2] = 0.
arx_skyspec.data['flux'][0,-2:] = 0.
obj_skyspec.data['flux'][0,:2] = 0.
obj_skyspec.data['flux'][0,-2:] = 0.
# Set minimum to 0. For bad rebinning and for pernicious extractions
obj_skyspec.data['flux'][0,:] = np.maximum(obj_skyspec.data['flux'][0,:], 0.)
arx_skyspec.data['flux'][0,:] = np.maximum(arx_skyspec.data['flux'][0,:], 0.)
# Normalize spectra to unit average sky count
norm = np.sum(obj_skyspec.flux.value)/obj_skyspec.npix
norm2 = np.sum(arx_skyspec.flux.value)/arx_skyspec.npix
if norm <= 0:
msgs.warn("Bad normalization of object in flexure algorithm")
msgs.warn("Will try the median")
norm = np.median(obj_skyspec.flux.value)
if norm <= 0:
msgs.warn("Improper sky spectrum for flexure. Is it too faint??")
return None
if norm2 <= 0:
msgs.warn('Bad normalization of archive in flexure. You are probably using wavelengths '
'well beyond the archive.')
return None
obj_skyspec.flux = obj_skyspec.flux / norm
arx_skyspec.flux = arx_skyspec.flux / norm2
# Deal with bad pixels
msgs.work("Need to mask bad pixels")
# Deal with underlying continuum
msgs.work("Consider taking median first [5 pixel]")
everyn = obj_skyspec.npix // 20
pypeitFit_obj, _ = fitting.iterfit(obj_skyspec.wavelength.value, obj_skyspec.flux.value,
nord = 3, kwargs_bspline={'everyn': everyn}, kwargs_reject={'groupbadpix':True,'maxrej':1},
maxiter = 15, upper = 3.0, lower = 3.0)
obj_sky_cont, _ = pypeitFit_obj.value(obj_skyspec.wavelength.value)
obj_sky_flux = obj_skyspec.flux.value - obj_sky_cont
pypeitFit_sky, _ = fitting.iterfit(arx_skyspec.wavelength.value, arx_skyspec.flux.value,
nord = 3, kwargs_bspline={'everyn': everyn}, kwargs_reject={'groupbadpix':True,'maxrej':1},
maxiter = 15, upper = 3.0, lower = 3.0)
arx_sky_cont, _ = pypeitFit_sky.value(arx_skyspec.wavelength.value)
arx_sky_flux = arx_skyspec.flux.value - arx_sky_cont
# Consider sharpness filtering (e.g. LowRedux)
msgs.work("Consider taking median first [5 pixel]")
#Cross correlation of spectra
#corr = np.correlate(arx_skyspec.flux, obj_skyspec.flux, "same")
corr = np.correlate(arx_sky_flux, obj_sky_flux, "same")
#Create array around the max of the correlation function for fitting for subpixel max
# Restrict to pixels within maxshift of zero lag
lag0 = corr.size//2
#mxshft = settings.argflag['reduce']['flexure']['maxshift']
max_corr = np.argmax(corr[lag0-mxshft:lag0+mxshft]) + lag0-mxshft
subpix_grid = np.linspace(max_corr-3., max_corr+3., 7)
#Fit a 2-degree polynomial to peak of correlation function. JFH added this if/else to not crash for bad slits
if np.any(np.isfinite(corr[subpix_grid.astype(np.int)])):
fit = fitting.PypeItFit(xval=subpix_grid, yval=corr[subpix_grid.astype(np.int)],
func='polynomial', order=np.atleast_1d(2))
fit.fit()
success = True
max_fit = -0.5 * fit.fitc[1] / fit.fitc[2]
else:
fit = fitting.PypeItFit(xval=subpix_grid, yval=0.0*subpix_grid,
func='polynomial', order=np.atleast_1d(2))
fit.fit()
success = False
max_fit = 0.0
msgs.warn('Flexure compensation failed for one of your objects')
#Calculate and apply shift in wavelength
shift = float(max_fit)-lag0
msgs.info("Flexure correction of {:g} pixels".format(shift))
#model = (fit[2]*(subpix_grid**2.))+(fit[1]*subpix_grid)+fit[0]
return dict(polyfit=fit, shift=shift, subpix=subpix_grid,
corr=corr[subpix_grid.astype(np.int)], sky_spec=obj_skyspec, arx_spec=arx_skyspec,
corr_cen=corr.size/2, smooth=smooth_sig_pix, success=success)
def flexure_interp(shift, wave):
"""
Perform interpolation on wave given a shift in pixels
Args:
shift (float):
Shift in pixels
wave (`numpy.ndarray`_):
extracted wave of size nspec
Returns:
`numpy.ndarray`_: Wavelength scale corrected for spectral flexure
"""
npix = wave.size
x = np.linspace(0., 1., npix)
f = interpolate.interp1d(x, wave, bounds_error=False, fill_value="extrapolate")
twave = f(x + shift / (npix - 1))
return twave
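# Worked illustration (comment only): with a linear grid of 1 Angstrom per
# pixel, a +2 pixel shift moves every wavelength by ~2 Angstrom and
# extrapolates past the red end:
#
#   wave = np.linspace(4000., 5000., 1001)
#   flexure_interp(2.0, wave)[0]   # ~4002.0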
def spec_flexure_slit(slits, slitord, slit_bpm, sky_file, method="boxcar", specobjs=None,
slit_specs=None, mxshft=None):
"""Calculate the spectral flexure for every slit (global) or object (local)
Args:
slits (:class:`~pypeit.slittrace.SlitTraceSet`):
Slit trace set
slitord (`numpy.ndarray`_):
Array of slit/order numbers
slit_bpm (`numpy.ndarray`_):
True = masked slit
sky_file (str):
Sky file
method (:obj:`str`, optional):
Two methods are available:
- 'boxcar': Recommended for object extractions. This
method uses the boxcar extracted sky and wavelength
spectra from the input specobjs
- 'slitcen': Recommended when no objects are being
extracted. This method uses a spectrum (stored in
slitspecs) that is extracted from the center of
each slit.
specobjs (:class:`~pypeit.specobjs.Specobjs`, optional):
Spectral extractions
slit_specs (list, optional):
A list of linetools.xspectrum1d, one for each slit. The spectra stored in
this list are sky spectra, extracted from the center of each slit.
mxshft (int, optional):
Passed to flex_shift()
Returns:
:obj:`list`: A list of :obj:`dict` objects containing flexure
results of each slit. This is filled with a basically empty
dict if the slit is skipped.
"""
sv_fdict = None
msgs.work("Consider doing 2 passes in flexure as in LowRedux")
# Determine the method
slit_cen = True if (specobjs is None) or (method == "slitcen") else False
# Load Archive. Save the line information to avoid the performance hit from calling it on the archive sky spectrum
# multiple times
sky_spectrum = load_sky_spectrum(sky_file)
sky_lines = arc.detect_lines(sky_spectrum.flux.value)
nslits = slits.nslits
gpm = np.logical_not(slit_bpm)
gdslits = np.where(gpm)[0]
# Initialise the flexure list for each slit
flex_list = []
# Slit/objects to come back to
return_later_sobjs = []
# Loop over slits, and then over objects
for islit in range(nslits):
msgs.info("Working on spectral flexure of slit: {:d}".format(islit))
# Reset
flex_dict = dict(polyfit=[], shift=[], subpix=[], corr=[],
corr_cen=[], spec_file=sky_file, smooth=[],
arx_spec=[], sky_spec=[], method=[])
# If no objects on this slit append an empty dictionary
if islit not in gdslits:
flex_list.append(flex_dict.copy())
continue
if slit_cen:
sky_wave = slit_specs[islit].wavelength.value
sky_flux = slit_specs[islit].flux.value
# Calculate the shift
fdict = spec_flex_shift(slit_specs[islit], sky_spectrum, sky_lines, mxshft=mxshft)
# Failed?
if fdict is not None:
# Update dict
for key in ['polyfit', 'shift', 'subpix', 'corr', 'corr_cen', 'smooth', 'arx_spec']:
flex_dict[key].append(fdict[key])
# Interpolate
sky_wave_new = flexure_interp(fdict['shift'], sky_wave)
flex_dict['sky_spec'].append(xspectrum1d.XSpectrum1D.from_tuple((sky_wave_new, sky_flux)))
flex_dict['method'].append("slitcen")
else:
i_slitord = slitord[islit]
indx = specobjs.slitorder_indices(i_slitord)
this_specobjs = specobjs[indx]
# Loop through objects
for ss, sobj in enumerate(this_specobjs):
if sobj is None:
continue
if sobj['BOX_WAVE'] is None: #len(specobj._data.keys()) == 1: # Nothing extracted; only the trace exists
continue
                msgs.info("Working on flexure for object # {:d}".format(sobj.OBJID) + " in slit # {:d}".format(islit))
# Using boxcar
sky_wave = sobj.BOX_WAVE
sky_flux = sobj.BOX_COUNTS_SKY
# Generate 1D spectrum for object
obj_sky = xspectrum1d.XSpectrum1D.from_tuple((sky_wave, sky_flux))
# Calculate the shift
fdict = spec_flex_shift(obj_sky, sky_spectrum, sky_lines, mxshft=mxshft)
punt = False
if fdict is None:
msgs.warn("Flexure shift calculation failed for this spectrum.")
if sv_fdict is not None:
msgs.warn("Will used saved estimate from a previous slit/object")
fdict = copy.deepcopy(sv_fdict)
else:
# One does not exist yet
# Save it for later
return_later_sobjs.append([islit, ss])
punt = True
else:
sv_fdict = copy.deepcopy(fdict)
# Punt?
if punt:
break
# Update dict
for key in ['polyfit', 'shift', 'subpix', 'corr', 'corr_cen', 'smooth', 'arx_spec', 'sky_spec']:
flex_dict[key].append(fdict[key])
flex_dict['method'].append("boxcar")
# Check if we need to go back
# TODO :: This code just throws an error... probably need to delete or fix this "local" spectral flexure code
if not slit_cen:
# Do we need to go back?
for items in return_later_sobjs:
if sv_fdict is None:
msgs.info("No flexure corrections could be made")
break
# Setup
msgs.error("This probably needs to be updated")
slit, ss = items
flex_dict = flex_list[slit]
sobj = specobjs[ss]
# Copy me
fdict = copy.deepcopy(sv_fdict)
# Update dict
for key in ['polyfit', 'shift', 'subpix', 'corr', 'corr_cen', 'smooth', 'arx_spec', 'sky_spec']:
flex_dict[key].append(fdict[key])
flex_dict['method'].append("boxcar")
# Append, this will be an empty dictionary if the flexure failed
flex_list.append(flex_dict.copy())
return flex_list
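# Illustrative usage sketch (assumes a calibrated SlitTraceSet `slits`, an array of
# slit/order numbers `slitord`, a boolean bad-slit mask `slit_bpm`, and extracted
# `specobjs`; the sky file name and mxshft value below are placeholders):
#
#   flex_list = spec_flexure_slit(slits, slitord, slit_bpm, 'paranal_sky.fits',
#                                 method='boxcar', specobjs=specobjs, mxshft=20)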
def spec_flexure_corrQA(ax, this_flex_dict, cntr, name):
# Fit
fit = this_flex_dict['polyfit'][cntr]
xval = np.linspace(-10., 10, 100) + this_flex_dict['corr_cen'][cntr] # + flex_dict['shift'][o]
# model = (fit[2]*(xval**2.))+(fit[1]*xval)+fit[0]
model = fit.eval(xval)
# model = utils.func_val(fit, xval, 'polynomial')
mxmod = np.max(model)
ylim_min = np.min(model / mxmod) if np.isfinite(np.min(model / mxmod)) else 0.0
ylim = [ylim_min, 1.3]
ax.plot(xval - this_flex_dict['corr_cen'][cntr], model / mxmod, 'k-')
# Measurements
ax.scatter(this_flex_dict['subpix'][cntr] - this_flex_dict['corr_cen'][cntr],
this_flex_dict['corr'][cntr] / mxmod, marker='o')
# Final shift
ax.plot([this_flex_dict['shift'][cntr]] * 2, ylim, 'g:')
# Label
ax.text(0.5, 0.25, name, transform=ax.transAxes, size='large', ha='center')
ax.text(0.5, 0.15, 'flex_shift = {:g}'.format(this_flex_dict['shift'][cntr]),
transform=ax.transAxes, size='large', ha='center') # , bbox={'facecolor':'white'})
# Axes
ax.set_ylim(ylim)
ax.set_xlabel('Lag')
def spec_flexure_qa(slitords, bpm, basename, det, flex_list, specobjs=None, out_dir=None):
"""
Args:
slitords (`numpy.ndarray`_):
Array of slit/order numbers
bpm (`numpy.ndarray`_):
True = masked slit
basename (str):
det (int):
flex_list (list):
specobjs: (:class:`pypeit.specobjs.Specobjs`)
Spectrally extracted objects
out_dir:
"""
plt.rcdefaults()
plt.rcParams['font.family'] = 'times new roman'
# What type of QA are we doing
slit_cen = False
if specobjs is None: slit_cen = True
    # Grab the name of the method
method = inspect.stack()[0][3]
# Mask
gdslits = np.where(np.invert(bpm))[0]
# Loop over slits, and then over objects here
for islit in gdslits:
# Slit/order number
slitord = slitords[islit]
# Parse and Setup
if slit_cen:
nobj = 1
ncol = 1
else:
indx = specobjs.slitorder_indices(slitord)
this_specobjs = specobjs[indx]
nobj = np.sum(indx)
if nobj == 0:
continue
ncol = min(3, nobj)
this_flex_dict = flex_list[islit]
# Check that the default was overwritten
if len(this_flex_dict['shift']) == 0:
continue
nrow = nobj // ncol + ((nobj % ncol) > 0)
# Outfile, one QA file per slit
outfile = qa.set_qa_filename(basename, method + '_corr', det=det, slit=slitord, out_dir=out_dir)
plt.figure(figsize=(8, 5.0))
plt.clf()
gs = gridspec.GridSpec(nrow, ncol)
# TODO -- This cntr is crummy and needs to be replaced by a DataContainer
# for flex_dict and flex_list
cntr = 0
# Correlation QA
if slit_cen:
ax = plt.subplot(gs[0, 0])
spec_flexure_corrQA(ax, this_flex_dict, cntr, 'Slit Center')
else:
for specobj in this_specobjs:
if specobj is None or (specobj.BOX_WAVE is None and specobj.OPT_WAVE is None):
continue
ax = plt.subplot(gs[cntr//ncol, cntr % ncol])
spec_flexure_corrQA(ax, this_flex_dict, cntr, '{:s}'.format(specobj.NAME))
cntr += 1
# Finish
plt.tight_layout(pad=0.2, h_pad=0.0, w_pad=0.0)
plt.savefig(outfile, dpi=400)
plt.close()
# Sky line QA (just one object)
if slit_cen:
iobj = 0
else:
iobj = 0
specobj = this_specobjs[iobj]
# Repackage
sky_spec = this_flex_dict['sky_spec'][iobj]
arx_spec = this_flex_dict['arx_spec'][iobj]
min_wave = max(np.amin(arx_spec.wavelength.value), np.amin(sky_spec.wavelength.value))*units.AA
max_wave = min(np.amax(arx_spec.wavelength.value), np.amax(sky_spec.wavelength.value))*units.AA
# Sky lines
sky_lines = np.array([3370.0, 3914.0, 4046.56, 4358.34, 5577.338, 6300.304,
7340.885, 7993.332, 8430.174, 8919.610, 9439.660,
10013.99, 10372.88])*units.AA
dwv = 20.*units.AA
gdsky = np.where((sky_lines > min_wave) & (sky_lines < max_wave))[0]
if len(gdsky) == 0:
msgs.warn("No sky lines for Flexure QA")
continue
if len(gdsky) > 6:
idx = np.array([0, 1, len(gdsky)//2, len(gdsky)//2+1, -2, -1])
gdsky = gdsky[idx]
# Outfile
outfile = qa.set_qa_filename(basename, method+'_sky', det=det, slit=slitord, out_dir=out_dir)
# Figure
plt.figure(figsize=(8, 5.0))
plt.clf()
nrow, ncol = 2, 3
gs = gridspec.GridSpec(nrow, ncol)
if slit_cen:
plt.suptitle('Sky Comparison for Slit Center', y=1.05)
else:
plt.suptitle('Sky Comparison for {:s}'.format(specobj.NAME), y=1.05)
for ii, igdsky in enumerate(gdsky):
skyline = sky_lines[igdsky]
ax = plt.subplot(gs[ii//ncol, ii % ncol])
# Norm
pix1 = np.where(np.abs(sky_spec.wavelength-skyline) < dwv)[0]
pix2 = np.where(np.abs(arx_spec.wavelength-skyline) < dwv)[0]
f1 = np.sum(sky_spec.flux[pix1])
f2 = np.sum(arx_spec.flux[pix2])
norm = f1/f2
# Plot
ax.plot(sky_spec.wavelength[pix1], sky_spec.flux[pix1], 'k-', label='Obj',
drawstyle='steps-mid')
ax.plot(arx_spec.wavelength[pix2], arx_spec.flux[pix2]*norm, 'r-', label='Arx',
drawstyle='steps-mid')
# Axes
ax.xaxis.set_major_locator(plt.MultipleLocator(dwv.value))
ax.set_xlabel('Wavelength')
ax.set_ylabel('Counts')
# Legend
plt.legend(loc='upper left', scatterpoints=1, borderpad=0.3,
handletextpad=0.3, fontsize='small', numpoints=1)
# Finish
plt.savefig(outfile, dpi=400)
plt.close()
msgs.info("Wrote spectral flexure QA: {}".format(outfile))
#plt.close()
plt.rcdefaults()
def calculate_image_offset(image, im_ref, nfit=3):
"""Calculate the x,y offset between two images
Args:
image (`numpy.ndarray`_):
Image that we want to measure the shift of (relative to im_ref)
im_ref (`numpy.ndarray`_):
Reference image
nfit (int, optional):
Number of pixels (left and right of the maximum) to include in
fitting the peak of the cross correlation.
Returns:
ra_diff (float):
Relative shift (in pixels) of image relative to im_ref (x direction).
In order to align image with im_ref, ra_diff should be added to the
x-coordinates of image
dec_diff (float):
Relative shift (in pixels) of image relative to im_ref (y direction).
In order to align image with im_ref, dec_diff should be added to the
y-coordinates of image
"""
# Subtract median (should be close to zero, anyway)
image -= np.median(image)
im_ref -= np.median(im_ref)
# cross correlate (note, convolving seems faster)
ccorr = scipy.signal.correlate2d(im_ref, image, boundary='fill', mode='same')
#ccorr = scipy.signal.fftconvolve(im_ref, image[::-1, ::-1], mode='same')
# Find the maximum
amax = np.unravel_index(np.argmax(ccorr), ccorr.shape)
# Perform a 2D Gaussian fit
x = np.arange(amax[0]-nfit, amax[0] + nfit+1)
y = np.arange(amax[1]-nfit, amax[1] + nfit+1)
initial_guess = (np.max(ccorr), amax[0], amax[1], 3, 3, 0, 0)
xx, yy = np.meshgrid(x, y, indexing='ij')
# Fit the neighborhood of the maximum to calculate the offset
popt, _ = opt.curve_fit(fitting.twoD_Gaussian, (xx, yy),
ccorr[amax[0]-nfit:amax[0]+nfit+1, amax[1]-nfit:amax[1]+nfit+1].ravel(),
p0=initial_guess)
# Return the RA and DEC shift, in pixels
return popt[1] - ccorr.shape[0]//2, popt[2] - ccorr.shape[1]//2
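# Illustrative sketch (synthetic data, not part of the module): shift a smooth image
# by a known number of pixels and recover that offset.
#
#   yy, xx = np.mgrid[0:100, 0:100]
#   ref = np.exp(-((xx - 50.)**2 + (yy - 50.)**2) / 50.)
#   img = np.roll(np.roll(ref, 3, axis=0), 2, axis=1)
#   dx, dy = calculate_image_offset(img, ref)  # ~3 and ~2 pixels, signs per the docstring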
def sky_em_residuals(wave:np.ndarray, flux:np.ndarray,
ivar:np.ndarray, sky_waves:np.ndarray,
plot=False, noff=5., nfit_min=20):
"""Calculate residuals and other metrics for a set of
input sky emission lines
Args:
wave (np.ndarray): Wavelengths (in air!)
flux (np.ndarray):
ivar (np.ndarray):
sky_waves (np.ndarray): Skyline wavelengths (in air!)
plot (bool, optional): Defaults to False.
        noff (int, optional): Range in Ang to analyze about the emission line. Defaults to 5.
nfit_min (int, optional): Minimum number of pixels required to do a fit. Defaults to 20.
Returns:
tuple: np.ndarray's -- sky line wavelength of good lines, wavelength offset,
error in wavelength offset, sky line width,
error in sky line width
"""
dwave = []
diff = []
diff_err = []
los = []
los_err= []
# Loop on known sky lines
for line in sky_waves:
wline = [line-noff,line+noff]
mw = (wave > wline[0]) & (wave < wline[1])
        # Require minimum number
if np.sum(mw) <= nfit_min:
continue
p=[0,0,0,0]
# Guess
p0 = list(fitting.guess_gauss(wave[mw],flux[mw]))
# Fit
p, pcov = fitting.fit_gauss(wave[mw],flux[mw],
w_out = 1./np.sqrt(ivar[mw]),
guesses=p0, nparam=4)
perr = np.sqrt(np.diag(pcov))
#except:
# p=p0
# p[2] = -99
# perr=p0
# Continue
d = p[2] - line
# For debugging
if plot:
gfit = fitting.gauss_4deg(wave[mw],*p)
plt.figure(figsize=(8,3))
plt.plot(wave[mw],gfit,'g')
plt.plot(wave[mw],flux[mw])
plt.title('{} {:0.2f} diff= {:0.3f}'.format(line,p[3],d))
plt.show()
# Check
if not np.isfinite(perr[2]):
perr[2] = 1000.
# Save
dwave = np.append(dwave,line)
diff = np.append(diff,d)
diff_err = np.append(diff_err,perr[2])
los = np.append(los,p[3])
los_err = np.append(los_err,perr[3])
# Cut on quality
m=(diff_err < 0.1) & (diff_err > 0.0)
# Return
return dwave[m],diff[m],diff_err[m],los[m],los_err[m]
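# Illustrative usage sketch (follows the optimal-extraction names used by
# MultiSlitFlexure below; `sobj` and `sky_table` are placeholders):
#
#   dwave, diff, diff_err, los, los_err = sky_em_residuals(
#       sobj.OPT_WAVE, sobj.OPT_COUNTS_SKY, sobj.OPT_COUNTS_IVAR,
#       sky_table['Wave'], noff=5., nfit_min=20)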
# TODO -- Consider separating the methods from the DataContainer as per calibrations
class MultiSlitFlexure(DataContainer):
# Class to perform Multi-Detector flexure analysis
# Based on codes written by Marla Geha for DEIMOS
# Set the version of this class
version = '1.0.0'
datamodel = {'s1dfile': dict(otype=str, descr='spec1d filename'),
'PYP_SPEC': dict(otype=str, descr='PypeIt spectrograph name'),
'ndet': dict(otype=int, descr='Number of detectors per spectrum'),
'nslits': dict(otype=int, descr='Number of slits'),
'det': dict(otype=np.ndarray, atype=np.integer, descr='Detector number (ndet, nslits)'),
'SN': dict(otype=np.ndarray, atype=np.floating, descr='S/N (ndet, nslits)'),
'slitid': dict(otype=np.ndarray, atype=np.floating, descr='Slit ID (nslits)'),
                 'mn_wv': dict(otype=np.ndarray, atype=np.floating, descr='Minimum wavelength of the slit [Ang] (nslits)'),
'indiv_fit_slope': dict(otype=np.ndarray, atype=np.floating, descr='Fits to each slit individually (nslits)'),
'indiv_fit_b': dict(otype=np.ndarray, atype=np.floating, descr='Same as above but for b (nslits)'),
'indiv_fit_los': dict(otype=np.ndarray, atype=np.floating, descr='Same as above but for line width (nslits)'),
'fit_slope': dict(otype=np.ndarray, atype=np.floating, descr='Fitted slope (nslits)'),
                 'fit_b': dict(otype=np.ndarray, atype=np.floating, descr='Fitted b value (nslits)'),
                 'fit_los': dict(otype=np.ndarray, atype=np.floating, descr='Fitted line width (nslits)'),
'resid_sky': dict(otype=np.ndarray, atype=np.floating, descr='Residuals of flexure model on sky lines (nslits)'),
'objra': dict(otype=np.ndarray, atype=np.floating, descr='Object RA (nslits)'),
'objdec': dict(otype=np.ndarray, atype=np.floating, descr='Object DEC (nslits)'),
'maskdef_id': dict(otype=np.ndarray, atype=np.integer, descr='Mask ID (nslits)'),
'rms_arc': dict(otype=np.ndarray, atype=np.floating, descr='RMS of fit (ndet, nslits)'),
}
def __init__(self, s1dfile=None, PYP_SPEC=None, nslits=None, det=None,
SN=None, slitid=None, mn_wv=None, fit_slope=None, fit_b=None,
fit_los=None, objra=None, objdec=None, maskdef_id=None, rms_arc=None,
resid_sky=None, indiv_fit_slope=None, indiv_fit_b=None,
indiv_fit_los=None):
# Setup the DataContainer
args, _, _, values = inspect.getargvalues(inspect.currentframe())
_d = {k: values[k] for k in args[1:]}
# Init
super(MultiSlitFlexure, self).__init__(d=_d)
# Load up specobjs
self.specobjs = specobjs.SpecObjs.from_fitsfile(self.s1dfile,
chk_version=False)
# Sky lines
sky_file = os.path.join(resource_filename('pypeit', 'data'),
'sky_spec', 'sky_single_mg.dat')
self.sky_table = ascii.read(sky_file)
def _init_internals(self):
# Parameters (FlexurePar)
self.flex_par = None
# spectrograph
self.spectrograph = None
# Specobjs object
self.specobjs = None
# Index to specobjs (tuple of arrays)
self.sobj_idx = None # (ndet, nslits)
# Sky line table
self.sky_table = None
# 2D models
self.pmodel_m = None
self.pmodel_b = None
self.pmodel_l = None
def init(self, spectrograph, par):
""" Initialize this and that about the slits, par, spectrograph
e.g. RA, DEC, S/N
Args:
spectrograph (:class:`pypeit.spectrographs.spectrograph.Spectrograph` or None):
The `Spectrograph` instance that sets the
instrument used to take the observations. Used to set
:attr:`spectrograph`.
par (:class:`pypeit.par.pypeitpar.FlexurePar`):
The parameters used for the flexure processing
"""
# Internals
self.spectrograph = spectrograph
self.flex_par = par
# Set
self.PYP_SPEC = self.spectrograph.name
self.sobj_idx = self.spectrograph.spec1d_match_spectra(self.specobjs)
#
self.nslits = len(self.sobj_idx[0])
self.ndet = len(self.sobj_idx)
# Fill in 1D
self['slitid'] = self.specobjs[self.sobj_idx[0]]['SLITID'].astype(float)
self['objra'] = self.specobjs[self.sobj_idx[0]]['RA']
self['objdec'] = self.specobjs[self.sobj_idx[0]]['DEC']
#self['slitname'] = self.specobjs[self.sobj_idx[0]]['MASKDEF_OBJNAME']
self['maskdef_id'] = self.specobjs[self.sobj_idx[0]]['MASKDEF_ID']
# Fill in 2D
#for new_key, key, dtype in zip(['objname', 'det'],
# ['NAME', 'DET'],
# [str, int]):
for new_key, key, dtype in zip(['det'],
['DET'],
[int]):
# Init
            if self.datamodel[new_key]['atype'] == str:  # np.str was an alias for str and is removed in recent numpy
slist = []
for det in range(self.ndet):
slist.append(self.specobjs[self.sobj_idx[det]][key])
self[new_key] = np.array(slist)
else:
self[new_key] = np.zeros((self.ndet, self.nslits), dtype=dtype)
for det in range(self.ndet):
self[new_key][det] = self.specobjs[self.sobj_idx[det]][key]
# S/N and mn_wv from the spectra
self['SN'] = np.zeros((self.ndet, self.nslits), dtype=float)
self['mn_wv'] = np.zeros((self.ndet, self.nslits), dtype=float)
for det in range(self.ndet):
self['SN'][det] = [sobj.med_s2n for sobj in self.specobjs[self.sobj_idx[det]]]
self['mn_wv'][det] = [sobj.mnx_wave[0] for sobj in self.specobjs[self.sobj_idx[det]]]
# Return
return
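    # Typical workflow for this container (sketch only; the file name and the way the
    # FlexurePar block is obtained are placeholders, not a prescribed API):
    #   mdFlex = MultiSlitFlexure(s1dfile='spec1d_file.fits')
    #   mdFlex.init(spectrograph, flexure_par)
    #   mdFlex.measure_sky_lines()    # per-slit linear fits to sky-line residuals
    #   mdFlex.fit_mask_surfaces()    # 2D polynomial surfaces in (RA, DEC)
    #   mdFlex.update_fit()           # evaluate surfaces per slit and compute residuals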
def fit_mask_surfaces(self):
"""Fit 2D model to linear flexure models
from each slit as a function of RA, DEC
"""
# Cut on S/N
good_SN = self['SN'] > self.flex_par['multi_min_SN']
good_slit = np.sum(good_SN, axis=0) == self.ndet
# Basic stats
mu = np.median(self['indiv_fit_slope'][good_slit])
sd = np.std(self['indiv_fit_slope'][good_slit])
mu2 = np.median(self['indiv_fit_b'][good_slit])
sd2 = np.std(self['indiv_fit_b'][good_slit])
# Cut down to +/- 2sigma
mgood=(np.abs(self['indiv_fit_slope']-mu) < 2.*sd) & (
np.abs(self['indiv_fit_b']-mu2) < 2.*sd2) & good_slit
# Fit me (without additional rejection)
# TODO -- Allow for x,y position instead of RA, DEC
self.pmodel_m = fitting.robust_fit(self['objra'][mgood],
self['indiv_fit_slope'][mgood], (2,2),
function='polynomial2d',
x2=self['objdec'][mgood])
self.pmodel_b = fitting.robust_fit(self['objra'][mgood],
self['indiv_fit_b'][mgood], (2,2),
function='polynomial2d',
x2=self['objdec'][mgood])
self.pmodel_l = fitting.robust_fit(self['objra'][mgood],
self['indiv_fit_los'][mgood], (2,2),
function='polynomial2d',
x2=self['objdec'][mgood])
def measure_sky_lines(self):
"""Main method to analyze the sky lines for all the slits
"""
# Init
for key in ['indiv_fit_slope', 'indiv_fit_b', 'indiv_fit_los']:
self[key] = np.zeros(self.nslits)
# Loop on slits
for i in np.arange(0,self.nslits,1):
if (i % 10) == 0:
msgs.info("Working on slit {} of {}".format(i, self.nslits))
if not np.all(self['SN'][:,i] > 1.):
continue
#
# Loop on detectors
sky_lines, sky_diffs, sky_ediffs, sky_loss = [], [], [], []
for det in range(self.ndet):
sobj = self.specobjs[self.sobj_idx[det][i]]
# Measure em
# The following will break if only boxcar...
# TODO -- Allow for boxcar
sky_line, sky_diff, sky_ediff, los, _ = sky_em_residuals(
sobj['OPT_WAVE'],
sobj['OPT_COUNTS_SKY'],
sobj['OPT_COUNTS_IVAR'],
self.sky_table['Wave'])
# Hold em
sky_lines.append(sky_line)
sky_diffs.append(sky_diff)
sky_ediffs.append(sky_ediff)
sky_loss.append(los)
# Concatenate
sky_lines = np.concatenate(sky_lines)
sky_diffs = np.concatenate(sky_diffs)
sky_ediffs = np.concatenate(sky_ediffs)
sky_loss = np.concatenate(sky_loss)
# FIT SINGLE SLIT SKY LINES WITH A LINE
linear_fit = fitting.robust_fit(sky_lines,
sky_diffs,
weights=1./sky_ediffs**2,
function='polynomial',
order=1,
maxrej=1, # Might increase
lower=3., upper=3.)
# Save
self['indiv_fit_b'][i] = linear_fit.fitc[0]
self['indiv_fit_slope'][i] = linear_fit.fitc[1]
self['indiv_fit_los'][i] = np.median(sky_loss)
# Return
return
def update_fit(self):
"""Update fits for each slit based on 2D model
"""
# Do it
self['fit_slope'] = self.pmodel_m.eval(self['objra'],x2=self['objdec'])
self['fit_b'] = self.pmodel_b.eval(self['objra'],x2=self['objdec'])
self['fit_los'] = self.pmodel_l.eval(self['objra'],x2=self['objdec'])
# CALCULATE RESIDUALS FROM FIT
# Only for QA (I think)
resid_sky = []
for i in np.arange(0,self.nslits,1):
# Require sufficient S/N in reddest detector
if self['SN'][-1,i] > 0:
# Load up the full spectrum
#all_wave,all_flux,all_ivar,all_sky = dmost_utils.load_spectrum(f,hdu,vacuum = 1)
tmp_wave, all_flux, all_sky, all_ivar = np.ndarray(0), \
np.ndarray(0), np.ndarray(0), np.ndarray(0)
# TODO -- Allow for Boxcar
for det in range(self.ndet):
sobj = self.specobjs[self.sobj_idx[det][i]]
tmp_wave = np.concatenate((tmp_wave, sobj.OPT_WAVE))
all_flux = np.concatenate((all_flux, sobj.OPT_COUNTS))
all_sky = np.concatenate((all_sky, sobj.OPT_COUNTS_SKY))
all_ivar = np.concatenate((all_ivar, sobj.OPT_COUNTS_IVAR))
# Massage
fitwave = self['fit_slope'][i]*tmp_wave + self['fit_b'][i]
all_wave = tmp_wave - fitwave
# TRIM ENDS
all_wave=all_wave[5:-15]
all_flux=all_flux[5:-15]
all_ivar=all_ivar[5:-15]
all_sky=all_sky[5:-15]
# REMOVE CRAZY 500-SIGMA VALUES
cmask = (all_sky > np.percentile(all_sky,0.1)) & (all_sky < np.percentile(all_sky,99.9))
m=np.median(all_sky[cmask])
s=np.std(all_sky[cmask])
mm = (all_sky > 500.*s + m) | (all_sky < m-50.*s)
all_sky[mm] = m
all_ivar[mm] = 1e6
if (np.sum(mm) > 10):
msgs.warn('Removing more than 10 pixels of data')
_,diff,diff_err,_,_ = sky_em_residuals(
all_wave,all_sky,all_ivar,
self.sky_table['Wave'])
m=np.isfinite(diff)
sky_mean = np.average(np.abs(diff[m]),
weights = 1./diff_err[m]**2)
resid_sky = np.append(resid_sky,sky_mean)
else:
resid_sky = np.append(resid_sky,-1)
self['resid_sky'] = resid_sky
def qa_plots(self, plot_dir:str, root:str):
"""Generate QA plots
Args:
            plot_dir (str): Top-level folder for QA
QA/ is generated beneath this, as needed
root (str): Root for output files
"""
        # Generate QA folder as needed
qa_dir = os.path.join(plot_dir, 'QA')
if not os.path.isdir(qa_dir):
os.mkdir(qa_dir)
'''
# Slopes
pdf2 = matplotlib.backends.backend_pdf.PdfPages(os.path.join(qa_dir, 'flex_slits_'+root+'.pdf'))
plt.rcParams.update({'figure.max_open_warning': 0})
for i in np.arange(0,self.nslits,1):
if not np.all(self['SN'][:,i] > 0.):
continue
# SKY LINES FIRST
r_sky_line, r_sky_diff,r_sky_ediff,r_los,r_elos = sky_em_residuals(hdu[r].data['OPT_WAVE'], \
hdu[r].data['OPT_COUNTS_SKY'],\
hdu[r].data['OPT_COUNTS_IVAR'])
b_sky_line, b_sky_diff,b_sky_ediff,b_los,b_elos = sky_em_residuals(hdu[b].data['OPT_WAVE'], \
hdu[b].data['OPT_COUNTS_SKY'],\
hdu[b].data['OPT_COUNTS_IVAR'])
fig, (ax1,ax2) = plt.subplots(1, 2,figsize=(20,4))
ax1.plot(r_sky_line,r_sky_diff,'ro',alpha=0.8,label='Red chip: Sky Emission')
ax1.plot(b_sky_line,b_sky_diff,'bo',alpha=0.8,label='Blue chip: Sky Emission')
ax1.errorbar(b_sky_line,b_sky_diff,yerr=b_sky_ediff,fmt='none',ecolor='b',alpha=0.5)
ax1.errorbar(r_sky_line,r_sky_diff,yerr=r_sky_ediff,fmt='none',ecolor='r',alpha=0.5)
ax1.text(6320,0,'{}'.format(b),fontsize=11)
ax1.text(8500,0,'{}'.format(r),fontsize=11)
ax1.set_ylim(-0.45,0.45)
x=np.arange(6000,9000,1)
l1 = slits['fit_slope'][i]*x + slits['fit_b'][i]
l2 = fslits['fit_slope'][i]*x + fslits['fit_b'][i]
ax1.plot(x,l1,'-')
ax1.plot(x,l2,'--')
ax1.axhline(linewidth=1, color='grey',alpha=0.5)
ax1.set_ylabel('Wavelength offset (AA)')
ax1.set_xlabel('Wavelength (AA)')
ax1.set_xlim(6300,9100)
t = 'Sky Line Fits , resid = {:0.4f} AA, arc = {:0.2f}'.format(slits['resid_sky'][i],0.32*slits['rms_arc_r'][i])
ax1.set_title(t)
sky_diff = np.concatenate((r_sky_diff,b_sky_diff),axis=None)
sky_lines = np.concatenate((r_sky_line,b_sky_line),axis=None)
sky_ediff = np.concatenate((r_sky_ediff,b_sky_ediff),axis=None)
sky_los = np.concatenate((r_los,b_los),axis=None)
ax2.plot(r_sky_line,r_los,'ro',alpha=0.8,label='Red chip: Sky Emission')
ax2.plot(b_sky_line,b_los,'bo',alpha=0.8,label='Blue chip: Sky Emission')
ax2.errorbar(r_sky_line,r_los,yerr=r_elos,fmt='none',ecolor='r',alpha=0.5)
ax2.errorbar(b_sky_line,b_los,yerr=b_elos,fmt='none',ecolor='b',alpha=0.5)
ax2.axhline(fslits['fit_los'][i],linewidth=1, color='grey',alpha=0.5)
ax2.set_title('Line widths')
ax2.set_xlabel('Wavelength (AA)')
ax2.set_ylim(0.3,0.8)
ax2.set_xlim(6300,9100)
pdf2.savefig()
pdf2.close()
plt.close('all')
'''
#########################################################################
# CREATE FULL MASK FITS
pdf = matplotlib.backends.backend_pdf.PdfPages(
            os.path.join(qa_dir, 'flex_mask_' + root + '.pdf'))
xslit = self['objra']
yslit = self['objdec']
t=2.
mu = np.median(self['indiv_fit_slope'])
sd = np.std(self['indiv_fit_slope'])
mu2 = np.median(self['indiv_fit_b'])
sd2 = np.std(self['indiv_fit_b'])
mu3 = np.median(self['indiv_fit_los'])
sd3 = np.std(self['indiv_fit_los'])
# PLOT FITTED VALUES
fig, (ax1,ax2,ax3) = plt.subplots(1, 3,figsize=(22,5))
mm1=-0.00005
mm2=0.00005
print(mu-t*sd,mu+t*sd)
ax1.scatter(xslit,yslit,c=self['indiv_fit_slope'],
cmap="cool",vmin = mm1,vmax=mm2 )# mu-t*sd,vmax=mu+t*sd)
ax1.set_ylabel('Dec [deg]')
ax1.set_xlabel('RA [deg]')
ax1.set_title('Wave MEASURE: line slope')
#cax, _ = matplotlib.colorbar.make_axes(ax1)
#normalize = matplotlib.colors.Normalize(vmin = mu-t*sd,vmax=mu+t*sd)
#cbar = matplotlib.colorbar.ColorbarBase(cax, cmap='cool',norm=normalize)
ax2.scatter(xslit,yslit,c=self['indiv_fit_b'],cmap="summer",
vmin = mu2-t*sd2,vmax=mu2+t*sd2)
ax2.set_ylabel('Dec [deg]')
ax2.set_xlabel('RA [deg]')
ax2.set_title('Wave MEASURE: line intercept')
cax, _ = matplotlib.colorbar.make_axes(ax2)
normalize = matplotlib.colors.Normalize(vmin = mu2-t*sd2,vmax=mu2+t*sd2)
#cbar = matplotlib.colorbar.ColorbarBase(cax, cmap='summer',norm=normalize)
ax3.scatter(xslit,yslit,c=self['indiv_fit_los'],cmap="cool",vmin = mu3-t*sd3,vmax=mu3+t*sd3)
ax3.set_ylabel('Dec [deg]')
ax3.set_xlabel('RA [deg]')
ax3.set_title('Wave MEASURE: line width')
cax, _ = matplotlib.colorbar.make_axes(ax3)
normalize = matplotlib.colors.Normalize(vmin = mu3-t*sd3,vmax=mu3+t*sd3)
#cbar = matplotlib.colorbar.ColorbarBase(cax, cmap='cool',norm=normalize)
pdf.savefig()
#######################
# PLOT MEASURED VALUES
fig, (ax1,ax2,ax3) = plt.subplots(1, 3,figsize=(22,5))
ax1.scatter(xslit,yslit,c=self['fit_slope'],
cmap="cool",vmin = mu-t*sd,vmax=mu+t*sd)
ax1.set_ylabel('Dec [deg]')
ax1.set_xlabel('RA [deg]')
ax1.set_title('Wave fit: line slope')
cax, _ = matplotlib.colorbar.make_axes(ax1)
normalize = matplotlib.colors.Normalize(vmin = mu-t*sd,vmax=mu+t*sd)
#cbar = matplotlib.colorbar.ColorbarBase(cax, cmap='cool',norm=normalize)
ax2.scatter(xslit,yslit,c=self['fit_b'],
cmap="summer",vmin = mu2-t*sd2,vmax=mu2+t*sd2)
ax2.set_ylabel('Dec [deg]')
ax2.set_xlabel('RA [deg]')
ax2.set_title('Wave fit: line intercept')
cax, _ = matplotlib.colorbar.make_axes(ax2)
normalize = matplotlib.colors.Normalize(vmin = mu2-t*sd2,vmax=mu2+t*sd2)
#cbar = matplotlib.colorbar.ColorbarBase(cax, cmap='summer',norm=normalize)
ax3.scatter(xslit,yslit,c=self['fit_los'],
cmap="cool",vmin = mu3-t*sd3,vmax=mu3+t*sd3)
ax3.set_ylabel('Dec [deg]')
ax3.set_xlabel('RA [deg]')
ax3.set_title('Wave fit: line width')
cax, _ = matplotlib.colorbar.make_axes(ax3)
normalize = matplotlib.colors.Normalize(vmin = mu3-t*sd3,vmax=mu3+t*sd3)
#cbar = matplotlib.colorbar.ColorbarBase(cax, cmap='cool',norm=normalize)
pdf.close()
|
the-stack_0_27481
|
import re
import pytest
from hypothesis import given
from hypothesis.strategies import from_regex
from miteclock.config import (
ApiKey,
Config,
SettingsLoadError,
load_api_key,
load_config,
to_toml,
)
@pytest.mark.parametrize("ws", ["{}", "{}\n", "\n{}\n"])
@given(raw=from_regex(re.compile(r"^[0-9a-f]{16}$", re.IGNORECASE)))
def test_api_key_valid(raw, ws):
"""Hexadecimal keys of the right length should work.
    Surrounding whitespace doesn't matter, and neither does upper/lower case.
"""
assert str(ApiKey(ws.format(raw))) == raw.strip().lower()
@pytest.mark.parametrize("ws", ["{}", "{}\n", "\n{}\n"])
@given(
raw=from_regex(re.compile(r"^[0-9a-f]{,15}$", re.IGNORECASE)) # Too short.
| from_regex(re.compile(r"^[0-9a-f]{17,}$", re.IGNORECASE)) # Too long.
| from_regex(re.compile(r"[^0-9a-f]+", re.IGNORECASE)) # At least one non-hex char.
)
def test_api_key_invalid(raw, ws):
"""Make sure invalid inputs do fail."""
with pytest.raises(ValueError):
ApiKey(ws.format(raw))
def prompt_for_testing(ret_value):
def prompter(prompt_msg):
print(prompt_msg, end="")
return ret_value
return prompter
def test_load_api_key_success(tmp_path, capsys):
"""File with api key is present and contains reasonable data."""
apikey_pth = tmp_path / "apikey"
key_val = "6d12e0bf974df0e9\n"
apikey_pth.write_text(key_val)
assert (
str(load_api_key(apikey_pth, prompt=prompt_for_testing("6d12e0bf974df0e9")))
== "6d12e0bf974df0e9"
)
out, _ = capsys.readouterr()
assert out == ""
assert apikey_pth.read_text() == key_val
def test_load_api_key_file_missing(tmp_path, capsys):
"""Handle missing api key file.
We should prompt user for the key and add the file.
"""
key_val = "6d12e0bf974df0e9"
apikey_path = tmp_path / "apikey"
assert str(load_api_key(apikey_path, prompt=prompt_for_testing(key_val))) == key_val
out, _ = capsys.readouterr()
assert out == "Key not found, please enter it"
assert apikey_path.read_text() == key_val + "\n"
@pytest.mark.parametrize(
"key, errmsg",
[
("abc", "API key must be exactly 16 characters long, this one is 3."),
("6p12e0bf974df0e9", "API key must only consist of hexadecimal characters."),
],
)
def test_load_api_key_invalid(key, errmsg, tmp_path):
"""The key is missing, we prompt for it, user enters something invalid."""
with pytest.raises(SettingsLoadError) as excinfo:
load_api_key(tmp_path / "apikey", prompt_for_testing(key))
assert str(excinfo.value) == errmsg
def test_default_toml_content():
"""Toml content for the default config."""
url = "https://abc.mite.yo.lk"
assert to_toml(Config(url=url)) == (
f'url = "{url}"\n'
'menu_keys = "asdfjkl;"\n\n'
"[shortcuts] # Add your shortcuts inside this section.\n"
)
def test_load_config_does_not_exist(conf_path, capsys):
"""Handle missing config file.
Prompt for mandatory information and use it to instantiate the current config type.
"""
config = load_config(
conf_path,
prompt=prompt_for_testing("https://abc.mite.yo.lk"),
)
assert config == Config(
url="https://abc.mite.yo.lk", menu_keys="asdfjkl;", shortcuts={}
)
out, _ = capsys.readouterr()
assert out == "Please copy/paste your mite URL"
def test_load_config_does_not_exist_invalid_url_input(conf_path, capsys):
"""Config file is missing and user provides invalid mite URL.
We throw an exception, assuming that we will prompt again for it next time.
"""
with pytest.raises(SettingsLoadError) as excinfo:
load_config(
conf_path,
prompt=prompt_for_testing("http://abc.mite.yo.lk"),
)
assert str(excinfo.value) == (
"Detected the following problems with your configuration:\n"
"url: HTTPS is required for security."
)
out, _ = capsys.readouterr()
assert out == "Please copy/paste your mite URL"
@pytest.fixture
def conf_path(tmp_path):
return tmp_path / "config.toml"
def test_duplicate_toml_keys(conf_path):
"""Duplicate keys are caught by toml parser, make sure we report them nicely."""
conf_path.write_text('[shortcuts]\nm="test"\nm="another"')
with pytest.raises(SettingsLoadError) as excinfo:
load_config(conf_path)
assert str(excinfo.value) == (
'There was a problem parsing your configuration file: Key "m" already exists.'
)
def test_load_config_toml_parse_error(conf_path):
conf_path.write_text('a="valid"\nb=invalid\nc="valid"')
with pytest.raises(SettingsLoadError) as excinfo:
load_config(conf_path)
assert str(excinfo.value) == (
"There was a problem parsing your configuration file: "
"Unexpected character: 'i' at line 2 col 2\n"
"b=invalid\n"
" ^"
)
def test_load_valid_config(conf_path):
base_url = "https://abc.mite.yo.lk"
conf_path.write_text(
f'url="{base_url}"\n'
'menu_keys="abc"\n\n'
"[shortcuts]\n"
'a="test"\nb="test2"\nc = ["a", "test3"]'
'\nd = {"pattern"= "QA", "match"="strict"}'
)
config = load_config(conf_path)
assert config == Config(
base_url,
menu_keys="abc",
shortcuts={
"a": "test",
"b": "test2",
"c": ["a", "test3"],
"d": dict(pattern="QA", match="strict"),
},
)
def test_load_valid_legacy_config(conf_path):
base_url = "https://abc.mite.yo.lk"
conf_path.write_text(
'account="abc"\n'
'menu_keys="abc"\n\n'
"[shortcuts]\n"
'a="test"\nb="test2"\nc = ["a", "test3"]'
'\nd = {"pattern"= "QA", "match"="strict"}'
)
config = load_config(conf_path)
assert config == Config(
url=base_url,
menu_keys="abc",
shortcuts={
"a": "test",
"b": "test2",
"c": ["a", "test3"],
"d": dict(pattern="QA", match="strict"),
},
)
def test_legacy_config(conf_path):
"""When config file has legacy structure, we should still return latest version."""
conf_path.write_text(
"\n".join(
[
'account="abc"',
'menu_keys="abc"',
"",
"[shortcuts]",
]
)
)
config = load_config(conf_path)
assert config == Config(
url="https://abc.mite.yo.lk",
menu_keys="abc",
)
@pytest.mark.parametrize(
"url_path", ["/", "/daily", "/daily/#2021/12/26", "/#2021/12/26"]
)
def test_url_remove_path(url_path):
"""If we get any kind of path added to a URL we keep only the base."""
base_url = "https://abc.mite.yo.lk"
assert str(Config(url=base_url + url_path).url) == base_url
def test_non_mite_url():
"""Only mite urls are supported."""
with pytest.raises(ValueError) as excinfo:
Config(url="https://google.com")
assert str(excinfo.value) == "url: Make sure you are using a mite url."
def test_no_menu_keys():
"""At least one menu key should be specified, otherwise we crash early."""
with pytest.raises(ValueError) as excinfo:
Config(url="https://abc.mite.yo.lk", menu_keys="")
assert str(excinfo.value) == "menu_keys: At least one key must be provided."
|
the-stack_0_27482
|
import errno
import logging
import subprocess
from contextlib import contextmanager
import ctypes
import io
import os
import sys
import tempfile
def count_file_lines(filename): # Might be a bit faster with a call to "wc -l"
""" Count the number of lines in a given file """
i = 0
with open(filename) as f:
for i, _ in enumerate(f, 1):
pass
return i
def remove_duplicate_lines(filename):
""" Removes in-place any duplicate line in the file. Will also reorder the lines as a side-effect """
subprocess.call(['sort', '-u', '-o', filename, filename])
def read_file(filename):
""" Read a file, line by line, ignoring end-of-line characters"""
with open(filename) as f:
for line in f:
yield line.rstrip('\n')
def execute(command, **kwargs):
stdout, stderr = kwargs.get("stdout", None), kwargs.get("stderr", None)
if isinstance(stdout, str):
stdout = open(stdout, 'w')
if isinstance(stderr, str):
stderr = open(stderr, 'w')
cwd = kwargs["cwd"] if "cwd" in kwargs else os.getcwd()
msg = 'Executing "{}" on directory "{}"'.format(' '.join(command), cwd)
if stdout:
msg += '. Standard output redirected to "{}"'.format(stdout.name)
if stderr:
msg += '. Standard error redirected to "{}"'.format(stderr.name)
logging.debug(msg)
retcode = subprocess.call(command, cwd=cwd, stdout=stdout, stderr=stderr)
if stdout:
stdout.close()
if stderr:
stderr.close()
if stderr is not None and os.path.getsize(stderr.name) == 0: # Delete error log if empty
os.remove(stderr.name)
return retcode
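# Illustrative usage sketch (command, directory and file names are placeholders):
#   retcode = execute(['ls', '-l'], cwd='/tmp', stdout='listing.txt', stderr='errors.log')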
def silentremove(filename):
try:
os.remove(filename)
except OSError as e:
if e.errno != errno.ENOENT: # errno.ENOENT = no such file or directory
            raise # re-raise exception if a different error occurred
libc = ctypes.CDLL(None)
if sys.platform == "darwin":
stdout_name = '__stdoutp'
else:
stdout_name = 'stdout'
c_stdout = ctypes.c_void_p.in_dll(libc, stdout_name)
@contextmanager
def stdout_redirector(stream):
# From: https://eli.thegreenplace.net/2015/redirecting-all-kinds-of-stdout-in-python/
# The original fd stdout points to. Usually 1 on POSIX systems.
original_stdout_fd = sys.stdout.fileno()
def _redirect_stdout(to_fd):
"""Redirect stdout to the given file descriptor."""
# Flush the C-level buffer stdout
libc.fflush(c_stdout)
# Flush and close sys.stdout - also closes the file descriptor (fd)
sys.stdout.close()
# Make original_stdout_fd point to the same file as to_fd
os.dup2(to_fd, original_stdout_fd)
# Create a new sys.stdout that points to the redirected fd
sys.stdout = io.TextIOWrapper(os.fdopen(original_stdout_fd, 'wb'))
# Save a copy of the original stdout fd in saved_stdout_fd
saved_stdout_fd = os.dup(original_stdout_fd)
tfile = None
try:
# Create a temporary file and redirect stdout to it
tfile = tempfile.TemporaryFile(mode='w+b')
_redirect_stdout(tfile.fileno())
# Yield to caller, then redirect stdout back to the saved fd
yield
_redirect_stdout(saved_stdout_fd)
# Copy contents of temporary file to the given stream
tfile.flush()
tfile.seek(0, io.SEEK_SET)
stream.write(tfile.read())
finally:
if tfile:
tfile.close()
os.close(saved_stdout_fd)
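# Illustrative usage sketch: capture C-level stdout (e.g. from an extension module or
# a child process that inherits fd 1) into a Python buffer. `io` and `os` are already
# imported above.
#
#   f = io.BytesIO()
#   with stdout_redirector(f):
#       os.system('echo captured from a subprocess')
#   print('Got:', f.getvalue())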
|
the-stack_0_27483
|
# coding: utf-8 -*-
'''
Make me some salt!
'''
# Import python libs
from __future__ import absolute_import
import os
import warnings
from salt.utils.verify import verify_log
# All salt related deprecation warnings should be shown once each!
warnings.filterwarnings(
'once', # Show once
'', # No deprecation message match
DeprecationWarning, # This filter is for DeprecationWarnings
r'^(salt|salt\.(.*))$' # Match module(s) 'salt' and 'salt.<whatever>'
)
# While we are supporting Python2.6, hide nested with-statements warnings
warnings.filterwarnings(
'ignore',
'With-statements now directly support multiple context managers',
DeprecationWarning
)
# Filter the backports package UserWarning about being re-imported
warnings.filterwarnings(
'ignore',
'^Module backports was already imported from (.*), but (.*) is being added to sys.path$',
UserWarning
)
# Import salt libs
# We import log ASAP because we NEED to make sure that any logger instance salt
# instantiates is using salt.log.setup.SaltLoggingClass
import salt.log.setup
# the try block below bypasses an issue at build time so that modules don't
# cause the build to fail
from salt.utils import migrations
from salt.utils import kinds
try:
from salt.utils import parsers, ip_bracket
from salt.utils.verify import check_user, verify_env, verify_socket
except ImportError as exc:
if exc.args[0] != 'No module named _msgpack':
raise
from salt.exceptions import SaltSystemExit, SaltClientError, get_error_message
# Let's instantiate log using salt.log.setup.logging.getLogger() so pylint
# leaves us alone and stops complaining about an un-used import
log = salt.log.setup.logging.getLogger(__name__)
class DaemonsMixin(object): # pylint: disable=no-init
'''
Uses the same functions for all daemons
'''
def verify_hash_type(self):
'''
        Verify and display a nag-message to the log if a vulnerable hash-type is used.
:return:
'''
if self.config['hash_type'].lower() in ['md5', 'sha1']:
log.warning('IMPORTANT: Do not use {h_type} hashing algorithm! Please set "hash_type" to '
'sha256 in Salt {d_name} config!'.format(
h_type=self.config['hash_type'], d_name=self.__class__.__name__))
def action_log_info(self, action):
'''
Say daemon starting.
:param action
:return:
'''
log.info('{action} the Salt {d_name}'.format(d_name=self.__class__.__name__, action=action))
def start_log_info(self):
'''
Say daemon starting.
:return:
'''
log.info('The Salt {d_name} is starting up'.format(d_name=self.__class__.__name__))
def shutdown_log_info(self):
'''
Say daemon shutting down.
:return:
'''
log.info('The Salt {d_name} is shut down'.format(d_name=self.__class__.__name__))
def environment_failure(self, error):
'''
Log environment failure for the daemon and exit with the error code.
:param error:
:return:
'''
log.exception('Failed to create environment for {d_name}: {reason}'.format(
d_name=self.__class__.__name__, reason=get_error_message(error)))
self.shutdown(error)
class Master(parsers.MasterOptionParser, DaemonsMixin): # pylint: disable=no-init
'''
Creates a master server
'''
def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument
# escalate signal to the process manager processes
self.master.process_manager.stop_restarting()
self.master.process_manager.send_signal_to_processes(signum)
# kill any remaining processes
self.master.process_manager.kill_children()
super(Master, self)._handle_signals(signum, sigframe)
def prepare(self):
'''
Run the preparation sequence required to start a salt master server.
If sub-classed, don't **ever** forget to run:
super(YourSubClass, self).prepare()
'''
super(Master, self).prepare()
try:
if self.config['verify_env']:
v_dirs = [
self.config['pki_dir'],
os.path.join(self.config['pki_dir'], 'minions'),
os.path.join(self.config['pki_dir'], 'minions_pre'),
os.path.join(self.config['pki_dir'], 'minions_denied'),
os.path.join(self.config['pki_dir'],
'minions_autosign'),
os.path.join(self.config['pki_dir'],
'minions_rejected'),
self.config['cachedir'],
os.path.join(self.config['cachedir'], 'jobs'),
os.path.join(self.config['cachedir'], 'proc'),
self.config['sock_dir'],
self.config['token_dir'],
self.config['syndic_dir'],
self.config['sqlite_queue_dir'],
]
if self.config.get('transport') == 'raet':
v_dirs.append(os.path.join(self.config['pki_dir'], 'accepted'))
v_dirs.append(os.path.join(self.config['pki_dir'], 'pending'))
v_dirs.append(os.path.join(self.config['pki_dir'], 'rejected'))
v_dirs.append(os.path.join(self.config['cachedir'], 'raet'))
verify_env(
v_dirs,
self.config['user'],
permissive=self.config['permissive_pki_access'],
pki_dir=self.config['pki_dir'],
)
# Clear out syndics from cachedir
for syndic_file in os.listdir(self.config['syndic_dir']):
os.remove(os.path.join(self.config['syndic_dir'], syndic_file))
except OSError as error:
self.environment_failure(error)
self.setup_logfile_logger()
verify_log(self.config)
self.action_log_info('Setting up')
# TODO: AIO core is separate from transport
if self.config['transport'].lower() in ('zeromq', 'tcp'):
if not verify_socket(self.config['interface'],
self.config['publish_port'],
self.config['ret_port']):
self.shutdown(4, 'The ports are not available to bind')
self.config['interface'] = ip_bracket(self.config['interface'])
migrations.migrate_paths(self.config)
# Late import so logging works correctly
import salt.master
self.master = salt.master.Master(self.config)
else:
# Add a udp port check here
import salt.daemons.flo
self.master = salt.daemons.flo.IofloMaster(self.config)
self.daemonize_if_required()
self.set_pidfile()
salt.utils.process.notify_systemd()
def start(self):
'''
Start the actual master.
If sub-classed, don't **ever** forget to run:
super(YourSubClass, self).start()
NOTE: Run any required code before calling `super()`.
'''
super(Master, self).start()
if check_user(self.config['user']):
self.action_log_info('Starting up')
self.verify_hash_type()
self.master.start()
def shutdown(self, exitcode=0, exitmsg=None):
'''
If sub-classed, run any shutdown operations on this method.
'''
self.shutdown_log_info()
msg = 'The salt master is shutdown. '
if exitmsg is not None:
exitmsg = msg + exitmsg
else:
exitmsg = msg.strip()
super(Master, self).shutdown(exitcode, exitmsg)
class Minion(parsers.MinionOptionParser, DaemonsMixin): # pylint: disable=no-init
'''
Create a minion server
'''
def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument
# escalate signal to the process manager processes
self.minion.stop(signum)
super(Minion, self)._handle_signals(signum, sigframe)
# pylint: disable=no-member
def prepare(self):
'''
Run the preparation sequence required to start a salt minion.
If sub-classed, don't **ever** forget to run:
super(YourSubClass, self).prepare()
'''
super(Minion, self).prepare()
try:
if self.config['verify_env']:
confd = self.config.get('default_include')
if confd:
# If 'default_include' is specified in config, then use it
if '*' in confd:
# Value is of the form "minion.d/*.conf"
confd = os.path.dirname(confd)
if not os.path.isabs(confd):
# If configured 'default_include' is not an absolute
# path, consider it relative to folder of 'conf_file'
# (/etc/salt by default)
confd = os.path.join(
os.path.dirname(self.config['conf_file']), confd
)
else:
confd = os.path.join(
os.path.dirname(self.config['conf_file']), 'minion.d'
)
v_dirs = [
self.config['pki_dir'],
self.config['cachedir'],
self.config['sock_dir'],
self.config['extension_modules'],
confd,
]
if self.config.get('transport') == 'raet':
v_dirs.append(os.path.join(self.config['pki_dir'], 'accepted'))
v_dirs.append(os.path.join(self.config['pki_dir'], 'pending'))
v_dirs.append(os.path.join(self.config['pki_dir'], 'rejected'))
v_dirs.append(os.path.join(self.config['cachedir'], 'raet'))
verify_env(
v_dirs,
self.config['user'],
permissive=self.config['permissive_pki_access'],
pki_dir=self.config['pki_dir'],
)
except OSError as error:
self.environment_failure(error)
self.setup_logfile_logger()
verify_log(self.config)
log.info(
'Setting up the Salt Minion "{0}"'.format(
self.config['id']
)
)
migrations.migrate_paths(self.config)
        # Bail out if we find a process running and it matches our pidfile
if self.check_running():
self.action_log_info('An instance is already running. Exiting')
self.shutdown(1)
transport = self.config.get('transport').lower()
# TODO: AIO core is separate from transport
if transport in ('zeromq', 'tcp', 'detect'):
# Late import so logging works correctly
import salt.minion
# If the minion key has not been accepted, then Salt enters a loop
# waiting for it, if we daemonize later then the minion could halt
# the boot process waiting for a key to be accepted on the master.
# This is the latest safe place to daemonize
self.daemonize_if_required()
self.set_pidfile()
if self.config.get('master_type') == 'func':
salt.minion.eval_master_func(self.config)
self.minion = salt.minion.MinionManager(self.config)
elif transport == 'raet':
import salt.daemons.flo
self.daemonize_if_required()
self.set_pidfile()
self.minion = salt.daemons.flo.IofloMinion(self.config)
else:
log.error(
'The transport \'{0}\' is not supported. Please use one of the following: '
'tcp, '
'raet, '
'or zeromq.'.format(transport)
)
self.shutdown(1)
def start(self):
'''
Start the actual minion.
If sub-classed, don't **ever** forget to run:
super(YourSubClass, self).start()
NOTE: Run any required code before calling `super()`.
'''
super(Minion, self).start()
try:
if check_user(self.config['user']):
self.action_log_info('Starting up')
self.verify_hash_type()
self.minion.tune_in()
if self.minion.restart:
raise SaltClientError('Minion could not connect to Master')
except (KeyboardInterrupt, SaltSystemExit) as error:
self.action_log_info('Stopping')
if isinstance(error, KeyboardInterrupt):
log.warning('Exiting on Ctrl-c')
self.shutdown()
else:
log.error(str(error))
self.shutdown(error.code)
def call(self, cleanup_protecteds):
'''
Start the actual minion as a caller minion.
        cleanup_protecteds is a list of yard host addresses that should not be
        cleaned up. This fixes a race condition when the salt-caller minion starts up.
If sub-classed, don't **ever** forget to run:
super(YourSubClass, self).start()
NOTE: Run any required code before calling `super()`.
'''
try:
self.prepare()
if check_user(self.config['user']):
self.minion.opts['__role'] = kinds.APPL_KIND_NAMES[kinds.applKinds.caller]
self.minion.opts['raet_cleanup_protecteds'] = cleanup_protecteds
self.minion.call_in()
except (KeyboardInterrupt, SaltSystemExit) as exc:
self.action_log_info('Stopping')
if isinstance(exc, KeyboardInterrupt):
log.warning('Exiting on Ctrl-c')
self.shutdown()
else:
log.error(str(exc))
self.shutdown(exc.code)
def shutdown(self, exitcode=0, exitmsg=None):
'''
If sub-classed, run any shutdown operations on this method.
:param exitcode
:param exitmsg
'''
self.action_log_info('Shutting down')
if hasattr(self, 'minion'):
self.minion.destroy()
super(Minion, self).shutdown(
exitcode, ('The Salt {0} is shutdown. {1}'.format(
self.__class__.__name__, (exitmsg or '')).strip()))
# pylint: enable=no-member
class ProxyMinion(parsers.ProxyMinionOptionParser, DaemonsMixin): # pylint: disable=no-init
'''
Create a proxy minion server
'''
# pylint: disable=no-member
def prepare(self):
'''
Run the preparation sequence required to start a salt minion.
If sub-classed, don't **ever** forget to run:
super(YourSubClass, self).prepare()
'''
super(ProxyMinion, self).prepare()
if not self.values.proxyid:
self.error('salt-proxy requires --proxyid')
# Proxies get their ID from the command line. This may need to change in
# the future.
# We used to set this here. Now it is set in ProxyMinionOptionParser
# by passing it via setup_config to config.minion_config
# self.config['id'] = self.values.proxyid
try:
if self.config['verify_env']:
confd = self.config.get('default_include')
if confd:
# If 'default_include' is specified in config, then use it
if '*' in confd:
# Value is of the form "minion.d/*.conf"
confd = os.path.dirname(confd)
if not os.path.isabs(confd):
# If configured 'default_include' is not an absolute
# path, consider it relative to folder of 'conf_file'
# (/etc/salt by default)
confd = os.path.join(
os.path.dirname(self.config['conf_file']), confd
)
else:
confd = os.path.join(
os.path.dirname(self.config['conf_file']), 'proxy.d'
)
v_dirs = [
self.config['pki_dir'],
self.config['cachedir'],
self.config['sock_dir'],
self.config['extension_modules'],
confd,
]
if self.config.get('transport') == 'raet':
v_dirs.append(os.path.join(self.config['pki_dir'], 'accepted'))
v_dirs.append(os.path.join(self.config['pki_dir'], 'pending'))
v_dirs.append(os.path.join(self.config['pki_dir'], 'rejected'))
v_dirs.append(os.path.join(self.config['cachedir'], 'raet'))
verify_env(
v_dirs,
self.config['user'],
permissive=self.config['permissive_pki_access'],
pki_dir=self.config['pki_dir'],
)
except OSError as error:
self.environment_failure(error)
self.setup_logfile_logger()
verify_log(self.config)
self.action_log_info('Setting up "{0}"'.format(self.config['id']))
migrations.migrate_paths(self.config)
# TODO: AIO core is separate from transport
if self.config['transport'].lower() in ('zeromq', 'tcp'):
# Late import so logging works correctly
import salt.minion
# If the minion key has not been accepted, then Salt enters a loop
# waiting for it, if we daemonize later then the minion could halt
# the boot process waiting for a key to be accepted on the master.
# This is the latest safe place to daemonize
self.daemonize_if_required()
self.set_pidfile()
# TODO Proxy minions don't currently support failover
self.minion = salt.minion.ProxyMinion(self.config)
else:
# For proxy minions, this doesn't work yet.
import salt.daemons.flo
self.daemonize_if_required()
self.set_pidfile()
self.minion = salt.daemons.flo.IofloMinion(self.config)
def start(self):
'''
Start the actual proxy minion.
If sub-classed, don't **ever** forget to run:
super(YourSubClass, self).start()
NOTE: Run any required code before calling `super()`.
'''
super(ProxyMinion, self).start()
try:
if check_user(self.config['user']):
log.info('The proxy minion is starting up')
self.verify_hash_type()
self.action_log_info('Starting up')
self.minion.tune_in()
except (KeyboardInterrupt, SaltSystemExit) as exc:
self.action_log_info('Stopping')
if isinstance(exc, KeyboardInterrupt):
log.warning('Exiting on Ctrl-c')
self.shutdown()
else:
log.error(str(exc))
self.shutdown(exc.code)
def shutdown(self, exitcode=0, exitmsg=None):
'''
If sub-classed, run any shutdown operations on this method.
:param exitcode
:param exitmsg
'''
if hasattr(self, 'minion') and 'proxymodule' in self.minion.opts:
proxy_fn = self.minion.opts['proxymodule'].loaded_base_name + '.shutdown'
self.minion.opts['proxymodule'][proxy_fn](self.minion.opts)
self.action_log_info('Shutting down')
super(ProxyMinion, self).shutdown(
exitcode, ('The Salt {0} is shutdown. {1}'.format(
self.__class__.__name__, (exitmsg or '')).strip()))
# pylint: enable=no-member
class Syndic(parsers.SyndicOptionParser, DaemonsMixin): # pylint: disable=no-init
'''
Create a syndic server
'''
def prepare(self):
'''
Run the preparation sequence required to start a salt syndic minion.
If sub-classed, don't **ever** forget to run:
super(YourSubClass, self).prepare()
'''
super(Syndic, self).prepare()
try:
if self.config['verify_env']:
verify_env(
[
self.config['pki_dir'],
self.config['cachedir'],
self.config['sock_dir'],
self.config['extension_modules'],
],
self.config['user'],
permissive=self.config['permissive_pki_access'],
pki_dir=self.config['pki_dir'],
)
except OSError as error:
self.environment_failure(error)
self.setup_logfile_logger()
verify_log(self.config)
self.action_log_info('Setting up "{0}"'.format(self.config['id']))
# Late import so logging works correctly
import salt.minion
self.daemonize_if_required()
self.syndic = salt.minion.SyndicManager(self.config)
self.set_pidfile()
def start(self):
'''
Start the actual syndic.
If sub-classed, don't **ever** forget to run:
super(YourSubClass, self).start()
NOTE: Run any required code before calling `super()`.
'''
super(Syndic, self).start()
if check_user(self.config['user']):
self.action_log_info('Starting up')
self.verify_hash_type()
try:
self.syndic.tune_in()
except KeyboardInterrupt:
self.action_log_info('Stopping')
self.shutdown()
def shutdown(self, exitcode=0, exitmsg=None):
'''
If sub-classed, run any shutdown operations on this method.
:param exitcode
:param exitmsg
'''
self.action_log_info('Shutting down')
super(Syndic, self).shutdown(
exitcode, ('The Salt {0} is shutdown. {1}'.format(
self.__class__.__name__, (exitmsg or '')).strip()))
|
the-stack_0_27484
|
# -*- coding: UTF-8 -*-
import time
import json
from multiprocessing import Value
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import dash_html_components as html
import dash_core_components as dcc
from dash import Dash
from dash.dependencies import Input, Output
from dash.exceptions import PreventUpdate
from .IntegrationTests import IntegrationTests
from .utils import wait_for
TIMEOUT = 20
class Tests(IntegrationTests):
def setUp(self):
pass
def wait_for_element_by_css_selector(self, selector, timeout=TIMEOUT):
return WebDriverWait(self.driver, timeout).until(
EC.presence_of_element_located((By.CSS_SELECTOR, selector)),
'Could not find element with selector "{}"'.format(selector),
)
def wait_for_text_to_equal(self, selector, assertion_text, timeout=TIMEOUT):
self.wait_for_element_by_css_selector(selector)
WebDriverWait(self.driver, timeout).until(
lambda *args: (
(
str(self.wait_for_element_by_css_selector(selector).text)
== assertion_text
)
or (
str(
self.wait_for_element_by_css_selector(selector).get_attribute(
"value"
)
)
== assertion_text
)
),
"Element '{}' text expects to equal '{}' but it didn't".format(
selector, assertion_text
),
)
def click_undo(self):
undo_selector = "._dash-undo-redo span:first-child div:last-child"
undo = self.wait_for_element_by_css_selector(undo_selector)
self.wait_for_text_to_equal(undo_selector, "undo")
undo.click()
def click_redo(self):
redo_selector = "._dash-undo-redo span:last-child div:last-child"
self.wait_for_text_to_equal(redo_selector, "redo")
redo = self.wait_for_element_by_css_selector(redo_selector)
redo.click()
def check_undo_redo_exist(self, has_undo, has_redo):
selector = "._dash-undo-redo span div:last-child"
els = self.driver.find_elements_by_css_selector(selector)
texts = (["undo"] if has_undo else []) + (["redo"] if has_redo else [])
self.assertEqual(len(els), len(texts))
for el, text in zip(els, texts):
self.assertEqual(el.text, text)
def test_undo_redo(self):
app = Dash(__name__, show_undo_redo=True)
app.layout = html.Div([dcc.Input(id="a"), html.Div(id="b")])
@app.callback(Output("b", "children"), [Input("a", "value")])
def set_b(a):
return a
self.startServer(app)
a = self.wait_for_element_by_css_selector("#a")
a.send_keys("xyz")
self.wait_for_text_to_equal("#b", "xyz")
self.check_undo_redo_exist(True, False)
self.click_undo()
self.wait_for_text_to_equal("#b", "xy")
self.check_undo_redo_exist(True, True)
self.click_undo()
self.wait_for_text_to_equal("#b", "x")
self.check_undo_redo_exist(True, True)
self.click_redo()
self.wait_for_text_to_equal("#b", "xy")
self.check_undo_redo_exist(True, True)
self.percy_snapshot(name="undo-redo")
self.click_undo()
self.click_undo()
self.wait_for_text_to_equal("#b", "")
self.check_undo_redo_exist(False, True)
def test_no_undo_redo(self):
app = Dash(__name__)
app.layout = html.Div([dcc.Input(id="a"), html.Div(id="b")])
@app.callback(Output("b", "children"), [Input("a", "value")])
def set_b(a):
return a
self.startServer(app)
a = self.wait_for_element_by_css_selector("#a")
a.send_keys("xyz")
self.wait_for_text_to_equal("#b", "xyz")
toolbar = self.driver.find_elements_by_css_selector("._dash-undo-redo")
self.assertEqual(len(toolbar), 0)
def test_array_of_falsy_child(self):
app = Dash(__name__)
app.layout = html.Div(id="nully-wrapper", children=[0])
self.startServer(app)
self.wait_for_text_to_equal("#nully-wrapper", "0")
self.assertTrue(self.is_console_clean())
def test_of_falsy_child(self):
app = Dash(__name__)
app.layout = html.Div(id="nully-wrapper", children=0)
self.startServer(app)
self.wait_for_text_to_equal("#nully-wrapper", "0")
self.assertTrue(self.is_console_clean())
def test_event_properties(self):
app = Dash(__name__)
app.layout = html.Div(
[html.Button("Click Me", id="button"), html.Div(id="output")]
)
call_count = Value("i", 0)
@app.callback(Output("output", "children"), [Input("button", "n_clicks")])
def update_output(n_clicks):
if not n_clicks:
raise PreventUpdate
call_count.value += 1
return "Click"
self.startServer(app)
btn = self.driver.find_element_by_id("button")
output = lambda: self.driver.find_element_by_id("output")
self.assertEqual(call_count.value, 0)
self.assertEqual(output().text, "")
btn.click()
wait_for(lambda: output().text == "Click")
self.assertEqual(call_count.value, 1)
def test_chained_dependencies_direct_lineage(self):
app = Dash(__name__)
app.layout = html.Div(
[
dcc.Input(id="input-1", value="input 1"),
dcc.Input(id="input-2"),
html.Div("test", id="output"),
]
)
input1 = lambda: self.driver.find_element_by_id("input-1")
input2 = lambda: self.driver.find_element_by_id("input-2")
output = lambda: self.driver.find_element_by_id("output")
call_counts = {"output": Value("i", 0), "input-2": Value("i", 0)}
@app.callback(Output("input-2", "value"), [Input("input-1", "value")])
def update_input(input1):
call_counts["input-2"].value += 1
return "<<{}>>".format(input1)
@app.callback(
Output("output", "children"),
[Input("input-1", "value"), Input("input-2", "value")],
)
def update_output(input1, input2):
call_counts["output"].value += 1
return "{} + {}".format(input1, input2)
self.startServer(app)
wait_for(lambda: call_counts["output"].value == 1)
wait_for(lambda: call_counts["input-2"].value == 1)
self.assertEqual(input1().get_attribute("value"), "input 1")
self.assertEqual(input2().get_attribute("value"), "<<input 1>>")
self.assertEqual(output().text, "input 1 + <<input 1>>")
input1().send_keys("x")
wait_for(lambda: call_counts["output"].value == 2)
wait_for(lambda: call_counts["input-2"].value == 2)
self.assertEqual(input1().get_attribute("value"), "input 1x")
self.assertEqual(input2().get_attribute("value"), "<<input 1x>>")
self.assertEqual(output().text, "input 1x + <<input 1x>>")
input2().send_keys("y")
wait_for(lambda: call_counts["output"].value == 3)
wait_for(lambda: call_counts["input-2"].value == 2)
self.assertEqual(input1().get_attribute("value"), "input 1x")
self.assertEqual(input2().get_attribute("value"), "<<input 1x>>y")
self.assertEqual(output().text, "input 1x + <<input 1x>>y")
def test_chained_dependencies_branched_lineage(self):
app = Dash(__name__)
app.layout = html.Div(
[
dcc.Input(id="grandparent", value="input 1"),
dcc.Input(id="parent-a"),
dcc.Input(id="parent-b"),
html.Div(id="child-a"),
html.Div(id="child-b"),
]
)
parenta = lambda: self.driver.find_element_by_id("parent-a")
parentb = lambda: self.driver.find_element_by_id("parent-b")
childa = lambda: self.driver.find_element_by_id("child-a")
childb = lambda: self.driver.find_element_by_id("child-b")
call_counts = {
"parent-a": Value("i", 0),
"parent-b": Value("i", 0),
"child-a": Value("i", 0),
"child-b": Value("i", 0),
}
@app.callback(Output("parent-a", "value"), [Input("grandparent", "value")])
def update_parenta(value):
call_counts["parent-a"].value += 1
return "a: {}".format(value)
@app.callback(Output("parent-b", "value"), [Input("grandparent", "value")])
def update_parentb(value):
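# delay parent-b so its update reaches the renderer after parent-a's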
time.sleep(0.5)
call_counts["parent-b"].value += 1
return "b: {}".format(value)
@app.callback(
Output("child-a", "children"),
[Input("parent-a", "value"), Input("parent-b", "value")],
)
def update_childa(parenta_value, parentb_value):
time.sleep(1)
call_counts["child-a"].value += 1
return "{} + {}".format(parenta_value, parentb_value)
@app.callback(
Output("child-b", "children"),
[
Input("parent-a", "value"),
Input("parent-b", "value"),
Input("grandparent", "value"),
],
)
def update_childb(parenta_value, parentb_value, grandparent_value):
call_counts["child-b"].value += 1
return "{} + {} + {}".format(
parenta_value, parentb_value, grandparent_value
)
self.startServer(app)
wait_for(lambda: childa().text == "a: input 1 + b: input 1")
wait_for(lambda: childb().text == "a: input 1 + b: input 1 + input 1")
time.sleep(1) # wait for potential requests of app to settle down
self.assertEqual(parenta().get_attribute("value"), "a: input 1")
self.assertEqual(parentb().get_attribute("value"), "b: input 1")
self.assertEqual(call_counts["parent-a"].value, 1)
self.assertEqual(call_counts["parent-b"].value, 1)
self.assertEqual(call_counts["child-a"].value, 1)
self.assertEqual(call_counts["child-b"].value, 1)
def test_removing_component_while_its_getting_updated(self):
app = Dash(__name__)
app.layout = html.Div(
[
dcc.RadioItems(
id="toc",
options=[{"label": i, "value": i} for i in ["1", "2"]],
value="1",
),
html.Div(id="body"),
]
)
app.config.suppress_callback_exceptions = True
call_counts = {"body": Value("i", 0), "button-output": Value("i", 0)}
@app.callback(Output("body", "children"), [Input("toc", "value")])
def update_body(chapter):
call_counts["body"].value += 1
if chapter == "1":
return [
html.Div("Chapter 1"),
html.Button("clicking this button takes forever", id="button"),
html.Div(id="button-output"),
]
elif chapter == "2":
return "Chapter 2"
else:
raise Exception("chapter is {}".format(chapter))
@app.callback(
Output("button-output", "children"), [Input("button", "n_clicks")]
)
def this_callback_takes_forever(n_clicks):
if not n_clicks:
# initial value is quick, only new value is slow
# also don't let the initial value increment call_counts
return "Initial Value"
time.sleep(5)
call_counts["button-output"].value += 1
return "New value!"
body = lambda: self.driver.find_element_by_id("body")
self.startServer(app)
wait_for(lambda: call_counts["body"].value == 1)
time.sleep(0.5)
self.driver.find_element_by_id("button").click()
# while that callback is resolving, switch the chapter,
# hiding the `button-output` tag
def chapter2_assertions():
wait_for(lambda: body().text == "Chapter 2")
layout = self.driver.execute_script(
"return JSON.parse(JSON.stringify("
"window.store.getState().layout"
"))"
)
dcc_radio = layout["props"]["children"][0]
html_body = layout["props"]["children"][1]
self.assertEqual(dcc_radio["props"]["id"], "toc")
self.assertEqual(dcc_radio["props"]["value"], "2")
self.assertEqual(html_body["props"]["id"], "body")
self.assertEqual(html_body["props"]["children"], "Chapter 2")
(self.driver.find_elements_by_css_selector('input[type="radio"]')[1]).click()
chapter2_assertions()
self.assertEqual(call_counts["button-output"].value, 0)
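# The slow button callback is still running; its output was removed, but it should still
# complete exactly once (checked below) and leave the console clean.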
time.sleep(5)
wait_for(lambda: call_counts["button-output"].value == 1)
time.sleep(2) # liberally wait for the front-end to process request
chapter2_assertions()
self.assertTrue(self.is_console_clean())
def test_rendering_layout_calls_callback_once_per_output(self):
app = Dash(__name__)
call_count = Value("i", 0)
app.config["suppress_callback_exceptions"] = True
app.layout = html.Div(
[
html.Div(
[
dcc.Input(value="Input {}".format(i), id="input-{}".format(i))
for i in range(10)
]
),
html.Div(id="container"),
dcc.RadioItems(),
]
)
@app.callback(
Output("container", "children"),
[Input("input-{}".format(i), "value") for i in range(10)],
)
def dynamic_output(*args):
call_count.value += 1
return json.dumps(args, indent=2)
self.startServer(app)
time.sleep(5)
self.percy_snapshot(name="test_rendering_layout_calls_callback_once_per_output")
self.assertEqual(call_count.value, 1)
def test_rendering_new_content_calls_callback_once_per_output(self):
app = Dash(__name__)
call_count = Value("i", 0)
app.config["suppress_callback_exceptions"] = True
app.layout = html.Div(
[
html.Button(
id="display-content", children="Display Content", n_clicks=0
),
html.Div(id="container"),
dcc.RadioItems(),
]
)
@app.callback(
Output("container", "children"), [Input("display-content", "n_clicks")]
)
def display_output(n_clicks):
if n_clicks == 0:
return ""
return html.Div(
[
html.Div(
[
dcc.Input(
value="Input {}".format(i), id="input-{}".format(i)
)
for i in range(10)
]
),
html.Div(id="dynamic-output"),
]
)
@app.callback(
Output("dynamic-output", "children"),
[Input("input-{}".format(i), "value") for i in range(10)],
)
def dynamic_output(*args):
call_count.value += 1
return json.dumps(args, indent=2)
self.startServer(app)
self.wait_for_element_by_css_selector("#display-content").click()
time.sleep(5)
self.percy_snapshot(
name="test_rendering_new_content_calls_callback_once_per_output"
)
self.assertEqual(call_count.value, 1)
def test_callbacks_called_multiple_times_and_out_of_order_multi_output(self):
app = Dash(__name__)
app.layout = html.Div(
[
html.Button(id="input", n_clicks=0),
html.Div(id="output1"),
html.Div(id="output2"),
]
)
call_count = Value("i", 0)
@app.callback(
[Output("output1", "children"), Output("output2", "children")],
[Input("input", "n_clicks")],
)
def update_output(n_clicks):
call_count.value = call_count.value + 1
if n_clicks == 1:
time.sleep(4)
return n_clicks, n_clicks + 1
self.startServer(app)
button = self.wait_for_element_by_css_selector("#input")
button.click()
button.click()
time.sleep(8)
self.percy_snapshot(
name="test_callbacks_called_multiple_times" "_and_out_of_order_multi_output"
)
self.assertEqual(call_count.value, 3)
self.wait_for_text_to_equal("#output1", "2")
self.wait_for_text_to_equal("#output2", "3")
ready = self.driver.execute_script(
"""
return !window.store.getState().isLoading;
"""
)
assert ready
def test_callbacks_with_shared_grandparent(self):
app = Dash()
app.layout = html.Div(
[
html.Div(id="session-id", children="id"),
dcc.Dropdown(id="dropdown-1"),
dcc.Dropdown(id="dropdown-2"),
]
)
options = [{"value": "a", "label": "a"}]
call_counts = {"dropdown_1": Value("i", 0), "dropdown_2": Value("i", 0)}
@app.callback(
Output("dropdown-1", "options"),
[Input("dropdown-1", "value"), Input("session-id", "children")],
)
def dropdown_1(value, session_id):
call_counts["dropdown_1"].value += 1
return options
@app.callback(
Output("dropdown-2", "options"),
[Input("dropdown-2", "value"), Input("session-id", "children")],
)
def dropdown_2(value, session_id):
call_counts["dropdown_2"].value += 1
return options
self.startServer(app)
self.wait_for_element_by_css_selector("#session-id")
time.sleep(2)
self.assertEqual(call_counts["dropdown_1"].value, 1)
self.assertEqual(call_counts["dropdown_2"].value, 1)
self.assertTrue(self.is_console_clean())
def test_callbacks_triggered_on_generated_output(self):
app = Dash()
app.config["suppress_callback_exceptions"] = True
call_counts = {"tab1": Value("i", 0), "tab2": Value("i", 0)}
app.layout = html.Div(
[
dcc.Dropdown(
id="outer-controls",
options=[{"label": i, "value": i} for i in ["a", "b"]],
value="a",
),
dcc.RadioItems(
options=[
{"label": "Tab 1", "value": 1},
{"label": "Tab 2", "value": 2},
],
value=1,
id="tabs",
),
html.Div(id="tab-output"),
]
)
@app.callback(Output("tab-output", "children"), [Input("tabs", "value")])
def display_content(value):
return html.Div([html.Div(id="tab-{}-output".format(value))])
@app.callback(
Output("tab-1-output", "children"), [Input("outer-controls", "value")]
)
def display_tab1_output(value):
call_counts["tab1"].value += 1
return 'Selected "{}" in tab 1'.format(value)
@app.callback(
Output("tab-2-output", "children"), [Input("outer-controls", "value")]
)
def display_tab2_output(value):
call_counts["tab2"].value += 1
return 'Selected "{}" in tab 2'.format(value)
self.startServer(app)
self.wait_for_element_by_css_selector("#tab-output")
time.sleep(2)
self.assertEqual(call_counts["tab1"].value, 1)
self.assertEqual(call_counts["tab2"].value, 0)
self.wait_for_text_to_equal("#tab-output", 'Selected "a" in tab 1')
self.wait_for_text_to_equal("#tab-1-output", 'Selected "a" in tab 1')
(self.driver.find_elements_by_css_selector('input[type="radio"]')[1]).click()
time.sleep(2)
self.wait_for_text_to_equal("#tab-output", 'Selected "a" in tab 2')
self.wait_for_text_to_equal("#tab-2-output", 'Selected "a" in tab 2')
self.assertEqual(call_counts["tab1"].value, 1)
self.assertEqual(call_counts["tab2"].value, 1)
self.assertTrue(self.is_console_clean())
def test_initialization_with_overlapping_outputs(self):
app = Dash()
app.layout = html.Div(
[
html.Div(id="input-1", children="input-1"),
html.Div(id="input-2", children="input-2"),
html.Div(id="input-3", children="input-3"),
html.Div(id="input-4", children="input-4"),
html.Div(id="input-5", children="input-5"),
html.Div(id="output-1"),
html.Div(id="output-2"),
html.Div(id="output-3"),
html.Div(id="output-4"),
]
)
call_counts = {
"output-1": Value("i", 0),
"output-2": Value("i", 0),
"output-3": Value("i", 0),
"output-4": Value("i", 0),
}
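# Build one callback per output id; each closure increments its own entry in call_counts
# so the test can assert every output's callback ran exactly once.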
def generate_callback(outputid):
def callback(*args):
call_counts[outputid].value += 1
return "{}, {}".format(*args)
return callback
for i in range(1, 5):
outputid = "output-{}".format(i)
app.callback(
Output(outputid, "children"),
[
Input("input-{}".format(i), "children"),
Input("input-{}".format(i + 1), "children"),
],
)(generate_callback(outputid))
self.startServer(app)
self.wait_for_element_by_css_selector("#output-1")
time.sleep(5)
for i in range(1, 5):
outputid = "output-{}".format(i)
self.assertEqual(call_counts[outputid].value, 1)
self.wait_for_text_to_equal(
"#{}".format(outputid), "input-{}, input-{}".format(i, i + 1)
)
def test_generate_overlapping_outputs(self):
app = Dash()
app.config["suppress_callback_exceptions"] = True
block = html.Div(
[
html.Div(id="input-1", children="input-1"),
html.Div(id="input-2", children="input-2"),
html.Div(id="input-3", children="input-3"),
html.Div(id="input-4", children="input-4"),
html.Div(id="input-5", children="input-5"),
html.Div(id="output-1"),
html.Div(id="output-2"),
html.Div(id="output-3"),
html.Div(id="output-4"),
]
)
app.layout = html.Div([html.Div(id="input"), html.Div(id="container")])
call_counts = {
"container": Value("i", 0),
"output-1": Value("i", 0),
"output-2": Value("i", 0),
"output-3": Value("i", 0),
"output-4": Value("i", 0),
}
@app.callback(Output("container", "children"), [Input("input", "children")])
def display_output(*args):
call_counts["container"].value += 1
return block
def generate_callback(outputid):
def callback(*args):
call_counts[outputid].value += 1
return "{}, {}".format(*args)
return callback
for i in range(1, 5):
outputid = "output-{}".format(i)
app.callback(
Output(outputid, "children"),
[
Input("input-{}".format(i), "children"),
Input("input-{}".format(i + 1), "children"),
],
)(generate_callback(outputid))
self.startServer(app)
wait_for(lambda: call_counts["container"].value == 1)
self.wait_for_element_by_css_selector("#output-1")
time.sleep(5)
for i in range(1, 5):
outputid = "output-{}".format(i)
self.assertEqual(call_counts[outputid].value, 1)
self.wait_for_text_to_equal(
"#{}".format(outputid), "input-{}, input-{}".format(i, i + 1)
)
self.assertEqual(call_counts["container"].value, 1)
def test_multiple_properties_update_at_same_time_on_same_component(self):
call_count = Value("i", 0)
timestamp_1 = Value("d", -5)
timestamp_2 = Value("d", -5)
app = Dash()
app.layout = html.Div(
[
html.Div(id="container"),
html.Button("Click", id="button-1", n_clicks=0, n_clicks_timestamp=-1),
html.Button("Click", id="button-2", n_clicks=0, n_clicks_timestamp=-1),
]
)
@app.callback(
Output("container", "children"),
[
Input("button-1", "n_clicks"),
Input("button-1", "n_clicks_timestamp"),
Input("button-2", "n_clicks"),
Input("button-2", "n_clicks_timestamp"),
],
)
def update_output(*args):
call_count.value += 1
timestamp_1.value = args[1]
timestamp_2.value = args[3]
return "{}, {}".format(args[0], args[2])
self.startServer(app)
self.wait_for_element_by_css_selector("#container")
time.sleep(2)
self.wait_for_text_to_equal("#container", "0, 0")
self.assertEqual(timestamp_1.value, -1)
self.assertEqual(timestamp_2.value, -1)
self.assertEqual(call_count.value, 1)
self.percy_snapshot("button initialization 1")
self.driver.find_element_by_css_selector("#button-1").click()
time.sleep(2)
self.wait_for_text_to_equal("#container", "1, 0")
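# n_clicks_timestamp is reported in milliseconds since the epoch; it should fall within the last 24 hours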
self.assertTrue(timestamp_1.value > ((time.time() - (24 * 60 * 60)) * 1000))
self.assertEqual(timestamp_2.value, -1)
self.assertEqual(call_count.value, 2)
self.percy_snapshot("button-1 click")
prev_timestamp_1 = timestamp_1.value
self.driver.find_element_by_css_selector("#button-2").click()
time.sleep(2)
self.wait_for_text_to_equal("#container", "1, 1")
self.assertEqual(timestamp_1.value, prev_timestamp_1)
self.assertTrue(timestamp_2.value > ((time.time() - 24 * 60 * 60) * 1000))
self.assertEqual(call_count.value, 3)
self.percy_snapshot("button-2 click")
prev_timestamp_2 = timestamp_2.value
self.driver.find_element_by_css_selector("#button-2").click()
time.sleep(2)
self.wait_for_text_to_equal("#container", "1, 2")
self.assertEqual(timestamp_1.value, prev_timestamp_1)
self.assertTrue(timestamp_2.value > prev_timestamp_2)
self.assertTrue(timestamp_2.value > timestamp_1.value)
self.assertEqual(call_count.value, 4)
self.percy_snapshot("button-2 click again")
def test_request_hooks(self):
app = Dash(__name__)
app.index_string = """<!DOCTYPE html>
<html>
<head>
{%metas%}
<title>{%title%}</title>
{%favicon%}
{%css%}
</head>
<body>
<div>Testing custom DashRenderer</div>
{%app_entry%}
<footer>
{%config%}
{%scripts%}
<script id="_dash-renderer" type="application/json">
const renderer = new DashRenderer({
request_pre: (payload) => {
var output = document.getElementById('output-pre')
var outputPayload = document.getElementById('output-pre-payload')
if(output) {
output.innerHTML = 'request_pre changed this text!';
}
if(outputPayload) {
outputPayload.innerHTML = JSON.stringify(payload);
}
},
request_post: (payload, response) => {
var output = document.getElementById('output-post')
var outputPayload = document.getElementById('output-post-payload')
var outputResponse = document.getElementById('output-post-response')
if(output) {
output.innerHTML = 'request_post changed this text!';
}
if(outputPayload) {
outputPayload.innerHTML = JSON.stringify(payload);
}
if(outputResponse) {
outputResponse.innerHTML = JSON.stringify(response);
}
}
})
</script>
</footer>
<div>With request hooks</div>
</body>
</html>"""
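# The custom DashRenderer hooks above write the request payload and response into the
# output-pre / output-post divs so the assertions below can inspect them.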
app.layout = html.Div(
[
dcc.Input(id="input", value="initial value"),
html.Div(
html.Div(
[
html.Div(id="output-1"),
html.Div(id="output-pre"),
html.Div(id="output-pre-payload"),
html.Div(id="output-post"),
html.Div(id="output-post-payload"),
html.Div(id="output-post-response"),
]
)
),
]
)
@app.callback(Output("output-1", "children"), [Input("input", "value")])
def update_output(value):
return value
self.startServer(app)
input1 = self.wait_for_element_by_css_selector("#input")
initialValue = input1.get_attribute("value")
action = ActionChains(self.driver)
action.click(input1)
action = action.send_keys(Keys.BACKSPACE * len(initialValue))
action.send_keys("fire request hooks").perform()
self.wait_for_text_to_equal("#output-1", "fire request hooks")
self.wait_for_text_to_equal("#output-pre", "request_pre changed this text!")
self.wait_for_text_to_equal("#output-post", "request_post changed this text!")
pre_payload = self.wait_for_element_by_css_selector("#output-pre-payload").text
post_payload = self.wait_for_element_by_css_selector(
"#output-post-payload"
).text
post_response = self.wait_for_element_by_css_selector(
"#output-post-response"
).text
self.assertEqual(
json.loads(pre_payload),
{
"output": "output-1.children",
"outputs": {"id": "output-1", "property": "children"},
"changedPropIds": ["input.value"],
"inputs": [
{"id": "input", "property": "value", "value": "fire request hooks"}
],
},
)
self.assertEqual(
json.loads(post_payload),
{
"output": "output-1.children",
"outputs": {"id": "output-1", "property": "children"},
"changedPropIds": ["input.value"],
"inputs": [
{"id": "input", "property": "value", "value": "fire request hooks"}
],
},
)
self.assertEqual(
json.loads(post_response), {"output-1": {"children": "fire request hooks"}}
)
self.percy_snapshot(name="request-hooks render")
def test_graphs_in_tabs_do_not_share_state(self):
app = Dash()
app.config.suppress_callback_exceptions = True
app.layout = html.Div(
[
dcc.Tabs(
id="tabs",
children=[
dcc.Tab(label="Tab 1", value="tab1", id="tab1"),
dcc.Tab(label="Tab 2", value="tab2", id="tab2"),
],
value="tab1",
),
# Tab content
html.Div(id="tab_content"),
]
)
tab1_layout = [
html.Div(
[
dcc.Graph(
id="graph1",
figure={
"data": [{"x": [1, 2, 3], "y": [5, 10, 6], "type": "bar"}]
},
)
]
),
html.Pre(id="graph1_info"),
]
tab2_layout = [
html.Div(
[
dcc.Graph(
id="graph2",
figure={
"data": [{"x": [4, 3, 2], "y": [5, 10, 6], "type": "bar"}]
},
)
]
),
html.Pre(id="graph2_info"),
]
@app.callback(
Output(component_id="graph1_info", component_property="children"),
[Input(component_id="graph1", component_property="clickData")],
)
def display_hover_data(hover_data):
return json.dumps(hover_data)
@app.callback(
Output(component_id="graph2_info", component_property="children"),
[Input(component_id="graph2", component_property="clickData")],
)
def display_hover_data_2(hover_data):
return json.dumps(hover_data)
@app.callback(Output("tab_content", "children"), [Input("tabs", "value")])
def render_content(tab):
if tab == "tab1":
return tab1_layout
elif tab == "tab2":
return tab2_layout
else:
return tab1_layout
self.startServer(app)
self.wait_for_element_by_css_selector("#graph1:not(.dash-graph--pending)")
self.driver.find_elements_by_css_selector("#graph1:not(.dash-graph--pending)")[
0
].click()
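# Clicking graph1 should report clickData for its second bar only; after switching tabs,
# clicking graph2 must produce graph2's own clickData rather than reusing graph1's state.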
graph_1_expected_clickdata = {
"points": [
{
"curveNumber": 0,
"pointNumber": 1,
"pointIndex": 1,
"x": 2,
"y": 10,
"label": 2,
"value": 10,
}
]
}
graph_2_expected_clickdata = {
"points": [
{
"curveNumber": 0,
"pointNumber": 1,
"pointIndex": 1,
"x": 3,
"y": 10,
"label": 3,
"value": 10,
}
]
}
self.wait_for_text_to_equal(
"#graph1_info", json.dumps(graph_1_expected_clickdata)
)
self.driver.find_elements_by_css_selector("#tab2")[0].click()
self.wait_for_element_by_css_selector("#graph2:not(.dash-graph--pending)")
self.driver.find_elements_by_css_selector("#graph2:not(.dash-graph--pending)")[
0
].click()
self.wait_for_text_to_equal(
"#graph2_info", json.dumps(graph_2_expected_clickdata)
)
|
the-stack_0_27487
|
from django.contrib.auth.models import User
from rest_framework import permissions, status
from rest_framework.decorators import (
api_view,
authentication_classes,
permission_classes,
throttle_classes,
)
from rest_framework.response import Response
from rest_framework_expiring_authtoken.authentication import (
ExpiringTokenAuthentication,
)
from rest_framework.throttling import UserRateThrottle
from accounts.permissions import HasVerifiedEmail
from base.utils import team_paginated_queryset
from challenges.models import Challenge
from challenges.serializers import ChallengeSerializer
from challenges.utils import (
get_challenge_model,
is_user_in_allowed_email_domains,
is_user_in_blocked_email_domains,
)
from hosts.utils import is_user_a_host_of_challenge
from .filters import ParticipantTeamsFilter
from .models import Participant, ParticipantTeam
from .serializers import (
InviteParticipantToTeamSerializer,
ParticipantTeamSerializer,
ChallengeParticipantTeam,
ChallengeParticipantTeamList,
ChallengeParticipantTeamListSerializer,
ParticipantTeamDetailSerializer,
)
from .utils import (
get_list_of_challenges_for_participant_team,
get_list_of_challenges_participated_by_a_user,
get_participant_team_of_user_for_a_challenge,
has_user_participated_in_challenge,
is_user_part_of_participant_team,
)
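# Function-based DRF views for creating, listing, updating and deleting participant teams,
# and for inviting or removing team members.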
@api_view(["GET", "POST"])
@throttle_classes([UserRateThrottle])
@permission_classes((permissions.IsAuthenticated, HasVerifiedEmail))
@authentication_classes((ExpiringTokenAuthentication,))
def participant_team_list(request):
if request.method == "GET":
participant_teams_id = Participant.objects.filter(
user_id=request.user
).values_list("team_id", flat=True)
participant_teams = ParticipantTeam.objects.filter(
id__in=participant_teams_id
).order_by("-id")
filtered_teams = ParticipantTeamsFilter(
request.GET, queryset=participant_teams
)
paginator, result_page = team_paginated_queryset(
filtered_teams.qs, request
)
serializer = ParticipantTeamDetailSerializer(result_page, many=True)
response_data = serializer.data
return paginator.get_paginated_response(response_data)
elif request.method == "POST":
serializer = ParticipantTeamSerializer(
data=request.data, context={"request": request}
)
if serializer.is_valid():
serializer.save()
response_data = serializer.data
participant_team = serializer.instance
participant = Participant(
user=request.user,
status=Participant.SELF,
team=participant_team,
)
participant.save()
return Response(response_data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(["GET"])
@throttle_classes([UserRateThrottle])
@permission_classes((permissions.IsAuthenticated, HasVerifiedEmail))
@authentication_classes((ExpiringTokenAuthentication,))
def get_participant_team_challenge_list(request, participant_team_pk):
"""
Returns the list of challenges in which the participant team has participated.
"""
try:
participant_team = ParticipantTeam.objects.get(pk=participant_team_pk)
except ParticipantTeam.DoesNotExist:
response_data = {"error": "Participant Team does not exist"}
return Response(response_data, status=status.HTTP_404_NOT_FOUND)
if request.method == "GET":
challenge = Challenge.objects.filter(
participant_teams=participant_team
).order_by("-id")
paginator, result_page = team_paginated_queryset(challenge, request)
serializer = ChallengeSerializer(
result_page, many=True, context={"request": request}
)
response_data = serializer.data
return paginator.get_paginated_response(response_data)
@api_view(["GET", "PUT", "PATCH", "DELETE"])
@throttle_classes([UserRateThrottle])
@permission_classes((permissions.IsAuthenticated, HasVerifiedEmail))
@authentication_classes((ExpiringTokenAuthentication,))
def participant_team_detail(request, pk):
try:
participant_team = ParticipantTeam.objects.get(pk=pk)
except ParticipantTeam.DoesNotExist:
response_data = {"error": "ParticipantTeam does not exist"}
return Response(response_data, status=status.HTTP_406_NOT_ACCEPTABLE)
if request.method == "GET":
serializer = ParticipantTeamDetailSerializer(participant_team)
response_data = serializer.data
return Response(response_data, status=status.HTTP_200_OK)
elif request.method in ["PUT", "PATCH"]:
if request.method == "PATCH":
serializer = ParticipantTeamSerializer(
participant_team,
data=request.data,
context={"request": request},
partial=True,
)
else:
serializer = ParticipantTeamSerializer(
participant_team,
data=request.data,
context={"request": request},
)
if serializer.is_valid():
serializer.save()
response_data = serializer.data
return Response(response_data, status=status.HTTP_200_OK)
else:
return Response(
serializer.errors, status=status.HTTP_400_BAD_REQUEST
)
elif request.method == "DELETE":
participant_team.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
@api_view(["POST"])
@throttle_classes([UserRateThrottle])
@permission_classes((permissions.IsAuthenticated, HasVerifiedEmail))
@authentication_classes((ExpiringTokenAuthentication,))
def invite_participant_to_team(request, pk):
try:
participant_team = ParticipantTeam.objects.get(pk=pk)
except ParticipantTeam.DoesNotExist:
response_data = {"error": "Participant Team does not exist"}
return Response(response_data, status=status.HTTP_404_NOT_FOUND)
if not is_user_part_of_participant_team(request.user, participant_team):
response_data = {"error": "You are not a member of this team!"}
return Response(response_data, status=status.HTTP_400_BAD_REQUEST)
email = request.data.get("email")
try:
user = User.objects.get(email=email)
except User.DoesNotExist:
response_data = {
"error": "User does not exist with this email address!"
}
return Response(response_data, status=status.HTTP_406_NOT_ACCEPTABLE)
participant = Participant.objects.filter(team=participant_team, user=user)
if participant.exists():
response_data = {"error": "User is already part of the team!"}
return Response(response_data, status=status.HTTP_406_NOT_ACCEPTABLE)
invited_user_participated_challenges = get_list_of_challenges_participated_by_a_user(
user
).values_list(
"id", flat=True
)
team_participated_challenges = get_list_of_challenges_for_participant_team(
[participant_team]
).values_list("id", flat=True)
if set(invited_user_participated_challenges) & set(
team_participated_challenges
):
"""
Check if the user has already participated in
challenges where the inviting participant has participated.
If this is the case, then the user cannot be invited since
he cannot participate in a challenge via two teams.
"""
response_data = {
"error": "Sorry, the invited user has already participated"
" in atleast one of the challenges which you are already a"
" part of. Please try creating a new team and then invite."
}
return Response(response_data, status=status.HTTP_406_NOT_ACCEPTABLE)
if len(team_participated_challenges) > 0:
for challenge_pk in team_participated_challenges:
challenge = get_challenge_model(challenge_pk)
if len(challenge.banned_email_ids) > 0:
# Check if team participants emails are banned
for (
participant_email
) in participant_team.get_all_participants_email():
if participant_email in challenge.banned_email_ids:
message = "You cannot invite as you're a part of {} team and it has been banned "
"from this challenge. Please contact the challenge host."
response_data = {
"error": message.format(participant_team.team_name)
}
return Response(
response_data,
status=status.HTTP_406_NOT_ACCEPTABLE,
)
# Check if invited user is banned
if email in challenge.banned_email_ids:
message = "You cannot invite as the invited user has been banned "
"from this challenge. Please contact the challenge host."
response_data = {"error": message}
return Response(
response_data, status=status.HTTP_406_NOT_ACCEPTABLE
)
# Check if user is in allowed list.
if len(challenge.allowed_email_domains) > 0:
if not is_user_in_allowed_email_domains(email, challenge_pk):
message = "Sorry, users with {} email domain(s) are only allowed to participate in this challenge."
domains = ""
for domain in challenge.allowed_email_domains:
domains = "{}{}{}".format(domains, "/", domain)
domains = domains[1:]
response_data = {"error": message.format(domains)}
return Response(
response_data, status=status.HTTP_406_NOT_ACCEPTABLE
)
# Check if user is in blocked list.
if is_user_in_blocked_email_domains(email, challenge_pk):
message = "Sorry, users with {} email domain(s) are not allowed to participate in this challenge."
domains = ""
for domain in challenge.blocked_email_domains:
domains = "{}{}{}".format(domains, "/", domain)
domains = domains[1:]
response_data = {"error": message.format(domains)}
return Response(
response_data, status=status.HTTP_406_NOT_ACCEPTABLE
)
serializer = InviteParticipantToTeamSerializer(
data=request.data,
context={"participant_team": participant_team, "request": request},
)
if serializer.is_valid():
serializer.save()
response_data = {
"message": "User has been successfully added to the team!"
}
return Response(response_data, status=status.HTTP_202_ACCEPTED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(["DELETE"])
@throttle_classes([UserRateThrottle])
@permission_classes((permissions.IsAuthenticated, HasVerifiedEmail))
@authentication_classes((ExpiringTokenAuthentication,))
def delete_participant_from_team(request, participant_team_pk, participant_pk):
"""
Deletes a participant from a Participant Team
"""
try:
participant_team = ParticipantTeam.objects.get(pk=participant_team_pk)
except ParticipantTeam.DoesNotExist:
response_data = {"error": "ParticipantTeam does not exist"}
return Response(response_data, status=status.HTTP_406_NOT_ACCEPTABLE)
try:
participant = Participant.objects.get(pk=participant_pk)
except Participant.DoesNotExist:
response_data = {"error": "Participant does not exist"}
return Response(response_data, status=status.HTTP_406_NOT_ACCEPTABLE)
if participant_team.created_by == request.user:
if (
participant.user == request.user
): # when the user tries to remove themselves
response_data = {
"error": "You are not allowed to remove yourself since you are admin. Please delete the team if you want to do so!"
} # noqa: ignore=E501
return Response(
response_data, status=status.HTTP_406_NOT_ACCEPTABLE
)
else:
participant.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
else:
response_data = {
"error": "Sorry, you do not have permissions to remove this participant"
}
return Response(response_data, status=status.HTTP_401_UNAUTHORIZED)
@api_view(["GET"])
@throttle_classes([UserRateThrottle])
@permission_classes((permissions.IsAuthenticated, HasVerifiedEmail))
@authentication_classes((ExpiringTokenAuthentication,))
def get_teams_and_corresponding_challenges_for_a_participant(
request, challenge_pk
):
"""
Returns list of teams and corresponding challenges for a participant
"""
# first get list of all the participants and teams related to the user
participant_objs = Participant.objects.filter(
user=request.user
).prefetch_related("team")
is_challenge_host = is_user_a_host_of_challenge(
user=request.user, challenge_pk=challenge_pk
)
challenge_participated_teams = []
for participant_obj in participant_objs:
participant_team = participant_obj.team
challenges = Challenge.objects.filter(
participant_teams=participant_team
)
if challenges.count():
for challenge in challenges:
challenge_participated_teams.append(
ChallengeParticipantTeam(challenge, participant_team)
)
else:
challenge = None
challenge_participated_teams.append(
ChallengeParticipantTeam(challenge, participant_team)
)
serializer = ChallengeParticipantTeamListSerializer(
ChallengeParticipantTeamList(challenge_participated_teams)
)
response_data = serializer.data
response_data["is_challenge_host"] = is_challenge_host
return Response(response_data, status=status.HTTP_200_OK)
@api_view(["DELETE"])
@throttle_classes([UserRateThrottle])
@permission_classes((permissions.IsAuthenticated, HasVerifiedEmail))
@authentication_classes((ExpiringTokenAuthentication,))
def remove_self_from_participant_team(request, participant_team_pk):
"""
A user can remove themselves from a participant team.
"""
try:
participant_team = ParticipantTeam.objects.get(pk=participant_team_pk)
except ParticipantTeam.DoesNotExist:
response_data = {"error": "ParticipantTeam does not exist!"}
return Response(response_data, status=status.HTTP_406_NOT_ACCEPTABLE)
try:
participant = Participant.objects.get(
user=request.user, team=participant_team
)
except Participant.DoesNotExist:
response_data = {"error": "Sorry, you do not belong to this team!"}
return Response(response_data, status=status.HTTP_401_UNAUTHORIZED)
if get_list_of_challenges_for_participant_team(
[participant_team]
).exists():
response_data = {
"error": "Sorry, you cannot delete this team since it has taken part in challenge(s)!"
}
return Response(response_data, status=status.HTTP_403_FORBIDDEN)
else:
participant.delete()
participants = Participant.objects.filter(team=participant_team)
if participants.count() == 0:
participant_team.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
@api_view(["GET"])
@throttle_classes([UserRateThrottle])
@permission_classes((permissions.IsAuthenticated, HasVerifiedEmail))
@authentication_classes((ExpiringTokenAuthentication,))
def get_participant_team_details_for_challenge(request, challenge_pk):
"""
API to get the participant team detail
Arguments:
request {HttpRequest} -- The request object
challenge_pk {[int]} -- Challenge primary key
Returns:
{dict} -- Participant team detail that has participated in the challenge
"""
challenge = get_challenge_model(challenge_pk)
if has_user_participated_in_challenge(request.user, challenge_pk):
participant_team = get_participant_team_of_user_for_a_challenge(
request.user, challenge_pk
)
serializer = ParticipantTeamSerializer(participant_team)
return Response(serializer.data, status=status.HTTP_200_OK)
else:
response_data = {
"error": f"The user {request.user.username} has not participanted in {challenge.title}"
}
return Response(response_data, status=status.HTTP_404_NOT_FOUND)
|
the-stack_0_27491
|
import cv2,h5py
import json
import random
import numpy as np
import os
from tqdm import tqdm
from skimage.transform import resize
from skimage.filters import gaussian
import matplotlib as mpl
mpl.use('TkAgg') # or whatever other backend that you want
import matplotlib.pyplot as plt
import utilities.config as config
import utilities.utils as utils
class Visual:
""" Visualize the results
"""
def __init__(self, results_path, finetune_results_path, img_path, dest_dir='visuals/%s' % (config.version),
show_words=True, skip_results=False, preprocess_results=True):
if skip_results:
answers_json = utils.get_file(split="val", answer=True)
self.results = {idx: [item['question_id'], item['multiple_choice_answer']] for idx, item in
enumerate(answers_json)}
else:
self.results = []
if not preprocess_results:
with open(os.path.join(results_path, 'cross_results.json'), 'r') as f:
self.results = json.load(f)
with open(os.path.join(results_path, 'qid_to_iq.json'), 'r') as f:
self.q_iq = json.load(f)
else:
questions_json = utils.get_file(split="val", question=True)
answers_json = utils.get_file(split="val", answer=True)
self.q_iq = {ques['question_id']: [ques['image_id'], ques['question'], ans['multiple_choice_answer'],
ans['question_type'], ans['answer_type']] for ques, ans in
zip(questions_json, answers_json)}
with open(os.path.join(results_path, 'qid_to_iq.json'), 'w') as f:
json.dump(self.q_iq, f)
with open(os.path.join(results_path, 'eval_results.json'), 'r') as f:
false_results = json.load(f)
with open(os.path.join(finetune_results_path, 'eval_results.json'), 'r') as f:
true_results = json.load(f)
for f_idx, f_ques in enumerate(false_results):
for t_idx, t_ques in enumerate(true_results):
if f_ques['question_id'] == t_ques['question_id']:
self.results.append({'question_id': t_ques['question_id'], 'true_answer': t_ques['answer'],
'false_answer': f_ques['answer'], 'true_idx': t_idx, 'false_idx': f_idx})
print(len(self.results))
with open(os.path.join(results_path, 'cross_results.json'), 'w') as f:
json.dump(self.results, f)
self.train_img_ids = utils.load_imageid(img_path + "train2014")
self.val_img_ids = utils.load_imageid(img_path + "val2014")
with h5py.File(os.path.join(finetune_results_path, 'att_weights.h5'), 'r') as f:
self.true_weight = f['weights'][:]
self.true_spatials = f['spatials'][:]
self.hints = f['hints'][:]
with h5py.File(os.path.join(results_path, 'att_weights.h5'), 'r') as f:
self.false_weight = f['weights'][:]
self.false_spatials = f['spatials'][:]
self.train_image_fmt = img_path + "train2014/COCO_train2014_%s.jpg"
self.val_image_fmt = img_path + "val2014/COCO_val2014_%s.jpg"
self.skip_results = skip_results
self.dest_dir = dest_dir
if not os.path.isdir(dest_dir + ''):
os.system('mkdir -p ' + dest_dir)
self.show_words = show_words
self.font = cv2.FONT_HERSHEY_SIMPLEX
self.length = self.__len__()
self.color = {"blue": (255, 0, 0), "yellow":(0, 255, 255), "green":(0, 255, 0), "red":(0, 0, 255)}
def _write_img(self, img, q_id, mode='normal'):
cv2.imwrite('{}/{}_{}.png'.format(self.dest_dir, str(q_id).zfill(12), mode), img * 255.0)
return
def __len__(self):
return len(self.results) - 1
def _att_visual(self, img, att_map, att_spatial, hint_score=None, answer='', type="", top=2):
"""
Visualize the attention map on the image and save the visualization.
"""
def draw_max_weightbox(map, color, shift=0.0):
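# Draw a rectangle (optionally expanded by `shift`) around the region with the highest
# remaining weight, label it with the rounded weight, and return the weights with that
# entry removed plus the max weight and its index.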
list_map = list(map)
weight = max(list_map)
idx = list_map.index(weight)
list_map.pop(idx)
spatial = att_spatial[idx]
spatial[:2] -= shift
spatial[2:] += shift
cv2.rectangle(img, (spatial[0], spatial[1]), (spatial[2], spatial[3]), self.color[color], 2)
cv2.putText(img, str(round(weight, 2)), (spatial[0], spatial[1]), self.font, 0.5, self.color["yellow"], 2)  # weight label in yellow
# box_width = spatial[2] - spatial[0]
# box_height = spatial[3] - spatial[1]
# print(weight)
# xx, yy = int(spatial[0] // region_w), int(spatial[1] // region_h)
# xw, yh = int(box_width // region_w), int(box_height // region_h)
## blend map box
# demo = np.zeros([32, 32])
# demo[yy: yy + yh, xx: xx + xw] = weight / 2.0
return list_map, weight, idx
img_h, img_w, img_c = img.shape
# print(img_h, img_w)
weights, shifts = [], []
# att_map -= att_map.min()
# if att_map.max() > 0:
# att_map /= att_map.max()
region_h = img_h // 32
region_w = img_w // 32
for i in range(top):
att_map, weight, idx = draw_max_weightbox(att_map, "red")
print("att", weight, idx)
# print("hint", hint_score[idx], idx)
hint_score, weight, idx = draw_max_weightbox(hint_score, "green", shift=20.0)
print("hint", weight, idx)
# weights.append(demo)
# shifts.append(idx)
# print(weight_min, weight_max)
img = downsample_image(img)
# for att_map in weights:
# img = get_blend_map(img, att_map)
if self.show_words:
img_h = img.shape[0]
if type == "True":
cv2.putText(img, answer, (20, img_h - 20), self.font, 0.7, (0, 255, 0), 2) # true green
elif type == "False":
cv2.putText(img, answer, (20, img_h - 20), self.font, 0.7, (0, 0, 255), 2) # false red
return img
def rcnn_cross_attention(self, sample_nums=0):
mod = 1000 if config.version == 'v2' else 10
if not sample_nums:
sample_nums = self.length
# samples = tqdm(random.sample(range(0, self.length), sample_nums), ncols=0)
samples = tqdm(range(1000, sample_nums), ncols=0)
for idx in samples:
q_id = self.results[idx]['question_id']
if q_id != 248744008:
continue
true_answer = self.results[idx]['true_answer']
false_answer = self.results[idx]['false_answer']
true_idx = self.results[idx]['true_idx']
false_idx = self.results[idx]['false_idx']
img_id, question, gta, qty, aty = self.q_iq[str(q_id)]
if aty == "yes/no":
continue
if aty == "number":
continue
if img_id in self.train_img_ids:
img_path = self.train_image_fmt % (str(img_id).zfill(12))
elif img_id in self.val_img_ids:
img_path = self.val_image_fmt % (str(img_id).zfill(12))
else:
print(img_id, 'error')
continue
print(img_path)
assert ((self.true_spatials[true_idx] == self.false_spatials[false_idx]).all())
# if qty != 'what color':
# continue
img = cv2.imread(img_path)
if self.show_words:
cv2.putText(img, question, (20, 20), self.font, 0.7, self.color['blue'], 2)
cv2.putText(img, gta, (20, 40), self.font, 0.7, self.color['green'], 2)
else:
print(question, gta, false_answer, true_answer)
bb = self.true_spatials[true_idx]
hint = self.hints[idx]
amap = self.true_weight[idx][:, 0]
self._write_img(self._att_visual(img.copy(), amap, bb, hint_score=hint*amap, answer=true_answer, type="True"), q_id, mode='True_Attention')
# amap = self.false_weight[idx][:, 0]
# self._write_img(self._att_visual(img.copy(), amap, bb, hint_score=hint*amap, answer=false_answer, type="False"), q_id,
# mode='False_Attention')
# amap = self.hints[idx]
# self._write_img(self._att_visual(img.copy(), amap, bb, true_answer, type="Hint"), q_id, mode='Hint')
self._write_img(downsample_image(img), q_id, mode='normal')
return
def rcnn_attention(self, sample_nums=0):
if not sample_nums:
sample_nums = self.length
samples = tqdm(random.sample(range(0, self.length), sample_nums), ncols=0)
for idx in samples:
q_id, answer = self.results[idx]
img_ids, questions, gta, qty = self.q_iq[q_id]
if qty != 'what color':
continue
img_path = self.img_path_fmt % (str(img_ids).zfill(12))
img = cv2.imread(img_path)
if self.show_words:
cv2.putText(img, questions, (20, 20), self.font, 0.7, (255, 0, 0), 2)
cv2.putText(img, gta, (20, 40), self.font, 0.7, (0, 255, 0), 2)
# bb = np.transpose(self.spatials[idx])
# amap = self.weights[idx]
# self._write_img(self._att_visual(img.copy(), amap, bb, answer, str(kd)), q_id, mode='att')
self._write_img(downsample_image(img), q_id, mode='normal')
# print('question:{}, predict_answer: {}, fact: {}'.format(questions,answer,str(kd)))
return
def downsample_image(img):
img_h, img_w, img_c = img.shape
img = resize(img, (int(448 * img_h / img_w), 448), mode='constant', anti_aliasing=True) # 22x22 regions
return img
def get_blend_map(img, att_map, blur=True, overlap=True):
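# Resize the attention map to the image size, optionally blur it, map it through the
# 'jet' colormap, and alpha-blend the resulting heatmap over the image.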
# att_map -= att_map.min()
att_map = resize(att_map, (img.shape[:2]), order=3, mode='constant', anti_aliasing=True)
if blur:
att_map = gaussian(att_map, 0.02 * max(img.shape[:2]))
cmap = plt.get_cmap('jet')
att_map_v = cmap(att_map)
att_map_v = np.delete(att_map_v, 3, 2)
if overlap:
att_map = 1 * (1 - att_map ** 0.7).reshape(att_map.shape + (1,)) * img + (att_map ** 0.7).reshape(
att_map.shape + (1,)) * att_map_v
return att_map
|
the-stack_0_27493
|
"""This module contains the general information for FaultDomainEp ManagedObject."""
from ...ucscentralmo import ManagedObject
from ...ucscentralcoremeta import UcsCentralVersion, MoPropertyMeta, MoMeta
from ...ucscentralmeta import VersionMeta
class FaultDomainEpConsts():
pass
class FaultDomainEp(ManagedObject):
"""This is FaultDomainEp class."""
consts = FaultDomainEpConsts()
naming_props = set([])
mo_meta = MoMeta("FaultDomainEp", "faultDomainEp", "domainEp", VersionMeta.Version131a, "InputOutput", 0xf, [], ["admin", "fault", "operations"], [u'computeSystem'], [u'faultDomainInst', u'faultSummaryInst'], ["Get"])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version131a, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version131a, MoPropertyMeta.READ_ONLY, 0x2, 0, 256, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version131a, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version131a, MoPropertyMeta.READ_WRITE, 0x8, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
}
prop_map = {
"childAction": "child_action",
"dn": "dn",
"rn": "rn",
"status": "status",
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.child_action = None
self.status = None
ManagedObject.__init__(self, "FaultDomainEp", parent_mo_or_dn, **kwargs)
|
the-stack_0_27494
|
import unittest
from hwt.code import If
from hwt.hdl.statements.statement import HwtSyntaxError
from hwt.interfaces.std import VectSignal, HandshakeSync, Signal
from hwt.interfaces.utils import addClkRstn, propagateClkRstn
from hwt.serializer.combLoopAnalyzer import CombLoopAnalyzer
from hwt.serializer.combLoopAnalyzer.tarjan import StronglyConnectedComponentSearchTarjan
from hwt.synthesizer.hObjList import HObjList
from hwt.synthesizer.unit import Unit
from hwt.synthesizer.utils import synthesised, to_rtl_str
from hwtLib.handshaked.reg import HandshakedReg
def freeze_set_of_sets(obj):
return frozenset(map(frozenset, obj))
class CntrCombLoop(Unit):
"""
A direct combinational loop which is detected immediately
"""
def _declr(self):
self.a = Signal()
self.c = VectSignal(8, signed=False)._m()
def _impl(self) -> None:
b = self._sig("b", self.c._dtype, def_val=0)
If(self.a,
b(b + 1)
)
self.c(b)
class HandshakeWire0(Unit):
def _declr(self):
addClkRstn(self)
self.dataIn = HandshakeSync()
self.dataOut = HandshakeSync()._m()
def _impl(self) -> None:
self.dataOut(self.dataIn)
class HandshakeWire1(HandshakeWire0):
"""
HandshakeWire0 with register on rd signal
"""
def _impl(self) -> None:
self.dataOut.vld(self.dataIn.vld)
rd = self._reg("rd", def_val=1)
rd(self.dataOut.rd)
self.dataIn.rd(rd)
class WrongHandshakeCheckExample0(HandshakeWire0):
def _impl(self):
dataIn, dataOut = self.dataIn, self.dataOut
dataIn.rd(dataIn.vld & dataOut.rd)
dataOut.vld(dataIn.vld)
class WrongHandshakeCheckExample1(HandshakeWire0):
def _impl(self):
dataIn, dataOut = self.dataIn, self.dataOut
dataIn.rd(dataIn.vld & dataOut.rd)
dataOut.vld(dataIn.vld & dataOut.rd)
class HandshakeRegLoop(Unit):
def __init__(self, loop_connector_cls):
self.loop_connector_cls = loop_connector_cls
super(HandshakeRegLoop, self).__init__()
def _declr(self):
addClkRstn(self)
self.rd, self.vld = Signal()._m(), Signal()._m()
def _impl(self) -> None:
r = HandshakedReg(HandshakeSync)
# r.DELAY = 1
# r.LATENCY = 2 # to break ready signal chain
self.reg = r
if self.loop_connector_cls == HandshakedReg:
c = self.loop_connector_cls(HandshakeSync)
else:
c = self.loop_connector_cls()
self.con = c
# circle r <-> c
r.dataIn(c.dataOut)
c.dataIn(r.dataOut)
self.rd(r.dataOut.rd)
self.vld(r.dataOut.vld)
propagateClkRstn(self)
class DoubleHandshakeReg(HandshakeWire0):
def _impl(self) -> None:
regs = self.regs = HObjList(HandshakedReg(HandshakeSync) for _ in range(2))
regs[0].dataIn(self.dataIn)
regs[1].dataIn(regs[0].dataOut)
self.dataOut(regs[1].dataOut)
propagateClkRstn(self)
class CombLoopAnalysisTC(unittest.TestCase):
def test_tarjan(self):
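# Adjacency-list graph; the Tarjan search should decompose it into the strongly
# connected components asserted below.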
g = {1:[2], 2:[1, 5], 3:[4], 4:[3, 5], 5:[6], 6:[7], 7:[8], 8:[6, 9], 9:[]}
scc_search = StronglyConnectedComponentSearchTarjan(g)
res = scc_search.search_strongly_connected_components()
res = freeze_set_of_sets(res)
self.assertEqual(res, freeze_set_of_sets([[9], [8, 7, 6], [5], [2, 1], [4, 3]]))
def test_CntrCombLoop(self):
u = CntrCombLoop()
with self.assertRaises(HwtSyntaxError):
to_rtl_str(u)
def get_comb_loops(self, u: Unit):
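# Synthesise the unit, run the combinational-loop analyzer over it, and normalise the
# reported loops into frozensets of signal-path strings for order-insensitive comparison.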
s = CombLoopAnalyzer()
synthesised(u)
s.visit_Unit(u)
return freeze_set_of_sets(
set(str(member.resolve()[1:]) for member in loop)
for loop in s.report()
)
def test_HandshakeWire0(self):
u = HandshakeWire0()
comb_loops = self.get_comb_loops(u)
self.assertEqual(comb_loops, frozenset())
def test_HandshakeWire1(self):
u = HandshakeWire1()
comb_loops = self.get_comb_loops(u)
self.assertEqual(comb_loops, frozenset())
def test_HandshakeRegLoop_HandshakeWire0(self):
u = HandshakeRegLoop(HandshakeWire0)
comb_loops = self.get_comb_loops(u)
self.assertEqual(comb_loops,
freeze_set_of_sets([
[
'sig_con_dataIn_rd',
'reg/dataOut_rd',
'sig_con_dataOut_rd',
'sig_reg_dataOut_rd',
'con/dataOut_rd',
'con/dataIn_rd',
'reg/dataIn_rd',
'sig_reg_dataIn_rd',
],
]))
def test_HandshakeRegLoop_HandshakeWire1(self):
u = HandshakeRegLoop(HandshakeWire1)
comb_loops = self.get_comb_loops(u)
self.assertEqual(comb_loops, frozenset())
def test_shared_component_instance_no_comb_loops(self):
u = DoubleHandshakeReg()
comb_loops = self.get_comb_loops(u)
self.assertEqual(comb_loops, frozenset())
def test_shared_component_instance_with_comb_loops(self):
u = HandshakeRegLoop(HandshakedReg)
comb_loops = self.get_comb_loops(u)
ref = [
[
'sig_con_dataIn_rd',
'reg/dataOut_rd',
'sig_con_dataOut_rd',
'sig_reg_dataOut_rd',
'con/dataOut_rd',
'con/dataIn_rd',
'reg/dataIn_rd',
'sig_reg_dataIn_rd',
],
]
# print([m for m in loop if m not in ref[0]])
# print([m for m in ref[0] if m not in loop])
#for loop in comb_loops:
self.assertSetEqual(comb_loops,
freeze_set_of_sets(ref))
if __name__ == "__main__":
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(CombLoopAnalysisTC))
runner = unittest.TextTestRunner(verbosity=3)
runner.run(suite)
# u = HandshakeRegLoop(HandshakeCheckExample)
# u = HandshakeRegLoop(HandshakeWire1)
# u = HandshakeCheckExample()
# print(to_rtl_str(u))
# s = CombLoopAnalyzer()
# synthesised(u)
# s.visit_Unit(u)
# for k, v in s.comb_connection_matrix.items():
# print(to_set_of_names(k), "\t", list(to_set_of_names(_v) for _v in v))
# print("tarjan")
# for scc in s.report():
# print(len(scc), list(to_set_of_names(_v) for _v in scc))
|
the-stack_0_27495
|
"""Process raw data."""
from functools import partial
import logging
import os
from pathlib import Path
import time
from creevey import CustomReportingPipeline
from creevey.load_funcs.image import load_image_from_disk
from creevey.ops.image import resize
from creevey.path_funcs import replace_dir
from creevey.util.image import find_image_files
from creevey.write_funcs.image import write_image
from fastai.vision import verify_images
import pandas as pd
from autofocus.build_dataset.constants import DATA_DIR
from autofocus.build_dataset.lpz_2016_2017.ops import (
record_is_grayscale,
record_mean_brightness,
trim_bottom,
)
MIN_DIM = 512
N_JOBS = 5
NUM_PIXELS_TO_TRIM = 198
THIS_DATASET_DIR = DATA_DIR / "lpz_2016_2017"
RAW_DIR = THIS_DATASET_DIR / "raw" / "data_2016_2017"
RAW_CSV_FILENAMES = ["detections_2016.csv", "detections_2017.csv"]
RAW_CSV_PATHS = [RAW_DIR / fn for fn in RAW_CSV_FILENAMES]
PROCESSED_DIR = THIS_DATASET_DIR / "processed"
PROCESSED_IMAGE_DIR = PROCESSED_DIR / "images"
PROCESSED_LABELS_CSV_OUTPATH = PROCESSED_DIR / "labels.csv"
CORRUPTED_FILES = [
RAW_DIR / "images_2016" / "DPT" / "D03-AMP1" / "._CHIL - D03-AMP1-JU16_00037.JPG"
]
def main() -> None:
"""
Process raw data.
Delete blacklisted corrupted images. Trim a footer from each image
and resize it to 512 pixels on its shorter dimension. Write results
to "autofocus/data/processed/images". Reformat labels from CSV and
write to a new file "autofocus/data/processed/labels.csv".
"""
logging.info("Deleting known corrupted files")
for path in CORRUPTED_FILES:
path.unlink()
logging.info(f"Processing images and writing results to {PROCESSED_IMAGE_DIR}")
run_record = _process_images()
logging.info("Processing labels")
labels = _process_labels(run_record)
logging.info(f"Writing processed labels to {PROCESSED_LABELS_CSV_OUTPATH }")
labels.to_csv(PROCESSED_DIR / "labels.csv", index=False)
def _process_images():
# Bottom 198 pixels are often a footer of camera information. I
# suspect that those pixels are more likely to lead the model to
# learn batch effects that do not generalize than to lead to genuine
# learning, so I remove them.
trim_footer = partial(trim_bottom, num_pixels=NUM_PIXELS_TO_TRIM)
resize_min_dim = partial(resize, min_dim=MIN_DIM)
ops = [trim_footer, resize_min_dim, record_is_grayscale, record_mean_brightness]
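# Each op runs per image inside the reporting pipeline; the grayscale and brightness ops
# record extra columns on the run record that _process_labels consumes later.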
trim_resize_pipeline = CustomReportingPipeline(
load_func=load_image_from_disk, ops=ops, write_func=write_image
)
image_paths = find_image_files(RAW_DIR)
path_func = partial(replace_dir, outdir=PROCESSED_IMAGE_DIR)
run_record = trim_resize_pipeline.run(
inpaths=image_paths,
path_func=path_func,
n_jobs=N_JOBS,
skip_existing=False,
exceptions_to_catch=ZeroDivisionError,
)
logging.info("Checking for additional corrupted images")
run_record = _delete_bad_images(run_record)
return run_record
def _delete_bad_images(run_record):
verify_images(PROCESSED_IMAGE_DIR, delete=True)
is_file = run_record.loc[:, "outpath"].apply(os.path.isfile)
run_record = run_record.loc[is_file, :]
return run_record
def _extract_seasons(file_name):
# For parsing the seasons from the File Names
# The season names are based on the codes provided by Lincoln Park Zoo researchers
file_name = file_name.split("-")[3]
if file_name.startswith(("JA", "WI")):
return "Winter"
elif file_name.startswith(("AP", "SP")):
return "Spring"
elif file_name.startswith(("JU", "SU")):
return "Summer"
else:
return "Fall"
def _process_labels(run_record):
raw_df = (
pd.concat([pd.read_csv(path) for path in RAW_CSV_PATHS], sort=False)
.set_index("FileName")
.drop(["Unnamed: 0", "FilePath"], axis="columns")
.rename(columns={"ShortName": "label", "ImageDate": "date"})
)
run_record.index = pd.Series(run_record.index).apply(lambda path: Path(path).name)
processed_df = (
run_record.drop(
["skipped_existing", "exception_handled", "time_finished"], axis="columns"
)
.join(raw_df, how="left")
.loc[:, ["outpath", "label", "grayscale", "mean_brightness", "date"]]
.reset_index(drop=True)
)
processed_df.loc[:, "filename"] = processed_df.loc[:, "outpath"].apply(
lambda path: Path(path).name
)
processed_df.loc[:, "location"] = processed_df.loc[:, "filename"].apply(
lambda fn: fn.split("-")[2]
)
processed_df.loc[:, "season"] = processed_df.loc[:, "filename"].apply(
_extract_seasons
)
processed_df = processed_df.drop("outpath", axis="columns")
return processed_df
if __name__ == "__main__":
start_time = time.time()
logging.basicConfig(format="%(levelname)s %(asctime)s %(message)s")
logging.getLogger().setLevel(logging.INFO)
main()
end_time = time.time()
logging.info(f"Completed in {round(end_time - start_time, 2)} seconds")
|
the-stack_0_27496
|
from django.conf.urls import url, include
from rest_framework.urlpatterns import format_suffix_patterns
import views_bnf_codes
import views_spending
import views_org_codes
import views_org_details
import views_org_location
import views_measures
urlpatterns = [
url(r'^spending/$', views_spending.total_spending,
name='total_spending'),
url(r'^bubble/$', views_spending.bubble,
name='bubble'),
url(r'^tariff/$', views_spending.tariff,
name='tariff_api'),
url(r'^spending_by_ccg/$', views_spending.spending_by_ccg,
name='spending_by_ccg'),
url(r'^spending_by_practice/$', views_spending.spending_by_practice,
name='spending_by_practice'),
url(r'^measure/$', views_measures.measure_global,
name='measure'),
url(r'^measure_by_ccg/$', views_measures.measure_by_ccg,
name='measure_by_ccg'),
url(r'^measure_numerators_by_org/$',
views_measures.measure_numerators_by_org,
name='measure_numerators_by_org'),
url(r'^measure_by_practice/$', views_measures.measure_by_practice,
name='measure_by_practice'),
url(r'^price_per_unit/$', views_spending.price_per_unit,
name='price_per_unit_api'),
url(r'^org_details/$', views_org_details.org_details),
url(r'^bnf_code/$', views_bnf_codes.bnf_codes),
url(r'^org_code/$', views_org_codes.org_codes),
url(r'^org_location/$', views_org_location.org_location),
url(r'^docs/', include('rest_framework_swagger.urls')),
]
urlpatterns = format_suffix_patterns(urlpatterns,
allowed=['json', 'csv'])
|
the-stack_0_27499
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from tempest_lib.common.utils import misc as misc_utils
from tempest_lib import exceptions as lib_exc
def wait_for_bm_node_status(client, node_id, attr, status):
"""Waits for a baremetal node attribute to reach given status.
The client should have a show_node(node_uuid) method to get the node.
"""
_, node = client.show_node(node_id)
start = int(time.time())
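# Poll the node until the attribute reaches the target status, raising TimeoutException
# once client.build_timeout seconds have elapsed.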
while node[attr] != status:
time.sleep(client.build_interval)
_, node = client.show_node(node_id)
status_curr = node[attr]
if status_curr == status:
return
if int(time.time()) - start >= client.build_timeout:
message = ('Node %(node_id)s failed to reach %(attr)s=%(status)s '
'within the required time (%(timeout)s s).' %
{'node_id': node_id,
'attr': attr,
'status': status,
'timeout': client.build_timeout})
message += ' Current state of %s: %s.' % (attr, status_curr)
caller = misc_utils.find_test_caller()
if caller:
message = '(%s) %s' % (caller, message)
raise lib_exc.TimeoutException(message)
|
the-stack_0_27500
|
# add paths of the directories where your photos live.
# it will not look for photos recursively, so you might want to add subdirectories as well.
import os
image_dirs = [
'/home/rammi/Downloads/tuebingen',
]
mapzen_api_key = os.environ['MAPZEN_API_KEY']
mapbox_api_key = os.environ['MAPBOX_API_KEY']
|
the-stack_0_27501
|
#!/usr/bin/env python
# PYTHON_ARGCOMPLETE_OK
# Copyright: (c) 2020, Jordan Borean (@jborean93) <[email protected]>
# MIT License (see LICENSE or https://opensource.org/licenses/MIT)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import argparse
import os.path
import subprocess
import tempfile
from utils import (
argcomplete,
build_bash_script,
build_package_command,
build_package_repo_command,
complete_distribution,
docker_run,
get_version,
load_distribution_config,
OMI_REPO,
select_distribution,
)
def main():
"""Main program body."""
args = parse_args()
distribution = select_distribution(args)
if not distribution:
return
distro_details = load_distribution_config(distribution)
if args.docker and not distro_details['container_image']:
raise ValueError("Cannot run --docker on %s as no container_image has been specified" % distribution)
# On macOS we aren't running as root in a container so this step needs sudo.
sudo_prefix = 'sudo ' if distribution.startswith('macOS') else ''
script_steps = []
if not args.skip_deps:
repo_script = build_package_repo_command(distro_details['package_manager'], distro_details['microsoft_repo'])
dep_script = build_package_command(distro_details['package_manager'], distro_details['test_deps'])
script_steps.append(('Setting up the Microsoft package manager repo', repo_script))
if distribution == 'debian9':
debian_ms = 'echo "deb [arch=amd64] https://packages.microsoft.com/repos/microsoft-debian-stretch-prod stretch main" > /etc/apt/sources.list.d/microsoft.list'
script_steps.append(('Further steps for MS repo on Debian 9', debian_ms))
script_steps.append(('Installing test dependency packages', dep_script))
install_pswsman = '''cat > /tmp/install-pswsman.ps1 << EOL
\$ErrorActionPreference = 'Stop'
\$repoParams = @{
Name = 'PSWSManRepo'
PublishLocation = './build'
SourceLocation = './build'
InstallationPolicy = 'Trusted'
}
if (Get-PSRepository -Name \$repoParams.Name -ErrorAction SilentlyContinue) {
Unregister-PSRepository -Name \$repoParams.Name
}
Register-PSRepository @repoParams
try {
Install-Module -Name PSWSMan -Repository \$repoParams.Name -Scope CurrentUser
} finally {
Unregister-PSRepository -Name \$repoParams.Name
}
EOL
pwsh -NoProfile -NoLogo -File /tmp/install-pswsman.ps1
'''
script_steps.append(('Installing PSWSMan module', install_pswsman))
cert_path = os.path.join('integration_environment', 'cert_setup', 'ca.pem')
if os.path.exists(os.path.join(OMI_REPO, cert_path)):
cert_cmd = "%spwsh -Command 'Register-TrustedCertificate -Path %s -Verbose'" \
% (sudo_prefix, cert_path)
script_steps.append(('Adding CA chain to system trust store', cert_cmd))
pwsh_deps = '''cat > /tmp/pwsh-requirements.ps1 << EOL
\$ErrorActionPreference = 'Stop'
\$ProgressPreference = 'SilentlyContinue'
Install-Module -Name Pester -MinimumVersion 5.0 -Force
Install-Module -Name powershell-yaml -Force
Install-Module -Name MSAL.PS -Force -AcceptLicense
EOL
pwsh -NoProfile -NoLogo -File /tmp/pwsh-requirements.ps1'''
script_steps.append(('Installing Pester 5+ and other PowerShell deps', pwsh_deps))
install_script = '''PWSHDIR="$( dirname "$( readlink "$( which pwsh )" )" )"
%spwsh -Command 'Install-WSMan -Verbose\'''' % sudo_prefix
script_steps.append(('Copying lib artifacts to the PowerShell directory', install_script))
pester_script = '''cat > /tmp/pwsh-test.ps1 << EOL
\$ErrorActionPreference = 'Stop'
\$ProgressPreference = 'SilentlyContinue'
Import-Module -Name Pester -MinimumVersion 5.0
\$configuration = [PesterConfiguration]::Default
\$configuration.Output.Verbosity = 'Detailed'
\$configuration.Run.Path = 'libmi.tests.ps1'
\$configuration.Run.Exit = \$true
Invoke-Pester -Configuration \$configuration
EOL
echo "%s" > /tmp/distro.txt''' % distribution
script_steps.append(('Creating Pester test script', pester_script))
script_steps.append(('Getting PowerShell version', 'pwsh -Command \$PSVersionTable'))
script_steps.append(('Getting libmi version',
"pwsh -Command 'Get-WSManVersion'"))
if distribution.startswith('macOS'):
script_steps.append(('Output libpsrpclient libraries', 'otool -L -arch all "${PWSHDIR}/libpsrpclient.dylib"'))
script_steps.append(('Output libmi libraries', 'otool -L -arch all "${PWSHDIR}/libmi.dylib"'))
else:
script_steps.append(('Output libpsrpclient libraries', 'ldd "${PWSHDIR}/libpsrpclient.so"'))
script_steps.append(('Output libmi libraries', 'ldd "${PWSHDIR}/libmi.so"'))
if args.interactive:
script_steps.append(('Opening interactive shell', '/bin/bash'))
elif args.verify_version:
build_id = os.environ.get('OMI_BUILDVERSION_BUILDNR', '0')
verify_version = "%s.%s.%s.%s" % (get_version() + (build_id,))
script_steps.append(('Verify libraries are loaded and match %s' % verify_version,
'''cat > /tmp/version-test.ps1 << EOL
\$ErrorActionPreference = 'Stop'
\$ProgressPreference = 'SilentlyContinue'
Import-Module -Name PSWSMan
\$expectedVersion = [Version]'%s'
\$actualVersions = Get-WSManVersion
if (\$actualVersions.MI -ne \$expectedVersion) {
throw "libmi version '\$(\$actualVersions.MI)' does not match expected version '\$expectedVersion'"
}
if (\$actualVersions.PSRP -ne \$expectedVersion) {
throw "libpsrpclient version '\$(\$actualVersions.PSRP)' does not match expected version '\$expectedVersion'"
}
"SUCCESS: Versions are good"
EOL
pwsh -NoProfile -NoLogo -File /tmp/version-test.ps1''' % verify_version))
else:
script_steps.append(('Running PowerShell test', 'pwsh -NoProfile -NoLogo -File /tmp/pwsh-test.ps1'))
test_script = build_bash_script(script_steps)
if args.output_script:
print(test_script)
else:
with tempfile.NamedTemporaryFile(dir=OMI_REPO, prefix='test-', suffix='-%s.sh' % distribution) as temp_fd:
temp_fd.write(test_script.encode('utf-8'))
temp_fd.flush()
if args.docker:
docker_run(distro_details['container_image'], '/omi/%s' % os.path.basename(temp_fd.name),
env={'KRB5_CONFIG': '/omi/krb5.conf'}, interactive=args.interactive, shell=distro_details['shell'])
else:
print("Running tests locally")
subprocess.check_call(['bash', temp_fd.name], cwd=OMI_REPO)
def parse_args():
"""Parse and return args."""
parser = argparse.ArgumentParser(description='Test the OMI library in PowerShell.')
parser.add_argument('distribution',
metavar='distribution',
nargs='?',
default=None,
help='The distribution to test.').completer = complete_distribution
parser.add_argument('--interactive',
dest='interactive',
action='store_true',
help='When combined with --docker will start an interactive session in the test container.')
parser.add_argument('--skip-deps',
dest='skip_deps',
action='store_true',
help='Skip installing any dependencies.')
parser.add_argument('--verify-version',
dest='verify_version',
action='store_true',
help='Will only test that the library can be loaded and the version is the value expected.')
run_group = parser.add_mutually_exclusive_group()
run_group.add_argument('--docker',
dest='docker',
action='store_true',
help='Whether to test OMI in a docker container.')
run_group.add_argument('--output-script',
dest='output_script',
action='store_true',
help='Will print out the bash script that can test the library.')
if argcomplete:
argcomplete.autocomplete(parser)
args = parser.parse_args()
if args.interactive and not args.docker:
        parser.error('argument --interactive: must be set with argument --docker')
return args
if __name__ == '__main__':
main()
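# Typical invocations (sketch; the script filename test.py is assumed, the flags
# below are the ones defined in parse_args() above):
#
#     python test.py debian9 --docker                 # run the Pester tests inside the distro container
#     python test.py debian9 --output-script          # only print the generated bash test script
#     python test.py debian9 --docker --interactive   # open an interactive shell in the container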
|
the-stack_0_27504
|
import numpy as np
from typing import Tuple
from geometry.vector import Vector
def vectors_mean(*vectors: Vector) -> Vector:
    return Vector(*np.mean([[v.x, v.y] for v in vectors], 0))
def smooth_path_vectors(path, window: int = 5) -> Tuple[Vector, Vector, Vector]:
"""
Smooths vectors of a path by binning them and
taking the average vector
"""
velocity_means, accel_means, tangent_means = [], [], []
for t in np.arange(0, len(path)):
t_0 = t - window if t > window else 0
t_1 = t + window if len(path) - t > window else len(path)
velocity_means.append(
vectors_mean(*[path.velocity[i] for i in np.arange(t_0, t_1)])
)
accel_means.append(
vectors_mean(*[path.acceleration[i] for i in np.arange(t_0, t_1)])
)
tangent_means.append(
vectors_mean(*[path.tangent[i] for i in np.arange(t_0, t_1)])
)
velocity = Vector.from_list(velocity_means)
acceleration = Vector.from_list(accel_means)
tangent = Vector.from_list(tangent_means)
return velocity, acceleration, tangent
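# Illustrative usage (assumes geometry.vector.Vector can be constructed as
# Vector(x, y), which matches how vectors_mean() builds its result above):
#
#     mean = vectors_mean(Vector(1.0, 0.0), Vector(0.0, 1.0))
#     # mean.x == 0.5 and mean.y == 0.5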
|
the-stack_0_27506
|
import asyncio
import warnings
from typing import Any, List, Set, Iterable, Optional, Tuple, Union, NamedTuple
from aiocqhttp import Event as CQEvent
from aiocqhttp.message import Message
from .log import logger
from . import NoneBot
from .command import call_command
from .session import BaseSession
from .typing import CommandName_T, CommandArgs_T, NLPHandler_T, PermissionPolicy_T
from .permission import check_permission
class NLProcessor:
"""INTERNAL API"""
__slots__ = ('func', 'keywords', 'only_to_me', 'only_short_message',
'allow_empty_message', 'permission')
def __init__(self, *, func: NLPHandler_T, keywords: Optional[Iterable[str]],
only_to_me: bool, only_short_message: bool,
allow_empty_message: bool,
permission: PermissionPolicy_T):
self.func = func
self.keywords = keywords
self.only_to_me = only_to_me
self.only_short_message = only_short_message
self.allow_empty_message = allow_empty_message
self.permission = permission # includes EllipsisType
async def test(self, session: 'NLPSession',
msg_text_length: Optional[int] = None) -> bool:
"""
Test whether the session matches this (self) NL processor.
:param session: NLPSession object
:param msg_text_length: this argument is `len(session.msg_text)`,
designated to be cached if this function
is invoked in a loop
:return: the session context matches this processor
"""
if msg_text_length is None:
msg_text_length = len(session.msg_text)
if not self.allow_empty_message and not session.msg:
            # empty messages are not allowed and this one is empty, so reject it
return False
if self.only_short_message and \
msg_text_length > session.bot.config.SHORT_MESSAGE_MAX_LENGTH:
return False
if self.only_to_me and not session.event['to_me']:
return False
if self.keywords:
for kw in self.keywords:
if kw in session.msg_text:
break
else:
# no keyword matches
return False
return await self._check_perm(session)
async def _check_perm(self, session: 'NLPSession') -> bool:
"""
Check if the session has sufficient permission to
call the command.
:param session: NLPSession object
:return: the event has the permission
"""
return await check_permission(session.bot, session.event,
self.permission if self.permission is not ...
else session.bot.config.DEFAULT_NLP_PERMISSION)
class NLPManager:
"""INTERNAL API"""
_nl_processors: Set[NLProcessor] = set()
def __init__(self):
# TODO: don't copy
self.nl_processors = NLPManager._nl_processors.copy()
@classmethod
def add_nl_processor(cls, processor: NLProcessor) -> None:
"""Register a natural language processor
Args:
processor (NLProcessor): Processor object
"""
if processor in cls._nl_processors:
warnings.warn(f"NLProcessor {processor} already exists")
return
cls._nl_processors.add(processor)
@classmethod
def remove_nl_processor(cls, processor: NLProcessor) -> bool:
"""Remove a natural language processor globally
Args:
processor (NLProcessor): Processor to remove
Returns:
bool: Success or not
"""
if processor in cls._nl_processors:
cls._nl_processors.remove(processor)
return True
return False
@classmethod
def switch_nlprocessor_global(cls,
processor: NLProcessor,
state: Optional[bool] = None
) -> Optional[bool]:
"""Remove or add a natural language processor globally
Args:
processor (NLProcessor): Processor object
Returns:
bool: True if removed, False if added
"""
if processor in cls._nl_processors and not state:
cls._nl_processors.remove(processor)
return True
elif processor not in cls._nl_processors and state is not False:
cls._nl_processors.add(processor)
return False
def switch_nlprocessor(self,
processor: NLProcessor,
state: Optional[bool] = None) -> Optional[bool]:
"""Remove or add a natural language processor
Args:
processor (NLProcessor): Processor to remove
Returns:
bool: True if removed, False if added
"""
if processor in self.nl_processors and not state:
self.nl_processors.remove(processor)
return True
elif processor not in self.nl_processors and state is not False:
self.nl_processors.add(processor)
return False
class NLPSession(BaseSession):
"""
继承自 `BaseSession` 类,表示自然语言处理 Session。
"""
__slots__ = ('msg', 'msg_text', 'msg_images')
def __init__(self, bot: NoneBot, event: CQEvent, msg: str):
super().__init__(bot, event)
self.msg: str = msg
"""以字符串形式表示的消息内容,已去除开头的 @ 和机器人称呼,可能存在 CQ 码。"""
tmp_msg = Message(msg)
self.msg_text: str = tmp_msg.extract_plain_text()
"""消息内容的纯文本部分,已去除所有 CQ 码/非 `text` 类型的消息段。各纯文本消息段之间使用空格连接。"""
self.msg_images: List[str] = [
s.data['url']
for s in tmp_msg
if s.type == 'image' and 'url' in s.data
]
"""消息内容中所有图片的 URL 的列表,如果消息中没有图片,则为 `[]`。"""
class IntentCommand(NamedTuple):
"""
用于表示自然语言处理之后得到的意图命令,是一个 `NamedTuple`,由自然语言处理器返回。
版本: 1.2.0+
参数:
confidence:
name (Union[str, nonebot.typing.CommandName_T]):
args (nonebot.typing.CommandArgs_T):
current_arg:
"""
confidence: float
"""{kind}`instance-var` 意图的置信度,即表示对当前推测的用户意图有多大把握。"""
name: Union[str, CommandName_T]
"""{kind}`instance-var` {anno}`Union[str, nonebot.typing.CommandName_T]` 命令的名字。"""
args: Optional[CommandArgs_T] = None
"""{kind}`instance-var` {anno}`nonebot.typing.CommandArgs_T` 命令的(初始)参数。"""
current_arg: str = ''
"""{kind}`instance-var` 命令的当前输入参数。"""
async def handle_natural_language(bot: NoneBot, event: CQEvent,
manager: NLPManager) -> bool:
"""
INTERNAL API
Handle a message as natural language.
This function is typically called by "handle_message".
:param bot: NoneBot instance
:param event: message event
:param manager: natural language processor manager
:return: the message is handled as natural language
"""
session = NLPSession(bot, event, str(event.message))
# use msg_text here because CQ code "share" may be very long,
# at the same time some plugins may want to handle it
msg_text_length = len(session.msg_text)
# returns 1. processor result; 2. whether this processor is considered handled
async def try_run_nlp(p: NLProcessor) -> Tuple[Any, bool]:
try:
should_run = await p.test(session, msg_text_length=msg_text_length)
if should_run:
return await p.func(session), True
return None, False
except Exception as e:
logger.error('An exception occurred while running '
'some natural language processor:')
logger.exception(e)
return None, True
intent_commands: List[IntentCommand] = []
procs_empty = True
for res in asyncio.as_completed([try_run_nlp(p) for p in manager.nl_processors]):
result, should_run = await res
if not should_run:
continue
procs_empty = False
if isinstance(result, IntentCommand):
intent_commands.append(result)
if procs_empty:
return False
intent_commands.sort(key=lambda ic: ic.confidence, reverse=True)
logger.debug(f'Intent commands: {intent_commands}')
if intent_commands and intent_commands[0].confidence >= 60.0:
# choose the intent command with highest confidence
chosen_cmd = intent_commands[0]
logger.debug(
f'Intent command with highest confidence: {chosen_cmd}')
return await call_command(bot,
event,
chosen_cmd.name,
args=chosen_cmd.args,
current_arg=chosen_cmd.current_arg,
check_perm=False) or False
logger.debug('No intent command has enough confidence')
return False
__all__ = [
'NLPSession',
'IntentCommand',
]
__autodoc__ = {
"NLProcessor": False,
"NLPManager": False,
"handle_natural_language": False
}
|
the-stack_0_27508
|
class Solution:
def solve(self, nums):
uniques = set()
j = 0
ans = 0
for i in range(len(nums)):
while j < len(nums) and nums[j] not in uniques:
uniques.add(nums[j])
j += 1
ans = max(ans, len(uniques))
uniques.remove(nums[i])
return ans
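# Quick check of the sliding-window logic above: the longest run of distinct
# values in [1, 2, 3, 1, 2, 3, 4] is [1, 2, 3, 4], i.e. length 4.
if __name__ == '__main__':
    assert Solution().solve([1, 2, 3, 1, 2, 3, 4]) == 4
    assert Solution().solve([]) == 0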
|
the-stack_0_27509
|
"""
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from functools import partial
from os import path as osp
from typing import Dict
from typing import Optional
import torch
import torchvision.models
import examples.torch.common.models as custom_models
from examples.torch.classification.models.mobilenet_v2_32x32 import MobileNetV2For32x32
from examples.torch.common.example_logger import logger
import examples.torch.common.restricted_pickle_module as restricted_pickle_module
from nncf.torch.checkpoint_loading import load_state
from nncf.torch.utils import safe_thread_call
def load_model(model, pretrained=True, num_classes=1000, model_params=None,
weights_path: str = None) -> torch.nn.Module:
"""
** WARNING: This is implemented using torch.load functionality,
which itself uses Python's pickling facilities that may be used to perform
arbitrary code execution during unpickling. Only load the data you trust.
"""
logger.info("Loading model: {}".format(model))
if model_params is None:
model_params = {}
if model in torchvision.models.__dict__:
load_model_fn = partial(torchvision.models.__dict__[model], num_classes=num_classes, pretrained=pretrained,
**model_params)
elif model in custom_models.__dict__:
load_model_fn = partial(custom_models.__dict__[model], num_classes=num_classes, pretrained=pretrained,
**model_params)
elif model == "mobilenet_v2_32x32":
load_model_fn = partial(MobileNetV2For32x32, num_classes=100)
else:
raise Exception("Undefined model name")
loaded_model = safe_thread_call(load_model_fn)
if not pretrained and weights_path is not None:
sd = torch.load(weights_path, map_location='cpu', pickle_module=restricted_pickle_module)
load_state(loaded_model, sd, is_resume=False)
return loaded_model
MODEL_STATE_ATTR = 'state_dict'
COMPRESSION_STATE_ATTR = 'compression_state'
def load_resuming_checkpoint(resuming_checkpoint_path: str):
if osp.isfile(resuming_checkpoint_path):
logger.info("=> loading checkpoint '{}'".format(resuming_checkpoint_path))
checkpoint = torch.load(resuming_checkpoint_path, map_location='cpu',
pickle_module=restricted_pickle_module)
return checkpoint
raise FileNotFoundError("no checkpoint found at '{}'".format(resuming_checkpoint_path))
def extract_model_and_compression_states(resuming_checkpoint: Optional[Dict] = None):
if resuming_checkpoint is None:
return None, None
compression_state = resuming_checkpoint.get(COMPRESSION_STATE_ATTR)
model_state_dict = resuming_checkpoint.get(MODEL_STATE_ATTR)
return model_state_dict, compression_state
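# Illustrative usage (a sketch; 'checkpoint.pth' is a placeholder path and
# 'resnet18' is just one of the torchvision model names accepted by load_model):
#
#     checkpoint = load_resuming_checkpoint('checkpoint.pth')
#     model_state_dict, compression_state = extract_model_and_compression_states(checkpoint)
#     model = load_model('resnet18', pretrained=False)
#     if model_state_dict is not None:
#         load_state(model, model_state_dict, is_resume=True)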
|
the-stack_0_27510
|
#!/usr/bin/env python3
# ----------------------------------------------------------------------------
# Copyright 2018 ARM Ltd.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
import struct
import signal
import threading
import traceback
import binascii
import json
from collections import namedtuple
from mbed_cloud.connect import ConnectAPI
from mbed_cloud.device_directory import DeviceDirectoryAPI
from mbed_cloud.exceptions import CloudApiException
wait_condition = threading.Condition()
wait_condition.acquire()
keep_running = True
API_connect = ConnectAPI()
API_device_directory = DeviceDirectoryAPI()
def byte_to_hex(value):
return binascii.hexlify(value)
def byte_to_int(value):
if len(value) == 2:
# unsigned short, uint16_t
return struct.unpack("<H", value)[0]
elif len(value) == 4:
# unsigned int, uint32_t
return struct.unpack("<i", value)[0]
else:
return None
def byte_to_float(value):
return struct.unpack("<f", value)[0]
def byte_to_str(value):
return value.decode("utf-8")
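# Quick sanity examples for the byte helpers above (little-endian packing):
#
#     byte_to_int(struct.pack("<H", 1500))    # -> 1500
#     byte_to_float(struct.pack("<f", 21.5))  # -> 21.5
#     byte_to_str(b"edge-01")                 # -> 'edge-01'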
ResourcePath = namedtuple("ResourcePath", "object_id, instance_id, resource_id")
# See http://www.openmobilealliance.org/wp/OMNA/LwM2M/LwM2MRegistry.html
# Currently Device Management Client supports only text-format for resource values
LWM2M_RESOURCE_MAPPING = {
# 0: byte_to_str,
# 1: byte_to_str,
# 2: byte_to_str,
# 3: byte_to_str,
# 11: byte_to_int,
# 17: byte_to_str,
# 18: byte_to_str,
# 19: byte_to_str,
# 5700: byte_to_float,
# 5701: byte_to_str,
# 5601: byte_to_float,
# 5602: byte_to_float
}
def red(s):
if s is None:
s = "<null>"
return "\033[91m{}\033[0m".format(s)
def sig_handler(signum, frame):
print(red("Signal handler called: {}".format(signum)))
global keep_running
global wait_condition
wait_condition.acquire()
keep_running = False
wait_condition.notify()
wait_condition.release()
signal.signal(signal.SIGINT, sig_handler)
signal.signal(signal.SIGTERM, sig_handler)
def async_response_is_done(async_resp):
def check():
return async_resp.is_done
return check
def is_keep_running():
return keep_running
def is_done(async_resp):
return async_resp.is_done
def split_path(resource_path):
    # remove the leading slash if present
if resource_path[0] == '/':
resource_path = resource_path[1:]
# remove /d if present
if resource_path.startswith('d/'):
resource_path = resource_path[3:]
resource_path = resource_path[(resource_path.index('/') + 1):]
splitted = resource_path.split('/')
return ResourcePath(object_id=int(splitted[0]),
instance_id=int(splitted[1]),
resource_id=int(splitted[2]))
def read(device_id, resource_path):
API_connect.start_notifications()
try:
res = API_connect.get_resource_value(device_id, resource_path)
print("Read on '{} | {}' completed, value: '{}'".format(device_id, resource_path, res))
except CloudApiException as e:
print(red("Exception catched when trying to read"))
print(red("Reason: {} | status: {} | msg: {}".format(e.reason, e.status, e.message)))
def observe_async(device_id, resource_path):
global wait_condition
API_connect.start_notifications()
devices = API_connect.list_connected_devices().data
if not devices:
raise Exception("No devices registered. Aborting")
current_value = None
while keep_running:
try:
async_resp = API_connect.get_resource_value_async(device_id, resource_path)
except:
traceback.print_exc()
print(red("Get resource value async failed."))
return
# Busy wait - block the thread and wait for the response to finish.
wait_condition.acquire()
while is_keep_running() and not is_done(async_resp):
wait_condition.wait(1.0)
wait_condition.release()
        # Check if we have an async error response, and abort if so.
if not async_resp.is_done:
print("Async response not done, interrupted.")
elif async_resp.error:
print(red("Got async error response: {}".format(async_resp.error)))
else:
# Get the value from the async response, as we know it's done and it's not
# an error.
new_value = async_resp.value
res = split_path(resource_path)
new_value = LWM2M_RESOURCE_MAPPING.get(res.resource_id, byte_to_str)(new_value)
print("New value: {}".format(new_value))
# Save new current value
current_value = new_value
def execute(device_id, resource_path):
API_connect.start_notifications()
try:
res = API_connect.execute_resource(device_id, resource_path)
print("Execute on '{} | {}' returned: '{}'".format(device_id, resource_path, res))
except CloudApiException as e:
print(red("Exception catched when trying to execute"))
print(red("Reason: {} | status: {} | msg: {}".format(e.reason, e.status, e.message)))
KEYS_NEEDED_FOR_DEVICES = ("id", "name", "device_type", "device_execution_mode",
"created_at", "updated_at", "state", "account_id",
"host_gateway")
def write(device_id, resource_path, value):
API_connect.start_notifications()
try:
res = API_connect.set_resource_value(device_id, resource_path, value)
print("Write on '{} | {}' completed, new value: '{}'".format(device_id, resource_path, res))
except CloudApiException as e:
print(red("Exception catched when trying to write"))
print(red("Reason: {} | status: {} | msg: {}".format(e.reason, e.status, e.message)))
def device_object_to_dictionary(device):
return {
"device-id" : device.id,
"name": device.name,
"state": device.state,
"hosting_edge": device.host_gateway,
"type": device.device_type,
"exec_mode": device.device_execution_mode,
"created_at": device.created_at.isoformat(),
"updated_at": device.updated_at.isoformat(),
}
def run_filtered_request(filters, connected):
if connected:
devices = API_connect.list_connected_devices(filters=filters).data
else:
devices = API_device_directory.list_devices(filters=filters).data
filtered_devices = []
for device in devices:
filtered_devices.append(device_object_to_dictionary(device))
return filtered_devices
def filter_edge_hosted_devices(edge_device_id, connected):
edge_host_filter = { "host_gateway": edge_device_id
}
filtered_devices = run_filtered_request(edge_host_filter, connected)
print(json.dumps(filtered_devices, sort_keys=True, indent=2))
def filter_edge_devices(connected):
edge_filter = { "device_type": "MBED_GW" }
filtered_devices = run_filtered_request(edge_filter, connected)
print(json.dumps(filtered_devices, sort_keys=True, indent=2))
|
the-stack_0_27516
|
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
from pathlib import Path
from pants.testutil.pants_integration_test import ensure_daemon, run_pants
from pants.util.contextutil import temporary_dir
def test_visualize_to():
# Tests usage of the `--engine-visualize-to=` option, which triggers background
# visualization of the graph. There are unit tests confirming the content of the rendered
# results.
with temporary_dir(root_dir=os.getcwd()) as destdir:
run_pants(
[
f"--engine-visualize-to={destdir}",
"--backend-packages=pants.backend.python",
"list",
"testprojects/src/python/hello/greet",
]
).assert_success()
destdir_files = list(Path(destdir).iterdir())
assert len(destdir_files) > 0
@ensure_daemon
def test_graceful_termination(use_pantsd: bool) -> None:
result = run_pants(
[
"--backend-packages=['pants.backend.python', 'internal_plugins.rules_for_testing']",
"list-and-die-for-testing",
"testprojects/src/python/hello/greet",
],
use_pantsd=use_pantsd,
)
result.assert_failure()
assert result.stdout == "testprojects/src/python/hello/greet\n"
assert result.exit_code == 42
|
the-stack_0_27517
|
""" The core's core. """
from .assumptions import AssumeMeths, make__get_assumption
from sympy.core.compatibility import cmp
# used for canonical ordering of symbolic sequences
# via __cmp__ method:
# FIXME this is *so* irrelevant and outdated!
ordering_of_classes = [
# singleton numbers
'Zero', 'One','Half','Infinity','NaN','NegativeOne','NegativeInfinity',
# numbers
'Integer','Rational','Float',
# singleton symbols
'Exp1','Pi','ImaginaryUnit',
# symbols
'Symbol','Wild','Temporary',
# Functions that should come before Pow/Add/Mul
'ApplyConjugate', 'ApplyAbs',
# arithmetic operations
'Pow', 'Mul', 'Add',
# function values
'Apply',
'ApplyExp','ApplyLog',
'ApplySin','ApplyCos','ApplyTan','ApplyCot',
'ApplyASin','ApplyACos','ApplyATan','ApplyACot',
'ApplySinh','ApplyCosh','ApplyTanh','ApplyCoth',
'ApplyASinh','ApplyACosh','ApplyATanh','ApplyACoth',
'ApplyRisingFactorial','ApplyFallingFactorial',
'ApplyFactorial','ApplyBinomial',
'ApplyFloor', 'ApplyCeiling',
'ApplyRe','ApplyIm', 'ApplyArg',
'ApplySqrt','ApplySign',
'ApplyGamma','ApplyLowerGamma','ApplyUpperGamma','ApplyPolyGamma',
'ApplyErf',
'ApplyChebyshev','ApplyChebyshev2',
'Derivative','Integral',
# defined singleton functions
'Abs','Sign','Sqrt',
'Floor', 'Ceiling',
'Re', 'Im', 'Arg',
'Conjugate',
'Exp','Log',
'Sin','Cos','Tan','Cot','ASin','ACos','ATan','ACot',
'Sinh','Cosh','Tanh','Coth','ASinh','ACosh','ATanh','ACoth',
'RisingFactorial','FallingFactorial',
'factorial','binomial',
'Gamma','LowerGamma','UpperGamma','PolyGamma',
'Erf',
# special polynomials
'Chebyshev','Chebyshev2',
# undefined functions
'Function','WildFunction',
# anonymous functions
'Lambda',
# Landau O symbol
'Order',
# relational operations
'Equality', 'Unequality', 'StrictInequality', 'Inequality',
]
class BasicType(type):
pass
class Registry(object):
"""
Base class for registry objects.
Registries map a name to an object using attribute notation. Registry
classes behave singletonically: all their instances share the same state,
which is stored in the class object.
All subclasses should set `__slots__ = []`.
"""
__slots__ = []
def __setattr__(self, name, obj):
setattr(self.__class__, name, obj)
def __delattr__(self, name):
delattr(self.__class__, name)
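# Sketch of the Registry behaviour described above: attribute writes go to the
# class object, so all instances share the same state.
#
#     class MyRegistry(Registry):
#         __slots__ = []
#
#     r1, r2 = MyRegistry(), MyRegistry()
#     r1.foo = 42
#     assert r2.foo == 42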
# A set containing all sympy class objects, kept in sync with C
all_classes = set()
class ClassRegistry(Registry):
"""
Namespace for SymPy classes
This is needed to avoid problems with cyclic imports.
To get a SymPy class, use `C.<class_name>` e.g. `C.Rational`, `C.Add`.
For performance reasons, this is coupled with a set `all_classes` holding
the classes, which should not be modified directly.
"""
__slots__ = []
def __setattr__(self, name, cls):
Registry.__setattr__(self, name, cls)
all_classes.add(cls)
def __delattr__(self, name):
cls = getattr(self, name)
Registry.__delattr__(self, name)
# The same class could have different names, so make sure
# it's really gone from C before removing it from all_classes.
if cls not in iter(self.__class__.__dict__.values()):
all_classes.remove(cls)
C = ClassRegistry()
class BasicMeta(BasicType):
def __init__(cls, *args, **kws):
setattr(C, cls.__name__, cls)
# --- assumptions ---
# initialize default_assumptions dictionary
default_assumptions = {}
for k,v in cls.__dict__.items():
if not k.startswith('is_'):
continue
# this is not an assumption (e.g. is_Integer)
if k[3:] not in AssumeMeths._assume_defined:
continue
k = k[3:]
if isinstance(v,(bool,int,type(None))):
if v is not None:
v = bool(v)
default_assumptions[k] = v
# XXX maybe we should try to keep ._default_premises out of class ?
# XXX __slots__ in class ?
cls._default_premises = default_assumptions
for base in cls.__bases__:
try:
base_premises = base._default_premises
except AttributeError:
continue # no ._default_premises is ok
for k,v in base_premises.items():
# if an assumption is already present in child, we should ignore base
# e.g. Integer.is_integer=T, but Rational.is_integer=F (for speed)
if k in default_assumptions:
continue
default_assumptions[k] = v
# deduce all consequences from default assumptions -- make it complete
xass = AssumeMeths._assume_rules.deduce_all_facts(default_assumptions)
# and store completed set into cls -- this way we'll avoid rededucing
# extensions of class default assumptions each time on instance
# creation -- we keep it prededuced already.
cls.default_assumptions = xass
# let's store new derived assumptions back into class.
# this will result in faster access to this attributes.
#
# Timings
# -------
#
# a = Integer(5)
# %timeit a.is_zero -> 20 us (without this optimization)
# %timeit a.is_zero -> 2 us (with this optimization)
#
#
# BTW: it is very important to study the lessons learned here --
# we could drop Basic.__getattr__ completely (!)
#
# %timeit x.is_Add -> 2090 ns (Basic.__getattr__ present)
# %timeit x.is_Add -> 825 ns (Basic.__getattr__ absent)
#
# so we may want to override all assumptions is_<xxx> methods and
# remove Basic.__getattr__
# first we need to collect derived premises
derived_premises = {}
for k,v in xass.items():
if k not in default_assumptions:
derived_premises[k] = v
cls._derived_premises = derived_premises
for k,v in xass.items():
assert v == cls.__dict__.get('is_'+k, v), (cls,k,v)
# NOTE: this way Integer.is_even = False (inherited from Rational)
# NOTE: the next code blocks add 'protection-properties' to overcome this
setattr(cls, 'is_'+k, v)
        # protection e.g. for Integer.is_even=F <- (Rational.is_integer=F)
for base in cls.__bases__:
try:
base_derived_premises = base._derived_premises
except AttributeError:
continue # no ._derived_premises is ok
for k,v in base_derived_premises.items():
if ('is_'+k) not in cls.__dict__:
is_k = make__get_assumption(cls.__name__, k)
setattr(cls, 'is_'+k, property(is_k))
def __cmp__(cls, other):
# If the other object is not a Basic subclass, then we are not equal to
# it.
if not isinstance(other, BasicType):
return -1
n1 = cls.__name__
n2 = other.__name__
c = cmp(n1,n2)
if not c: return 0
UNKNOWN = len(ordering_of_classes)+1
try:
i1 = ordering_of_classes.index(n1)
except ValueError:
#print 'Add',n1,'to basic.ordering_of_classes list'
#return c
i1 = UNKNOWN
try:
i2 = ordering_of_classes.index(n2)
except ValueError:
#print 'Add',n2,'to basic.ordering_of_classes list'
#return c
i2 = UNKNOWN
if i1 == UNKNOWN and i2 == UNKNOWN:
return c
return cmp(i1,i2)
def __lt__(cls, other):
if cls.__cmp__(other)==-1:
return True
return False
def __gt__(cls, other):
if cls.__cmp__(other)==1:
return True
return False
C.BasicMeta = BasicMeta
|
the-stack_0_27518
|
from __future__ import absolute_import
from __future__ import unicode_literals
import os
PROJECT_ROOT = os.path.dirname(__file__)
REPO_BASE = os.path.realpath(os.path.join(PROJECT_ROOT, '..', '..'))
ROLES_ALL = ['all']
ROLES_ALL_SRC = [
'django_monolith',
'django_app',
'django_celery',
'django_pillowtop',
'staticfiles',
'airflow',
'django_manage'
]
ROLES_ALL_SERVICES = [
'django_monolith',
'django_app',
'django_celery',
'django_pillowtop',
'formplayer',
'staticfiles',
'airflow'
]
ROLES_CELERY = ['django_monolith', 'django_celery']
ROLES_PILLOWTOP = ['django_monolith', 'django_pillowtop']
ROLES_DJANGO = ['django_monolith', 'django_app']
ROLES_FORMPLAYER = ['django_monolith', 'formplayer']
ROLES_STATIC = ['django_monolith', 'staticfiles']
ROLES_STATIC_PRIMARY = ['django_monolith', 'staticfiles_primary']
ROLES_POSTGRESQL = ['pg', 'pgstandby', 'django_monolith']
ROLES_ELASTICSEARCH = ['elasticsearch', 'django_monolith']
ROLES_DEPLOY = ['deploy', 'django_monolith']
ROLES_MANAGE = ['django_manage']
ROLES_CONTROL = ['control']
ROLES_AIRFLOW = ['airflow']
RELEASE_RECORD = 'RELEASES.txt'
KEEP_UNTIL_PREFIX = 'KEEP_UNTIL__'
DATE_FMT = '%Y-%m-%d_%H.%M'
RSYNC_EXCLUDE = (
'.DS_Store',
'.git',
'*.pyc',
'*.example',
'*.db',
)
CACHED_DEPLOY_ENV_FILENAME = 'cached_deploy_env.pickle'
CACHED_DEPLOY_CHECKPOINT_FILENAME = 'cached_deploy_checkpoint.pickle'
FORMPLAYER_BUILD_DIR = 'formplayer_build'
BOWER_ZIP_NAME = 'bower.tar.gz'
NPM_ZIP_NAME = 'npm.tar.gz'
WHEELS_ZIP_NAME = 'wheels.tar.gz'
YARN_LOCK = 'yarn.lock'
|
the-stack_0_27519
|
import requests
import json
import re
req=requests.Session()
header={
"user-agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36"
}
url="http://www.122.gov.cn/m/map/select"
page=req.get(url=url,headers=header)
print(page.text)
pattern=re.compile(r"<i sfdm=\"(\d+)\" sfmc=\"(.*?)\" ym=\"(.*?)\" fzjg=\"(.*?)\".*?sftjb=\"(.*?)\"></i>",re.S)
d=re.findall(pattern,page.text)
s={}
for i in d:
s[i[0]]={"address":i[1],"url":i[2],"cp":i[3],"sftjb":i[4]}
print(s)
json.dump(s,open("./info.json","w"))
|
the-stack_0_27521
|
def find_value_in_range(num_vals, target):
    for i in range(0, len(data) - num_vals + 1):  # +1 so the final window of this size is also checked
total = 0
low = 99999999
high = 0
for j in range(i, i + num_vals):
value = int(data[j])
total += value
if value < low:
low = value
if value > high:
high = value
if total == target:
print(f"{low + high}")
return True
return False
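# Worked example: with data = ['35', '20', '15', '25', '47'] and target 60,
# find_value_in_range(3, 60) hits the window starting at index 1
# (20 + 15 + 25 = 60), prints low + high = 15 + 25 = 40 and returns True.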
with open("9/input.txt", "r") as file:
data = file.read().split('\n')
num_vals = 2
while True:
if find_value_in_range(num_vals, 675280050):
break
num_vals += 1
|
the-stack_0_27523
|
"""
This plugin adds an image view to the IDE.
"""
import mimetypes
from PyQt5 import QtCore, QtGui, QtWidgets
from hackedit.api import plugins
# add missing image mimetypes
mimetypes.add_type('image/bmp', '.bmp')
mimetypes.add_type('image/x-icon', '.ico')
class ImageViewer(plugins.EditorPlugin):
"""
This plugin add image viewing capabilities to the IDE.
"""
@staticmethod
def get_editor_class():
return _ImageViewer
class _Viewer(QtWidgets.QScrollArea):
"""
Combines a QLabel and QScrollArea to display an image.
Adapted from http://qt.developpez.com/doc/4.7/widgets-imageviewer/
"""
def __init__(self):
super().__init__()
self.scaleFactor = 0.0
self.imageLabel = QtWidgets.QLabel()
# self.imageLabel.setBackgroundRole(QtGui.QPalette.Base)
self.imageLabel.setSizePolicy(
QtWidgets.QSizePolicy.Ignored,
QtWidgets.QSizePolicy.Ignored)
self.imageLabel.setScaledContents(True)
self.setBackgroundRole(QtGui.QPalette.Dark)
self.setWidget(self.imageLabel)
self.center_label()
def zoom_in(self):
self.scale_image(1.25)
def zoom_out(self):
self.scale_image(0.8)
def normal_size(self):
self.imageLabel.adjustSize()
self.scaleFactor = 1.0
    def fit_to_window(self):
        # Carried over from the Qt image viewer example: `fitToWindowAct` and
        # `updateActions` are not defined on this class, and nothing in the
        # toolbar below calls this method.
        fit_to_window = self.fitToWindowAct.isChecked()
        self.setWidgetResizable(fit_to_window)
        if not fit_to_window:
            self.normal_size()
        self.updateActions()
def scale_image(self, factor):
self.scaleFactor *= factor
self.imageLabel.resize(
self.scaleFactor * self.imageLabel.pixmap().size())
self._adjust_scrollbar(self.horizontalScrollBar(), factor)
self._adjust_scrollbar(self.verticalScrollBar(), factor)
self.center_label()
QtCore.QTimer.singleShot(33, self.center_label)
QtCore.QTimer.singleShot(66, self.center_label)
def center_label(self):
img_size = self.imageLabel.size()
sa_size = self.size()
x = (sa_size.width() - img_size.width()) / 2
if x < 0:
x = 0.0
y = (sa_size.height() - img_size.height()) / 2
if y < 0:
y = 0.0
if x or y:
self.imageLabel.move(x, y)
def paintEvent(self, event):
self.center_label()
super().paintEvent(event)
def wheelEvent(self, event):
super().wheelEvent(event)
self.center_label()
@staticmethod
def _adjust_scrollbar(scrollBar, factor):
scrollBar.setValue(int(factor * scrollBar.value() +
((factor - 1) * scrollBar.pageStep()/2)))
class _FileLoader:
"""
Mimics the FileManager API of pyqode.
"""
def __init__(self, viewer, infos):
self.viewer = viewer
self.infos = infos
self.path = ''
def open(self, path, **__):
def sizeof_fmt(num, suffix='B'):
"""
Returns human readable size
Taken from:
http://stackoverflow.com/questions/1094841/reusable-library-to-get-
human-readable-version-of-file-size
"""
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num) < 1024.0:
return "%3.2f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.2f%s%s" % (num, 'Yi', suffix)
self.path = path
image = QtGui.QImage(path)
if image.isNull():
QtWidgets.QMessageBox.information(
self.viewer, _("Image viewer"),
_("Failed to load image %r.") % path)
return
self.viewer.imageLabel.setPixmap(QtGui.QPixmap.fromImage(image))
self.viewer.scaleFactor = 1.0
self.viewer.center_label()
self.viewer.normal_size()
w, h = image.size().width(), image.size().height()
size = sizeof_fmt(image.byteCount())
ext = QtCore.QFileInfo(path).suffix().upper()
depth = image.depth()
self.infos.setText(_('%dx%d %s (%d-bit color) %s') %
(w, h, ext, depth, size))
def save(self, *_):
pass
class _ImageViewer(QtWidgets.QWidget):
"""
Mimics the CodeEdit interface to display image files.
"""
mimetypes = [
'image/png',
'image/tiff',
'image/gif',
'image/bmp',
'image/x-icon',
'image/jpeg',
'image/tga',
'image/x-targa'
]
# compatibility with pyqode.core.api.CodeEdit
dirty_changed = QtCore.pyqtSignal(bool)
dirty = False
def __init__(self, parent=None, **__):
super().__init__(parent)
self.title = ''
# Setup viewer
self._viewer = _Viewer()
# Setup toolbar widget
self._toolbar = QtWidgets.QWidget()
hlayout = QtWidgets.QHBoxLayout()
self._bt_zoom_in = QtWidgets.QPushButton()
self._bt_zoom_in.setIcon(QtGui.QIcon.fromTheme('zoom-in'))
self._bt_zoom_in.setToolTip(_('Zoom in'))
self._bt_zoom_in.clicked.connect(self._viewer.zoom_in)
hlayout.addWidget(self._bt_zoom_in)
self._bt_zoom_out = QtWidgets.QPushButton()
self._bt_zoom_out.setIcon(QtGui.QIcon.fromTheme('zoom-out'))
self._bt_zoom_out.setToolTip(_('Zoom out'))
self._bt_zoom_out.clicked.connect(self._viewer.zoom_out)
hlayout.addWidget(self._bt_zoom_out)
self._bt_zoom_original = QtWidgets.QPushButton()
self._bt_zoom_original.setIcon(QtGui.QIcon.fromTheme('zoom-original'))
self._bt_zoom_original.setToolTip(_('Original size'))
self._bt_zoom_original.clicked.connect(self._viewer.normal_size)
hlayout.addWidget(self._bt_zoom_original)
hlayout.addSpacerItem(QtWidgets.QSpacerItem(
20, 20, QtWidgets.QSizePolicy.Expanding))
self._infos = QtWidgets.QLabel()
hlayout.addWidget(self._infos)
self._toolbar.setLayout(hlayout)
vlayout = QtWidgets.QVBoxLayout()
vlayout.addWidget(self._toolbar)
vlayout.addWidget(self._viewer)
self.setLayout(vlayout)
self.file = _FileLoader(self._viewer, self._infos)
def setDocumentTitle(self, title):
# compatibility with pyqode.core.api.CodeEdit
self.title = title
def documentTitle(self):
# compatibility with pyqode.core.api.CodeEdit
return self.title
def horizontalScrollBar(self):
# compatibility with pyqode.core.api.CodeEdit
return self._viewer.horizontalScrollBar()
def wheelEvent(self, *args):
pass
def split(self):
viewer = _ImageViewer()
viewer.file.open(self.file.path)
return viewer
|
the-stack_0_27525
|
# Copyright 2019 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
[概要]
ルールページの表示処理
また、ルールページからのリクエスト受信処理
[引数]
HTTPリクエスト
[戻り値]
HTTPレスポンス
"""
import copy
import os
import json
import pytz
import datetime
import ast
import re
import base64
import traceback
import requests
import xlrd
import uuid
import urllib.parse
import ssl
import urllib3
from django.shortcuts import render
from django.http import HttpResponse
from django.conf import settings
from django.db import transaction
from django.db.models import Q
from django.urls import reverse
from django.views.decorators.http import require_POST
from django.utils.six.moves.urllib.parse import urlsplit
from urllib3.exceptions import InsecureRequestWarning
from libs.commonlibs import define as defs
from libs.commonlibs.dt_component import DecisionTableComponent
from libs.commonlibs.oase_logger import OaseLogger
from libs.webcommonlibs.decorator import *
from libs.webcommonlibs.common import RequestToApply
from libs.webcommonlibs.common import TimeConversion
from libs.webcommonlibs.common import Common as WebCommon
from libs.webcommonlibs.events_request import EventsRequestCommon
from libs.webcommonlibs.oase_exception import OASEError
from web_app.models.models import RuleFile
from web_app.models.models import RuleType
from web_app.models.models import RuleManage
from web_app.models.models import EventsRequest
from web_app.models.models import RhdmResponse
from web_app.models.models import RhdmResponseAction
from web_app.models.models import ActionType
from web_app.models.models import DataObject
from web_app.models.models import System
from web_app.models.models import DriverType
from web_app.models.models import ConditionalExpression
from web_app.models.models import RhdmResponseCorrelation
from web_app.models.models import TokenInfo
from web_app.models.models import TokenPermission
from web_app.templatetags.common import get_message
from web_app.serializers.unicode_check import UnicodeCheck
MENU_ID_STG = 2141001004
MENU_ID_PRD = 2141001005
logger = OaseLogger.get_instance()
urllib3.disable_warnings(InsecureRequestWarning)
ssl._create_default_https_context = ssl._create_unverified_context
class RuleDefs():
"""
[クラス概要]
ルールページ内で使用する定数をここに定義する
"""
############################################
# 値定義
############################################
# ルールファイル定義
MAX_MB_SIZE = 1024
MAX_RULEFILE_SIZE = 1024 * 1024 * MAX_MB_SIZE
############################################
    # Temporary location for rule files
FILE_TEMP_PATH = '%s/temp/rule/' % (settings.BASE_DIR)
############################################
    # Number of rule file generations to keep
GENERATION_MIN = 1
GENERATION_MAX = 10
############################################
    # Rule file operation codes
FILE_OPERATION_UPLOAD = 1
FILE_OPERATION_DOWNLOAD = 2
FILE_OPERATION_PSEUDOCALL = 3
FILE_OPERATION_PRODUCT = 4
FILE_OPERATION_SWITCHBACK = 5
############################################
    # Rule statuses
    RULE_FINISH_STS_OK  = -1  # completed normally
    RULE_FINISH_STS_EXE = 0   # not completed yet
    RULE_FINISH_STS_NG  = 1   # ended abnormally
RULE_STATUS = {
1 : {'is_finish' : RULE_FINISH_STS_EXE, 'ope_sts' : None, 'sts' : 'MOSJA12079', 'msg' : 'MOSJA12087'},
2 : {'is_finish' : RULE_FINISH_STS_EXE, 'ope_sts' : None, 'sts' : 'MOSJA12080', 'msg' : 'MOSJA12088'},
3 : {'is_finish' : RULE_FINISH_STS_OK, 'ope_sts' : None, 'sts' : 'MOSJA12081', 'msg' : 'MOSJA12089'},
4 : {'is_finish' : RULE_FINISH_STS_NG, 'ope_sts' : defs.RULE_STS_OPERATION.STAGING_NG, 'sts' : 'MOSJA12082', 'msg' : 'MOSJA12090'},
5 : {'is_finish' : RULE_FINISH_STS_NG, 'ope_sts' : defs.RULE_STS_OPERATION.STAGING_NG, 'sts' : 'MOSJA12083', 'msg' : 'MOSJA12091'},
1000 : {'is_finish' : RULE_FINISH_STS_OK, 'ope_sts' : None, 'sts' : 'MOSJA12084', 'msg' : 'MOSJA12092'},
1001 : {'is_finish' : RULE_FINISH_STS_NG, 'ope_sts' : defs.RULE_STS_OPERATION.STAGING_NG, 'sts' : 'MOSJA12085', 'msg' : 'MOSJA12093'},
2001 : {'is_finish' : RULE_FINISH_STS_NG, 'ope_sts' : defs.RULE_STS_OPERATION.STAGING_NG, 'sts' : 'MOSJA12086', 'msg' : 'MOSJA12094'},
}
STAGING_VALIDATE_STATUSES = [
defs.RULE_STS_OPERATION.STAGING_NOTYET,
defs.RULE_STS_OPERATION.STAGING_VERIFY,
defs.RULE_STS_OPERATION.STAGING_NG,
defs.RULE_STS_OPERATION.STAGING,
]
MST_STS_OPERATION = {
        defs.RULE_STS_OPERATION.STAGING_NOAPPLY: 'MOSJA12095',  # not applied
        defs.RULE_STS_OPERATION.STAGING_NOTYET : 'MOSJA12079',  # verification not yet run
        defs.RULE_STS_OPERATION.STAGING_VERIFY : 'MOSJA12080',  # verification in progress
        defs.RULE_STS_OPERATION.STAGING_NG     : 'MOSJA12096',  # verification NG
        defs.RULE_STS_OPERATION.STAGING        : 'MOSJA12097',  # verification complete
        defs.RULE_STS_OPERATION.STAGING_END    : 'MOSJA12098',  # application ended
        defs.RULE_STS_OPERATION.PRODUCT_NOAPPLY: 'MOSJA12099',  # production not applied
        defs.RULE_STS_OPERATION.PRODUCT        : 'MOSJA12100',  # production applied
        defs.RULE_STS_OPERATION.PRODUCT_END    : 'MOSJA12101',  # production application ended
}
DISP_STAGING_STS_OPERATION = {
k: v for k, v in MST_STS_OPERATION.items()
if k <= defs.RULE_STS_OPERATION.STAGING and k >= defs.RULE_STS_OPERATION.STAGING_NOTYET
}
STAGING_OK_STATUSES = [
defs.RULE_STS_SYSTEM.STAGING_OK,
defs.RULE_STS_SYSTEM.PRODUCT,
defs.RULE_STS_SYSTEM.PRODUCT_NG,
defs.RULE_STS_SYSTEM.PRODUCT_OK,
]
MST_STS_SYSTEM = {
        defs.RULE_STS_SYSTEM.UPLOAD     : 'MOSJA12102',  # uploading
        defs.RULE_STS_SYSTEM.UPLOAD_NG  : 'MOSJA12103',  # upload ended abnormally
        defs.RULE_STS_SYSTEM.UPLOAD_OK  : 'MOSJA12104',  # upload complete
        defs.RULE_STS_SYSTEM.BUILD      : 'MOSJA12105',  # building
        defs.RULE_STS_SYSTEM.BUILD_NG   : 'MOSJA12106',  # build ended abnormally
        defs.RULE_STS_SYSTEM.BUILD_OK   : 'MOSJA12107',  # build complete
        defs.RULE_STS_SYSTEM.STAGING    : 'MOSJA12108',  # staging application in progress
        defs.RULE_STS_SYSTEM.STAGING_NG : 'MOSJA12109',  # staging application ended abnormally
        defs.RULE_STS_SYSTEM.STAGING_OK : 'MOSJA12110',  # staging application complete
        defs.RULE_STS_SYSTEM.PRODUCT    : 'MOSJA12111',  # production application in progress
        defs.RULE_STS_SYSTEM.PRODUCT_NG : 'MOSJA12112',  # production application ended abnormally
        defs.RULE_STS_SYSTEM.PRODUCT_OK : 'MOSJA12113',  # production application complete
}
@classmethod
def get_rulestatus_info(cls, sts_code, lang):
"""
[メソッド概要]
テストリクエストの適用状態から、完了状態や表示メッセージ等の情報を取得する
"""
ret_info = {'is_finish' : cls.RULE_FINISH_STS_NG, 'ope_sts' : defs.RULE_STS_OPERATION.STAGING_NG, 'sts' : 'MOSJA12083', 'msg' : 'MOSJA12114'}
if sts_code in cls.RULE_STATUS:
ret_info = copy.deepcopy(cls.RULE_STATUS[sts_code])
ret_info['sts'] = get_message(ret_info['sts'], lang, showMsgId=False)
ret_info['msg'] = get_message(ret_info['msg'], lang, showMsgId=False)
return ret_info
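# Example: RuleDefs.get_rulestatus_info(3, lang) returns a dict whose
# 'is_finish' is RULE_FINISH_STS_OK and whose 'sts'/'msg' texts are resolved
# via get_message() for the given language; unknown status codes fall back to
# the error entry defined at the top of the method.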
def makePseudoCallMessage(msg, reception_dt, event_dt, req_list, lang, add_msg=''):
"""
[メソッド概要]
ブラウザに表示するメッセージを作成する
"""
ret_str = ''
ret_str += get_message('MOSJA12115', lang, showMsgId=False) + '%s \n'% (reception_dt)
ret_str += get_message('MOSJA12116', lang, showMsgId=False) + '%s \n'% (event_dt)
for r in req_list:
ret_str += '%s %s \n' % (r['conditional_name'], r['value'])
ret_str += '%s \n' % (msg)
if add_msg:
ret_str += '%s \n' % (add_msg)
return ret_str
def makePseudoCallMessage_Bulk(msg, reception_dt, filename, cnt, max_cnt, lang):
"""
[メソッド概要]
ブラウザに表示するメッセージを作成する(一括リクエスト用)
"""
ret_str = ''
    # Header
ret_str += get_message('MOSJA12115', lang, showMsgId=False) + '\n'
ret_str += get_message('MOSJA12116', lang, showMsgId=False) + '%s \n' % (reception_dt)
ret_str += get_message('MOSJA12045', lang, showMsgId=False) + '%s \n' % (filename)
    # Request count
ret_str += get_message('MOSJA12117', lang, showMsgId=False) + '%3s / %3s \n\n' % (cnt, max_cnt)
    # Request details
ret_str += '%s \n' % (msg)
return ret_str
@check_allowed_auth([MENU_ID_STG, MENU_ID_PRD], defs.MENU_CATEGORY.ALLOW_EVERY)
def rule(request):
"""
[メソッド概要]
ルールページのトップ画面にアクセスされた際のリクエストを処理する
"""
logger.logic_log('LOSI00001', 'None', request=request)
now = datetime.datetime.now(pytz.timezone('UTC'))
msg = ''
lang = request.user.get_lang_mode()
    # Staging permissions
perm_type_stg = request.user_config.get_menu_auth_type(MENU_ID_STG)
perm_info_stg = request.user_config.get_activerule_auth_type(MENU_ID_STG)
rule_ids_stg_admin = perm_info_stg[defs.ALLOWED_MENTENANCE]
# プロダクション権限
perm_type_prd = request.user_config.get_menu_auth_type(MENU_ID_PRD)
perm_info_prd = request.user_config.get_activerule_auth_type(MENU_ID_PRD)
rule_ids_prd_admin = perm_info_prd[defs.ALLOWED_MENTENANCE]
staging_list = []
staging_history_list = []
rule_product_list = []
rule_history_list = []
pseudo_rule_manage_id_dic = {}
apply_rule_manage_id_dic = {}
staging_pseudo_target = {}
staging_pseudo_target_rule_type = {}
    try:
        # Get rules applied to staging
        staging_list, staging_history_list = _select_staging({}, perm_info_stg, request)
        # Get rules applied to production
        rule_product_list, rule_history_list = _select_production({}, perm_info_prd, request)
        # Data not yet applied to production
        pseudo_rule_manage_id_dic = _get_testrequest_ids(staging_list)
        # Test request target data
        apply_rule_manage_id_dic = _get_production_appling_rule_id(staging_list)
        # Get input examples from the conditional expression master
        staging_pseudo_target, staging_pseudo_target_rule_type = _get_staging_pseudo_targets(staging_list, pseudo_rule_manage_id_dic, lang)
except:
msg = get_message('MOSJA12000', request.user.get_lang_mode())
logger.system_log('LOSM12000', traceback.format_exc())
disp_staging_sts_operation = {
k : get_message(v,request.user.get_lang_mode(), showMsgId=False)
for k,v in RuleDefs.DISP_STAGING_STS_OPERATION.items()
}
data = {
        'msg': msg,  # TODO: to be reconsidered
'staging_list': staging_list,
'staging_history_list': staging_history_list,
'product_list': rule_product_list,
'history_list': rule_history_list,
'disp_staging_sts_operation': disp_staging_sts_operation,
'now': now,
'apply_rule_manage_id_dic': apply_rule_manage_id_dic,
'pseudo_rule_manage_id_dic': pseudo_rule_manage_id_dic,
'stagingPseudoTargetList': staging_pseudo_target,
'stagingPseudoTargetRuleTypeList': staging_pseudo_target_rule_type,
'permission_type_stg': perm_type_stg,
'permission_type_prd': perm_type_prd,
'rule_ids_stg': rule_ids_stg_admin,
'rule_ids_prd': rule_ids_prd_admin,
}
data.update(request.user_config.get_templates_data(request))
log_data = {
'staging_list_cnt': len(staging_list),
'product_list_cnt': len(rule_product_list),
'history_list_cnt': len(rule_history_list),
'apply_rule_manage_ids': list(apply_rule_manage_id_dic.keys()),
'pseudo_rule_manage_ids': list(pseudo_rule_manage_id_dic.keys()),
'stagingPseudoTargetList': staging_pseudo_target,
}
logger.logic_log('LOSI00002', json.dumps(log_data, ensure_ascii=False), request=request)
return render(request, 'rule/rule.html', data)
def _filter_staging_rule(staging_list):
"""
ステージング適用ルールを画面に表示するために並ベ替える。
_select_staging()でのみ呼ばれる。
[引数]
staging_list: _select()で取得したステージングのデータ
[戻り値]
present_list: 現在のステージングルールのリスト
history_list: 過去のステージングルールのリスト
"""
logger.logic_log('LOSI00001', 'staging_list count: %s' % len(staging_list))
present_list = []
history_list = []
found_normalstatus_rule_type_ids = []
normalstatus = RuleDefs.STAGING_VALIDATE_STATUSES + [defs.RULE_STS_OPERATION.STAGING_END]
    # Reorder the staging rules
    for s in staging_list:
        # If a rule with a normal status has already been found for this type, push the rule onto the history list;
        # otherwise push it onto the current list regardless of whether its operation status is normal or abnormal.
        if s['rule_type_id'] in found_normalstatus_rule_type_ids:
            history_list.append(s)
        else:
            present_list.append(s)
        # Once a rule with a normal status is found, remember its rule type ID
        # so that subsequent rules of the same type go onto the history list.
        if s['operation_status_id'] in normalstatus:
            found_normalstatus_rule_type_ids.append(s['rule_type_id'])
logger.logic_log('LOSI00002', '[staging_list] present_list count: %s, history_list count: %s' % (len(present_list), len(history_list)))
return present_list, history_list
def _get_testrequest_ids(rule_staging_list):
"""
ステージング状態のルールから、テストリクエスト対象のレコードを取得。
処理成功かつステージングであるものがテストリクエスト対象
[引数]
rule_staging_list: _select_staging()で取得したデータ
[戻り値]
pseudo_rule_manage_id_dic:
key=rule_manage_id(これが必要)
value="" (key取れればvalueは何でもいい)
"""
logger.logic_log('LOSI00001', 'rule_staging_list count: %s' % len(rule_staging_list))
# 処理成功かつステージングであるものが擬似呼対象
pseudo_rule_manage_id_dic ={
r['rule_manage_id']: ""
for r in rule_staging_list
if r['system_status_id'] in RuleDefs.STAGING_OK_STATUSES \
and r['operation_status_id'] in RuleDefs.STAGING_VALIDATE_STATUSES
}
logger.logic_log('LOSI00002', ' pseudo_rule_manage_id_dic count: %s' % len(pseudo_rule_manage_id_dic))
return pseudo_rule_manage_id_dic
def _get_production_appling_rule_id(rule_staging_list):
"""
プロダクション適用待ちルールID取得
[引数]
rule_staging_list: _select_production()で取得したデータ
[戻り値]
apply_rule_manage_id_dic: プロダクション適用待ちルールid
"""
logger.logic_log('LOSI00001', 'rule_staging_list count: %s' % len(rule_staging_list))
apply_rule_manage_id_dic = {}
for rs in rule_staging_list:
        # Check whether the rule has not yet been applied to production
pro_teki_cnt = RuleManage.objects.filter(
rule_type_id=rs['rule_type_id'],
request_type_id=defs.PRODUCTION,
rule_file_id=rs['rule_file_id']
).exclude(
system_status=defs.RULE_STS_SYSTEM.PRODUCT_NG
).count()
if pro_teki_cnt == 0 and rs['operation_status_id'] == defs.RULE_STS_OPERATION.STAGING:
apply_rule_manage_id_dic[rs['rule_manage_id']] = rs['rule_type_id']
logger.logic_log('LOSI00002', ' apply_rule_manage_id_dic count: %s' % len(apply_rule_manage_id_dic))
return apply_rule_manage_id_dic
def _get_staging_pseudo_targets(rule_staging_list, pseudo_rule_manage_id_dic, lang):
"""
擬似呼画面選択用ルール種別(staging)
[引数]
rule_staging_list: _select_staging()で取得したデータ
pseudo_rule_manage_id_dic: プロダクション適用前データ
[戻り値]
staging_pseudo_target: テストリクエスト可能なルール
staging_pseudo_target_rule_type: staging_pseudo_targetの条件式
"""
logger.logic_log('LOSI00001', 'rule_staging_list count: %s, pseudo_rule_manage_id_dic:%s' % (len(rule_staging_list), len(pseudo_rule_manage_id_dic)))
    # Get input examples from the conditional expression master
ce_list = ConditionalExpression.objects.all()
examples = {
ce.conditional_expression_id : get_message(ce.example, lang, showMsgId=False)
for ce in ce_list
}
staging_pseudo_target_rule_type = {}
staging_pseudo_target = {'': ''}
for u in rule_staging_list:
if u['rule_manage_id'] in pseudo_rule_manage_id_dic:
staging_pseudo_target[u['rule_manage_id']] = u['rule_type_name']
if not u['rule_type_id'] in staging_pseudo_target_rule_type:
conditions = list(DataObject.objects.filter(rule_type_id=u['rule_type_id']).order_by('data_object_id'))
                # Duplicate labels share the same input example, so overwriting is allowed
condition_dict = {
c.conditional_name : examples[c.conditional_expression_id]
for c in conditions
}
staging_pseudo_target_rule_type[u['rule_type_id']] = condition_dict
logger.logic_log('LOSI00002', ' staging_pseudo_target count: %s, staging_pseudo_target_rule_type count: %s' % (len(staging_pseudo_target), len(staging_pseudo_target_rule_type)))
return staging_pseudo_target, staging_pseudo_target_rule_type
@check_allowed_auth(MENU_ID_STG, defs.MENU_CATEGORY.ALLOW_EVERY)
def rule_staging(request):
"""
[メソッド概要]
ステージングのデータ取得
"""
logger.logic_log('LOSI00001', 'None', request=request)
now = datetime.datetime.now(pytz.timezone('UTC'))
msg = ''
lang = request.user.get_lang_mode()
    # Get rule type IDs with at least read permission and add them to the filter conditions
perm_type_stg = request.user_config.get_menu_auth_type(MENU_ID_STG)
perm_info_stg = request.user_config.get_activerule_auth_type(MENU_ID_STG)
rule_ids_stg_admin = perm_info_stg[defs.ALLOWED_MENTENANCE]
perm_type_prd = request.user_config.get_menu_auth_type(MENU_ID_PRD)
perm_info_prd = request.user_config.get_activerule_auth_type(MENU_ID_PRD)
rule_ids_prd_admin = perm_info_prd[defs.ALLOWED_MENTENANCE]
staging_list = []
staging_history_list = []
pseudo_rule_manage_id_dic = {}
apply_rule_manage_id_dic = {}
staging_pseudo_target = {}
staging_pseudo_target_rule_type = {}
try:
filters = {}
if request and request.method == 'POST':
filters = request.POST.get('filters', "{}")
filters = json.loads(filters)
# Retrieve staging data
staging_list, staging_history_list = _select_staging(filters, perm_info_stg, request)
# Rules eligible for test requests (not yet applied to production)
pseudo_rule_manage_id_dic = _get_testrequest_ids(staging_list)
# Rules awaiting production application
apply_rule_manage_id_dic = _get_production_appling_rule_id(staging_list)
# Rule types selectable for test requests and their condition input examples
staging_pseudo_target, staging_pseudo_target_rule_type = _get_staging_pseudo_targets(staging_list, pseudo_rule_manage_id_dic, lang)
except:
msg = get_message('MOSJA12000', request.user.get_lang_mode())
logger.system_log('LOSM12000', traceback.format_exc(), request=request)
disp_staging_sts_operation = {
k : get_message(v,request.user.get_lang_mode(), showMsgId=False)
for k,v in RuleDefs.DISP_STAGING_STS_OPERATION.items()
}
data = {
'msg': msg,
'now': now,
'staging_list': staging_list,
'staging_history_list': staging_history_list,
'apply_rule_manage_id_dic': apply_rule_manage_id_dic,
'pseudo_rule_manage_id_dic': pseudo_rule_manage_id_dic,
'disp_staging_sts_operation': disp_staging_sts_operation,
'stagingPseudoTargetList': staging_pseudo_target,
'stagingPseudoTargetRuleTypeList': staging_pseudo_target_rule_type,
'rule_ids_stg': rule_ids_stg_admin,
'rule_ids_prd': rule_ids_prd_admin,
'permission_type_stg': perm_type_stg,
'permission_type_prd': perm_type_prd,
'lang_mode': request.user.get_lang_mode(),
}
log_data = {
'staging_list_cnt': len(staging_list),
'apply_rule_manage_ids': list(apply_rule_manage_id_dic.keys()),
'pseudo_rule_manage_ids': list(pseudo_rule_manage_id_dic.keys()),
'stagingPseudoTargetList': staging_pseudo_target,
}
logger.logic_log('LOSI00002', json.dumps(log_data, ensure_ascii=False), request=request)
return render(request, 'rule/rule_staging_data.html', data)
@check_allowed_auth(MENU_ID_PRD, defs.MENU_CATEGORY.ALLOW_EVERY)
def rule_production(request):
"""
[Method summary]
Retrieve production data
"""
logger.logic_log('LOSI00001', 'None', request=request)
msg = ''
product_list = []
history_list = []
rule_ids_prd_admin = []
permission_type_prd = request.user_config.get_menu_auth_type(MENU_ID_PRD)
try:
filters = {}
if request and request.method == 'POST':
filters = request.POST.get('filters', "{}")
filters = json.loads(filters)
# Get rule type IDs with at least view permission and add them to the filter conditions
perm_info_prd = request.user_config.get_activerule_auth_type(MENU_ID_PRD)
rule_ids_prd_admin = perm_info_prd[defs.ALLOWED_MENTENANCE]
product_list, history_list = _select_production(filters, perm_info_prd, request)
except:
msg = get_message('MOSJA12000', request.user.get_lang_mode())
logger.system_log('LOSM12000', traceback.format_exc(), request=request)
data = {
'msg': msg,
'product_list': product_list,
'history_list': history_list,
'rule_ids_prd': rule_ids_prd_admin,
'permission_type_prd': permission_type_prd,
'lang_mode': request.user.get_lang_mode(),
}
log_data = {
'product_list_cnt': len(product_list) + len(history_list),
}
logger.logic_log('LOSI00002', json.dumps(log_data, ensure_ascii=False), request=request)
return render(request, 'rule/rule_production_data.html', data)
@check_allowed_auth(MENU_ID_PRD, defs.MENU_CATEGORY.ALLOW_EVERY)
def rule_history(request):
"""
[Method summary]
Retrieve production application history data
"""
logger.logic_log('LOSI00001', 'None', request=request)
msg = ''
rule_history_list = []
rule_ids_prd_admin = []
try:
filters = {}
if request and request.method == 'POST':
filters = request.POST.get('filters', "{}")
filters = json.loads(filters)
# Get rule type IDs with at least view permission and add them to the filter conditions
permission_info_prd = request.user_config.get_activerule_auth_type(MENU_ID_PRD)
rule_ids_prd = []
rule_ids_prd_view = permission_info_prd[defs.VIEW_ONLY]
rule_ids_prd_admin = permission_info_prd[defs.ALLOWED_MENTENANCE]
rule_ids_prd.extend(rule_ids_prd_view)
rule_ids_prd.extend(rule_ids_prd_admin)
if 'rule_type_id' not in filters:
filters['rule_type_id'] = {}
if 'LIST' not in filters['rule_type_id']:
filters['rule_type_id']['LIST'] = []
filters['rule_type_id']['LIST'].extend(rule_ids_prd)
rule_history_list = _select(filters, request)
except:
msg = get_message('MOSJA12000', request.user.get_lang_mode())
logger.system_log('LOSM12000', traceback.format_exc(), request=request)
data = {
'msg': msg,
'history_list': rule_history_list,
'rule_ids_prd': rule_ids_prd_admin,
'lang_mode': request.user.get_lang_mode(),
}
log_data = {
'history_list_cnt': len(rule_history_list),
}
logger.logic_log('LOSI00002', json.dumps(log_data, ensure_ascii=False), request=request)
return render(request, 'rule/rule_history_data.html', data)
@check_allowed_auth(MENU_ID_STG, defs.MENU_CATEGORY.ALLOW_EVERY)
@require_POST
def rule_pseudo_request(request, rule_type_id):
"""
[Method summary]
Handle the request issued when a test request is executed
"""
logger.logic_log('LOSI00001', 'None', request=request)
err_flg = 1
msg = ''
time_zone = settings.TIME_ZONE
now = datetime.datetime.now(pytz.timezone('UTC'))
reception_dt = TimeConversion.get_time_conversion(now, time_zone, request=request)
trace_id = ''
event_dt = '----/--/-- --:--:--'
req_list = []
try:
with transaction.atomic():
json_str = request.POST.get('json_str', None)
if json_str is None:
msg = get_message('MOSJA12002', request.user.get_lang_mode())
logger.user_log('LOSM12007', request=request)
raise Exception()
post_data = json.loads(json_str)
rule_table_name = post_data[EventsRequestCommon.KEY_RULETYPE]
eventdatetime = post_data[EventsRequestCommon.KEY_EVENTTIME]
eventinfo = post_data[EventsRequestCommon.KEY_EVENTINFO]
rt = RuleType.objects.get(rule_type_id=rule_type_id)
# Per-rule access permission check
rule_ids = []
for chk_auth in defs.MENU_CATEGORY.ALLOW_EVERY:
rule_ids.extend(request.user_config.get_activerule_auth_type(MENU_ID_STG, chk_auth))
if rt.rule_type_id not in rule_ids:
raise OASEError('MOSJA12031', 'LOSI12012', msg_params={'opename':get_message('MOSJA12035', request.user.get_lang_mode(), showMsgId=False), 'rule_type_name':rt.rule_type_name}, log_params=['Send Request', rt.rule_type_id, rule_ids])
# Validate the entered information
errmsg_list = []
_validate_eventdatetime(eventdatetime, errmsg_list, request.user.get_lang_mode())
_validate_eventinfo(rule_type_id, eventinfo, errmsg_list, request.user.get_lang_mode())
if len(errmsg_list):
msg = '\n'.join(errmsg_list) + '\n'
logger.system_log('LOSM12064', 'post_data:%s' % (post_data))
raise Exception()
# Send the request to the REST API
tkn = _get_token(now)
scheme = urlsplit(request.build_absolute_uri(None)).scheme
url = scheme + '://127.0.0.1:' + request.META['SERVER_PORT'] + reverse('web_app:event:eventsrequest')
r = requests.post(
url,
headers={
'content-type' : 'application/json',
'Authorization' : 'Bearer %s' % (tkn),
},
data=json_str.encode('utf-8'),
verify=False
)
# Extract data from the response
try:
r_content = json.loads(r.content.decode('utf-8'))
except json.JSONDecodeError:
msg = get_message('MOSJA12012', request.user.get_lang_mode())
logger.user_log('LOSM12052')
raise
# The test request failed during execution
if not r_content["result"]:
msg = r_content["msg"]
logger.user_log('LOSM12001', traceback.format_exc())
raise
trace_id = r_content["trace_id"]
# Retrieve the condition definitions for the target rule type
data_obj_list = DataObject.objects.filter(rule_type_id=rt.pk).order_by('data_object_id')
label_list = []
conditional_name_list = []
for a in data_obj_list:
if a.label not in label_list:
label_list.append(a.label)
conditional_name_list.append(a.conditional_name)
# Build data to display in the execution log
req_list = [
{'conditional_name':conditional_name_list[i], 'value':v}
for i, v in enumerate(eventinfo)
]
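# Convert the entered event time to UTC, then format it for display in the configured time zone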
event_dt = TimeConversion.get_time_conversion_utc(eventdatetime, time_zone, request=request)
event_dt = TimeConversion.get_time_conversion(event_dt, time_zone, request=request)
err_flg = 0
msg = get_message('MOSJA12007', request.user.get_lang_mode(), showMsgId=False)
except OASEError as e:
if e.log_id:
if e.arg_list and isinstance(e.arg_list, list):
logger.logic_log(e.log_id, *(e.arg_list), request=request)
else:
logger.logic_log(e.log_id, request=request)
if e.msg_id:
if e.arg_dict and isinstance(e.arg_dict, dict):
msg = get_message(e.msg_id, request.user.get_lang_mode(), **(e.arg_dict))
else:
msg = get_message(e.msg_id, request.user.get_lang_mode())
resp_json = {
'err_flg': err_flg,
'msg': msg,
'log_msg': msg,
'trace_id': trace_id,
}
resp_json = json.dumps(resp_json, ensure_ascii=False)
return HttpResponse(resp_json, status=None)
except Exception as e:
logger.system_log('LOSM12050', traceback.format_exc(), request=request)
if not msg:
msg = get_message('MOSJA12001', request.user.get_lang_mode())
resp_json = {
'err_flg': err_flg,
'msg': get_message('MOSJA12023', request.user.get_lang_mode()),
'log_msg': msg,
'trace_id': trace_id,
}
resp_json = json.dumps(resp_json, ensure_ascii=False)
return HttpResponse(resp_json, status=None)
msg = makePseudoCallMessage(msg, reception_dt, event_dt, req_list, request.user.get_lang_mode())
resp_json = {
'err_flg': err_flg,
'msg': get_message('MOSJA12024', request.user.get_lang_mode(), showMsgId=False),
'log_msg': msg,
'trace_id': trace_id,
}
resp_json = json.dumps(resp_json, ensure_ascii=False)
logger.logic_log('LOSI00002', resp_json, request=request)
return HttpResponse(resp_json)
@check_allowed_auth(MENU_ID_STG, defs.MENU_CATEGORY.ALLOW_ADMIN)
@require_POST
def rule_change_status(request):
"""
[Method summary]
Change the operation status of a staging-applied rule
"""
logger.logic_log('LOSI00001', 'None', request=request)
msg = ''
err_flg = 1
try:
# Parameter check
status = request.POST.get('status', None)
rule_manage_id = request.POST.get('rule_manage_id', None)
if status is None or rule_manage_id is None:
msg = get_message('MOSJA12002', request.user.get_lang_mode())
logger.user_log('LOSM03005', status, rule_manage_id, request=request)
raise Exception()
status = int(status)
rule_manage_id = int(rule_manage_id)
logger.logic_log('LOSI03000', 'rule_manage_id:%s, status:%s' % (rule_manage_id, status), request=request)
# Validate the requested status
if status not in RuleDefs.DISP_STAGING_STS_OPERATION:
msg = get_message('MOSJA12002', request.user.get_lang_mode())
logger.user_log('LOSM03001', status, RuleDefs.DISP_STAGING_STS_OPERATION, request=request)
raise Exception()
with transaction.atomic():
# Lock the record in the rule manage table
rule_manage = RuleManage.objects.select_for_update().get(pk=rule_manage_id)
# Per-rule access permission check
rule_ids = []
for chk_auth in defs.MENU_CATEGORY.ALLOW_ADMIN:
rule_ids.extend(request.user_config.get_activerule_auth_type(MENU_ID_STG, chk_auth))
if rule_manage.rule_type_id not in rule_ids:
ruletypename = RuleType.objects.get(rule_type_id=rule_manage.rule_type_id).rule_type_name
raise OASEError('MOSJA12031', 'LOSI12012', msg_params={'opename':get_message('MOSJA12118', request.user.get_lang_mode(), showMsgId=False), 'rule_type_name':ruletypename}, log_params=['Change Status', rule_manage.rule_type_id, rule_ids])
# Check the state of the target rule
if rule_manage.request_type_id != defs.STAGING:
msg = get_message('MOSJA12010', request.user.get_lang_mode())
logger.user_log('LOSI03001', 'req_type:%s, expect_type:%s' % (rule_manage.request_type_id, defs.STAGING), request=request)
raise Exception()
if rule_manage.system_status not in RuleDefs.STAGING_OK_STATUSES:
msg = get_message('MOSJA12010', request.user.get_lang_mode())
logger.user_log('LOSI03001', 'sys_sts:%s, expect_sts:%s' % (rule_manage.system_status, RuleDefs.STAGING_OK_STATUSES), request=request)
raise Exception()
if rule_manage.operation_status not in RuleDefs.STAGING_VALIDATE_STATUSES:
msg = get_message('MOSJA12010', request.user.get_lang_mode())
logger.user_log('LOSI03001', 'ope_sts:%s, expect_sts:%s' % (rule_manage.operation_status, RuleDefs.STAGING_VALIDATE_STATUSES), request=request)
raise Exception()
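# The rule may leave the STAGING state only if no non-failed production application of the same rule file exists yet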
pro_flag = False
rcnt = RuleManage.objects.filter(
rule_type_id=rule_manage.rule_type_id,
request_type_id=defs.PRODUCTION,
rule_file_id=rule_manage.rule_file_id
).exclude(
system_status=defs.RULE_STS_SYSTEM.PRODUCT_NG
).count()
if rcnt == 0 and rule_manage.operation_status == defs.RULE_STS_OPERATION.STAGING:
pro_flag = True
if pro_flag == False and rule_manage.operation_status == defs.RULE_STS_OPERATION.STAGING:
msg = get_message('MOSJA12011', request.user.get_lang_mode())
logger.user_log('LOSI03001', 'pro_count:%s, rule_file_id:%s' % (rcnt, rule_manage.rule_file_id), request=request)
raise Exception()
# Update the status
rule_manage.operation_status = status
rule_manage.last_update_timestamp = datetime.datetime.now(pytz.timezone('UTC'))
rule_manage.last_update_user = request.user.user_name
rule_manage.save()
msg = get_message('MOSJA12008', request.user.get_lang_mode(), showMsgId=False)
err_flg = 0
except OASEError as e:
if e.log_id:
if e.arg_list and isinstance(e.arg_list, list):
logger.logic_log(e.log_id, *(e.arg_list), request=request)
else:
logger.logic_log(e.log_id, request=request)
if e.msg_id:
if e.arg_dict and isinstance(e.arg_dict, dict):
msg = get_message(e.msg_id, request.user.get_lang_mode(), **(e.arg_dict))
else:
msg = get_message(e.msg_id, request.user.get_lang_mode())
except RuleManage.DoesNotExist:
logger.system_log('LOSM12002', traceback.format_exc(), request=request)
msg = get_message('MOSJA12009', request.user.get_lang_mode())
except Exception as e:
logger.system_log('LOSM12002', traceback.format_exc(), request=request)
if not msg:
msg = get_message('MOSJA12001', request.user.get_lang_mode())
resp_json = {
'err_flg': err_flg,
'msg': msg,
}
resp_json = json.dumps(resp_json, ensure_ascii=False)
logger.logic_log('LOSI00002', resp_json, request=request)
return HttpResponse(resp_json)
@check_allowed_auth(MENU_ID_STG, defs.MENU_CATEGORY.ALLOW_EVERY)
@require_POST
def rule_get_record(request):
"""
[Method summary]
Retrieve the data shown in the staging rule details view
"""
logger.logic_log('LOSI00001', 'None', request=request)
data = {}
msg = ''
err_flg = 0
time_zone = settings.TIME_ZONE
try:
# Parameter check
rule_manage_id = request.POST.get('rule_manage_id', None)
if rule_manage_id is None:
msg = get_message('MOSJA12002', request.user.get_lang_mode())
raise Exception()
rule_manage_id = int(rule_manage_id)
logger.logic_log('LOSI03000', 'rule_manage_id:%s' % (rule_manage_id), request=request)
rule_manage = RuleManage.objects.get(pk=rule_manage_id)
rule_file_name = RuleFile.objects.get(rule_file_id=rule_manage.rule_file_id).rule_file_name
# Per-rule access permission check
rule_ids = []
for chk_auth in defs.MENU_CATEGORY.ALLOW_EVERY:
rule_ids.extend(request.user_config.get_activerule_auth_type(MENU_ID_STG, chk_auth))
if rule_manage.rule_type_id not in rule_ids:
ruletypename = RuleType.objects.get(rule_type_id=rule_manage.rule_type_id).rule_type_name
raise OASEError('MOSJA12031', 'LOSI12012', msg_params={'opename':get_message('MOSJA12035', request.user.get_lang_mode(), showMsgId=False), 'rule_type_name':ruletypename}, log_params=['Select Rule', rule_manage.rule_type_id, rule_ids])
# Staging permissions
permission_info_stg = request.user_config.get_activerule_auth_type(MENU_ID_STG)
rule_ids_stg_admin = permission_info_stg[defs.ALLOWED_MENTENANCE]
except RuleManage.DoesNotExist:
logger.system_log('LOSM12054', traceback.format_exc(), request=request)
msg = get_message('MOSJA12009', request.user.get_lang_mode())
err_flg = 1
except RuleFile.DoesNotExist:
logger.system_log('LOSM12054', traceback.format_exc(), request=request)
msg = get_message('MOSJA12009', request.user.get_lang_mode())
err_flg = 1
except OASEError as e:
if e.log_id:
if e.arg_list and isinstance(e.arg_list, list):
logger.logic_log(e.log_id, *(e.arg_list), request=request)
else:
logger.logic_log(e.log_id, request=request)
if e.msg_id:
if e.arg_dict and isinstance(e.arg_dict, dict):
msg = get_message(e.msg_id, request.user.get_lang_mode(), **(e.arg_dict))
else:
msg = get_message(e.msg_id, request.user.get_lang_mode())
err_flg = 1
except Exception as e:
logger.system_log('LOSM12054', traceback.format_exc(), request=request)
err_flg = 1
if not msg:
msg = get_message('MOSJA12001', request.user.get_lang_mode())
if err_flg == 0:
# Retrieve rule type information
_, rule_type_dict = _getRuleTypeData(request)
operation_status_str = get_message(RuleDefs.MST_STS_OPERATION[rule_manage.operation_status],request.user.get_lang_mode(), showMsgId=False)
system_status_str = get_message(RuleDefs.MST_STS_SYSTEM[rule_manage.system_status],request.user.get_lang_mode(), showMsgId=False)
if request.user.get_lang_mode() == 'EN':
last_update_timestamp = rule_manage.last_update_timestamp.astimezone(pytz.timezone(time_zone)).strftime('%Y, %m, %d, %H:%M')
else:
last_update_timestamp = rule_manage.last_update_timestamp.astimezone(pytz.timezone(time_zone)).strftime('%Y年%m月%d日%H:%M')
data = {
'rule_type_id': rule_manage.rule_type_id,
'rule_type_name': rule_type_dict[rule_manage.rule_type_id]['name'],
'rule_table_name': rule_type_dict[rule_manage.rule_type_id]['table'],
'filename': rule_file_name,
'operation_status_id': rule_manage.operation_status,
'operation_status_str': operation_status_str,
'system_status_str': system_status_str,
'rule_ids_stg': rule_ids_stg_admin,
'last_update_user_name': rule_manage.last_update_user,
'last_update_timestamp': last_update_timestamp,
}
resp_json = {
'data': data,
'err_flg': err_flg,
'msg': msg,
}
resp_json = json.dumps(resp_json, ensure_ascii=False)
logger.logic_log('LOSI00002', resp_json, request=request)
return HttpResponse(resp_json)
@check_allowed_auth(MENU_ID_STG, defs.MENU_CATEGORY.ALLOW_EVERY)
def rule_polling(request, rule_manage_id, trace_id):
"""
[Method summary]
Handle polling requests while a test request is running
"""
logger.logic_log('LOSI00001', 'trace_id:%s, manage_id:%s' % (trace_id, rule_manage_id), request=request)
resp_json = {}
err_flg = 1
is_finish = RuleDefs.RULE_FINISH_STS_NG
msg = ''
add_msg = ''
reception_dt = '----/--/-- --:--:--'
event_dt = '----/--/-- --:--:--'
req_list = []
flg = False
time_zone = settings.TIME_ZONE
try:
with transaction.atomic():
events_request = EventsRequest.objects.get(trace_id=trace_id)
# Per-rule access permission check
rule_ids = []
for chk_auth in defs.MENU_CATEGORY.ALLOW_EVERY:
rule_ids.extend(request.user_config.get_activerule_auth_type(MENU_ID_STG, chk_auth))
if events_request.rule_type_id not in rule_ids:
ruletypename = RuleType.objects.get(rule_type_id=events_request.rule_type_id).rule_type_name
raise OASEError('MOSJA12031', 'LOSI12012', msg_params={'opename':get_message('MOSJA12035', request.user.get_lang_mode(), showMsgId=False), 'rule_type_name':ruletypename}, log_params=['Polling', events_request.rule_type_id, rule_ids])
# Retrieve the test request information
evinfo = ast.literal_eval(events_request.event_info)
evinfo = evinfo['EVENT_INFO'] if 'EVENT_INFO' in evinfo else []
rset = DataObject.objects.filter(rule_type_id=events_request.rule_type_id).order_by('data_object_id')
label_list = []
conditional_name_list = []
for a in rset:
if a.label not in label_list:
label_list.append(a.label)
conditional_name_list.append(a.conditional_name)
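# Pair each condition name with the corresponding event info value for display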
for rs, v in zip(conditional_name_list, evinfo):
req_list.append({'conditional_name':rs, 'value':v})
reception_dt = events_request.request_reception_time
reception_dt = TimeConversion.get_time_conversion(reception_dt, time_zone, request=request)
event_dt = events_request.event_to_time
event_dt = TimeConversion.get_time_conversion(event_dt, time_zone, request=request)
rule_info = RuleDefs.get_rulestatus_info(events_request.status, request.user.get_lang_mode())
is_finish = rule_info['is_finish']
msg = rule_info['msg']
ope_sts = rule_info['ope_sts']
# If the test request has finished, transition the operation status
if is_finish != RuleDefs.RULE_FINISH_STS_EXE and ope_sts:
rule_manage = RuleManage.objects.select_for_update().get(pk=rule_manage_id)
if rule_manage.operation_status in RuleDefs.STAGING_VALIDATE_STATUSES:
rule_manage.operation_status = ope_sts
rule_manage.last_update_timestamp = datetime.datetime.now(pytz.timezone('UTC'))
rule_manage.last_update_user = request.user.user_name
rule_manage.save()
# On success, retrieve the matched rules
if is_finish == RuleDefs.RULE_FINISH_STS_OK:
target_rule = ''
match_rulename = ''
rhdm_res = RhdmResponse.objects.get(trace_id=trace_id)
rhdm_res_acts = RhdmResponseAction.objects.filter(response_id=rhdm_res.response_id)
if len(rhdm_res_acts) > 0:
flg = True
if flg:
for r in rhdm_res_acts:
if match_rulename:
match_rulename = '%s, %s' % (match_rulename, r.rule_name)
else:
match_rulename = r.rule_name
for r in rhdm_res_acts:
try:
acttype = ActionType.objects.get(pk=r.action_type_id)
dritype = DriverType.objects.get(pk=acttype.driver_type_id)
except ActionType.DoesNotExist:
logger.user_log('LOSM03006', r.action_type_id, request=request)
msg = get_message('MOSJA12032', request.user.get_lang_mode())
raise
except DriverType.DoesNotExist:
logger.user_log('LOSM03007', acttype.driver_type_id, request=request)
msg = get_message('MOSJA12032', request.user.get_lang_mode())
raise
except Exception as e:
logger.user_log('LOSM03008', request=request)
msg = get_message('MOSJA12032', request.user.get_lang_mode())
raise
tmp_actparainfo = json.loads(r.action_parameter_info)
for i in range(len(tmp_actparainfo['ACTION_PARAMETER_INFO'])):
if i == 0:
actparainfo = tmp_actparainfo['ACTION_PARAMETER_INFO'][i]
else:
actparainfo = '%s, %s' % (actparainfo, tmp_actparainfo['ACTION_PARAMETER_INFO'][i])
name = dritype.name + '(ver' + str(dritype.driver_major_version) + ')'
if not r.action_pre_info:
actpreinfo = get_message('MOSJA12154', request.user.get_lang_mode(), showMsgId=False)
else:
actpreinfo = get_message('MOSJA12155', request.user.get_lang_mode(), showMsgId=False)
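# Fetch the most recent correlation (event grouping) settings defined for the matched rule, if any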
rhdm_res_cor = RhdmResponseCorrelation.objects.filter(
rule_name=r.rule_name, request_type_id=defs.STAGING).order_by(
'last_update_timestamp').reverse().first()
target_rule = r.rule_name
target_execution = r.execution_order
target_drivertype = name
target_actparainfo = actparainfo
target_actpreinfo = actpreinfo
target_retry_interval = r.action_retry_interval
target_retry_count = r.action_retry_count
target_stop_interval = r.action_stop_interval
target_stop_count = r.action_stop_count
if rhdm_res_cor != None:
target_cond_count = rhdm_res_cor.cond_count
target_cond_term = rhdm_res_cor.cond_term
target_large_group = rhdm_res_cor.cond_large_group
target_large_group_priority = rhdm_res_cor.cond_large_group_priority
target_small_group = rhdm_res_cor.cond_small_group
target_small_group_priority = rhdm_res_cor.cond_small_group_priority
else:
target_cond_count = 'X'
target_cond_term = 'X'
target_large_group = 'X'
target_large_group_priority = 'X'
target_small_group = 'X'
target_small_group_priority = 'X'
if add_msg:
add_msg += '\n\n'
add_msg = add_msg + get_message('MOSJA12122', request.user.get_lang_mode(), showMsgId=False) + target_rule + ' \n'
add_msg = add_msg + get_message('MOSJA12123', request.user.get_lang_mode(), showMsgId=False) + str(target_execution) + ' \n'
add_msg = add_msg + get_message('MOSJA12124', request.user.get_lang_mode(), showMsgId=False) + target_drivertype + ' \n'
add_msg = add_msg + get_message('MOSJA12125', request.user.get_lang_mode(), showMsgId=False) + target_actparainfo + ' \n'
add_msg = add_msg + get_message('MOSJA12126', request.user.get_lang_mode(), showMsgId=False) + target_actpreinfo + ' \n'
add_msg = add_msg + get_message('MOSJA12181', request.user.get_lang_mode(), showMsgId=False) + str(target_retry_interval) + ' \n'
add_msg = add_msg + get_message('MOSJA12182', request.user.get_lang_mode(), showMsgId=False) + str(target_retry_count) + ' \n'
add_msg = add_msg + get_message('MOSJA12183', request.user.get_lang_mode(), showMsgId=False) + str(target_stop_interval) + ' \n'
add_msg = add_msg + get_message('MOSJA12184', request.user.get_lang_mode(), showMsgId=False) + str(target_stop_count) + ' \n'
add_msg = add_msg + get_message('MOSJA12185', request.user.get_lang_mode(), showMsgId=False) + str(target_cond_count) + ' \n'
add_msg = add_msg + get_message('MOSJA12186', request.user.get_lang_mode(), showMsgId=False) + str(target_cond_term) + ' \n'
add_msg = add_msg + get_message('MOSJA12187', request.user.get_lang_mode(), showMsgId=False) + target_large_group + ' \n'
add_msg = add_msg + get_message('MOSJA12188', request.user.get_lang_mode(), showMsgId=False) + str(target_large_group_priority) + ' \n'
add_msg = add_msg + get_message('MOSJA12189', request.user.get_lang_mode(), showMsgId=False) + target_small_group + ' \n'
add_msg = add_msg + get_message('MOSJA12190', request.user.get_lang_mode(), showMsgId=False) + str(target_small_group_priority) + ' \n'
else:
add_msg = get_message('MOSJA12127', request.user.get_lang_mode(), showMsgId=False, match_rulename=match_rulename) + '\n\n'
add_msg = add_msg + get_message('MOSJA12141', request.user.get_lang_mode(), showMsgId=False) + '\n'
add_msg = add_msg + get_message('MOSJA12122', request.user.get_lang_mode(), showMsgId=False) + target_rule + ' \n'
add_msg = add_msg + get_message('MOSJA12123', request.user.get_lang_mode(), showMsgId=False) + str(target_execution) + ' \n'
add_msg = add_msg + get_message('MOSJA12124', request.user.get_lang_mode(), showMsgId=False) + target_drivertype + ' \n'
add_msg = add_msg + get_message('MOSJA12125', request.user.get_lang_mode(), showMsgId=False) + target_actparainfo + ' \n'
add_msg = add_msg + get_message('MOSJA12126', request.user.get_lang_mode(), showMsgId=False) + target_actpreinfo + ' \n'
add_msg = add_msg + get_message('MOSJA12181', request.user.get_lang_mode(), showMsgId=False) + str(target_retry_interval) + ' \n'
add_msg = add_msg + get_message('MOSJA12182', request.user.get_lang_mode(), showMsgId=False) + str(target_retry_count) + ' \n'
add_msg = add_msg + get_message('MOSJA12183', request.user.get_lang_mode(), showMsgId=False) + str(target_stop_interval) + ' \n'
add_msg = add_msg + get_message('MOSJA12184', request.user.get_lang_mode(), showMsgId=False) + str(target_stop_count) + ' \n'
add_msg = add_msg + get_message('MOSJA12185', request.user.get_lang_mode(), showMsgId=False) + str(target_cond_count) + ' \n'
add_msg = add_msg + get_message('MOSJA12186', request.user.get_lang_mode(), showMsgId=False) + str(target_cond_term) + ' \n'
add_msg = add_msg + get_message('MOSJA12187', request.user.get_lang_mode(), showMsgId=False) + target_large_group + ' \n'
add_msg = add_msg + get_message('MOSJA12188', request.user.get_lang_mode(), showMsgId=False) + str(target_large_group_priority) + ' \n'
add_msg = add_msg + get_message('MOSJA12189', request.user.get_lang_mode(), showMsgId=False) + target_small_group + ' \n'
add_msg = add_msg + get_message('MOSJA12190', request.user.get_lang_mode(), showMsgId=False) + str(target_small_group_priority) + ' \n'
else:
target_rule = 'X'
target_execution = 'X'
target_drivertype = 'X'
target_actparainfo = 'X'
target_actpreinfo = 'X'
target_retry_interval = 'X'
target_retry_count = 'X'
target_stop_interval = 'X'
target_stop_count = 'X'
target_cond_count = 'X'
target_cond_term = 'X'
target_large_group = 'X'
target_large_group_priority = 'X'
target_small_group = 'X'
target_small_group_priority = 'X'
add_msg = get_message('MOSJA12127', request.user.get_lang_mode(), showMsgId=False, match_rulename=match_rulename) + '\n\n'
add_msg = add_msg + get_message('MOSJA12141', request.user.get_lang_mode(), showMsgId=False) + '\n'
add_msg = add_msg + get_message('MOSJA12122', request.user.get_lang_mode(), showMsgId=False) + target_rule + ' \n'
add_msg = add_msg + get_message('MOSJA12123', request.user.get_lang_mode(), showMsgId=False) + str(target_execution) + ' \n'
add_msg = add_msg + get_message('MOSJA12124', request.user.get_lang_mode(), showMsgId=False) + target_drivertype + ' \n'
add_msg = add_msg + get_message('MOSJA12125', request.user.get_lang_mode(), showMsgId=False) + target_actparainfo + ' \n'
add_msg = add_msg + get_message('MOSJA12126', request.user.get_lang_mode(), showMsgId=False) + target_actpreinfo + ' \n'
add_msg = add_msg + get_message('MOSJA12181', request.user.get_lang_mode(), showMsgId=False) + str(target_retry_interval) + ' \n'
add_msg = add_msg + get_message('MOSJA12182', request.user.get_lang_mode(), showMsgId=False) + str(target_retry_count) + ' \n'
add_msg = add_msg + get_message('MOSJA12183', request.user.get_lang_mode(), showMsgId=False) + str(target_stop_interval) + ' \n'
add_msg = add_msg + get_message('MOSJA12184', request.user.get_lang_mode(), showMsgId=False) + str(target_stop_count) + ' \n'
add_msg = add_msg + get_message('MOSJA12185', request.user.get_lang_mode(), showMsgId=False) + str(target_cond_count) + ' \n'
add_msg = add_msg + get_message('MOSJA12186', request.user.get_lang_mode(), showMsgId=False) + str(target_cond_term) + ' \n'
add_msg = add_msg + get_message('MOSJA12187', request.user.get_lang_mode(), showMsgId=False) + target_large_group + ' \n'
add_msg = add_msg + get_message('MOSJA12188', request.user.get_lang_mode(), showMsgId=False) + str(target_large_group_priority) + ' \n'
add_msg = add_msg + get_message('MOSJA12189', request.user.get_lang_mode(), showMsgId=False) + target_small_group + ' \n'
add_msg = add_msg + get_message('MOSJA12190', request.user.get_lang_mode(), showMsgId=False) + str(target_small_group_priority) + ' \n'
err_flg = 0
except OASEError as e:
if e.log_id:
if e.arg_list and isinstance(e.arg_list, list):
logger.logic_log(e.log_id, *(e.arg_list), request=request)
else:
logger.logic_log(e.log_id, request=request)
if e.msg_id:
if e.arg_dict and isinstance(e.arg_dict, dict):
msg = get_message(e.msg_id, request.user.get_lang_mode(), **(e.arg_dict))
else:
msg = get_message(e.msg_id, request.user.get_lang_mode())
is_finish = RuleDefs.RULE_FINISH_STS_NG
except Exception:
logger.system_log('LOSM12000', traceback.format_exc(), request=request)
is_finish = RuleDefs.RULE_FINISH_STS_NG
if not msg:
msg = get_message('MOSJA12001', request.user.get_lang_mode())
msg = makePseudoCallMessage(msg, reception_dt, event_dt, req_list, request.user.get_lang_mode(), add_msg)
resp_json = {
'err_flg': err_flg,
'is_finish': is_finish,
'msg': msg,
'trace_id': trace_id,
}
resp_json = json.dumps(resp_json, ensure_ascii=False)
logger.logic_log('LOSI00002', resp_json, request=request)
return HttpResponse(resp_json)
@check_allowed_auth(MENU_ID_STG, defs.MENU_CATEGORY.ALLOW_ADMIN)
@require_POST
def rule_upload(request):
"""
[Method summary]
Handle rule file uploads
"""
logger.logic_log('LOSI00001', 'None', request=request)
msg = ''
filepath = ''
response_data = {}
lang = request.user.get_lang_mode()
try:
####################################################
# Uploaded file checks
####################################################
# File name check
rulefile = request.FILES['rulefile']
filename = rulefile.name
if not rulefile or not filename:
msg = get_message('MOSJA03004', lang)
logger.system_log('LOSM12013', request=request)
raise Exception()
# Extension check
if not (filename.endswith('.xls') or filename.endswith('.xlsx')):
msg = get_message('MOSJA03005', lang)
logger.system_log('LOSM12014', request=request)
raise Exception()
# Size check
if rulefile.size > RuleDefs.MAX_RULEFILE_SIZE:
msg = get_message('MOSJA03006', lang, maxbytes=RuleDefs.MAX_MB_SIZE)
logger.system_log('LOSM12015', RuleDefs.MAX_MB_SIZE, request=request)
raise Exception()
# If the checks pass, read the file data
filedata = ''
for chunk in rulefile.chunks():
filedata = '%s%s' % (filedata, base64.b64encode(chunk).decode('utf-8'))
####################################################
# Determine the rule type ID from the uploaded file
####################################################
# Create the directory if it does not exist
temppath = RuleDefs.FILE_TEMP_PATH
os.makedirs(temppath, exist_ok=True)
# Save under a unique file name
tempname = '%s%s' % (str(uuid.uuid4()), request.user.user_id)
filepath = '%s/%s' % (temppath, tempname)
with open(filepath, 'wb') as fp:
fp.write(base64.b64decode(filedata.encode('utf-8')))
# Get the rule table name from the Excel file
error_flag, ret_str = DecisionTableComponent.get_tablename_by_excel(filepath)
if error_flag == 1:
msg = get_message(ret_str, lang)
logger.logic_log('LOSM03004', '%s%s' % (temppath, rulefile.name), request=request)
raise Exception()
else:
table_name = ret_str
# Get the rule type ID from the rule table name
ruletype = RuleType.objects.get(rule_table_name=table_name)
ruletypeid = ruletype.rule_type_id
# Delete the temporary file
os.remove(filepath)
# Per-rule access permission check
rule_ids = []
for chk_auth in defs.MENU_CATEGORY.ALLOW_ADMIN:
rule_ids.extend(request.user_config.get_activerule_auth_type(MENU_ID_STG, chk_auth))
if ruletypeid not in rule_ids:
raise OASEError('MOSJA12031', 'LOSI12012', msg_params={'opename':get_message('MOSJA12038', lang, showMsgId=False), 'rule_type_name':ruletype.rule_type_name}, log_params=['Upload', ruletypeid, rule_ids])
# Send an upload request to the apply service
send_data = {
'request' : 'UPLOAD',
'ruletypeid' : ruletypeid,
'filename' : filename,
'filedata' : filedata,
'upload_user_id' : request.user.user_id,
}
result, msgid = RequestToApply.operate(send_data, request=request)
# Convert result and msg for the response
if result:
response_data['result'] = 'OK'
response_data['msg'] = get_message(msgid, lang, showMsgId=False)
else:
response_data['result'] = 'NG'
response_data['msg'] = get_message(msgid, lang)
except OASEError as e:
if e.log_id:
if e.arg_list and isinstance(e.arg_list, list):
logger.logic_log(e.log_id, *(e.arg_list), request=request)
else:
logger.logic_log(e.log_id, request=request)
if e.msg_id:
if e.arg_dict and isinstance(e.arg_dict, dict):
msg = get_message(e.msg_id, lang, **(e.arg_dict))
else:
msg = get_message(e.msg_id, lang)
response_data['result'] = 'NG'
response_data['msg'] = msg
if filepath and os.path.exists(filepath):
os.remove(filepath)
except Exception as e:
logger.system_log('LOSM12003', traceback.format_exc(), request=request)
response_data['result'] = 'NG'
response_data['msg'] = msg if msg else get_message('MOSJA03007', lang)
if filepath and os.path.exists(filepath):
os.remove(filepath)
resp_json = json.dumps(response_data, ensure_ascii=False)
logger.logic_log('LOSI00002', resp_json, request=request)
return HttpResponse(resp_json, status=None)
@check_allowed_auth([MENU_ID_STG, MENU_ID_PRD], defs.MENU_CATEGORY.ALLOW_EVERY)
def rule_download(request, rule_manage_id):
"""
[Method summary]
Handle rule file downloads
"""
logger.logic_log('LOSI00001', 'None', request=request)
content_type='application/excel'
try:
file_name_expansion = ''
if '_testrequest' in rule_manage_id:
tmp_list = rule_manage_id.split('_')
rule_manage_id = tmp_list[0]
file_name_expansion = '_testrequest.xlsx'
# Determine the rule type
rule_manage = RuleManage.objects.get(pk=int(rule_manage_id))
ruletypeid = rule_manage.rule_type_id
send_data = {
'request': 'DOWNLOAD',
'ruletypeid': ruletypeid,
'rule_manage_id': rule_manage_id,
'file_name_expansion': file_name_expansion,
}
# Per-rule access permission check
menu_id = 0
if rule_manage.request_type_id == defs.PRODUCTION:
menu_id = MENU_ID_PRD
elif rule_manage.request_type_id == defs.STAGING:
menu_id = MENU_ID_STG
rule_ids = []
for chk_auth in defs.MENU_CATEGORY.ALLOW_EVERY:
rule_ids.extend(request.user_config.get_activerule_auth_type(menu_id, chk_auth))
if ruletypeid not in rule_ids:
ruletypename = RuleType.objects.get(rule_type_id=ruletypeid).rule_type_name
raise OASEError('MOSJA12031', 'LOSI12012', msg_params={'opename':get_message('MOSJA00077', request.user.get_lang_mode(), showMsgId=False), 'rule_type_name':ruletypename}, log_params=['Download', ruletypeid, rule_ids])
# If the rule ended abnormally, request a zip archive instead
if rule_manage.system_status in (
defs.RULE_STS_SYSTEM.UPLOAD_NG,
defs.RULE_STS_SYSTEM.BUILD_NG,
defs.RULE_STS_SYSTEM.STAGING_NG,
defs.RULE_STS_SYSTEM.PRODUCT_NG
):
send_data['request'] = 'DOWNLOAD_ZIP'
content_type = 'application/zip'
# Request the rule file from the apply service
result, msgid, filename, filedata = RequestToApply.getfile(send_data, request=request)
# Apply service error
if not result:
logger.logic_log('LOSM03003', msgid, request=request)
raise
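# Return the file with a UTF-8 encoded filename so non-ASCII rule names survive the download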
response = HttpResponse(filedata, content_type=content_type)
response['Content-Disposition'] = "attachment; filename*=UTF-8''%s" % (urllib.parse.quote(filename))
logger.logic_log('LOSI00002', 'filename=%s' % (filename), request=request)
return response
except OASEError as e:
if e.log_id:
if e.arg_list and isinstance(e.arg_list, list):
logger.logic_log(e.log_id, *(e.arg_list), request=request)
else:
logger.logic_log(e.log_id, request=request)
if e.msg_id:
if e.arg_dict and isinstance(e.arg_dict, dict):
msg = get_message(e.msg_id, request.user.get_lang_mode(), **(e.arg_dict))
else:
msg = get_message(e.msg_id, request.user.get_lang_mode())
return HttpResponse(request, status=500)
except Exception as e:
logger.system_log('LOSM12004', traceback.format_exc(), request=request)
logger.logic_log('LOSI00002', e, request=request)
return HttpResponse(request, status=500)
@check_allowed_auth(MENU_ID_PRD, defs.MENU_CATEGORY.ALLOW_ADMIN)
def rule_switchback(request, rule_manage_id):
"""
[Method summary]
Handle rule switchback (rollback to a previous production rule)
"""
logger.logic_log('LOSI00001', 'None', request=request)
msg = ''
response_data = {}
try:
# Check the switchback target rule
try:
ruleManage = RuleManage.objects.get(rule_manage_id=rule_manage_id)
rule_type_id = ruleManage.rule_type_id
ruletype = RuleType.objects.get(pk=rule_type_id)
except RuleManage.DoesNotExist:
msg = get_message('MOSJA03010', request.user.get_lang_mode())
logger.system_log('LOSM12016', request=request)
raise
except RuleType.DoesNotExist:
msg = get_message('MOSJA03011', request.user.get_lang_mode())
logger.system_log('LOSM12017', request=request)
raise
# Per-rule access permission check
rule_ids = []
for chk_auth in defs.MENU_CATEGORY.ALLOW_ADMIN:
rule_ids.extend(request.user_config.get_activerule_auth_type(MENU_ID_PRD, chk_auth))
if rule_type_id not in rule_ids:
raise OASEError('MOSJA12031', 'LOSI12012', msg_params={'opename':get_message('MOSJA12046', request.user.get_lang_mode(), showMsgId=False), 'rule_type_name':ruletype.rule_type_name}, log_params=['SwitchBack', rule_type_id, rule_ids])
# Check the state of the switchback target rule
if ruleManage.request_type_id != defs.PRODUCTION:
raise OASEError('MOSJA03001', 'LOSM03002', log_params=[ruleManage.request_type_id, defs.PRODUCTION])
if not _is_switchbackable(ruleManage.system_status, ruleManage.operation_status):
raise OASEError('MOSJA12033', 'LOSM03009', log_params=[ruleManage.system_status, [defs.RULE_STS_SYSTEM.PRODUCT_NG, defs.RULE_STS_SYSTEM.PRODUCT_OK], ruleManage.operation_status, [defs.RULE_STS_OPERATION.PRODUCT_NOAPPLY, defs.RULE_STS_OPERATION.PRODUCT_END]])
# Send a switchback (re-apply) request to the apply service
send_data = {
'request': 'APPLY',
'ruletypeid': rule_type_id,
'groupid': ruletype.group_id,
'artifactid': ruletype.artifact_id,
'rule_file_id': ruleManage.rule_file_id,
'request_type_id': defs.PRODUCTION,
'apply_user_id': request.user.user_id,
'rule_manage_id': rule_manage_id,
}
result, msgid = RequestToApply.operate(send_data, request=request)
if result:
response_data['result'] = 'OK'
response_data['msg'] = get_message(msgid, request.user.get_lang_mode(), showMsgId=False)
else:
response_data['result'] = 'NG'
response_data['msg'] = get_message(msgid, request.user.get_lang_mode())
logger.system_log('LOSM12005', '', request=request)
except OASEError as e:
if e.log_id:
if e.arg_list and isinstance(e.arg_list, list):
logger.logic_log(e.log_id, *(e.arg_list), request=request)
else:
logger.logic_log(e.log_id, request=request)
if e.msg_id:
if e.arg_dict and isinstance(e.arg_dict, dict):
msg = get_message(e.msg_id, request.user.get_lang_mode(), **(e.arg_dict))
else:
msg = get_message(e.msg_id, request.user.get_lang_mode())
response_data['result'] = 'NG'
response_data['msg'] = msg if msg else get_message('MOSJA03008', request.user.get_lang_mode())
except Exception:
response_data['result'] = 'NG'
response_data['msg'] = msg if msg else get_message('MOSJA03008', request.user.get_lang_mode())
logger.system_log('LOSM12005', traceback.format_exc(), request=request)
logger.logic_log('LOSI00002', response_data['result'], request=request)
return HttpResponse(json.dumps(response_data, ensure_ascii=False), status=None)
@check_allowed_auth(MENU_ID_PRD, defs.MENU_CATEGORY.ALLOW_ADMIN)
def rule_apply(request, rule_manage_id, request_type_id):
"""
[Method summary]
Handle applying a rule to production
"""
logger.logic_log('LOSI00001', 'None', request=request)
msg = ''
response_data = {}
try:
# Pre-apply rule checks
if int(request_type_id) != defs.PRODUCTION:
msg = get_message('MOSJA03001', request.user.get_lang_mode())
logger.user_log('LOSM03002', request_type_id, defs.PRODUCTION, request=request)
raise
try:
rule_manage = RuleManage.objects.get(pk=rule_manage_id)
rulefile = RuleFile.objects.get(pk=rule_manage.rule_file_id)
ruletype = RuleType.objects.get(pk=rule_manage.rule_type_id)
except RuleManage.DoesNotExist:
msg = get_message('MOSJA03010', request.user.get_lang_mode())
logger.system_log('LOSM12016', request=request)
raise
except RuleFile.DoesNotExist:
msg = get_message('MOSJA03013', request.user.get_lang_mode())
logger.system_log('LOSM12019', request=request)
raise
except RuleType.DoesNotExist:
msg = get_message('MOSJA03011', request.user.get_lang_mode())
logger.system_log('LOSM12017', request=request)
raise
# Per-rule access permission check
rule_ids = []
for chk_auth in defs.MENU_CATEGORY.ALLOW_ADMIN:
rule_ids.extend(request.user_config.get_activerule_auth_type(MENU_ID_PRD, chk_auth))
if ruletype.rule_type_id not in rule_ids:
raise OASEError('MOSJA12031', 'LOSI12012', msg_params={'opename':get_message('MOSJA12100', request.user.get_lang_mode(), showMsgId=False), 'rule_type_name':ruletype.rule_type_name}, log_params=['Apply', ruletype.rule_type_id, rule_ids])
# Check that the rule is a staging rule
is_staging = False
if rule_manage.request_type_id == defs.STAGING:
is_staging = True
else:
msg = get_message('MOSJA03012', request.user.get_lang_mode())
logger.user_log('LOSM12018', request=request)
raise Exception()
# Check the staging operation status
if rule_manage.operation_status != defs.RULE_STS_OPERATION.STAGING:
msg = get_message('MOSJA03014', request.user.get_lang_mode())
logger.user_log('LOSM12020', request=request)
raise Exception()
# Send a production apply request to the apply service
send_data = {
'request': 'APPLY',
'ruletypeid': rule_manage.rule_type_id,
'groupid': ruletype.group_id,
'artifactid': ruletype.artifact_id,
'rule_file_id': rule_manage.rule_file_id,
'request_type_id': defs.PRODUCTION,
'apply_user_id': request.user.user_id,
'rule_manage_id': rule_manage_id,
}
result, msgid = RequestToApply.operate(send_data, request=request)
if result:
response_data['result'] = 'OK'
response_data['msg'] = get_message(msgid, request.user.get_lang_mode(), showMsgId=False)
else:
response_data['result'] = 'NG'
response_data['msg'] = get_message(msgid, request.user.get_lang_mode())
logger.system_log('LOSM12006', '', request=request)
except OASEError as e:
if e.log_id:
if e.arg_list and isinstance(e.arg_list, list):
logger.logic_log(e.log_id, *(e.arg_list), request=request)
else:
logger.logic_log(e.log_id, request=request)
if e.msg_id:
if e.arg_dict and isinstance(e.arg_dict, dict):
msg = get_message(e.msg_id, request.user.get_lang_mode(), **(e.arg_dict))
else:
msg = get_message(e.msg_id, request.user.get_lang_mode())
response_data['result'] = 'NG'
response_data['msg'] = msg if msg else get_message('MOSJA03009', request.user.get_lang_mode())
except Exception:
response_data['result'] = 'NG'
response_data['msg'] = msg if msg else get_message('MOSJA03009', request.user.get_lang_mode())
logger.system_log('LOSM12006', traceback.format_exc(), request=request)
response_json = json.dumps(response_data, ensure_ascii=False)
logger.logic_log('LOSI00002', response_data['result'], request=request)
return HttpResponse(response_json)
def _getRuleTypeData(request=None):
"""
[Method summary]
Retrieve rule type data
"""
logger.logic_log('LOSI00001', '', request=request)
rule_types = RuleType.objects.all()
rule_type_list = rule_types.values('rule_type_id', 'rule_type_name')
rule_type_dict = {rt.rule_type_id:{'name': rt.rule_type_name, 'table': rt.rule_table_name,} for rt in rule_types}
logger.logic_log('LOSI00002', 'rule_type_dict: %s' % json.dumps(rule_type_dict, ensure_ascii=False), request=request)
return rule_type_list, rule_type_dict
def _select(filters={}, request=None):
"""
[Method summary]
Retrieve rule data
"""
logger.logic_log('LOSI00001', 'filters: %s' % (filters), request=request)
rule_list = []
# Retrieve rule manage records
where_info = {}
WebCommon.convert_filters(filters, where_info)
rule_manage_list = RuleManage.objects.filter(**where_info).order_by('rule_manage_id').reverse()
#================== Retrieve rule type information ==================#
_, rule_type_dict = _getRuleTypeData(request)
#================== Retrieve rule file information ==================#
rule_file_ids = [rm.rule_file_id for rm in rule_manage_list]
rule_file_list = RuleFile.objects.filter(pk__in=rule_file_ids)
rule_file_dict = {rf.rule_file_id: rf.rule_file_name for rf in rule_file_list}
# Build rule information entries
for rm in rule_manage_list:
operation_status_str = get_message(RuleDefs.MST_STS_OPERATION[rm.operation_status],request.user.get_lang_mode(), showMsgId=False)
system_status_str = get_message(
RuleDefs.MST_STS_SYSTEM[rm.system_status], request.user.get_lang_mode(), showMsgId=False)
rule_info_dic = {
'request_type_id': rm.request_type_id,
'rule_manage_id': rm.rule_manage_id,
'rule_type_id': rm.rule_type_id,
'rule_type_name': rule_type_dict[rm.rule_type_id]['name'],
'rule_table_name': rule_type_dict[rm.rule_type_id]['table'],
'rule_file_id': rm.rule_file_id,
'filename': rule_file_dict[rm.rule_file_id],
'operation_status_id': rm.operation_status,
'operation_status_str': operation_status_str,
'system_status_id': rm.system_status,
'system_status_str': system_status_str,
'last_update_user_name': rm.last_update_user,
'last_update_timestamp': rm.last_update_timestamp,
'is_finish': True if rm.system_status in defs.RULE_STS_SYSTEM.FINISH_STATUS else False,
'is_switchback': _is_switchbackable(rm.system_status, rm.operation_status),
}
rule_list.append(rule_info_dic)
logger.logic_log('LOSI00002', 'hit_count: %s' % str(len(rule_list)), request=request)
return rule_list
def _select_staging(filters, perm_info_stg, request):
"""
[Summary]
Retrieve staging rule data
[Arguments]
filters: filters as used by _select(); {} is also allowed
perm_info_stg: staging permissions
[Return values]
staging_list, staging_history_list: staging rules and staging rule history
"""
logger.logic_log('LOSI00001', 'filters: %s' % (filters))
# Get rule type IDs with at least view permission and add them to the filter conditions
rule_ids_stg_view = perm_info_stg[defs.VIEW_ONLY]
rule_ids_stg_admin = perm_info_stg[defs.ALLOWED_MENTENANCE]
rule_ids_stg = rule_ids_stg_view + rule_ids_stg_admin
if 'rule_type_id' not in filters:
filters['rule_type_id'] = {}
if 'LIST' not in filters['rule_type_id']:
filters['rule_type_id']['LIST'] = []
filters['rule_type_id']['LIST'].extend(rule_ids_stg)
filters['request_type_id'] = {'LIST': [defs.STAGING]}
rule_list = _select(filters, request)
staging_list, staging_history_list = _filter_staging_rule(rule_list)
logger.logic_log('LOSI00002', 'staging_rule count: %s' % str(len(rule_list)))
return staging_list, staging_history_list
def _select_production(filters, perm_info_prd, request):
"""
[Summary]
Retrieve production rule data
[Arguments]
filters: filters as used by _select(); {} is also allowed
perm_info_prd: production permissions
[Return values]
rule_product_list: currently applied rules
rule_history_list: rules whose application has ended
"""
logger.logic_log('LOSI00001', 'filters: %s' % (filters))
rule_product_list = []
rule_history_list = []
# Get rule type IDs with at least view permission and add them to the filter conditions
rule_ids_prd_view = perm_info_prd[defs.VIEW_ONLY]
rule_ids_prd_admin = perm_info_prd[defs.ALLOWED_MENTENANCE]
rule_ids_prd = rule_ids_prd_view + rule_ids_prd_admin
if 'rule_type_id' not in filters:
filters['rule_type_id'] = {}
if 'LIST' not in filters['rule_type_id']:
filters['rule_type_id']['LIST'] = []
filters['rule_type_id']['LIST'].extend(rule_ids_prd)
filters['request_type_id'] = {'LIST': [defs.PRODUCTION]}
rule_list = _select(filters, request)
# Route rules into applied / ended lists based on their operation status
noapply_type_ids = []
for r in rule_list:
# Not-yet-applied status
if r['operation_status_id'] == defs.RULE_STS_OPERATION.PRODUCT_NOAPPLY:
# Only the newest record per rule type is treated as currently applied
if r['rule_type_id'] not in noapply_type_ids:
rule_product_list.append(r)
# Older records go to the history list
else:
rule_history_list.append(r)
noapply_type_ids.append(r['rule_type_id'])
# Applied status
elif r['operation_status_id'] == defs.RULE_STS_OPERATION.PRODUCT:
rule_product_list.append(r)
# Application-ended status
elif r['operation_status_id'] == defs.RULE_STS_OPERATION.PRODUCT_END:
rule_history_list.append(r)
# Unknown status
else:
logger.logic_log('LOSM12070', r['operation_status_id'], r['rule_manage_id'])
logger.logic_log('LOSI00002', 'production_rule count: %s' % str(len(rule_list)))
return rule_product_list, rule_history_list
def _is_switchbackable(sys_sts, ope_sts):
"""
[Summary]
Determine whether switchback is possible
[Arguments]
sys_sts: system (work) status
ope_sts: operation status
[Return value]
bool : True = possible, False = not possible
"""
if sys_sts in [defs.RULE_STS_SYSTEM.PRODUCT_NG, defs.RULE_STS_SYSTEM.PRODUCT_OK] \
and ope_sts in [defs.RULE_STS_OPERATION.PRODUCT_NOAPPLY, defs.RULE_STS_OPERATION.PRODUCT_END]:
return True
return False
@check_allowed_auth(MENU_ID_STG, defs.MENU_CATEGORY.ALLOW_EVERY)
@require_POST
def bulkpseudocall(request, rule_type_id):
"""
[Method summary]
Handle bulk execution of test requests
"""
errmsg = ''
response_data = {}
rule_data_list = []
message_list = []
trace_id_list = []
time_zone = settings.TIME_ZONE
reception_dt = datetime.datetime.now(pytz.timezone('UTC'))
reception_dt = TimeConversion.get_time_conversion(reception_dt, time_zone, request=request)
logger.logic_log('LOSI00001', 'rule_type_id: %s' % rule_type_id, request=request)
try:
# Load the uploaded data
rule_data_list, errmsg, message_list = _testrequest_upload(request, rule_type_id)
if errmsg:
logger.system_log('LOSM12003', 'rule_data_list:%s' % rule_data_list, request=request)
raise Exception
# Per-rule access permission check
rule_ids = []
for chk_auth in defs.MENU_CATEGORY.ALLOW_EVERY:
rule_ids.extend(request.user_config.get_activerule_auth_type(MENU_ID_STG, chk_auth))
rt = RuleType.objects.get(rule_type_id=rule_type_id)
if rt.rule_type_id not in rule_ids:
raise OASEError('MOSJA12031', 'LOSI12012', msg_params={'opename':get_message('MOSJA12035', request.user.get_lang_mode(), showMsgId=False), 'rule_type_name':rt.rule_type_name}, log_params=['Bulk Request', rt.rule_type_id, rule_ids])
####################################################
# Send requests
####################################################
scheme = urlsplit(request.build_absolute_uri(None)).scheme
tkn = _get_token()
url = scheme + '://127.0.0.1:' + request.META['SERVER_PORT'] + reverse('web_app:event:eventsrequest')
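# Each uploaded row is posted to the local events request endpoint, following the same path as normal event requests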
for rule_dic in rule_data_list:
row = ''
event_time = ''
event_info = []
for k, v in rule_dic.items():
if k == 'eventtime':
event_time = str(v)
continue
if k == 'row':
row = str(v)
continue
event_info.append(v)
# Convert the request to JSON
json_str = {}
json_str[EventsRequestCommon.KEY_RULETYPE] = rt.rule_type_name
json_str[EventsRequestCommon.KEY_REQTYPE] = defs.STAGING
json_str[EventsRequestCommon.KEY_EVENTTIME] = event_time
json_str[EventsRequestCommon.KEY_EVENTINFO] = event_info
json_str = json.dumps(json_str)
r_content = None
r = requests.post(
url,
headers={'content-type': 'application/json', 'Authorization' : 'Bearer %s' % (tkn),},
data=json_str.encode('utf-8'),
verify=False
)
# Extract data from the response
r_content = json.loads(r.content.decode('utf-8'))
# The test request failed during execution
if not r_content["result"]:
errmsg = r_content["msg"]
logger.user_log('LOSM12060', traceback.format_exc())
raise
if r_content is not None and r_content['trace_id']:
trace_id_list.append({'id':r_content['trace_id'], 'row':row})
response_data['result'] = 'OK'
response_data['msg'] = get_message('MOSJA03015', request.user.get_lang_mode(), showMsgId=False)
response_data['trace'] = trace_id_list
response_data['recept'] = reception_dt
response_data['filename'] = request.FILES['testreqfile'].name
response_data['log_msg'] = makePseudoCallMessage_Bulk('', reception_dt, request.FILES['testreqfile'].name, 0, len(trace_id_list),request.user.get_lang_mode())
except json.JSONDecodeError:
response_data['result'] = 'NG'
response_data['msg'] = get_message('MOSJA12012', request.user.get_lang_mode())
logger.user_log('LOSM12052')
except OASEError as e:
if e.log_id:
if e.arg_list and isinstance(e.arg_list, list):
logger.logic_log(e.log_id, *(e.arg_list), request=request)
else:
logger.logic_log(e.log_id, request=request)
if e.msg_id:
if e.arg_dict and isinstance(e.arg_dict, dict):
msg = get_message(e.msg_id, request.user.get_lang_mode(), **(e.arg_dict))
else:
msg = get_message(e.msg_id, request.user.get_lang_mode())
response_data['result'] = 'NG'
response_data['msg'] = msg
except Exception as e:
response_data['result'] = 'NG'
response_data['msg'] = errmsg if errmsg else get_message('MOSJA03001', request.user.get_lang_mode())
logger.system_log('LOSM12060', 'rule_data_list:%s, %s' % (rule_data_list,traceback.format_exc()), request=request)
if len(message_list) > 0:
response_data['log_msg'] = '\n' .join(message_list) + '\n'
resp_json = json.dumps(response_data, ensure_ascii=False)
logger.logic_log('LOSI00002', resp_json, request=request)
return HttpResponse(resp_json, status=None)
@check_allowed_auth(MENU_ID_STG, defs.MENU_CATEGORY.ALLOW_EVERY)
@require_POST
def rule_polling_bulk(request, rule_manage_id):
"""
[Method summary]
Handle polling requests while a bulk test request is running
"""
logger.logic_log('LOSI00001', 'None', request=request)
resp_json = {}
err_flg = 1
is_finish = RuleDefs.RULE_FINISH_STS_NG
msg = ''
recept = ''
filename = ''
trace_list = []
try:
# Get parameters
recept = request.POST.get('recept', '')
filename = request.POST.get('filename', '')
trace_list = request.POST.get('trace_ids', None)
if trace_list is None:
logger.user_log('LOSM12063', request=request)
raise Exception()
trace_list = ast.literal_eval(trace_list)
trace_ids = []
trace_info = {}
for trc in trace_list:
trace_ids.append(trc['id'])
trace_info[trc['id']] = {}
trace_info[trc['id']]['row'] = int(trc['row'])
trace_info[trc['id']]['sts'] = ''
trace_info[trc['id']]['msg'] = ''
trace_info[trc['id']]['respid'] = 0
logger.logic_log('LOSI12009', str(len(trace_ids)), recept, filename, request=request)
# Per-rule access permission check
rule_ids = []
for chk_auth in defs.MENU_CATEGORY.ALLOW_EVERY:
rule_ids.extend(request.user_config.get_activerule_auth_type(MENU_ID_STG, chk_auth))
rule_manage = RuleManage.objects.get(pk=rule_manage_id)
if rule_manage.rule_type_id not in rule_ids:
ruletypename = RuleType.objects.get(rule_type_id=rule_manage.rule_type_id).rule_type_name
raise OASEError('MOSJA12031', 'LOSI12012', msg_params={'opename':get_message('MOSJA12035', request.user.get_lang_mode(), showMsgId=False), 'rule_type_name':ruletypename}, log_params=['Bulk Polling', rule_manage.rule_type_id, rule_ids])
# Retrieve the target request information
ev_req_list = []
if len(trace_ids) > 0:
ev_req_list = EventsRequest.objects.filter(trace_id__in=trace_ids).order_by('request_id').values('trace_id', 'status')
# Check the request states
ope_sts = None
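# Tally completion per request: OK (matched), NG (failed), EXE (still running)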
finish_sts = {'OK':0, 'NG':0, 'EXE':0}
ok_trace_ids = []
for evreq in ev_req_list:
if evreq['trace_id'] in trace_info:
# Get the rule status information
rule_info = RuleDefs.get_rulestatus_info(evreq['status'],request.user.get_lang_mode())
# Count the completion status
if rule_info['is_finish'] == RuleDefs.RULE_FINISH_STS_OK:
finish_sts['OK'] += 1
ok_trace_ids.append(evreq['trace_id'])
elif rule_info['is_finish'] == RuleDefs.RULE_FINISH_STS_NG:
finish_sts['NG'] += 1
elif rule_info['is_finish'] == RuleDefs.RULE_FINISH_STS_EXE:
finish_sts['EXE'] += 1
# Keep the status transition to apply once all requests have completed
if rule_info['ope_sts'] and ope_sts != defs.RULE_STS_OPERATION.STAGING_NG:
ope_sts = rule_info['ope_sts']
# Keep per-request messages
trace_info[evreq['trace_id']]['sts'] = rule_info['sts']
trace_info[evreq['trace_id']]['msg'] = rule_info['msg']
# Retrieve matched rule information for successfully completed requests
resp_ids = []
resp_acts = []
act_types = {}
if len(ok_trace_ids) > 0:
rset = RhdmResponse.objects.filter(trace_id__in=ok_trace_ids).values('trace_id', 'response_id')
for rs in rset:
trace_info[rs['trace_id']]['respid'] = rs['response_id']
resp_ids.append(rs['response_id'])
resp_act_info = {}
if len(resp_ids) > 0:
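# Map driver_type_id to a display name such as 'name(verN)' for resolving action types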
dti = ActionType.objects.filter(disuse_flag='0').values_list('driver_type_id', flat=True)
rset = DriverType.objects.filter(driver_type_id__in=dti).values('name', 'driver_major_version')
ati = []
for rs in rset:
name_version = rs['name'] + '(ver' + str(rs['driver_major_version']) + ')'
ati.append(name_version)
act_types = dict(zip(dti, ati))
rset = RhdmResponseAction.objects.filter(response_id__in=resp_ids).values(
'response_id', 'rule_name', 'execution_order', 'action_type_id', 'action_parameter_info',
'action_pre_info', 'action_retry_interval', 'action_retry_count', 'action_stop_interval',
'action_stop_count')
for rs in rset:
if rs['response_id'] not in resp_act_info:
resp_act_info[rs['response_id']] = []
str_actparams = json.loads(rs['action_parameter_info'])
str_actparams = '%s' % (str_actparams['ACTION_PARAMETER_INFO'])
str_actpreinfo = ''
if not rs['action_pre_info']:
str_actpreinfo = get_message('MOSJA12154', request.user.get_lang_mode(), showMsgId=False)
else:
str_actpreinfo = get_message('MOSJA12155', request.user.get_lang_mode(), showMsgId=False)
rhdm_res_cor = RhdmResponseCorrelation.objects.filter(
rule_name=rs['rule_name'], request_type_id=defs.STAGING).order_by(
'last_update_timestamp').reverse().first()
resp_act_info_tmp = {}
resp_act_info_tmp['rulename'] = rs['rule_name']
resp_act_info_tmp['exeorder'] = rs['execution_order']
resp_act_info_tmp['acttype'] = act_types[rs['action_type_id']] if rs['action_type_id'] in act_types else get_message('MOSJA12156', request.user.get_lang_mode(), showMsgId=False)
resp_act_info_tmp['actparam'] = str_actparams
resp_act_info_tmp['actpreinfo'] = str_actpreinfo
resp_act_info_tmp['retry_interval'] = rs['action_retry_interval']
resp_act_info_tmp['retry_count'] = rs['action_retry_count']
resp_act_info_tmp['stop_interval'] = rs['action_stop_interval']
resp_act_info_tmp['stop_count'] = rs['action_stop_count']
if rhdm_res_cor != None:
resp_act_info_tmp['cond_count'] = rhdm_res_cor.cond_count
resp_act_info_tmp['cond_term'] = rhdm_res_cor.cond_term
resp_act_info_tmp['large_group'] = rhdm_res_cor.cond_large_group
resp_act_info_tmp['large_group_priority'] = rhdm_res_cor.cond_large_group_priority
resp_act_info_tmp['small_group'] = rhdm_res_cor.cond_small_group
resp_act_info_tmp['small_group_priority'] = rhdm_res_cor.cond_small_group_priority
else:
resp_act_info_tmp['cond_count'] = 'X'
resp_act_info_tmp['cond_term'] = 'X'
resp_act_info_tmp['large_group'] = 'X'
resp_act_info_tmp['large_group_priority'] = 'X'
resp_act_info_tmp['small_group'] = 'X'
resp_act_info_tmp['small_group_priority'] = 'X'
resp_act_info[rs['response_id']].append(resp_act_info_tmp)
# If all requests have completed and a transition status is available, advance the operation status
if ope_sts and finish_sts['EXE'] <= 0:
with transaction.atomic():
rule_manage = RuleManage.objects.select_for_update().get(pk=rule_manage_id)
if rule_manage.operation_status in RuleDefs.STAGING_VALIDATE_STATUSES:
rule_manage.operation_status = ope_sts
rule_manage.last_update_timestamp = datetime.datetime.now(pytz.timezone('UTC'))
rule_manage.last_update_user = request.user.user_name
rule_manage.save()
# Build the response message
for k, v in sorted(trace_info.items(), key=lambda x: x[1]['row']):
msg += '%s' % get_message('MOSJA12150', request.user.get_lang_mode(), showMsgId=False, line=v['row'])
if v['respid'] > 0 and v['respid'] in resp_act_info:
msg += get_message('MOSJA12142', request.user.get_lang_mode(), showMsgId=False) + '%s' % (len(resp_act_info[v['respid']])) + get_message('MOSJA12143', request.user.get_lang_mode(), showMsgId=False) + '\n'
for i, res_act in enumerate(resp_act_info[v['respid']]):
msg = msg + get_message('MOSJA12144', request.user.get_lang_mode(), showMsgId=False, count=str(i + 1)) + '\n'
msg = msg + get_message('MOSJA12145', request.user.get_lang_mode(), showMsgId=False) + res_act['rulename'] + '\n'
msg = msg + get_message('MOSJA12146', request.user.get_lang_mode(), showMsgId=False) + str(res_act['exeorder']) + '\n'
msg = msg + get_message('MOSJA12147', request.user.get_lang_mode(), showMsgId=False) + res_act['acttype'] + '\n'
msg = msg + get_message('MOSJA12148', request.user.get_lang_mode(), showMsgId=False) + res_act['actparam'] + '\n'
msg = msg + get_message('MOSJA12149', request.user.get_lang_mode(), showMsgId=False) + res_act['actpreinfo'] + '\n'
msg = msg + get_message('MOSJA12191', request.user.get_lang_mode(), showMsgId=False) + str(res_act['retry_interval']) + '\n'
msg = msg + get_message('MOSJA12192', request.user.get_lang_mode(), showMsgId=False) + str(res_act['retry_count']) + '\n'
msg = msg + get_message('MOSJA12193', request.user.get_lang_mode(), showMsgId=False) + str(res_act['stop_interval']) + '\n'
msg = msg + get_message('MOSJA12194', request.user.get_lang_mode(), showMsgId=False) + str(res_act['stop_count']) + '\n'
msg = msg + get_message('MOSJA12195', request.user.get_lang_mode(), showMsgId=False) + str(res_act['cond_count']) + '\n'
msg = msg + get_message('MOSJA12196', request.user.get_lang_mode(), showMsgId=False) + str(res_act['cond_term']) + '\n'
msg = msg + get_message('MOSJA12197', request.user.get_lang_mode(), showMsgId=False) + res_act['large_group'] + '\n'
msg = msg + get_message('MOSJA12198', request.user.get_lang_mode(), showMsgId=False) + str(res_act['large_group_priority']) + '\n'
msg = msg + get_message('MOSJA12199', request.user.get_lang_mode(), showMsgId=False) + res_act['small_group'] + '\n'
msg = msg + get_message('MOSJA12200', request.user.get_lang_mode(), showMsgId=False) + str(res_act['small_group_priority']) + '\n'
msg += '\n %s \n\n' % (v['msg'])
msg = makePseudoCallMessage_Bulk(msg, recept, filename, len(trace_ids) - finish_sts['EXE'], len(trace_ids),request.user.get_lang_mode())
if finish_sts['EXE'] > 0:
is_finish = RuleDefs.RULE_FINISH_STS_EXE
else:
if finish_sts['NG'] > 0:
is_finish = RuleDefs.RULE_FINISH_STS_NG
else:
is_finish = RuleDefs.RULE_FINISH_STS_OK
err_flg = 0
logger.logic_log('LOSI12010', finish_sts['OK'], finish_sts['NG'], finish_sts['EXE'], request=request)
except OASEError as e:
if e.log_id:
if e.arg_list and isinstance(e.arg_list, list):
logger.logic_log(e.log_id, *(e.arg_list), request=request)
else:
logger.logic_log(e.log_id, request=request)
if e.msg_id:
if e.arg_dict and isinstance(e.arg_dict, dict):
msg = get_message(e.msg_id, request.user.get_lang_mode(), **(e.arg_dict))
else:
msg = get_message(e.msg_id, request.user.get_lang_mode())
is_finish = RuleDefs.RULE_FINISH_STS_NG
except Exception:
logger.system_log('LOSM12000', traceback.format_exc(), request=request)
is_finish = RuleDefs.RULE_FINISH_STS_NG
if not msg:
msg = get_message('MOSJA12001', request.user.get_lang_mode())
resp_json = {
'err_flg': err_flg,
'is_finish': is_finish,
'msg': msg,
'recept': recept,
'filename': filename,
'trace': trace_list,
}
resp_json = json.dumps(resp_json, ensure_ascii=False)
logger.logic_log('LOSI00002', 'err_flg:%s, is_finish:%s' % (err_flg, is_finish), request=request)
return HttpResponse(resp_json)
def _testrequest_upload(request, rule_type_id):
"""
[Method overview]
Upload the test request file under temp/testrequest
"""
logger.logic_log('LOSI00001', 'rule_type_id: %s' % rule_type_id, request=request)
msg = ''
filepath = ''
rule_data_list = []
message_list = []
try:
####################################################
# Check the format of the uploaded file
####################################################
# Get the file name
rulefile = request.FILES['testreqfile']
filename = rulefile.name
# Check the file extension
if not (filename.endswith('.xls') or filename.endswith('.xlsx')):
msg = get_message('MOSJA03005', request.user.get_lang_mode())
logger.system_log('LOSM12014', request=request)
raise Exception()
# If the check passes, read the file data
filedata = ''
for chunk in rulefile.chunks():
filedata = '%s%s' % (filedata, base64.b64encode(chunk).decode('utf-8'))
# Create the directory if the path does not exist
temppath = RuleDefs.FILE_TEMP_PATH
os.makedirs(temppath, exist_ok=True)
# Save under a unique file name
tempname = '%s%s' % (str(uuid.uuid4()), request.user.user_id)
filepath = '%s%s' % (temppath, tempname)
with open(filepath, 'wb') as fp:
fp.write(base64.b64decode(filedata.encode('utf-8')))
####################################################
# Read the condition data from the Excel file
####################################################
rule_data_list, message_list, msg = _get_data_by_excel(request, filepath, filename, rule_type_id)
if msg:
logger.system_log('LOSM12000', 'rule_data_list:%s' % rule_data_list, request=request)
raise Exception(msg)
# Delete the temporary file
os.remove(filepath)
except Exception as e:
msg = msg if msg else get_message('MOSJA03007', request.user.get_lang_mode())
logger.system_log('LOSM12003', traceback.format_exc(), request=request)
if filepath and os.path.exists(filepath):
os.remove(filepath)
logger.logic_log('LOSI00002', rule_data_list, request=request)
return rule_data_list, msg, message_list
def _get_data_by_excel(request, filepath, filename, rule_type_id):
"""
[Method overview]
Read the test request data from the Excel file
[Arguments]
filepath : path to the temporarily saved file
[Return value]
dict : list of the extracted data
"""
logger.logic_log('LOSI00001', 'filepath: %s, rule_type_id: %s' % (filepath, rule_type_id), request=request)
# Index of the first user-input cell
COL_INDEX_RULE_START = 2
ROW_INDEX_RULE_START = 2
# Row that contains the rule names
ROW_RULE_NAME_START = 1
col_max_length = ''
row_max_length = ''
rule_data_list = []
message_list = []
errmsg = ''
try:
wb = xlrd.open_workbook(filepath)
tables_ws = wb.sheet_by_name('Values')
col_max_length = tables_ws.ncols
row_max_length = tables_ws.nrows
####################################################
# Format check
####################################################
rule_data_list, message_list = _check_testrequest_data(rule_type_id, filename, tables_ws, ROW_INDEX_RULE_START, COL_INDEX_RULE_START, row_max_length, col_max_length, request.user.get_lang_mode())
if len(message_list) > 0:
errmsg = get_message('MOSJA12018', request.user.get_lang_mode())
logger.system_log('LOSM12062', filename)
# Check the maximum number of test requests
request_row_max = int(System.objects.get(config_id = 'REQUEST_ROW_MAX').value)
row_count = len(rule_data_list)
if request_row_max < row_count:
errmsg = get_message('MOSJA12018', request.user.get_lang_mode())
message_list.append(get_message('MOSJA03123', request.user.get_lang_mode(), rulerowmax=request_row_max))
except Exception as e:
errmsg = get_message('MOSJA03016', request.user.get_lang_mode())
logger.system_log('LOSM12000', traceback.format_exc())
logger.logic_log('LOSI00002', 'rule_data_list: %s' % rule_data_list, request=request)
return rule_data_list, message_list, errmsg
def _check_testrequest_data(rule_type_id, filename, wsheet, row_index, col_index, row_max, col_max, lang):
"""
[Method overview]
Validate the condition-part input values
"""
rule_data_list = []
label_list = ['eventtime'] # label for the event occurrence time
cond_id_list = [0] # cond_id for the event occurrence time
message_list = []
chk_flag = False
cond_id = ''
chk_row_max = 0
emo_chk = UnicodeCheck()
# Excel cell types
CELL_TYPE_EMPTY = 0
CELL_TYPE_TEXT = 1
CELL_TYPE_NUMBER = 2
CELL_TYPE_DATE = 3
hhmm_repatter = re.compile(r'^([0-1][0-9]|2[0-3]):[0-5][0-9]$')
date_repatter = re.compile(r'^0\.[0-9]+$')
num_repatter = re.compile(r'^[0-9]+$')
dec_repatter = re.compile(r'^[0-9]+[.]{1}0+$')
# Extract the data objects
tmp_data_list = list(DataObject.objects.filter(rule_type_id=rule_type_id).order_by('data_object_id'))
checker = {}
for dataObj in tmp_data_list:
if dataObj.label in checker:
continue
checker[dataObj.label] = dataObj
data_object_list = checker.values()
for dobj in data_object_list:
label_list.append(dobj.label)
cond_id_list.append(dobj.conditional_expression_id)
# Check whether the sheet has any entries
for row in range(row_index, row_max):
for col in range(col_index, col_max):
if wsheet.cell_type(row, col) != CELL_TYPE_EMPTY:
chk_flag = True
chk_row_max = row
break
if not chk_flag:
message_list.append(get_message('MOSJA12019', lang, filename=filename))
return rule_data_list, message_list
for row in range(row_index, chk_row_max + 1):
chk_flag = False
index = 0
rule_data_dic = {}
# Check whether any column in the row has an entry
col = col_index
for col in range(col_index, col_max):
if wsheet.cell_type(row, col) != CELL_TYPE_EMPTY:
chk_flag = True
# Stop once past the last filled row
if not chk_flag and row > chk_row_max:
break
if not chk_flag:
continue
# Add the row number to the dictionary
rule_data_dic['row'] = row + 1
col = col_index
while col < col_max:
cell_val = ''
if wsheet.cell_type(row, col) == CELL_TYPE_EMPTY:
message_list.append(get_message('MOSJA12014', lang, cellname=_convert_rowcol_to_cellno(row, col)))
if wsheet.cell_type(row, col) == CELL_TYPE_TEXT \
and wsheet.cell(row, col).value == '':
message_list.append(get_message('MOSJA12014', lang, cellname=_convert_rowcol_to_cellno(row, col)))
if index <= len(data_object_list):
cond_id = cond_id_list[index]
cell_val = wsheet.cell(row, col).value
if cond_id == 0:
try:
cell_val = datetime.datetime.strptime(cell_val, '%Y-%m-%d %H:%M:%S')
except Exception as e:
message_list.append(get_message('MOSJA12017', lang, cellname=_convert_rowcol_to_cellno(row, col)))
# Time condition
elif cond_id == 15:
cell_val = wsheet.cell(row, col).value
if wsheet.cell_type(row, col) == CELL_TYPE_NUMBER:
message_list.append(get_message('MOSJA12016', lang, cellname=_convert_rowcol_to_cellno(row, col)))
elif wsheet.cell_type(row, col) == CELL_TYPE_TEXT:
if not hhmm_repatter.match(cell_val):
message_list.append(get_message('MOSJA12016', lang, cellname=_convert_rowcol_to_cellno(row, col)))
elif wsheet.cell_type(row, col) == CELL_TYPE_DATE:
cell_val_str = str(cell_val)
if not date_repatter.match(cell_val_str):
message_list.append(get_message('MOSJA12016', lang, cellname=_convert_rowcol_to_cellno(row, col)))
# Convert the value read from Excel into HH:mm format
cell_time = datetime.datetime(1899, 12, 30) + datetime.timedelta(days=cell_val)
cell_val = cell_time.strftime("%H:%M")
# Numeric condition
elif cond_id in [1, 2, 5, 6, 7, 8]:
if wsheet.cell_type(row, col) == CELL_TYPE_DATE:
_col_name = get_message('MOSJA12151', lang, showMsgId=False)
message_list.append(get_message('MOSJA12015', lang, colname=_col_name, cellname=_convert_rowcol_to_cellno(row, col)))
elif wsheet.cell_type(row, col) == CELL_TYPE_TEXT:
if not num_repatter.match(str(cell_val)):
_col_name = get_message('MOSJA12151', lang, showMsgId=False)
message_list.append(get_message('MOSJA12015', lang, colname=_col_name, cellname=_convert_rowcol_to_cellno(row, col)))
elif wsheet.cell_type(row, col) == CELL_TYPE_NUMBER:
# Strip zero padding?
cell_val = int(cell_val)
cell_val = str(cell_val)
if not num_repatter.match(cell_val) and not dec_repatter.match(cell_val):
_col_name = get_message('MOSJA12151', lang, showMsgId=False)
message_list.append(get_message('MOSJA12015', lang, colname=_col_name, cellname=_convert_rowcol_to_cellno(row, col)))
# String condition
elif cond_id in [3, 4, 9, 10]:
# Emoji check
value_list = emo_chk.is_emotion(cell_val)
if len(value_list) > 0:
_col_name = get_message('MOSJA12152', lang, showMsgId=False)
message_list.append(get_message('MOSJA12028', lang, colname=_col_name, cellname=_convert_rowcol_to_cellno(row, col)))
# Contains / does-not-contain condition
elif cond_id in [13, 14]:
result = _validate_contains(cell_val)
if not result:
_col_name = get_message('MOSJA12153', lang, showMsgId=False)
message_list.append(get_message('MOSJA12021', lang, colname=_col_name, cellname=_convert_rowcol_to_cellno(row, col)))
# Emoji check
value_list = emo_chk.is_emotion(cell_val)
if len(value_list) > 0:
_col_name = get_message('MOSJA12153', lang, showMsgId=False)
message_list.append(get_message('MOSJA12028', lang, colname=_col_name, cellname=_convert_rowcol_to_cellno(row, col)))
key = label_list[index]
rule_data_dic[key] = cell_val
col += 1
index += 1
rule_data_list.append(rule_data_dic)
return rule_data_list, message_list
def _convert_rowcol_to_cellno(row, col):
"""
[Method overview]
Convert a row and column index into a cell reference
"""
cellno = ''
col_tmp = col
col_name = ''
col_list = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
col_tmp, idx = divmod(col_tmp, 26) # 26 : A-Z
col_name = col_list[idx]
while col_tmp > 0:
col_tmp, idx = divmod(col_tmp - 1, 26)
col_name = '%s%s' % (col_list[idx], col_name)
cellno = '%s%s' % (col_name, row + 1)
return cellno
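# Illustrative check of the conversion above (a sketch added for clarity, not
# part of the original module): column indices map to Excel-style letters and
# the row index is shifted to be 1-based.
#   _convert_rowcol_to_cellno(0, 0)   -> 'A1'
#   _convert_rowcol_to_cellno(2, 2)   -> 'C3'
#   _convert_rowcol_to_cellno(2, 27)  -> 'AB3'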
def _validate_eventinfo(rule_type_id, eventinfo, message_list, lang):
"""
[Method overview]
Validate the eventinfo input values
"""
hhmm_repatter = re.compile(r'^([0-1][0-9]|2[0-3]):[0-5][0-9]$')
num_repatter = re.compile(r'^[0-9]+$')
dec_repatter = re.compile(r'^[0-9]+[.]{1}0+$')
emo_chk = UnicodeCheck()
# Extract the data objects
data_object_list = DataObject.objects.filter(rule_type_id=rule_type_id).order_by('data_object_id')
index = 0
conditional_name_set = set()
for i, d in enumerate(data_object_list):
cond_id = d.conditional_expression_id
cond_name = d.conditional_name
# Skip duplicate condition names
if cond_name in conditional_name_set:
continue
conditional_name_set.add(cond_name)
# Blank check
if eventinfo[index] == '':
message_list.append(get_message('MOSJA12030', lang, conditional_name=cond_name))
# Time condition
elif cond_id == 15:
if not hhmm_repatter.match(eventinfo[index]):
message_list.append(get_message('MOSJA12026', lang, conditional_name=cond_name))
# Numeric condition
elif cond_id in [1, 2, 5, 6, 7, 8]:
if not num_repatter.match(eventinfo[index]) and not dec_repatter.match(eventinfo[index]):
_col_name = get_message('MOSJA12151', lang, showMsgId=False)
message_list.append(get_message('MOSJA12025', lang, colname=_col_name, conditional_name=cond_name))
# String or regular expression condition
elif cond_id in [3, 4, 9, 10]:
# Emoji check
value_list = emo_chk.is_emotion(eventinfo[index])
if len(value_list) > 0:
_col_name = get_message('MOSJA12152', lang, showMsgId=False)
message_list.append(get_message('MOSJA12029', lang, colname=_col_name, conditional_name=cond_name))
# Contains / does-not-contain condition
elif cond_id in [13, 14]:
if not _validate_contains(eventinfo[index]):
_col_name = get_message('MOSJA12153', lang, showMsgId=False)
message_list.append(get_message('MOSJA12027', lang, colname=_col_name, conditional_name=cond_name))
# Emoji check
value_list = emo_chk.is_emotion(eventinfo[index])
if len(value_list) > 0:
_col_name = get_message('MOSJA12153', lang, showMsgId=False)
message_list.append(get_message('MOSJA12029', lang, colname=_col_name, conditional_name=cond_name))
index += 1
def _validate_eventdatetime(eventdatetime, message_list, lang):
"""
[Method overview]
Validate the event occurrence datetime
"""
try:
eventdatetime = datetime.datetime.strptime(eventdatetime, '%Y-%m-%d %H:%M:%S')
except Exception as e:
message_list.append(get_message('MOSJA12022', lang))
def _validate_contains(value):
"""
[Method overview]
Format check for the contains / does-not-contain condition
"""
chk_val = str(value).strip()
if not (chk_val.startswith("[") and chk_val.endswith("]")):
return False
chk_list = chk_val.strip("[]").split(",")
for v in chk_list:
v = v.strip()
if not v.startswith("\"") or not v.endswith("\""):
return False
return True
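# Quick illustration of the accepted format (a sketch added for clarity, not
# part of the original module): the value must be a bracketed list of
# double-quoted items.
#   _validate_contains('["error","warning"]')  -> True
#   _validate_contains('[error, warning]')     -> False  (items are not quoted)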
def _get_token(now=None):
"""
[Method overview]
Get a currently valid token
[Arguments]
now : datetime : current time
[Return value]
tkn : string : token
"""
if not now:
now = datetime.datetime.now(pytz.timezone('UTC'))
tkn = ''
# Fetch the valid tokens
tkn_list = list(
TokenInfo.objects.filter(
Q(use_start_time__isnull=True) | Q(use_start_time__lte=now)
).filter(
Q(use_end_time__isnull=True) | Q(use_end_time__gt=now)
).values_list(
'token_data', flat=True
)
)
if len(tkn_list) > 0:
tkn = tkn_list[0]
return tkn
|
the-stack_0_27528
|
# Copyright 2021 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main file for running the ImageNet example.
This file is intentionally kept short. The majority of the logic is in libraries
that can be easily tested and imported in Colab.
"""
from absl import app
from absl import flags
from absl import logging
from clu import platform
import train
import jax
from ml_collections import config_flags
import tensorflow as tf
FLAGS = flags.FLAGS
flags.DEFINE_string('workdir', None, 'Directory to store model data.')
config_flags.DEFINE_config_file(
'config',
None,
'File path to the training hyperparameter configuration.',
lock_config=True)
flags.mark_flags_as_required(['config', 'workdir'])
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
FLAGS.log_dir = FLAGS.workdir
FLAGS.stderrthreshold = 'info'
logging.get_absl_handler().start_logging_to_file()
# Hide any GPUs from TensorFlow. Otherwise TF might reserve memory and make
# it unavailable to JAX.
tf.config.experimental.set_visible_devices([], 'GPU')
logging.info('JAX host: %d / %d', jax.host_id(), jax.host_count())
logging.info('JAX local devices: %r', jax.local_devices())
# Add a note so that we can tell which task is which JAX host.
# (Depending on the platform task 0 is not guaranteed to be host 0)
platform.work_unit().set_task_status(
f'host_id: {jax.host_id()}, host_count: {jax.host_count()}')
platform.work_unit().create_artifact(platform.ArtifactType.DIRECTORY,
FLAGS.workdir, 'workdir')
train.train_and_evaluate(FLAGS.config, FLAGS.workdir)
if __name__ == '__main__':
app.run(main)
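# Example invocation (a sketch; the config path and workdir are assumptions,
# not taken from this repository):
#   python main.py --config=path/to/config.py --workdir=/tmp/imagenet_workdir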
|
the-stack_0_27529
|
from toga import validators
import unittest
class TestValidators(unittest.TestCase):
def setUp(self):
self.args = []
self.kwargs = {}
self.validator_factory = None
self.valid_inputs = []
self.invalid_inputs = []
self.check_empty = True
def check(self):
if self.validator_factory is None:
self.fail("Validator is not set!")
self.check_validator(
self.validator_factory(*self.args, **self.kwargs),
valid_inputs=self.valid_inputs,
invalid_inputs=self.invalid_inputs,
)
dummy_error = "This is a dummy error message!"
self.check_validator(
self.validator_factory(
*self.args, **self.kwargs, error_message=dummy_error
),
valid_inputs=self.valid_inputs,
invalid_inputs=[
(input_string, dummy_error)
for input_string, error_message in self.invalid_inputs
],
)
if self.check_empty:
self.assertIsNone(
self.validator_factory(
*self.args, **self.kwargs, error_message=dummy_error, allow_empty=True
)("")
)
self.assertEqual(
self.validator_factory(
*self.args, **self.kwargs, error_message=dummy_error, allow_empty=False
)(""),
dummy_error
)
def check_validator(self, validator, valid_inputs, invalid_inputs):
for valid_input in valid_inputs:
self.assertIsNone(
validator(valid_input),
msg='"{}" should be a valid input, but it is not'.format(valid_input),
)
for invalid_input, error_message in invalid_inputs:
self.assertEqual(
error_message,
validator(invalid_input),
msg='"{}" error message is different than expected.'.format(
invalid_input
),
)
def test_validate_minimum_length(self):
default_error_message = "Input is too short (length should be at least 5)"
self.args = [5]
self.validator_factory = validators.MinLength
self.valid_inputs = ["I am long enough", "right", "longer"]
self.invalid_inputs = [
("I", default_error_message),
("am", default_error_message),
("tiny", default_error_message),
]
self.check()
def test_validate_maximum_length(self):
default_error_message = "Input is too long (length should be at most 10)"
self.args = [10]
self.validator_factory = validators.MaxLength
self.valid_inputs = ["", "I am good", "nice", "a"]
self.invalid_inputs = [
("I am way too long", default_error_message),
("are you serious now?", default_error_message),
]
self.check_empty = False
self.check()
def test_validate_length_between(self):
default_error_message = "Input should be between 5 and 10 characters"
self.args = [5, 10]
self.validator_factory = validators.LengthBetween
self.valid_inputs = ["I am good", "right", "123456789"]
self.invalid_inputs = [
("I", default_error_message),
("am", default_error_message),
("tiny", default_error_message),
("I am way too long", default_error_message),
("are you serious now?", default_error_message),
]
self.check()
def test_validate_startswith(self):
default_error_message = 'Input should start with "good"'
self.args = ["good"]
self.validator_factory = validators.StartsWith
self.valid_inputs = [
"good to be back", "goodness!", "goody", "good, good, good"
]
self.invalid_inputs = [
("no good", default_error_message),
("I am so bad", default_error_message),
("goo goo dolls", default_error_message),
("go od", default_error_message),
(
"It doesn't matter if I'm good, if I don't start with it",
default_error_message
),
]
self.check()
def test_validate_endswith(self):
default_error_message = 'Input should end with "good"'
self.args = ["good"]
self.validator_factory = validators.EndsWith
self.valid_inputs = [
"go back to good", "It is so good", "good", "good, good, good"
]
self.invalid_inputs = [
("good start, but no", default_error_message),
("I am so bad", default_error_message),
("goo goo dolls", default_error_message),
("go od", default_error_message),
(
"It doesn't matter if I'm good, if I don't end with it",
default_error_message
),
]
self.check()
def test_validate_contains(self):
default_error_message = 'Input should contain "good"'
self.args = ["good"]
self.validator_factory = validators.Contains
self.valid_inputs = ["This is very good", "goodness", "goody", "nogood"]
self.invalid_inputs = [
("I am so bad", default_error_message),
("goo goo dolls", default_error_message),
("go od", default_error_message),
]
self.check()
def test_validate_contains_once(self):
self.args = ["good"]
self.kwargs = dict(compare_count=1)
self.validator_factory = validators.Contains
self.valid_inputs = ["This is very good", "goodness", "goody", "nogood"]
self.invalid_inputs = [
("I am so bad", 'Input should contain "good"'),
("good, very good", 'Input should contain "good" exactly 1 times'),
(
"it's good to be so good in being good",
'Input should contain "good" exactly 1 times'
),
]
self.check()
def test_validate_contains_zero_times(self):
self.args = ["bad"]
self.kwargs = dict(compare_count=0)
self.validator_factory = validators.Contains
self.valid_inputs = [
"", "This is very good", "goodness", "goody", "nogood", "good, very good"
]
self.invalid_inputs = [
("I am so bad", 'Input should not contain "bad"'),
("Why being so baddy?", 'Input should not contain "bad"'),
("sinbad", 'Input should not contain "bad"'),
]
self.check_empty = False
self.check()
def test_validate_not_contains(self):
self.args = ["bad"]
self.validator_factory = validators.NotContains
self.valid_inputs = [
"", "This is very good", "goodness", "goody", "nogood", "good, very good"
]
self.invalid_inputs = [
("I am so bad", 'Input should not contain "bad"'),
("Why being so baddy?", 'Input should not contain "bad"'),
("sinbad", 'Input should not contain "bad"'),
]
self.check_empty = False
self.check()
def test_validate_contains_two_words(self):
default_error_message = 'Input should contain "good" or "bad"'
self.args = [["good", "bad"]]
self.validator_factory = validators.Contains
self.valid_inputs = [
"There are always good and bad in life",
"bad before good",
"good, good, bad",
"I am so bad",
"I am so good",
]
self.invalid_inputs = [
("wanted words are not here", default_error_message),
("go od", default_error_message),
("b ad", default_error_message),
]
self.check()
def test_validate_match_regex(self):
default_error_message = "Input should match regex: [A-Z]{1}[a-z]{2}[A-Z]{1}"
self.args = ["[A-Z]{1}[a-z]{2}[A-Z]{1}"]
self.validator_factory = validators.MatchRegex
self.valid_inputs = [
"GooD", "partial is AlsO good in this case"
]
self.invalid_inputs = [
("Good", default_error_message),
("gooD", default_error_message),
("Goo", default_error_message),
("Goo2", default_error_message),
("!Goo", default_error_message),
]
self.check()
def test_contains_uppercase(self):
self.validator_factory = validators.ContainsUppercase
self.valid_inputs = [
"Good", "using Toga is very helpful", "ending with uppercase workS"
]
self.invalid_inputs = [
(
"lowercase is not helpful",
"Input should contain at least one uppercase character"
),
]
self.check()
def test_contains_two_uppercase(self):
self.kwargs = dict(compare_count=2)
self.validator_factory = validators.ContainsUppercase
self.valid_inputs = [
"GooD", "using TogA is very helpful"
]
self.invalid_inputs = [
(
"no uppercase is no good",
"Input should contain at least one uppercase character"
),
(
"One uppercase is not enough",
"Input should contain exactly 2 uppercase characters"
),
(
"Three Is a Crowd",
"Input should contain exactly 2 uppercase characters"
),
]
self.check()
def test_contains_lowercase(self):
self.validator_factory = validators.ContainsLowercase
self.valid_inputs = [
"gOOD", "USING tOGA IS VERY HELPFUL", "ENDING WITH LOWERCASE WORKs"
]
self.invalid_inputs = [
(
"STOP YELLING!",
"Input should contain at least one lowercase character"
),
]
self.check()
def test_contains_two_lowercase(self):
self.kwargs = dict(compare_count=2)
self.validator_factory = validators.ContainsLowercase
self.valid_inputs = [
"GooD", "USING tOGa IS VERY HELPFUL"
]
self.invalid_inputs = [
(
"NO LOWERCASE IS NO GOOD",
"Input should contain at least one lowercase character"
),
(
"oNE LOWERCASE IS NOT ENOUGH",
"Input should contain exactly 2 lowercase characters"
),
(
"tHREE iS A cROWD",
"Input should contain exactly 2 lowercase characters"
),
]
self.check()
def test_contains_digit(self):
self.validator_factory = validators.ContainsDigit
self.valid_inputs = [
"1) start counting", "count 2 and continue", "ends with 3"
]
self.invalid_inputs = [
("no digits in here", "Input should contain at least one digit"),
]
self.check()
def test_contains_two_digits(self):
self.kwargs = dict(compare_count=2)
self.validator_factory = validators.ContainsDigit
self.valid_inputs = [
"1+2", "the number 3 is bigger than 1",
]
self.invalid_inputs = [
("no digits in here", "Input should contain at least one digit"),
("only 1 digit is not enough", "Input should contain exactly 2 digits"),
("3 is w4y 2 much", "Input should contain exactly 2 digits"),
]
self.check()
def test_contains_special(self):
default_error_message = "Input should contain at least one special character"
self.validator_factory = validators.ContainsSpecial
self.valid_inputs = ["Hey!", "[email protected]", "#1"]
self.invalid_inputs = [
("bad", default_error_message),
("123", default_error_message)
]
self.check()
def test_contains_two_special(self):
self.kwargs = dict(compare_count=2)
self.validator_factory = validators.ContainsSpecial
self.valid_inputs = ["!Hey!", "[email protected]", "#1#"]
self.invalid_inputs = [
("nospecial", "Input should contain at least one special character"),
("notenough!", "Input should contain exactly 2 special characters"),
("this is too much", "Input should contain exactly 2 special characters"),
]
self.check()
def test_integer(self):
default_error_message = "Input should be an integer"
self.validator_factory = validators.Integer
self.valid_inputs = ["0", "00", "1", "21", "1234", "12423571"]
self.invalid_inputs = [
("a", default_error_message),
("ab", default_error_message),
("this is a not valid!", default_error_message),
("0.0", default_error_message),
("2.1", default_error_message),
("-0.22", default_error_message),
(".2", default_error_message),
("88.0", default_error_message),
("9.", default_error_message)
]
self.check()
def test_number(self):
default_error_message = "Input should be a number"
self.validator_factory = validators.Number
self.valid_inputs = [
"0", "00", "0.0", "1", "2.1", "-1", "-0.22", ".2", "88.0", "9."
]
self.invalid_inputs = [
("a", default_error_message),
("ab", default_error_message),
("this is a not valid!", default_error_message),
(".", default_error_message),
("88.a", default_error_message)
]
self.check()
def test_email(self):
default_error_message = "Input should be a valid email address"
self.validator_factory = validators.Email
self.valid_inputs = [
"[email protected]",
"[email protected]",
"[email protected]"
]
self.invalid_inputs = [
("[email protected]", default_error_message),
("tiberius.beeware.org", default_error_message),
("tiberius@[email protected]", default_error_message),
("tiberius@beeware", default_error_message),
("tiberius@beeware.", default_error_message)
]
self.check()
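# Illustration of the validator contract exercised by the tests above (a
# sketch, not part of the test suite): a validator returns None for valid
# input and an error-message string otherwise.
#   validator = validators.MinLength(5)
#   assert validator("right") is None
#   assert validator("tiny") == "Input is too short (length should be at least 5)"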
|
the-stack_0_27533
|
from setuptools import setup, find_packages
tests_require = [
"parameterized",
"nose2"
]
setup(
name="OpenNMT-tf",
version="2.4.0",
license="MIT",
description="Neural machine translation and sequence learning using TensorFlow",
author="OpenNMT",
author_email="[email protected]",
url="http://opennmt.net",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Scientific/Engineering :: Artificial Intelligence"
],
project_urls={
"Documentation": "http://opennmt.net/OpenNMT-tf/",
"Forum": "http://forum.opennmt.net/",
"Gitter": "https://gitter.im/OpenNMT/OpenNMT-tf",
"Source": "https://github.com/OpenNMT/OpenNMT-tf/"
},
keywords="tensorflow opennmt nmt neural machine translation",
install_requires=[
"pyonmttok>=1.17.0,<2;platform_system=='Linux'",
"pyyaml==5.1.*",
"rouge==0.3.1",
"sacrebleu>=1.4.1,<2",
"tensorflow-addons>=0.6,<0.7"
],
extras_require={
"tests": tests_require,
},
tests_require=tests_require,
test_suite="nose2.collector.collector",
packages=find_packages(exclude=["bin", "*.tests"]),
entry_points={
"console_scripts": [
"onmt-ark-to-records=opennmt.bin.ark_to_records:main",
"onmt-build-vocab=opennmt.bin.build_vocab:main",
"onmt-detokenize-text=opennmt.bin.detokenize_text:main",
"onmt-main=opennmt.bin.main:main",
"onmt-merge-config=opennmt.bin.merge_config:main",
"onmt-tokenize-text=opennmt.bin.tokenize_text:main",
],
}
)
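# Installation sketch (standard pip/setuptools usage, not part of this file):
#   pip install .          # runtime dependencies only
#   pip install .[tests]   # additionally pulls in parameterized and nose2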
|
the-stack_0_27535
|
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import pytest
import torch
from pyro.ops.ssm_gp import MaternKernel
from tests.common import assert_equal
@pytest.mark.parametrize("num_gps", [1, 2, 3])
@pytest.mark.parametrize("nu", [0.5, 1.5, 2.5])
def test_matern_kernel(num_gps, nu):
mk = MaternKernel(
nu=nu, num_gps=num_gps, length_scale_init=0.1 + torch.rand(num_gps)
)
dt = torch.rand(1).item()
forward = mk.transition_matrix(dt)
backward = mk.transition_matrix(-dt)
forward_backward = torch.matmul(forward, backward)
# going forward dt in time and then backward dt in time should bring us back to the identity
eye = (
torch.eye(mk.state_dim).unsqueeze(0).expand(num_gps, mk.state_dim, mk.state_dim)
)
assert_equal(forward_backward, eye)
# let's just check that these are PSD
torch.linalg.cholesky(mk.stationary_covariance())
torch.linalg.cholesky(mk.process_covariance(forward))
# evolving forward infinitesimally should yield the identity
nudge = mk.transition_matrix(torch.tensor([1.0e-9]))
assert_equal(nudge, eye)
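# The test can be run directly with pytest (the file path below is an
# assumption about where this test lives in the Pyro source tree):
#   pytest tests/ops/test_ssm_gp.py -k test_matern_kernel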
|
the-stack_0_27536
|
# Copyright 2010 New Relic, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module provides functions to collect information about the operating
system, Python and hosting environment.
"""
import newrelic
from newrelic.common.system_info import (total_physical_memory,
logical_processor_count, physical_processor_count)
import sys
import os
import platform
# try:
# import pkg_resources
# except ImportError:
# pass
try:
import newrelic.core._thread_utilization
except ImportError:
pass
def environment_settings():
"""Returns an array of arrays of environment settings
"""
env = []
# Agent information.
env.append(('Agent Version', '.'.join(map(str, newrelic.version_info))))
# System information.
env.append(('Arch', platform.machine()))
env.append(('OS', platform.system()))
env.append(('OS version', platform.release()))
env.append(('Total Physical Memory (MB)', total_physical_memory()))
env.append(('Logical Processors', logical_processor_count()))
physical_processor_packages, physical_cores = physical_processor_count()
# Report this attribute only if it has a valid value.
if physical_processor_packages:
env.append(('Physical Processor Packages',
physical_processor_packages))
# Report this attribute only if it has a valid value.
if physical_cores:
env.append(('Physical Cores', physical_cores))
# Python information.
env.append(('Python Program Name', sys.argv[0]))
env.append(('Python Executable', sys.executable))
env.append(('Python Home', os.environ.get('PYTHONHOME', '')))
env.append(('Python Path', os.environ.get('PYTHONPATH', '')))
env.append(('Python Prefix', sys.prefix))
env.append(('Python Exec Prefix', sys.exec_prefix))
env.append(('Python Runtime', '.'.join(platform.python_version_tuple())))
env.append(('Python Implementation', platform.python_implementation()))
env.append(('Python Version', sys.version))
env.append(('Python Platform', sys.platform))
env.append(('Python Max Unicode', sys.maxunicode))
# Extensions information.
extensions = []
if 'newrelic.core._thread_utilization' in sys.modules:
extensions.append('newrelic.core._thread_utilization')
env.append(('Compiled Extensions', ', '.join(extensions)))
# Dispatcher information.
dispatcher = []
if not dispatcher and 'mod_wsgi' in sys.modules:
mod_wsgi = sys.modules['mod_wsgi']
if hasattr(mod_wsgi, 'process_group'):
if mod_wsgi.process_group == '':
dispatcher.append(('Dispatcher', 'Apache/mod_wsgi (embedded)'))
else:
dispatcher.append(('Dispatcher', 'Apache/mod_wsgi (daemon)'))
env.append(('Apache/mod_wsgi Process Group',
mod_wsgi.process_group))
else:
dispatcher.append(('Dispatcher', 'Apache/mod_wsgi'))
if hasattr(mod_wsgi, 'version'):
dispatcher.append(('Dispatcher Version', str(mod_wsgi.version)))
if hasattr(mod_wsgi, 'application_group'):
env.append(('Apache/mod_wsgi Application Group',
mod_wsgi.application_group))
if not dispatcher and 'uwsgi' in sys.modules:
dispatcher.append(('Dispatcher', 'uWSGI'))
uwsgi = sys.modules['uwsgi']
if hasattr(uwsgi, 'version'):
dispatcher.append(('Dispatcher Version', uwsgi.version))
if not dispatcher and 'flup.server.fcgi' in sys.modules:
dispatcher.append(('Dispatcher', 'flup/fastcgi (threaded)'))
if not dispatcher and 'flup.server.fcgi_fork' in sys.modules:
dispatcher.append(('Dispatcher', 'flup/fastcgi (prefork)'))
if not dispatcher and 'flup.server.scgi' in sys.modules:
dispatcher.append(('Dispatcher', 'flup/scgi (threaded)'))
if not dispatcher and 'flup.server.scgi_fork' in sys.modules:
dispatcher.append(('Dispatcher', 'flup/scgi (prefork)'))
if not dispatcher and 'flup.server.ajp' in sys.modules:
dispatcher.append(('Dispatcher', 'flup/ajp (threaded)'))
if not dispatcher and 'flup.server.ajp_fork' in sys.modules:
dispatcher.append(('Dispatcher', 'flup/ajp (forking)'))
if not dispatcher and 'flup.server.cgi' in sys.modules:
dispatcher.append(('Dispatcher', 'flup/cgi'))
if not dispatcher and 'gunicorn' in sys.modules:
if 'gunicorn.workers.ggevent' in sys.modules:
dispatcher.append(('Dispatcher', 'gunicorn (gevent)'))
elif 'gunicorn.workers.geventlet' in sys.modules:
dispatcher.append(('Dispatcher', 'gunicorn (eventlet)'))
elif 'uvicorn.workers' in sys.modules:
dispatcher.append(('Dispatcher', 'gunicorn (uvicorn)'))
uvicorn = sys.modules.get('uvicorn')
if hasattr(uvicorn, '__version__'):
dispatcher.append(('Worker Version', uvicorn.__version__))
else:
dispatcher.append(('Dispatcher', 'gunicorn'))
gunicorn = sys.modules['gunicorn']
if hasattr(gunicorn, '__version__'):
dispatcher.append(('Dispatcher Version', gunicorn.__version__))
if not dispatcher and 'uvicorn' in sys.modules:
dispatcher.append(('Dispatcher', 'uvicorn'))
uvicorn = sys.modules['uvicorn']
if hasattr(uvicorn, '__version__'):
dispatcher.append(('Dispatcher Version', uvicorn.__version__))
if not dispatcher and 'tornado' in sys.modules:
dispatcher.append(('Dispatcher', 'tornado'))
tornado = sys.modules['tornado']
if hasattr(tornado, 'version_info'):
dispatcher.append(('Dispatcher Version',
str(tornado.version_info)))
env.extend(dispatcher)
# Module information.
plugins = []
# Using six to create a snapshot of sys.modules can occasionally
# fail in a rare case when modules are imported in parallel by different
# threads. This is because list(six.iteritems(sys.modules)) results in
# list(iter(sys.modules.iteritems())), which means sys.modules could change
# between the time when the iterable is handed over from the iter() to
# list().
#
# TL;DR: Do NOT use six module for the following iteration.
for name, module in list(sys.modules.items()):
# If the module isn't actually loaded (such as failed relative imports
# in Python 2.7), the module will be None and should not be reported.
if module is None:
continue
if name.startswith('newrelic.hooks.'):
plugins.append(name)
elif name.find('.') == -1 and hasattr(module, '__file__'):
# XXX This is disabled as it can cause notable overhead in
# pathological cases. Will be replaced with a new system
# where we have a whitelist of packages we really want version
# information for and will work out on a case-by-case basis
# how to extract that from the modules themselves.
# try:
# if 'pkg_resources' in sys.modules:
# version = pkg_resources.get_distribution(name).version
# if version:
# name = '%s (%s)' % (name, version)
# except Exception:
# pass
plugins.append(name)
env.append(('Plugin List', plugins))
return env
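# Minimal usage sketch (added for clarity; the import path is an assumption
# based on this module's package layout):
#   from newrelic.core.environment import environment_settings
#   for key, value in environment_settings():
#       print(key, value)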
|
the-stack_0_27537
|
import sqlite3
from key_value.help import key_value_invalid
conn = sqlite3.connect('db.sqlite3')
c=conn.cursor()
def initial():
c.execute('''CREATE TABLE key_value (key text, value text)''')
conn.commit()
def get(key):
key=(key,)
c.execute('''SELECT value FROM key_value WHERE key=?''',key)
try:
l=list(c.fetchone())
except:
print("Key Value Not Found !")
return False
# print(key[0]," - ",l[0])
return l[0]
def get_value(keys):
if len(keys)==0:
print("Sorry No Keys Found")
for i in keys:
out=get(i)
if out:
print(i," - ",out)
return True
def get_array(key,index):
try:
index=int(index)
except:
print("Sorry Index must be Integer")
return False
l=get(key)
if l:
l=list(l.strip().split(","))
if index>=len(l) or index<-len(l):
print("Index Out of Range")
return False
else:
print(key,"[",index,"]",l[index])
return l[index]
else:
print("Sorry Data Not Found !")
return False
def check_exists(key):
key=(key,)
c.execute('''SELECT value FROM key_value WHERE key=?''',key)
try:
l=list(c.fetchone())
return l[0]
except:
return False
def set_val(key,value):
if check_exists(key):
print("Key-Value Pair Already Exists !")
else:
m=(key,value,)
c.execute('''INSERT INTO key_value VALUES (?,?)''',m)
conn.commit()
def set_variable(pair):
set_val(pair[0],pair[1])
return True
def set_array(pair):
key=pair[0]
values=','.join(pair[1:])
set_val(key,values)
def set_value(pair):
# initial()
if len(pair)<2:
key_value_invalid()
elif len(pair)==2:
set_variable(pair)
elif len(pair)>2:
if pair[0]=="array" or pair[0]=="Array" or pair[0]=="ARRAY" or pair[0]=="list" or pair[0]=="List" or pair=="LIST":
set_array(pair[1:])
elif pair[0]=="stack" or pair[0]=="Stack" or pair[0]=="STACK":
set_array(pair[1:])
elif pair[0]=="queue" or pair[0]=="Queue" or pair[0]=="QUEUE":
set_array(pair[1:])
elif pair[0]=="variable":
set_variable(pair[1:])
else:
print("Sorry Wrong Data-Structure")
def push_array(key,values):
out=check_exists(key)
if out:
l=list(out.strip().split(","))
if len(values):
for i in values:
l.append(i)
s=','.join(l)
print(s)
update(key,s)
else:
print("Sorry Values Doesn't Exists")
else:
print("Key-Value Pair doesn't Exists")
def pop(keys,index):
if len(keys)==0:
print("Sorry Keys Not Found !")
return False
else:
try:
index=int(index)
except:
print("Index must be Integer")
return False
for i in keys:
out=check_exists(i)
if out:
l=list(out.strip().split(","))
if index>=len(l) or index<-len(l):
print("Index Out of Bounds")
else:
val=l[index]
l.pop(index)
print(i," - ",val)
s=','.join(l)
update(i,s)
else:
print("Key Not Found !")
return True
def update(key,value):
if check_exists(key):
key=(value,key,)
c.execute('''UPDATE key_value SET value = ? WHERE key = ?;''',key)
conn.commit()
else:
print("Key-Value Pair Doesn't Exists")
def delete(keys):
if len(keys)==0:
print("Sorry Keys Not Found !")
return False
else:
for key in keys:
if check_exists(key):
key5=(key,)
c.execute('''DELETE FROM key_value WHERE key = ?''',key5)
conn.commit()
else:
print("Key Not Found ! - ",key)
# print("Key-Value Pair Deleted !")
return True
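# Minimal usage sketch (added for clarity; assumes the key_value table has
# already been created with initial()):
#   set_value(['mykey', 'myvalue'])   # store a simple key/value pair
#   get_value(['mykey'])              # prints: mykey - myvalue
#   delete(['mykey'])                 # remove it again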
|
the-stack_0_27538
|
"""
Class used to model NICMOS specific instrument data.
:Authors: Christopher Hanley, David Grumm, Megan Sosey
:License: :doc:`LICENSE`
"""
from stsci.tools import fileutil
from nictools import readTDD
import numpy as np
from .imageObject import imageObject
class NICMOSInputImage(imageObject):
SEPARATOR = '_'
def __init__(self, filename=None, output=None):
super().__init__(filename, output=output)
self.timeExt = 'TIME'
# define the cosmic ray bits value to use in the dq array
self.cr_bits_value = 4096
# Detector parameters, nic only has 1 detector in each file
self.full_shape = (256,256)
self._instrument=self._image['PRIMARY'].header["INSTRUME"]
self.native_units = 'COUNTS/S'
self.flatkey = 'FLATFILE'
for chip in range(1,self._numchips+1,1):
self._image[self.scienceExt,chip].cte_dir = 0 #no correction for nicmos
self._effGain = 1. #get the specific gain from the detector subclass
def _assignSignature(self, chip):
"""assign a unique signature for the image based
on the instrument, detector, chip, and size
this will be used to uniquely identify the appropriate
static mask for the image
this also records the filename for the static mask to the outputNames dictionary
"""
sci_chip = self._image[self.scienceExt,chip]
ny=sci_chip._naxis1
nx=sci_chip._naxis2
detnum = sci_chip.detnum
sig=(self.outroot,(nx,ny),int(detnum)) #signature is a tuple
sci_chip.signature=sig #signature is a tuple
def doUnitConversions(self):
"""Convert the data to electrons
This converts all science data extensions and saves
the results back to disk. We need to make sure
the data inside the chips already in memory is altered as well.
"""
# Image information
_handle = fileutil.openImage(self._filename, mode='readonly', memmap=False)
for det in range(1,self._numchips+1,1):
chip=self._image[self.scienceExt,det]
if chip._gain is not None:
#conversionFactor = (self.getExpTime() * self.getGain())
conversionFactor = chip._gain
if self.isCountRate():
conversionFactor *= chip._exptime
counts_str = 'COUNTS/S'
else:
counts_str = 'COUNTS'
# Multiply the values of the sci extension pixels by the gain.
print("Converting %s[%s,%d] from %s to ELECTRONS"%(self._filename,self.scienceExt,det,counts_str))
"""
# If the exptime is 0 the science image will be zeroed out.
np.multiply(_handle[self.scienceExt,det].data,conversionFactor,_handle[self.scienceExt,det].data)
#chip.data=_handle[self.scienceExt,det].data.copy()
# Set the BUNIT keyword to 'electrons'
chip.header.update('BUNIT','ELECTRONS')
_handle[0].header.update('BUNIT','ELECTRONS')
# Update the PHOTFLAM value
photflam = _handle[0].header['PHOTFLAM']
_handle[0].header.update('PHOTFLAM',(photflam/chip._gain))
chip._effGain = 1.0
"""
chip._effGain = chip._gain
chip._conversionFactor = conversionFactor
else:
msg = "Invalid gain value for data, no conversion done"
print(msg)
raise ValueError(msg)
# Close the files and clean-up
_handle.close()
self._effGain = conversionFactor #1.0
def _setchippars(self):
self._setDefaultReadnoise()
def getexptimeimg(self,chip):
"""
Return an array representing the exposure time per pixel for the detector.
Returns
-------
dark: array
Exposure time array in the same shape as the input image
"""
return self._image[self.timeExt,chip].data
def getflat(self, chip):
"""
Method for retrieving a detector's flat field.
Returns
-------
flat : array
The flat field array in the same shape as the input image with **units of cps**.
"""
# The reference flat field is inverted:
flat = 1.0 / super().getflat(chip)
return flat
def getdarkcurrent(self):
"""
Return the dark current for the NICMOS detectors.
Returns
-------
darkcurrent : float
Dark current value with **units of cps**.
"""
try:
darkcurrent = self._image[0].header['exptime'] * \
self._image[self.scienceExt,1]._darkrate
except:
str = "#############################################\n"
str += "# #\n"
str += "# Error: #\n"
str += "# Cannot find the value for 'EXPTIME' #\n"
str += "# in the image header. NICMOS input #\n"
str += "# images are expected to have this header #\n"
str += "# keyword. #\n"
str += "# #\n"
str += "#Error occured in the NICMOSInputImage class#\n"
str += "# #\n"
str += "#############################################\n"
raise ValueError(str)
return darkcurrent
def getdarkimg(self,chip):
"""
Return an array representing the dark image for the detector.
Returns
-------
dark : array
The dark array in the same shape as the image with **units of cps**.
"""
# Read the temperature-dependent dark file. The name for the file is taken from
# the TEMPFILE keyword in the primary header.
tddobj = readTDD.fromcalfile(self.name)
if tddobj is None:
return np.ones(self.full_shape, dtype=self.image_dtype) * self.getdarkcurrent()
else:
# Create Dark Object from AMPGLOW and Linear Dark components
darkobj = tddobj.getampglow() + tddobj.getlindark()
# Return the dark image, taking into account any subarray information available
return darkobj[self.ltv2:self.size2,self.ltv1:self.size1]
def isCountRate(self):
"""
isCountRate: Method of IRInputObject used to indicate if the
science data is in units of counts or count rate. This method
assumes that the keyword 'BUNIT' is in the header of the input
FITS file.
"""
has_bunit = False
if 'BUNIT' in self._image['sci',1].header :
has_bunit = True
countrate = False
if (self._image[0].header['UNITCORR'].strip() == 'PERFORM') or \
(has_bunit and self._image['sci',1].header['bunit'].find('/') != -1) :
countrate = True
return countrate
class NIC1InputImage(NICMOSInputImage):
def __init__(self, filename=None, output=None):
super().__init__(filename, output=output)
self._effGain = 1. #get the gain from the detector subclass
self._detector = self._image["PRIMARY"].header["CAMERA"]
self.proc_unit = "native"
def _getDarkRate(self):
_darkrate = 0.08 #electrons/s
if self.proc_unit == 'native':
_darkrate = _darkrate / self._effGain # DN/s
return _darkrate
def _getDefaultReadnoise(self):
""" This could be updated to calculate the readnoise from the NOISFILE.
"""
_rdnoise = 26.0 # electrons
if self.proc_unit == 'native':
_rdnoise = _rdnoise / self._effGain # ADU
return _rdnoise
def setInstrumentParameters(self, instrpars):
""" This method overrides the superclass to set default values into
the parameter dictionary, in case empty entries are provided.
"""
pri_header = self._image[0].header
self.proc_unit = instrpars['proc_unit']
if self._isNotValid (instrpars['gain'], instrpars['gnkeyword']):
instrpars['gnkeyword'] = 'ADCGAIN' #gain has been hardcoded below
if self._isNotValid (instrpars['rdnoise'], instrpars['rnkeyword']):
instrpars['rnkeyword'] = None
if self._isNotValid (instrpars['exptime'], instrpars['expkeyword']):
instrpars['expkeyword'] = 'EXPTIME'
for chip in self.returnAllChips(extname=self.scienceExt):
chip._gain= 5.4 #measured gain
chip._rdnoise = self.getInstrParameter(instrpars['rdnoise'], pri_header,
instrpars['rnkeyword'])
chip._exptime = self.getInstrParameter(instrpars['exptime'], pri_header,
instrpars['expkeyword'])
if chip._gain is None or self._exptime is None:
print('ERROR: invalid instrument task parameter')
raise ValueError
# We need to treat Read Noise as a special case since it is
# not populated in the NICMOS primary header
if chip._rdnoise is None:
chip._rdnoise = self._getDefaultReadnoise()
chip._darkrate=self._getDarkRate()
chip.darkcurrent = self.getdarkcurrent()
chip._effGain = chip._gain
self._assignSignature(chip._chip) #this is used in the static mask, static mask name also defined here, must be done after outputNames
# Convert the science data to electrons if specified by the user.
self.doUnitConversions()
class NIC2InputImage(NICMOSInputImage):
def __init__(self,filename=None, output=None):
super().__init__(filename, output=output)
self._effGain=1. #measured
self._detector=self._image["PRIMARY"].header["CAMERA"]
self.proc_unit = "native"
def _getDarkRate(self):
_darkrate = 0.08 #electrons/s
if self.proc_unit == 'native':
_darkrate = _darkrate / self._effGain # DN/s
return _darkrate
def _getDefaultReadnoise(self):
_rdnoise = 26.0 #electrons
if self.proc_unit == 'native':
_rdnoise = _rdnoise/self._effGain #ADU
return _rdnoise
def setInstrumentParameters(self, instrpars):
""" This method overrides the superclass to set default values into
the parameter dictionary, in case empty entries are provided.
"""
pri_header = self._image[0].header
self.proc_unit = instrpars['proc_unit']
if self._isNotValid (instrpars['gain'], instrpars['gnkeyword']):
instrpars['gnkeyword'] = 'ADCGAIN' #gain has been hardcoded below
if self._isNotValid (instrpars['rdnoise'], instrpars['rnkeyword']):
instrpars['rnkeyword'] = None
if self._isNotValid (instrpars['exptime'], instrpars['expkeyword']):
instrpars['expkeyword'] = 'EXPTIME'
for chip in self.returnAllChips(extname=self.scienceExt):
chip._gain= 5.4 #measured gain
chip._rdnoise = self.getInstrParameter(
instrpars['rdnoise'], pri_header, instrpars['rnkeyword']
)
chip._exptime = self.getInstrParameter(
instrpars['exptime'], pri_header, instrpars['expkeyword']
)
if chip._gain is None or self._exptime is None:
print('ERROR: invalid instrument task parameter')
raise ValueError
# We need to treat Read Noise as a special case since it is
# not populated in the NICMOS primary header
if chip._rdnoise is None:
chip._rdnoise = self._getDefaultReadnoise()
chip._darkrate=self._getDarkRate()
chip.darkcurrent = self.getdarkcurrent()
chip._effGain = chip._gain
# this is used in the static mask, static mask name also defined
# here, must be done after outputNames
self._assignSignature(chip._chip)
# Convert the science data to electrons if specified by the user.
self.doUnitConversions()
def createHoleMask(self):
"""Add in a mask for the coronographic hole to the general static
pixel mask. """
pass
class NIC3InputImage(NICMOSInputImage):
def __init__(self, filename=None, output=None):
super().__init__(filename, output=output)
self._detector=self._image["PRIMARY"].header["CAMERA"] #returns 1,2,3
self._effGain = 1.
self.proc_unit = "native"
def _getDarkRate(self):
_darkrate = 0.15 #electrons/s
if self.proc_unit == 'native':
_darkrate = _darkrate/self._effGain #DN/s
return _darkrate
def _getDefaultReadnoise(self):
_rdnoise = 29.0 # electrons
if self.proc_unit == 'native':
_rdnoise = _rdnoise/self._effGain #ADU
return _rdnoise
def setInstrumentParameters(self, instrpars):
""" This method overrides the superclass to set default values into
the parameter dictionary, in case empty entries are provided.
"""
pri_header = self._image[0].header
self.proc_unit = instrpars['proc_unit']
if self._isNotValid (instrpars['gain'], instrpars['gnkeyword']):
instrpars['gnkeyword'] = 'ADCGAIN'
if self._isNotValid (instrpars['rdnoise'], instrpars['rnkeyword']):
instrpars['rnkeyword'] = None
if self._isNotValid (instrpars['exptime'], instrpars['expkeyword']):
instrpars['expkeyword'] = 'EXPTIME'
for chip in self.returnAllChips(extname=self.scienceExt):
chip._gain= 6.5 #measured gain
chip._rdnoise = self.getInstrParameter(
instrpars['rdnoise'], pri_header, instrpars['rnkeyword']
)
chip._exptime = self.getInstrParameter(
instrpars['exptime'], pri_header, instrpars['expkeyword']
)
if chip._gain is None or self._exptime is None:
print('ERROR: invalid instrument task parameter')
raise ValueError
# We need to treat Read Noise as a special case since it is
# not populated in the NICMOS primary header
if chip._rdnoise is None:
chip._rdnoise = self._getDefaultReadnoise()
chip._darkrate=self._getDarkRate()
chip.darkcurrent = self.getdarkcurrent()
chip._effGain = chip._gain
self._assignSignature(chip._chip) #this is used in the static mask, static mask name also defined here, must be done after outputNames
# Convert the science data to electrons if specified by the user.
self.doUnitConversions()
|
the-stack_0_27539
|
from __future__ import unicode_literals
import datetime
import json
from django.core.urlresolvers import reverse
from django.http import HttpResponsePermanentRedirect, JsonResponse
from django.shortcuts import get_object_or_404, redirect
from django.template.response import TemplateResponse
from ..cart.utils import set_cart_cookie
from ..core.utils import get_paginator_items, serialize_decimal
from ..settings import PAGINATE_BY
from .filters import DEFAULT_SORT, ProductFilter, SORT_BY_FIELDS
from .models import Category
from .utils import (products_with_details, products_for_cart,
handle_cart_form, get_availability,
get_product_images, get_variant_picker_data,
get_product_attributes_data,
product_json_ld, products_with_availability)
def product_details(request, slug, product_id, form=None):
"""Product details page
The following variables are available to the template:
product:
The Product instance itself.
is_visible:
Whether the product is visible to regular users (for cases when an
admin is previewing a product before publishing).
form:
The add-to-cart form.
price_range:
The PriceRange for the product including all discounts.
undiscounted_price_range:
The PriceRange excluding all discounts.
discount:
Either a Price instance equal to the discount value or None if no
discount was available.
local_price_range:
The same PriceRange from price_range represented in user's local
currency. The value will be None if exchange rate is not available or
the local currency is the same as site's default currency.
"""
products = products_with_details(user=request.user)
product = get_object_or_404(products, id=product_id)
if product.get_slug() != slug:
return HttpResponsePermanentRedirect(product.get_absolute_url())
today = datetime.date.today()
is_visible = (
product.available_on is None or product.available_on <= today)
if form is None:
form = handle_cart_form(request, product, create_cart=False)[0]
availability = get_availability(product, discounts=request.discounts,
local_currency=request.currency)
template_name = 'product/details_%s.html' % (
type(product).__name__.lower(),)
templates = [template_name, 'product/details.html']
product_images = get_product_images(product)
variant_picker_data = get_variant_picker_data(
product, request.discounts, request.currency)
product_attributes = get_product_attributes_data(product)
show_variant_picker = all([v.attributes for v in product.variants.all()])
json_ld_data = product_json_ld(product, availability, product_attributes)
return TemplateResponse(
request, templates,
{'is_visible': is_visible,
'form': form,
'availability': availability,
'product': product,
'product_attributes': product_attributes,
'product_images': product_images,
'show_variant_picker': show_variant_picker,
'variant_picker_data': json.dumps(
variant_picker_data, default=serialize_decimal),
'json_ld_product_data': json.dumps(
json_ld_data, default=serialize_decimal)})
def product_add_to_cart(request, slug, product_id):
# types: (int, str, dict) -> None
if not request.method == 'POST':
return redirect(reverse(
'product:details',
kwargs={'product_id': product_id, 'slug': slug}))
products = products_for_cart(user=request.user)
product = get_object_or_404(products, pk=product_id)
form, cart = handle_cart_form(request, product, create_cart=True)
if form.is_valid():
form.save()
if request.is_ajax():
response = JsonResponse({'next': reverse('cart:index')}, status=200)
else:
response = redirect('cart:index')
else:
if request.is_ajax():
response = JsonResponse({'error': form.errors}, status=400)
else:
response = product_details(request, slug, product_id, form)
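    # For anonymous users the cart is not tied to an account, so it is
    # persisted via a cookie on whichever response is returned below.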
if not request.user.is_authenticated:
set_cart_cookie(cart, response)
return response
def category_index(request, path, category_id):
category = get_object_or_404(Category, id=category_id)
actual_path = category.get_full_path()
if actual_path != path:
return redirect('product:category', permanent=True, path=actual_path,
category_id=category_id)
products = (products_with_details(user=request.user)
.filter(categories__name=category)
.order_by(DEFAULT_SORT))
product_filter = ProductFilter(
request.GET, queryset=products, category=category)
products_paginated = get_paginator_items(
product_filter.qs, PAGINATE_BY, request.GET.get('page'))
products_and_availability = list(products_with_availability(
products_paginated, request.discounts, request.currency))
sort_by = request.GET.get('sort_by', DEFAULT_SORT)
ctx = {'category': category, 'filter': product_filter,
'products': products_and_availability,
'products_paginated': products_paginated,
'sort_by_choices': SORT_BY_FIELDS,
'sort_by_label': sort_by.strip('-'),
'is_descending': sort_by.startswith('-')}
return TemplateResponse(request, 'category/index.html', ctx)
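# A hypothetical urls.py wiring for the views above. Only the route names
# 'product:details' and 'product:category' are taken from the reverse() and
# redirect() calls in this module; the regex patterns and the 'add-to-cart'
# name are assumptions:
#
#     from django.conf.urls import url
#     from . import views
#
#     urlpatterns = [
#         url(r'^(?P<slug>[a-z0-9_-]+?)-(?P<product_id>[0-9]+)/$',
#             views.product_details, name='details'),
#         url(r'^(?P<slug>[a-z0-9_-]+?)-(?P<product_id>[0-9]+)/add/$',
#             views.product_add_to_cart, name='add-to-cart'),
#         url(r'^category/(?P<path>[a-z0-9_/-]+?)-(?P<category_id>[0-9]+)/$',
#             views.category_index, name='category'),
#     ]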
|
the-stack_0_27542
|
#!/usr/bin/env python
#-----------------------------------------------------------------------------
# Copyright (c) 2019-2020, NeXpy Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING, distributed with this software.
#-----------------------------------------------------------------------------
import argparse
from nexusformat import __version__
def main():
parser = argparse.ArgumentParser(
description="Determine version number of nexusformat API")
parser.add_argument('-v', '--version', action='version',
version='nexusformat v%s' % __version__)
parser.parse_args(['--version'])
if __name__ == "__main__":
main()
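# Example invocation, assuming the package exposes this as a console script
# (the command name below is illustrative):
#
#     $ nxversion
#     nexusformat v<installed version>
#
# Because parse_args(['--version']) is called with a fixed argument list, the
# script always prints the version and exits, whatever is passed on the
# command line.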
|
the-stack_0_27543
|
# This file is part of the General Standards Digitizer Driver.
# It is subject to the license terms in the LICENSE.txt file found in the
# top-level directory of this distribution and at
# https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html. No part
# of the General Standards Digitizer Driver, including this file, may be copied,
# modified, propagated, or distributed except according to the terms
# contained in the LICENSE.txt file.
from __future__ import print_function
import os
import argparse
INIT_CHANNELS_FILE = 'genStdsInitChannels.cmd'
LOAD_CHANNELS_DB_FILE = 'genStdsLoadChannelsDb.cmd'
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-n', '--num-channels', type=int, default=64, help='Number of channels to generate')
args = parser.parse_args()
dir_path = os.path.dirname(os.path.realpath(__file__))
channels = [i for i in range(args.num_channels)]
with open(os.path.join(dir_path, INIT_CHANNELS_FILE), 'w') as f:
f.write("\n# Initialize the stdArrays plugins.\n")
for channel in channels:
f.write('NDStdArraysConfigure("$(DEVICE_NAME)_ch{0:}_stdarrays", $(STDARRAYS_QUEUE_SIZE), $(STDARRAYS_BLOCKING_CALLBACKS), "$(DEVICE_NAME)_channels", {0:}, $(STDARRAYS_MAX_MEMORY), $(STDARRAYS_PRIORITY), $(STDARRAYS_STACK_SIZE))\n'.format(channel))
with open(os.path.join(dir_path, LOAD_CHANNELS_DB_FILE), 'w') as f:
f.write("# Load records for each chanel.\n")
for channel in channels:
f.write('dbLoadRecords("$(TR_CORE)/db/TRChannel.db", "PREFIX=$(PREFIX):CH{0:}, CHANNELS_PORT=$(DEVICE_NAME)_channels, CHANNEL={0:}")\n'
.format(channel))
f.write('dbLoadRecords("$(TR_CORE)/db/TRChannelData.db", "PREFIX=$(PREFIX):CH{0:}, STDAR_PORT=$(DEVICE_NAME)_ch{0:}_stdarrays, SIZE=$(WAVEFORM_SIZE), SNAP_SCAN=$(SNAP_SCAN)")\n'
.format(channel))
if __name__ == '__main__':
main()
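# For channel 0 the generated files contain lines like the following (expanded
# directly from the format strings above):
#
#   genStdsInitChannels.cmd:
#     NDStdArraysConfigure("$(DEVICE_NAME)_ch0_stdarrays", $(STDARRAYS_QUEUE_SIZE), $(STDARRAYS_BLOCKING_CALLBACKS), "$(DEVICE_NAME)_channels", 0, $(STDARRAYS_MAX_MEMORY), $(STDARRAYS_PRIORITY), $(STDARRAYS_STACK_SIZE))
#
#   genStdsLoadChannelsDb.cmd:
#     dbLoadRecords("$(TR_CORE)/db/TRChannel.db", "PREFIX=$(PREFIX):CH0, CHANNELS_PORT=$(DEVICE_NAME)_channels, CHANNEL=0")
#     dbLoadRecords("$(TR_CORE)/db/TRChannelData.db", "PREFIX=$(PREFIX):CH0, STDAR_PORT=$(DEVICE_NAME)_ch0_stdarrays, SIZE=$(WAVEFORM_SIZE), SNAP_SCAN=$(SNAP_SCAN)")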
|
the-stack_0_27546
|
"""Command to create a sample email"""
import email.utils
from django.core.management.base import BaseCommand
from django.utils import timezone
from django.db import transaction
from stored_mail import models
class Command(BaseCommand):
help = 'Create a sample email'
def add_arguments(self, parser) -> None:
parser.add_argument('from_address')
parser.add_argument('to_address')
parser.add_argument('--count', type=int, default=1)
def handle(self, *args, **options):
from_name, from_address = email.utils.parseaddr(options['from_address'])
to_name, to_address = email.utils.parseaddr(options['to_address'])
count = options['count']
now = timezone.now()
with transaction.atomic():
for i in range(count):
outgoing = models.OutgoingMessage()
outgoing.from_address = from_address
outgoing.from_name = from_name
outgoing.subject = "Test message"
outgoing.text = f"This is a test message generated at {now}, sequence {i+1} of {count}"
outgoing.html = f"<body><div>This is a <em>test</em> message generated at {now}, sequence {i+1} of {count}</div></body>"
outgoing.save()
send_addr = models.RelatedAddress()
send_addr.message = outgoing
send_addr.field = 'to'
send_addr.name = to_name
send_addr.address = to_address
send_addr.save()
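# Example invocation (the management command name comes from this module's
# filename, which is not shown here; 'create_sample_email' is a guess):
#
#     python manage.py create_sample_email \
#         "Alice Example <alice@example.com>" \
#         "Bob Example <bob@example.com>" --count 3
#
# Each run creates `count` OutgoingMessage rows plus one 'to' RelatedAddress
# per message, all within a single atomic transaction.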
|
the-stack_0_27549
|
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import sys
if sys.platform == "win32":
    # The installer, yum, and pip place the openvino DLLs in different
    # directories, and those paths need to be visible to the openvino modules.
#
# If you're using a custom installation of openvino,
# add the location of openvino dlls to your system PATH.
#
    # Look for the libs in the pip installation path by default.
openvino_libs = [os.path.join(os.path.dirname(__file__), '..', '..', 'openvino', 'libs')]
    # The setupvars.bat script sets all library paths in the OPENVINO_LIB_PATHS environment variable.
openvino_libs_installer = os.getenv('OPENVINO_LIB_PATHS')
if openvino_libs_installer:
openvino_libs.extend(openvino_libs_installer.split(';'))
for lib in openvino_libs:
lib_path = os.path.join(os.path.dirname(__file__), lib)
if os.path.isdir(lib_path):
# On Windows, with Python >= 3.8, DLLs are no longer imported from the PATH.
if (3, 8) <= sys.version_info:
os.add_dll_directory(os.path.abspath(lib_path))
else:
os.environ["PATH"] = os.path.abspath(lib_path) + ";" + os.environ["PATH"]
from .offline_transformations_api import *
__all__ = ['ApplyMOCTransformations']
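# On Windows, a custom (non-pip) install can make its DLLs visible by setting
# OPENVINO_LIB_PATHS before this module is imported, since the variable is read
# at import time. The paths and import line below are illustrative only:
#
#     import os
#     os.environ["OPENVINO_LIB_PATHS"] = (
#         r"C:\custom\openvino\bin;C:\custom\openvino\ngraph\lib")
#     from openvino.offline_transformations import ApplyMOCTransformations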
|
the-stack_0_27552
|
#!/usr/bin/env python3
import os
from parsers import read_smartctl
from parsers import read_decode_dimms
from parsers import read_dmidecode
from parsers import read_lspci_and_glxinfo
from parsers import read_lscpu
filedir = "tests/2018-castes-mbp/"
def test_lspci():
expect = {
"type": "graphics-card",
"working": "yes",
"brand-manufacturer": "AMD/ATI",
"brand": "Apple Inc. Radeon Pro 560X",
"internal-name": "",
"model": "Radeon RX 460/560D / Pro 450/455/460/555/555X/560/560X",
"capacity-byte": 4294967296,
}
output = read_lspci_and_glxinfo.read_lspci_and_glxinfo(
True, os.path.join(filedir, "lspci.txt"), os.path.join(filedir, "glxinfo.txt")
)
assert output == expect
def test_lscpu():
expect = {
"type": "cpu",
"working": "yes",
"isa": "x86-64",
"model": "Core i7-8750H",
"brand": "Intel",
"core-n": 6,
"thread-n": 12,
"frequency-hertz": 2200000000,
}
output = read_lscpu.read_lscpu(os.path.join(filedir, "lscpu.txt"))
assert output == expect
def test_ram():
output = read_decode_dimms.read_decode_dimms(os.path.join(filedir, "dimms.txt"))
assert len(output) == 0
def test_baseboard():
expect = {
"type": "motherboard",
"working": "yes",
"brand": "Apple Inc.",
"model": "Mac-937A206F2EE63C01",
"sn": "C0290440002JP5P1T",
}
output = read_dmidecode.get_baseboard(os.path.join(filedir, "baseboard.txt"))
assert output == expect
def test_connector():
baseboard = read_dmidecode.get_baseboard(os.path.join(filedir, "baseboard.txt"))
expect = {
"type": "motherboard",
"working": "yes",
"brand": "Apple Inc.",
"model": "Mac-937A206F2EE63C01",
"sn": "C0290440002JP5P1T",
"usb-ports-n": 2,
"mini-jack-ports-n": 1,
"thunderbolt-ports-n": 1,
"notes": "",
}
output = read_dmidecode.get_connectors(
os.path.join(filedir, "connector.txt"), baseboard
)
assert output == expect
def test_chassis():
expect = {
"brand": "Apple Inc.",
"sn": "CENSORED",
"type": "case",
"motherboard-form-factor": "proprietary-laptop",
"model": "",
}
output = read_dmidecode.get_chassis(os.path.join(filedir, "chassis.txt"))
assert output == expect
def test_smartctl():
expect = []
output = read_smartctl.read_smartctl(filedir)
assert output == expect
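# These are ordinary pytest functions; assuming the fixture files exist under
# tests/2018-castes-mbp/, they can be run from the project root with something
# like:
#
#     python -m pytest <path to this test module> -v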
|
the-stack_0_27553
|
# Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import platform
from spack.architecture import Platform, Target
from spack.operating_systems.mac_os import MacOs
class Darwin(Platform):
priority = 89
front_end = 'x86_64'
back_end = 'x86_64'
default = 'x86_64'
def __init__(self):
super(Darwin, self).__init__('darwin')
self.add_target(self.default, Target(self.default))
mac_os = MacOs()
self.default_os = str(mac_os)
self.front_os = str(mac_os)
self.back_os = str(mac_os)
self.add_operating_system(str(mac_os), mac_os)
@classmethod
def detect(cls):
return 'darwin' in platform.system().lower()
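# Illustrative use of the class above (how Spack itself selects a platform may
# differ; only attributes defined in this file are used here):
#
#     if Darwin.detect():
#         darwin = Darwin()
#         print(darwin.default, darwin.default_os)  # e.g. 'x86_64' and the
#                                                   # detected macOS version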
|