ext | sha | content
---|---|---|
py | b40eb6a480acfe779b3d256afb7fc239b53bc776 | #!/usr/bin/env python2.7
import datetime
import json
import os
import sys
import urllib
from textwrap import dedent
import pytz
sys.path.append(os.path.realpath(__file__ + '/../../../lib'))
import udf
from udf import useData
class CJSON(udf.TestCase):
def setUp(self):
self.query('DROP SCHEMA t1 CASCADE', ignore_errors=True)
self.query('CREATE SCHEMA t1')
self.query(dedent('''\
CREATE EXTERNAL SCALAR SCRIPT
cjson_decode(json VARCHAR(10000))
RETURNS VARCHAR(10000) AS
# redirector @@redirector_url@@
import json
import cjson
def run(ctx):
return json.dumps(cjson.decode(ctx.json))
'''))
self.query(dedent('''\
CREATE EXTERNAL SCALAR SCRIPT
cjson_encode(json VARCHAR(10000))
RETURNS VARCHAR(10000) AS
# redirector @@redirector_url@@
import json
import cjson
def run(ctx):
return cjson.encode(json.loads(ctx.json))
'''))
def test_decode_empty_list(self):
rows = self.query('SELECT cjson_decode(?) FROM DUAL', '[]')
self.assertRowsEqual([('[]',)], rows)
def test_encode_empty_list(self):
rows = self.query('SELECT cjson_encode(?) FROM DUAL', '[]')
self.assertRowsEqual([('[]',)], rows)
@staticmethod
def nested():
return [
[1, 2, 3.3, -4.5e10],
{"a": "A", "b": "B"},
[],
{},
{"a": [1,2,3,4], "x": ["a", "b", "c"]},
False,
True,
None,
]
def test_decode_structured_data(self):
data = json.dumps(self.nested())
rows = self.query('SELECT cjson_decode(?) FROM DUAL', data)
self.assertRowsEqual([(data,)], rows)
def test_encode_structured_data(self):
data = json.dumps(self.nested())
rows = self.query('SELECT cjson_encode(?) FROM DUAL', data)
self.assertRowsEqual([(data,)], rows)
class Numpy(udf.TestCase):
def setUp(self):
self.query('DROP SCHEMA t1 CASCADE', ignore_errors=True)
self.query('CREATE SCHEMA t1')
@useData((x,) for x in (3, 30, 300))
def test_numpy_inverse(self, dim):
self.query(dedent('''\
CREATE EXTERNAL SCALAR SCRIPT
numpy(dim INTEGER)
RETURNS boolean AS
# redirector @@redirector_url@@
from numpy import *
from numpy.linalg import inv
from numpy.random import seed, random_sample
def run(ctx):
dim = ctx.dim
seed(12345678 * dim)
A = random_sample((dim, dim))
Ai = inv(A)
R = dot(A, Ai) - identity(dim)
return bool(-1e-12 <= R.min() <= R.max() <= 1e-12)
'''))
rows = self.query('SELECT numpy(?) FROM dual', dim)
self.assertRowsEqual([(True,)], rows)
class Pytz(udf.TestCase):
def setUp(self):
self.query('DROP SCHEMA t1 CASCADE', ignore_errors=True)
self.query('CREATE SCHEMA t1')
timezones = '''
America/Manaus
Asia/Katmandu
Asia/Tokyo
Asia/Yerevan
Europe/Berlin
'''.split()
@useData((tz,) for tz in timezones)
def test_convert(self, tz):
self.query(dedent('''\
CREATE EXTERNAL SCALAR SCRIPT
tz_convert_py(dt TIMESTAMP, tz VARCHAR(100))
RETURNS TIMESTAMP AS
# redirector @@redirector_url@@
import pytz
def run(ctx):
tz = pytz.timezone(ctx.tz)
dt_utc = ctx.dt.replace(tzinfo=pytz.utc)
dt = dt_utc.astimezone(tz)
return dt.replace(tzinfo=None)
'''))
dt = datetime.datetime(2012, 4, 3, 23, 59, 0)
rows = self.query('SELECT tz_convert_py(?, ?) FROM dual', (dt, tz))
converted = dt.replace(tzinfo=pytz.utc).astimezone(pytz.timezone(tz))
self.assertRowsEqual([(converted.replace(tzinfo=None),)], rows)
if __name__ == '__main__':
udf.main()
# vim: ts=4:sts=4:sw=4:et:fdm=indent
|
py | b40eb6eceafe292f86e5d062fd0994285e63df4b | """
Basic example on how to generate a package documentation
========================================================
A simple example of how to document a Python package, here 'pysphinxdoc'.
"""
# sphinx_gallery_thumbnail_path = "_static/pysphinxdoc.png"
import os
import subprocess
#############################################################################
# Generate the rst auto documentation
# -----------------------------------
cmd = ["sphinxdoc", "-v", "2", "-p", "$HOME/git/pysphinxdoc", "-n",
"pysphinxdoc", "-o", "$HOME/git/pysphinxdoc/doc"]
subprocess.check_call(cmd)
#############################################################################
# Compute the html documentation
# ------------------------------
cmd = ["make", "raw-html"]
subprocess.check_call(cmd, cwd=os.path.expandvars("$HOME/git/pysphinxdoc/doc"))
|
py | b40eb7182d554e226afd81c671b9da52da0c5741 | import cv2
def trimming(input_path: str, output_path: str, top: int, bottom: int, left: int, right: int) -> None:
    # Crop the image to the [top:bottom, left:right] pixel window and save it.
    img = cv2.imread(input_path)
    trimmed_img = img[top:bottom, left:right]
    cv2.imwrite(output_path, trimmed_img)
    return
|
py | b40eb7f475b0cc434eeb9328abc5fb8161c22126 | #!/usr/bin/env python2
# Copyright 2016 Vimal Manohar
# 2016 Johns Hopkins University (author: Daniel Povey)
# Apache 2.0
from __future__ import print_function
import argparse
import logging
import operator
import os
import sys
from collections import defaultdict
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s [%(pathname)s:%(lineno)s - "
"%(funcName)s - %(levelname)s ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
# If you supply the <lang> directory (the one that corresponds to
# how you decoded the data) to this script, it assumes that the <lang>
# directory contains phones/align_lexicon.int, and it uses this to work
# out a reasonable guess of the non-scored phones, based on which have
# a single-word pronunciation that maps to a silence phone.
# It then uses the words.txt to work out the written form of those words.
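# For example (hypothetical integer IDs), an align_lexicon.int line such as
# "14 14 3" maps word-id 14 to the single phone-id 3; if phone 3 also appears
# in phones/silence.int, word 14 is treated as non-scored and its written form
# is then looked up in words.txt.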
parser = argparse.ArgumentParser(
description = "This program works out a reasonable guess at a list of "
"non-scored words (words that won't affect the WER evaluation): "
"things like [COUGH], [NOISE] and so on. This is useful because a list of "
"such words is required by some other scripts (e.g. modify_ctm_edits.py), "
"and it's inconvenient to have to specify the list manually for each language. "
"This program writes out the words in text form, one per line.")
parser.add_argument("lang", type = str,
help = "The lang/ directory. This program expects "
"lang/words.txt and lang/phones/silence.int and "
"lang/phones/align_lexicon.int to exist, and will use them to work "
"out a reasonable guess of the non-scored words (as those whose "
"pronunciations are a single phone in the 'silphones' list)")
args = parser.parse_args()
non_scored_words = set()
def read_lang(lang_dir):
global non_scored_words
if not os.path.isdir(lang_dir):
logger.error("expected lang/ directory %s to "
"exist.", lang_dir)
raise RuntimeError
for f in [ '/words.txt', '/phones/silence.int', '/phones/align_lexicon.int' ]:
if not os.path.exists(lang_dir + f):
logger.error("expected file %s%s to exist.", lang_dir, f)
raise RuntimeError
# read silence-phones.
try:
silence_phones = set()
for line in open(lang_dir + '/phones/silence.int').readlines():
silence_phones.add(int(line))
except Exception:
logger.error("problem reading file "
"%s/phones/silence.int", lang_dir)
raise
# read align_lexicon.int.
# format is: <word-index> <word-index> <phone-index1> <phone-index2> ..
# We're looking for line of the form:
# w w p
# where w > 0 and p is in the set 'silence_phones'
try:
silence_word_ints = set()
for line in open(lang_dir + '/phones/align_lexicon.int').readlines():
a = line.split()
if len(a) == 3 and a[0] == a[1] and int(a[0]) > 0 and \
int(a[2]) in silence_phones:
silence_word_ints.add(int(a[0]))
except Exception:
logger.error("problem reading file %s/phones/align_lexicon.int",
lang_dir)
raise
try:
for line in open(lang_dir + '/words.txt').readlines():
[ word, integer ] = line.split()
if int(integer) in silence_word_ints:
non_scored_words.add(word)
except Exception:
logger.error("problem reading file %s/words.txt.int", lang_dir)
raise
if not len(non_scored_words) == len(silence_word_ints):
raise RuntimeError("error getting silence words, len({0}) != len({1})"
"".format(non_scored_words, silence_word_ints))
for word in non_scored_words:
print(word)
read_lang(args.lang)
|
py | b40eb851ee9021be4d5f6cc1d3d240b8a9cba865 | import os
import glob
import pickle
import re
# Our numerical workhorses
import numpy as np
import pandas as pd
# Import the project utils
import sys
sys.path.insert(0, '../')
import NB_sortseq_utils as utils
# Import matplotlib stuff for plotting
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from IPython.core.pylabtools import figsize
# Seaborn, useful for graphics
import seaborn as sns
sns.set_palette("deep", color_codes=True)
utils.set_plotting_style1()
#===============================================================================
# Set output directory
#===============================================================================
output = 'output_figs/'
#===============================================================================
# Read the data
#===============================================================================
df = pd.read_csv('../flow/20150209_flow_hist.csv', comment='#')
# wild-type histogram details
date = 20150209
promoter = 'relB'
strain = 'MG1655'
region = 'na'
bin = 'na'
sequence = 'wild-type'
media = 'M9glucose'
condition = 'na'
df_wt_hist1 = df[(df.date == date) & (df.promoter == promoter) \
& (df.strain == strain) & (df.region == region) \
& (df.sequence == sequence) \
& (df.media == media) & (df.condition == condition)]
# library histogram details
date = 20150209
promoter = 'relB'
strain = 'MG1655'
region = 'mut1'
bin = 'na'
sequence = 'library'
media = 'M9glucose'
condition = 'na'
df_lib_hist1 = df[(df.date == date) & (df.promoter == promoter) \
& (df.strain == strain) & (df.region == region) \
& (df.sequence == sequence) \
& (df.media == media) & (df.condition == condition)]
df = pd.read_csv('../flow/20150210_flow_hist.csv', comment='#')
# library bin 1 histogram details
date = 20150210
promoter = 'relB'
strain = 'MG1655'
region = 'mut1'
bin = 1
sequence = 'library'
media = 'M9glucose'
condition = 'na'
df_hist1 = df[(df.date == date) & (df.promoter == promoter) \
& (df.strain == strain) & (df.region == region) \
& (df.bin == bin) & (df.sequence == sequence) \
& (df.media == media) & (df.condition == condition)]
# library bin 2 histogram details
date = 20150210
promoter = 'relB'
strain = 'MG1655'
region = 'mut1'
bin = 2
sequence = 'library'
media = 'M9glucose'
condition = 'na'
df_hist2 = df[(df.date == date) & (df.promoter == promoter) \
& (df.strain == strain) & (df.region == region) \
& (df.bin == bin) & (df.sequence == sequence) \
& (df.media == media) & (df.condition == condition)]
# library bin 3 histogram details
date = 20150210
promoter = 'relB'
strain = 'MG1655'
region = 'mut1'
bin = 3
sequence = 'library'
media = 'M9glucose'
condition = 'na'
df_hist3 = df[(df.date == date) & (df.promoter == promoter) \
& (df.strain == strain) & (df.region == region) \
& (df.bin == bin) & (df.sequence == sequence) \
& (df.media == media) & (df.condition == condition)]
# library bin 4 histogram details
date = 20150210
promoter = 'relB'
strain = 'MG1655'
region = 'mut1'
bin = 4
sequence = 'library'
media = 'M9glucose'
condition = 'na'
df_hist4 = df[(df.date == date) & (df.promoter == promoter) \
& (df.strain == strain) & (df.region == region) \
& (df.bin == bin) & (df.sequence == sequence) \
& (df.media == media) & (df.condition == condition)]
#===============================================================================
# identify gate windows (approximate)
#===============================================================================
df_hist1['cum_sum'] = df_hist1.fraction.cumsum()
#===============================================================================
# plot the data
#===============================================================================
palette = sns.color_palette()
fig1 = plt.figure(figsize = (5,4))
plt.semilogx(df_hist1['fluorescence'], df_hist1['fraction'], linewidth=2,
label=r'$relB$ library', alpha = 1, color = '#EFF6EF')
plt.fill_between(df_hist1['fluorescence'], df_hist1['fraction'], 0, color = '#EFF6EF')
plt.semilogx(df_hist2['fluorescence'], df_hist2['fraction'], linewidth=2,
label=r'$relB$ wild-type', alpha = 1, color = '#C6E1CD')
plt.fill_between(df_hist2['fluorescence'], df_hist2['fraction'], 0, color = '#C6E1CD')
plt.semilogx(df_hist3['fluorescence'], df_hist3['fraction'], linewidth=2,
label=r'$relB$ wild-type', alpha = 1, color = '#A0CCA9')
plt.fill_between(df_hist3['fluorescence'], df_hist3['fraction'], 0, color = '#A0CCA9')
plt.semilogx(df_hist4['fluorescence'], df_hist4['fraction'], linewidth=2,
label=r'$relB$ wild-type', alpha = 1, color = '#67AC74')
plt.fill_between(df_hist4['fluorescence'], df_hist4['fraction'], 0, color = '#67AC74')
plt.semilogx(df_lib_hist1['fluorescence'], df_lib_hist1['fraction'], linewidth=2,
label=r'$relB$ lib', alpha = 0.4, color = 'k', ls='--')
# plt.yticks([])
plt.xlim(1,1E5)
plt.ylim(bottom=0)
plt.xlabel('Fluorescence (a.u.)')
plt.ylabel('Frequency')
lgd = plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.tight_layout()
fig1.savefig(output + 'fig1_relB_histograms.pdf', bbox_extra_artists=(lgd,), bbox_inches='tight')
|
py | b40eb85af269c980f6de806582be4928ae5db9bb | # -*- coding: utf-8 -*-
import unittest
from geeksforgeeks import *
class TestGeeksForGeeks(unittest.TestCase):
def test_towers_holding_water(self):
heights = [1, 5, 3, 7, 2]
expected = 2
actual = towers_holding_water(heights)
self.assertEqual(expected, actual, 'one small gap')
heights = [5, 1, 2, 3, 4]
expected = 6
actual = towers_holding_water(heights)
self.assertEqual(expected, actual, 'large ascending gap')
heights = [5, 4, 3, 2, 1, 6]
expected = 10
actual = towers_holding_water(heights)
self.assertEqual(expected, actual, 'large descending gap')
heights = [1, 3, 4, 2]
expected = 0
actual = towers_holding_water(heights)
self.assertEqual(expected, actual, 'no gap')
heights = [5, 3, 3, 3, 4]
expected = 3
actual = towers_holding_water(heights)
self.assertEqual(expected, actual, 'large gap, multiple minima')
heights = [5, 5, 5, 3, 4, 4]
expected = 1
actual = towers_holding_water(heights)
self.assertEqual(expected, actual, 'large gap, multiple minima')
heights = [5, 1, 2, 3, 2, 1, 4]
expected = 11
actual = towers_holding_water(heights)
self.assertEqual(expected, actual, 'large gap, both descending and ascending')
heights = [5, 1, 5, 2, 5, 3, 5]
expected = 9
actual = towers_holding_water(heights)
self.assertEqual(expected, actual, 'multiple small gaps')
heights = [4, 2, 3, 1, 2, 1, 2, 1, 3, 2, 4]
expected = 19
actual = towers_holding_water(heights)
self.assertEqual(expected, actual, 'multiple small gaps')
def test_largest_group_of_intersecting_intervals(self):
intervals = [(1,2), (3,4), (5,6), (7,8)]
expected = (1,2)
actual = largest_group_of_intersecting_intervals(intervals)
self.assertEqual(actual, expected, 'should produce the correct intersection')
intervals = [(1,3), (2,4), (5,7), (7,8)]
expected = (2,3)
actual = largest_group_of_intersecting_intervals(intervals)
self.assertEqual(actual, expected, 'should produce the correct intersection')
intervals = [(1,4), (3,6), (5,8)]
expected = (3,4)
actual = largest_group_of_intersecting_intervals(intervals)
self.assertEqual(actual, expected, 'should produce the correct intersection')
intervals = [(1,5), (2,6), (3,7), (6,8)]
expected = (3,5)
actual = largest_group_of_intersecting_intervals(intervals)
self.assertEqual(actual, expected, 'should produce the correct intersection')
def test_nearest_smallest_left_element(self):
arr = [1, 6, 4, 10, 2, 5]
expected = [None, 1, 1, 4, 1, 2]
actual = nearest_smallest_left_element(arr)
self.assertEqual(expected, actual)
arr = [1, 3, 0, 2, 5]
expected = [None, 1, None, 0, 2]
actual = nearest_smallest_left_element(arr)
self.assertEqual(expected, actual)
arr = [5, 4, 3, 2, 1]
expected = [None, None, None, None, None]
actual = nearest_smallest_left_element(arr)
self.assertEqual(expected, actual)
arr = [0, 4, 3, 2, 1]
expected = [None, 0, 0, 0, 0]
actual = nearest_smallest_left_element(arr)
self.assertEqual(expected, actual)
arr = [1, 2, 3, 4, 5]
expected = [None, 1, 2, 3, 4]
actual = nearest_smallest_left_element(arr)
self.assertEqual(expected, actual)
arr = [1, 5, 5, 5, 5]
expected = [None, 1, 1, 1, 1]
actual = nearest_smallest_left_element(arr)
self.assertEqual(expected, actual)
arr = [3, 2, 1, 0, 0]
expected = [None, None, None, None, None]
actual = nearest_smallest_left_element(arr)
self.assertEqual(expected, actual)
def test_max_fruit_gathered_by_birds(self):
fruits = [8, 1, 2, 7, 0]
time = 3
actual = max_fruit_gathered_by_birds(fruits, time)
expected = [7, 0, 8]
self.assertEqual(actual, expected, 'should compute the correct sum')
fruits = [8, 1, 2, 7, 0]
time = 10
actual = max_fruit_gathered_by_birds(fruits, time)
expected = [8, 1, 2, 7, 0]
self.assertEqual(actual, expected, 'should compute the correct sum')
fruits = range(100)
time = 10
actual = max_fruit_gathered_by_birds(fruits, time)
expected = range(90,100)
self.assertEqual(actual, expected, 'should compute the correct sum')
def test_is_interval_overlap(self):
intervals = [[1,2], [4,5], [6,7]]
actual = is_interval_overlap(intervals)
self.assertFalse(actual, 'no overlapping between intervals')
intervals = [[2,3], [1,2], [3,4]]
actual = is_interval_overlap(intervals)
self.assertFalse(actual, 'no overlapping between intervals')
intervals = [[2,4], [3,5], [1,3]]
actual = is_interval_overlap(intervals)
self.assertTrue(actual, 'found overlapping intervals')
def test_dot_product(self):
v1 = [0,0,0,0,0,0,1,1,0,0,0]
v1_compressed = [0,6,1,2,0,3]
v2 = [0,0,0,0,0,0,0,1,1,0,0]
v2_compressed = [0,7,1,2,0,2]
actual = dot_product(v1_compressed, v2_compressed)
self.assertEqual(actual, 1, 'should produce the correct dot product')
def test_find_possible_interpretations(self):
expected = [['a', 'a'], ['k']]
actual = find_possible_interpretations([1,1])
self.assertItemsEqual(actual, expected, 'correct result for [1,1]')
expected = [['a', 'b', 'a'], ['a', 'u'], ['l', 'a']]
actual = find_possible_interpretations([1,2,1])
self.assertItemsEqual(actual, expected, 'correct result for [1,2,1]')
expected = [['i', 'a', 'h'], ['i', 'r']]
actual = find_possible_interpretations([9,1,8])
self.assertItemsEqual(actual, expected, 'correct result for [9,1,8]')
expected = [['c', 'f', 'c', 'f', 'c', 'f']]
actual = find_possible_interpretations([3,6,3,6,3,6])
self.assertItemsEqual(actual, expected, 'correct result for [3,6,3,6,3,6]')
def test_check_palindrome(self):
sample = 'race car'
actual = check_palindrome(sample)
self.assertTrue(actual, 'if you ignore spaces it is a palindrome')
sample = 'alexela'
actual = check_palindrome(sample)
self.assertTrue(actual, 'if you ignore spaces it is a palindrome')
sample = 'alex xela '
actual = check_palindrome(sample)
self.assertTrue(actual, 'if you ignore spaces it is a palindrome')
sample = 'alexs zxela '
actual = check_palindrome(sample)
self.assertFalse(actual, 'if you ignore spaces it is a palindrome')
@unittest.skip("multiply_vectors() is not fully implemented yet")
def test_multiply_vectors(self):
s1 = '11'
s2 = '11'
expected = '121'
actual = multiply_vectors(s1, s2)
self.assertEqual(actual, expected, 'should produce the correct output')
s1 = '11111111'
s2 = '11111111'
expected = '123456787654321'
actual = multiply_vectors(s1, s2)
self.assertEqual(actual, expected, 'should produce the correct output')
s1 = '99'
s2 = '99'
expected = '9801'
actual = multiply_vectors(s1, s2)
self.assertEqual(actual, expected, 'should produce the correct output')
s1 = '99'
s2 = '999'
expected = '98901'
actual = multiply_vectors(s1, s2)
self.assertEqual(actual, expected, 'should produce the correct output')
|
py | b40eb984de6238aee128cd865944342581d90203 | from typing import Union
import numpy as np
import talib
from jesse.helpers import get_candle_source
from jesse.helpers import slice_candles
def sma(candles: np.ndarray, period: int = 5, source_type: str = "close", sequential: bool = False) -> Union[
        float, np.ndarray]:
    """
    SMA - Simple Moving Average

    :param candles: np.ndarray
    :param period: int - default: 5
    :param source_type: str - default: "close"
    :param sequential: bool - default=False

    :return: float | np.ndarray
    """
    if len(candles.shape) == 1:
        source = candles
    else:
        candles = slice_candles(candles, sequential)
        source = get_candle_source(candles, source_type=source_type)

    res = talib.SMA(source, timeperiod=period)

    return res if sequential else res[-1]
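# Example usage (a minimal sketch, assuming `candles` is a Jesse-style 2-D
# candle array; the variable names below are illustrative, not part of this module):
#
#   last_value = sma(candles, period=20)                    # float
#   full_series = sma(candles, period=20, sequential=True)  # np.ndarray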
|
py | b40eba2a36bcc890ccbf522046e35bf3fc0b8912 | """
Copyright Government of Canada 2015-2020
Written by: National Microbiology Laboratory, Public Health Agency of Canada
Licensed under the Apache License, Version 2.0 (the "License"); you may not use
this work except in compliance with the License. You may obtain a copy of the
License at:
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
class SampleFile:
"""A representation of a sample file obtained from IRIDA"""
def __init__(self, name, path):
"""
Create a sample file instance.
:type name: str
:param name: the name of the sample file
:type path: str
:param path: the URI of the sample file
"""
self.path = path
self.name = name
self.library_dataset_id = None
self.verified = False
def __eq__(self, sample_file):
equal = False
try:
if self.path == sample_file.path:
equal = True
except AttributeError:
pass
return equal
def __repr__(self):
return self.name + " @ " + self.path
def state(self, gi, library_id):
return gi.libraries.show_dataset(library_id, self.library_dataset_id)['state']
def delete(self, gi, library_id):
return gi.libraries.delete_library_dataset(library_id, self.library_dataset_id, purged=True)['deleted']
|
py | b40ebaee9c810eda22004dd4d53f45c2bacb1d77 | # App 9 real estate analysis
|
py | b40ebccf089da7c96a5ac8fcb0b499a15271f135 | # coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base DatasetBuilderTestCase to test a DatasetBuilder base class."""
import hashlib
import itertools
import numbers
import os
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_datasets.core import dataset_builder
from tensorflow_datasets.core import dataset_info
from tensorflow_datasets.core import dataset_utils
from tensorflow_datasets.core import download
from tensorflow_datasets.core import load
from tensorflow_datasets.core import utils
from tensorflow_datasets.core.download import checksums
from tensorflow_datasets.core.utils import tf_utils
from tensorflow_datasets.testing import test_utils
# `os` module Functions for which tf.io.gfile equivalent should be preferred.
FORBIDDEN_OS_FUNCTIONS = (
"chmod",
"chown",
"link",
"listdir",
"lstat",
"makedirs",
"mkdir",
"mknod",
"open",
"pathconf",
"readlink",
"remove",
"removedirs",
"rename",
"renames",
"rmdir",
"stat",
"statvfs",
"symlink",
"unlink",
"walk",
)
FORBIDDEN_OS_PATH_FUNCTIONS = (
"exists",
"isdir",
"isfile",
)
_ORIGINAL_NP_LOAD = np.load
def _np_load(file_, mmap_mode=None, allow_pickle=False, **kwargs):
if not hasattr(file_, "read"):
raise AssertionError(
"You MUST pass a `tf.io.gfile.GFile` or file-like object to `np.load`.")
if allow_pickle:
raise AssertionError("Unpicling files is forbidden for security reasons.")
return _ORIGINAL_NP_LOAD(file_, mmap_mode, allow_pickle, **kwargs)
class DatasetBuilderTestCase(parameterized.TestCase, test_utils.SubTestCase):
"""Inherit this class to test your DatasetBuilder class.
You must set the following class attributes:
* DATASET_CLASS: class object of DatasetBuilder you want to test.
You may set the following class attributes:
* VERSION: `str`. The version used to run the test. eg: '1.2.*'.
Defaults to None (canonical version).
* BUILDER_CONFIG_NAMES_TO_TEST: `list[str]`, the list of builder configs
that should be tested. If None, all the BUILDER_CONFIGS from the class
will be tested.
* DL_EXTRACT_RESULT: `dict[str]`, the returned result of mocked
`download_and_extract` method. The values should be the path of files
present in the `fake_examples` directory, relative to that directory.
If not specified, path to `fake_examples` will always be returned.
* DL_DOWNLOAD_RESULT: `dict[str]`, the returned result of mocked
`download_and_extract` method. The values should be the path of files
present in the `fake_examples` directory, relative to that directory.
If not specified: will use DL_EXTRACT_RESULT (this is due to backwards
compatibility and will be removed in the future).
* EXAMPLE_DIR: `str`, the base directory in in which fake examples are
contained. Optional; defaults to
tensorflow_datasets/testing/test_data/fake_examples/<dataset name>.
* OVERLAPPING_SPLITS: `list[str]`, splits containing examples from other
splits (e.g. a "example" split containing pictures from other splits).
* MOCK_OUT_FORBIDDEN_OS_FUNCTIONS: `bool`, defaults to True. Set to False to
disable checks preventing usage of `os` or builtin functions instead of
recommended `tf.io.gfile` API.
* SKIP_CHECKSUMS: Checks that the urls called by `dl_manager.download`
are registered.
This test case will check for the following:
- the dataset builder is correctly registered, i.e. `tfds.load(name)` works;
- the dataset builder can read the fake examples stored in
testing/test_data/fake_examples/{dataset_name};
- the dataset builder can produce serialized data;
- the dataset builder produces a valid Dataset object from serialized data
- in eager mode;
- in graph mode.
- the produced Dataset examples have the expected dimensions and types;
- the produced Dataset has and the expected number of examples;
- a example is not part of two splits, or one of these splits is whitelisted
in OVERLAPPING_SPLITS.
"""
DATASET_CLASS = None
VERSION = None
BUILDER_CONFIG_NAMES_TO_TEST = None
DL_EXTRACT_RESULT = None
DL_DOWNLOAD_RESULT = None
EXAMPLE_DIR = None
OVERLAPPING_SPLITS = []
MOCK_OUT_FORBIDDEN_OS_FUNCTIONS = True
SKIP_CHECKSUMS = False
@classmethod
def setUpClass(cls):
tf.enable_v2_behavior()
super(DatasetBuilderTestCase, cls).setUpClass()
name = cls.__name__
# Check class has the right attributes
if cls.DATASET_CLASS is None or not callable(cls.DATASET_CLASS):
raise AssertionError(
"Assign your DatasetBuilder class to %s.DATASET_CLASS." % name)
def setUp(self):
super(DatasetBuilderTestCase, self).setUp()
self.patchers = []
self.builder = self._make_builder()
example_dir = self.DATASET_CLASS.code_path.parent / "dummy_data"
if self.EXAMPLE_DIR is not None:
self.example_dir = self.EXAMPLE_DIR
elif example_dir.exists():
self.example_dir = str(example_dir)
else:
self.example_dir = os.path.join(
test_utils.fake_examples_dir(), self.builder.name)
if not tf.io.gfile.exists(self.example_dir):
# TODO(epot): Better documentation once datasets are migrated to the
# folder model.
err_msg = "Dummy data / fake_examples dir {} not found.".format(self.example_dir)
raise ValueError(err_msg)
if self.MOCK_OUT_FORBIDDEN_OS_FUNCTIONS:
self._mock_out_forbidden_os_functions()
# Track the urls which are downloaded to validate the checksums
# The `dl_manager.download` and `dl_manager.download_and_extract` are
# patched to record the urls in `_download_urls`.
# Calling `dl_manager.download_checksums` stops the url
# registration (as checksums are stored remotely)
# `_test_checksums` validates the recorded urls.
self._download_urls = set()
self._stop_record_download = False
def tearDown(self):
super(DatasetBuilderTestCase, self).tearDown()
for patcher in self.patchers:
patcher.stop()
def _mock_out_forbidden_os_functions(self):
"""Raises error if forbidden os functions are called instead of gfile."""
err = AssertionError("Do not use `os`, but `tf.io.gfile` module instead. "
"This makes code compatible with more filesystems.")
sep = os.path.sep
mock_os_path = absltest.mock.Mock(os.path, wraps=os.path)
mock_os_path.sep = sep
for fop in FORBIDDEN_OS_PATH_FUNCTIONS:
getattr(mock_os_path, fop).side_effect = err
mock_os = absltest.mock.Mock(os, path=mock_os_path)
for fop in FORBIDDEN_OS_FUNCTIONS:
if os.name == "nt" and not hasattr(os, fop):
continue # Not all `os` functions are available on Windows (ex: chmod).
getattr(mock_os, fop).side_effect = err
os_patcher = absltest.mock.patch(
self.DATASET_CLASS.__module__ + ".os", mock_os, create=True)
os_patcher.start()
self.patchers.append(os_patcher)
mock_builtins = __builtins__.copy() # pytype: disable=module-attr
mock_builtins["open"] = absltest.mock.Mock(side_effect=err)
open_patcher = absltest.mock.patch(
self.DATASET_CLASS.__module__ + ".__builtins__", mock_builtins)
open_patcher.start()
self.patchers.append(open_patcher)
# It's hard to mock open within numpy, so mock np.load.
np_load_patcher = absltest.mock.patch("numpy.load", _np_load)
np_load_patcher.start()
self.patchers.append(np_load_patcher)
def test_baseclass(self):
self.assertIsInstance(
self.builder, dataset_builder.DatasetBuilder,
"Dataset class must inherit from `dataset_builder.DatasetBuilder`.")
# Since class was instantiated and base class is ABCMeta, then we know
# all needed methods were implemented.
def test_registered(self):
is_registered = self.builder.name in load.list_builders()
exceptions = self.builder.IN_DEVELOPMENT
self.assertTrue(is_registered or exceptions,
"Dataset {} was not registered and is "
"not `IN_DEVELOPMENT`.".format(self.builder.name))
def test_info(self):
info = self.builder.info
self.assertIsInstance(info, dataset_info.DatasetInfo)
self.assertEqual(self.builder.name, info.name)
def _add_url(self, url_or_urls):
if self._stop_record_download:
# Stop recording the checksums if dl_manager.download_checksums has been
# called (as checksums may be stored remotely)
return
if isinstance(url_or_urls, download.resource.Resource):
self._download_urls.add(url_or_urls.url)
else:
self._download_urls.add(url_or_urls)
def _get_dl_extract_result(self, url):
tf.nest.map_structure(self._add_url, url)
del url
if self.DL_EXTRACT_RESULT is None:
return self.example_dir
return tf.nest.map_structure(
lambda fname: os.path.join(self.example_dir, fname),
self.DL_EXTRACT_RESULT,
)
def _get_dl_download_result(self, url):
tf.nest.map_structure(self._add_url, url)
if self.DL_DOWNLOAD_RESULT is None:
# This is only to be backwards compatible with old approach.
# In the future it will be replaced with using self.example_dir.
return self._get_dl_extract_result(url)
return tf.nest.map_structure(
lambda fname: os.path.join(self.example_dir, fname),
self.DL_DOWNLOAD_RESULT,
)
def _download_checksums(self, url):
self._stop_record_download = True
def _make_builder(self, config=None):
return self.DATASET_CLASS( # pylint: disable=not-callable
data_dir=self.tmp_dir,
config=config,
version=self.VERSION)
@test_utils.run_in_graph_and_eager_modes()
def test_download_and_prepare_as_dataset(self):
# If configs specified, ensure they are all valid
if self.BUILDER_CONFIG_NAMES_TO_TEST:
for config in self.BUILDER_CONFIG_NAMES_TO_TEST: # pylint: disable=not-an-iterable
assert config in self.builder.builder_configs, (
"Config %s specified in test does not exist. Available:\n%s" % (
config, list(self.builder.builder_configs)))
configs = self.builder.BUILDER_CONFIGS
print("Total configs: %d" % len(configs))
if configs:
for config in configs:
# Skip the configs that are not in the list.
if (self.BUILDER_CONFIG_NAMES_TO_TEST is not None and
(config.name not in self.BUILDER_CONFIG_NAMES_TO_TEST)): # pylint: disable=unsupported-membership-test
print("Skipping config %s" % config.name)
continue
with self._subTest(config.name):
print("Testing config %s" % config.name)
builder = self._make_builder(config=config)
self._download_and_prepare_as_dataset(builder)
else:
self._download_and_prepare_as_dataset(self.builder)
if not self.SKIP_CHECKSUMS:
with self._subTest("url_checksums"):
self._test_checksums()
def _test_checksums(self):
# If no call to `dl_manager.download`, then no need to check url presence.
if not self._download_urls:
return
err_msg = ("If you are developping outside TFDS and want to opt-out, "
"please add `SKIP_CHECKSUMS = True` to the "
"`DatasetBuilderTestCase`")
filepath = self.DATASET_CLASS.code_path.parent / "checksums.tsv"
if filepath.exists():
filepath = str(filepath)
else:
filepath = os.path.join(checksums._get_path(self.builder.name)) # pylint: disable=protected-access
with utils.try_reraise(suffix=err_msg):
url_infos = checksums._get_url_infos(filepath) # pylint: disable=protected-access
missing_urls = self._download_urls - set(url_infos.keys())
self.assertEmpty(
missing_urls,
"Some urls checksums are missing at: {} "
"Did you forget to record checksums with `--register_checksums` ? "
"See instructions at: "
"https://www.tensorflow.org/datasets/add_dataset#2_run_download_and_prepare_locally"
"\n{}".format(filepath, err_msg)
)
def _download_and_prepare_as_dataset(self, builder):
# Provide the manual dir only if builder has MANUAL_DOWNLOAD_INSTRUCTIONS
# set.
missing_dir_mock = absltest.mock.PropertyMock(
side_effect=Exception("Missing MANUAL_DOWNLOAD_INSTRUCTIONS"))
manual_dir = (
self.example_dir
if builder.MANUAL_DOWNLOAD_INSTRUCTIONS else missing_dir_mock)
with absltest.mock.patch.multiple(
"tensorflow_datasets.core.download.DownloadManager",
download_and_extract=self._get_dl_extract_result,
download=self._get_dl_download_result,
download_checksums=self._download_checksums,
manual_dir=manual_dir,
):
if isinstance(builder, dataset_builder.BeamBasedBuilder):
# For Beam datasets, set-up the runner config
import apache_beam as beam # pylint: disable=import-outside-toplevel,g-import-not-at-top
beam_runner = None
beam_options = beam.options.pipeline_options.PipelineOptions()
else:
beam_runner = None
beam_options = None
download_config = download.DownloadConfig(
compute_stats=download.ComputeStatsMode.SKIP,
beam_runner=beam_runner,
beam_options=beam_options,
)
builder.download_and_prepare(download_config=download_config)
with self._subTest("as_dataset"):
self._assertAsDataset(builder)
with self._subTest("num_examples"):
self._assertNumSamples(builder)
with self._subTest("reload"):
# When reloading the dataset, metadata should been reloaded too.
builder_reloaded = self._make_builder(config=builder.builder_config)
self._assertNumSamples(builder_reloaded)
# After reloading, as_dataset should still be working
with self._subTest("as_dataset"):
self._assertAsDataset(builder_reloaded)
def _assertAsDataset(self, builder):
split_to_checksums = {} # {"split": set(examples_checksums)}
for split_name, expected_examples_number in self.SPLITS.items():
ds = builder.as_dataset(split=split_name)
compare_shapes_and_types(
builder.info.features.get_tensor_info(),
tf.compat.v1.data.get_output_types(ds),
tf.compat.v1.data.get_output_shapes(ds),
)
examples = list(dataset_utils.as_numpy(
builder.as_dataset(split=split_name)))
split_to_checksums[split_name] = set(checksum(rec) for rec in examples)
self.assertLen(examples, expected_examples_number)
for (split1, hashes1), (split2, hashes2) in itertools.combinations(
split_to_checksums.items(), 2):
if (split1 in self.OVERLAPPING_SPLITS or
split2 in self.OVERLAPPING_SPLITS):
continue
self.assertFalse(
hashes1.intersection(hashes2),
("Splits '%s' and '%s' are overlapping. Are you sure you want to "
"have the same objects in those splits? If yes, add one one of "
"them to OVERLAPPING_SPLITS class attribute.") % (split1, split2))
def _assertNumSamples(self, builder):
for split_name, expected_num_examples in self.SPLITS.items():
self.assertEqual(
builder.info.splits[split_name].num_examples,
expected_num_examples,
)
self.assertEqual(
builder.info.splits.total_num_examples,
sum(self.SPLITS.values()),
)
def checksum(example):
"""Computes the md5 for a given example."""
def _bytes_flatten(flat_str, element):
"""Recursively flatten an element to its byte representation."""
if isinstance(element, numbers.Number):
# In python3, bytes(-3) is not allowed (or large numbers),
# so convert to str to avoid problems.
element = str(element)
if isinstance(element, dict):
for k, v in sorted(element.items()):
flat_str.append(k)
_bytes_flatten(flat_str, v)
elif isinstance(element, str):
if hasattr(element, "decode"):
# Python2 considers bytes to be str, but are almost always latin-1
# encoded bytes here. Extra step needed to avoid DecodeError.
element = element.decode("latin-1")
flat_str.append(element)
elif isinstance(element,
(tf.RaggedTensor, tf.compat.v1.ragged.RaggedTensorValue)):
flat_str.append(str(element.to_list()))
elif isinstance(element, np.ndarray):
# tf.Tensor() returns np.array of dtype object, which doesn't work
# with x.tobytes(). So instead convert numpy into list.
if element.dtype.type is np.object_:
flat_str.append(str(tuple(element.shape)))
flat_str.append(str(list(element.ravel())))
else:
flat_str.append(element.tobytes())
else:
flat_str.append(bytes(element))
return flat_str
flat_str = _bytes_flatten([], example)
flat_bytes = [
s.encode("utf-8") if not isinstance(s, bytes) else s
for s in flat_str
]
flat_bytes = b"".join(flat_bytes)
hash_ = hashlib.md5()
hash_.update(flat_bytes)
return hash_.hexdigest()
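# For instance, a record such as {"label": 3, "text": b"abc"} is flattened to
# ["label", "3", "text", b"abc"], concatenated as bytes and hashed, so the
# function returns a 32-character hex md5 digest.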
def compare_shapes_and_types(tensor_info, output_types, output_shapes):
"""Compare shapes and types between TensorInfo and Dataset types/shapes."""
for feature_name, feature_info in tensor_info.items():
if isinstance(feature_info, dict):
compare_shapes_and_types(feature_info, output_types[feature_name],
output_shapes[feature_name])
else:
expected_type = feature_info.dtype
output_type = output_types[feature_name]
if expected_type != output_type:
raise TypeError("Feature %s has type %s but expected %s" %
(feature_name, output_type, expected_type))
expected_shape = feature_info.shape
output_shape = output_shapes[feature_name]
tf_utils.assert_shape_match(expected_shape, output_shape)
|
py | b40ebed85c2450c7957bfa9522523b88105e6c8e | """
PreactResNet
Kaiming He, Identity Mappings in Deep Residual Networks
https://arxiv.org/abs/1603.05027
"""
import torch.nn as nn
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_C, C, stride):
super(Bottleneck, self).__init__()
out_C = C * self.expansion
if stride > 1 or in_C != out_C:
self.shortcut = nn.Conv2d(in_C, out_C, kernel_size=1, stride=stride, bias=False)
self.bn1 = nn.BatchNorm2d(in_C)
self.relu1 = nn.ReLU()
self.conv1 = nn.Conv2d(in_C, C, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(C)
self.relu2 = nn.ReLU()
self.conv2 = nn.Conv2d(C, C, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(C)
self.relu3 = nn.ReLU()
self.conv3 = nn.Conv2d(C, out_C, kernel_size=1, bias=False)
def forward(self, x):
out = self.relu1(self.bn1(x))
shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
out = self.conv1(out)
out = self.conv2(self.relu2(self.bn2(out)))
out = self.conv3(self.relu3(self.bn3(out)))
out += shortcut
return out
class PreactResNet(nn.Module):
def __init__(self, cfg, num_classes=10):
super(PreactResNet, self).__init__()
self.in_C = cfg['C'][0]
self.head = nn.Conv2d(3, self.in_C, kernel_size=3, stride=1, padding=1, bias=False)
self.layer1 = self._make_layer(cfg['C'][0], cfg['num_blocks'][0], 1)
self.layer2 = self._make_layer(cfg['C'][1], cfg['num_blocks'][1], 2)
self.layer3 = self._make_layer(cfg['C'][2], cfg['num_blocks'][2], 2)
self.layer4 = self._make_layer(cfg['C'][3], cfg['num_blocks'][3], 2)
self.classifier = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
nn.Flatten(),
nn.Linear(self.in_C, num_classes)
)
def _make_layer(self, C, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for st in strides:
layers.append(Bottleneck(self.in_C, C, st))
self.in_C = C * Bottleneck.expansion
return nn.Sequential(*layers)
def forward(self, x):
x = self.head(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.classifier(x)
return x
def PreactResNet50():
cfg = {
'C': [32, 64, 128, 256],
'num_blocks': [3, 4, 6, 3]
}
return PreactResNet(cfg) |
py | b40ebf4515225091f465019a0340521f22a05db4 | # by [email protected]
import sys
from joblib import delayed, Parallel
import os
import os.path as osp
import sys
import glob
import subprocess
from tqdm import tqdm
import cv2
import numpy as np
import matplotlib.pyplot as plt
plt.switch_backend('agg')
def resize_video_ffmpeg(v_path, out_path, dim=256):
'''v_path: single video path;
out_path: root to store output videos'''
v_class = v_path.split('/')[-2]
v_name = os.path.basename(v_path)[0:-4]
out_dir = os.path.join(out_path, v_class)
if not os.path.exists(out_dir):
raise ValueError("directory not exist, it shouldn't happen")
vidcap = cv2.VideoCapture(v_path)
width = vidcap.get(cv2.CAP_PROP_FRAME_WIDTH) # float
height = vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT) # float
if (width == 0) or (height==0):
print(v_path, 'not successfully loaded, drop ..'); return
new_dim = resize_dim(width, height, dim)
if new_dim[0] == dim:
dim_cmd = '%d:-2' % dim
elif new_dim[1] == dim:
dim_cmd = '-2:%d' % dim
cmd = ['ffmpeg', '-loglevel', 'quiet', '-y',
'-i', '%s'%v_path,
'-vf',
'scale=%s'%dim_cmd,
'%s' % os.path.join(out_dir, os.path.basename(v_path))]
ffmpeg = subprocess.call(cmd)
def resize_dim(w, h, target):
'''resize (w, h), such that the smaller side is target, keep the aspect ratio'''
if w >= h:
return [int(target * w / h), int(target)]
else:
return [int(target), int(target * h / w)]
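# e.g. resize_dim(1920, 1080, 256) -> [455, 256]  (landscape: height becomes 256)
#      resize_dim(480, 640, 256)   -> [256, 341]  (portrait: width becomes 256)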
def main_kinetics400(output_path='your_path/kinetics400'):
print('save to %s ... ' % output_path)
for splitname in ['val_split', 'train_split']:
v_root = '/datasets/KineticsVideo' + '/' + splitname
if not os.path.exists(v_root):
print('Wrong v_root')
import ipdb; ipdb.set_trace() # for debug
out_path = os.path.join(output_path, splitname)
if not os.path.exists(out_path):
os.makedirs(out_path)
v_act_root = glob.glob(os.path.join(v_root, '*/'))
v_act_root = sorted(v_act_root)
# if resume, remember to delete the last video folder
for i, j in tqdm(enumerate(v_act_root), total=len(v_act_root)):
v_paths = glob.glob(os.path.join(j, '*.mp4'))
v_paths = sorted(v_paths)
v_class = j.split('/')[-2]
out_dir = os.path.join(out_path, v_class)
if os.path.exists(out_dir):
print(out_dir, 'exists!'); continue
else:
os.makedirs(out_dir)
print('extracting: %s' % v_class)
Parallel(n_jobs=8)(delayed(resize_video_ffmpeg)(p, out_path, dim=256) for p in tqdm(v_paths, total=len(v_paths)))
if __name__ == '__main__':
your_path = '..' if len(sys.argv) < 2 else sys.argv[1]
main_kinetics400(output_path=osp.abspath(osp.join(your_path, 'kinetics400')))
# users need to change output_path and v_root
|
py | b40ebfa50b9cc5e927b26fbea38d340d466bb758 | from __future__ import division, unicode_literals
import base64
import io
import itertools
import os
import time
import xml.etree.ElementTree as etree
from .common import FileDownloader
from .http import HttpFD
from ..compat import (
compat_urlparse,
)
from ..utils import (
struct_pack,
struct_unpack,
encodeFilename,
sanitize_open,
xpath_text,
)
class FlvReader(io.BytesIO):
"""
Reader for Flv files
The file format is documented in https://www.adobe.com/devnet/f4v.html
"""
# Utility functions for reading numbers and strings
def read_unsigned_long_long(self):
return struct_unpack('!Q', self.read(8))[0]
def read_unsigned_int(self):
return struct_unpack('!I', self.read(4))[0]
def read_unsigned_char(self):
return struct_unpack('!B', self.read(1))[0]
def read_string(self):
res = b''
while True:
char = self.read(1)
if char == b'\x00':
break
res += char
return res
def read_box_info(self):
"""
Read a box and return the info as a tuple: (box_size, box_type, box_data)
"""
real_size = size = self.read_unsigned_int()
box_type = self.read(4)
header_end = 8
if size == 1:
real_size = self.read_unsigned_long_long()
header_end = 16
return real_size, box_type, self.read(real_size - header_end)
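# A box is therefore laid out as [4-byte size][4-byte type][payload]; when the
# 4-byte size field equals 1, an 8-byte extended size follows the type instead.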
def read_asrt(self):
# version
self.read_unsigned_char()
# flags
self.read(3)
quality_entry_count = self.read_unsigned_char()
# QualityEntryCount
for i in range(quality_entry_count):
self.read_string()
segment_run_count = self.read_unsigned_int()
segments = []
for i in range(segment_run_count):
first_segment = self.read_unsigned_int()
fragments_per_segment = self.read_unsigned_int()
segments.append((first_segment, fragments_per_segment))
return {
'segment_run': segments,
}
def read_afrt(self):
# version
self.read_unsigned_char()
# flags
self.read(3)
# time scale
self.read_unsigned_int()
quality_entry_count = self.read_unsigned_char()
# QualitySegmentUrlModifiers
for i in range(quality_entry_count):
self.read_string()
fragments_count = self.read_unsigned_int()
fragments = []
for i in range(fragments_count):
first = self.read_unsigned_int()
first_ts = self.read_unsigned_long_long()
duration = self.read_unsigned_int()
if duration == 0:
discontinuity_indicator = self.read_unsigned_char()
else:
discontinuity_indicator = None
fragments.append({
'first': first,
'ts': first_ts,
'duration': duration,
'discontinuity_indicator': discontinuity_indicator,
})
return {
'fragments': fragments,
}
def read_abst(self):
# version
self.read_unsigned_char()
# flags
self.read(3)
self.read_unsigned_int() # BootstrapinfoVersion
# Profile,Live,Update,Reserved
self.read(1)
# time scale
self.read_unsigned_int()
# CurrentMediaTime
self.read_unsigned_long_long()
# SmpteTimeCodeOffset
self.read_unsigned_long_long()
self.read_string() # MovieIdentifier
server_count = self.read_unsigned_char()
# ServerEntryTable
for i in range(server_count):
self.read_string()
quality_count = self.read_unsigned_char()
# QualityEntryTable
for i in range(quality_count):
self.read_string()
# DrmData
self.read_string()
# MetaData
self.read_string()
segments_count = self.read_unsigned_char()
segments = []
for i in range(segments_count):
box_size, box_type, box_data = self.read_box_info()
assert box_type == b'asrt'
segment = FlvReader(box_data).read_asrt()
segments.append(segment)
fragments_run_count = self.read_unsigned_char()
fragments = []
for i in range(fragments_run_count):
box_size, box_type, box_data = self.read_box_info()
assert box_type == b'afrt'
fragments.append(FlvReader(box_data).read_afrt())
return {
'segments': segments,
'fragments': fragments,
}
def read_bootstrap_info(self):
total_size, box_type, box_data = self.read_box_info()
assert box_type == b'abst'
return FlvReader(box_data).read_abst()
def read_bootstrap_info(bootstrap_bytes):
return FlvReader(bootstrap_bytes).read_bootstrap_info()
def build_fragments_list(boot_info):
""" Return a list of (segment, fragment) for each fragment in the video """
res = []
segment_run_table = boot_info['segments'][0]
fragment_run_entry_table = boot_info['fragments'][0]['fragments']
first_frag_number = fragment_run_entry_table[0]['first']
fragments_counter = itertools.count(first_frag_number)
for segment, fragments_count in segment_run_table['segment_run']:
for _ in range(fragments_count):
res.append((segment, next(fragments_counter)))
return res
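# For example, a boot_info whose first segment run is [(1, 3)] and whose first
# fragment entry has 'first': 1 yields [(1, 1), (1, 2), (1, 3)].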
def write_unsigned_int(stream, val):
stream.write(struct_pack('!I', val))
def write_unsigned_int_24(stream, val):
stream.write(struct_pack('!I', val)[1:])
def write_flv_header(stream):
"""Writes the FLV header to stream"""
# FLV header
stream.write(b'FLV\x01')  # signature + version 1
stream.write(b'\x05')  # type flags: audio and video present
stream.write(b'\x00\x00\x00\x09')  # header size (9 bytes)
stream.write(b'\x00\x00\x00\x00')  # PreviousTagSize0
def write_metadata_tag(stream, metadata):
"""Writes optional metadata tag to stream"""
SCRIPT_TAG = b'\x12'
FLV_TAG_HEADER_LEN = 11
if metadata:
stream.write(SCRIPT_TAG)
write_unsigned_int_24(stream, len(metadata))
stream.write(b'\x00\x00\x00\x00\x00\x00\x00')
stream.write(metadata)
write_unsigned_int(stream, FLV_TAG_HEADER_LEN + len(metadata))
def _add_ns(prop):
return '{http://ns.adobe.com/f4m/1.0}%s' % prop
class HttpQuietDownloader(HttpFD):
def to_screen(self, *args, **kargs):
pass
class F4mFD(FileDownloader):
"""
A downloader for f4m manifests or AdobeHDS.
"""
def _get_unencrypted_media(self, doc):
media = doc.findall(_add_ns('media'))
if not media:
self.report_error('No media found')
for e in (doc.findall(_add_ns('drmAdditionalHeader')) +
doc.findall(_add_ns('drmAdditionalHeaderSet'))):
# If id attribute is missing it's valid for all media nodes
# without drmAdditionalHeaderId or drmAdditionalHeaderSetId attribute
if 'id' not in e.attrib:
self.report_error('Missing ID in f4m DRM')
media = list(filter(lambda e: 'drmAdditionalHeaderId' not in e.attrib and
'drmAdditionalHeaderSetId' not in e.attrib,
media))
if not media:
self.report_error('Unsupported DRM')
return media
def real_download(self, filename, info_dict):
man_url = info_dict['url']
requested_bitrate = info_dict.get('tbr')
self.to_screen('[download] Downloading f4m manifest')
manifest = self.ydl.urlopen(man_url).read()
doc = etree.fromstring(manifest)
formats = [(int(f.attrib.get('bitrate', -1)), f)
for f in self._get_unencrypted_media(doc)]
if requested_bitrate is None:
# get the best format
formats = sorted(formats, key=lambda f: f[0])
rate, media = formats[-1]
else:
rate, media = list(filter(
lambda f: int(f[0]) == requested_bitrate, formats))[0]
base_url = compat_urlparse.urljoin(man_url, media.attrib['url'])
bootstrap_node = doc.find(_add_ns('bootstrapInfo'))
if bootstrap_node.text is None:
bootstrap_url = compat_urlparse.urljoin(
base_url, bootstrap_node.attrib['url'])
bootstrap = self.ydl.urlopen(bootstrap_url).read()
else:
bootstrap = base64.b64decode(bootstrap_node.text)
metadata_node = media.find(_add_ns('metadata'))
if metadata_node is not None:
metadata = base64.b64decode(metadata_node.text)
else:
metadata = None
boot_info = read_bootstrap_info(bootstrap)
fragments_list = build_fragments_list(boot_info)
if self.params.get('test', False):
# We only download the first fragment
fragments_list = fragments_list[:1]
total_frags = len(fragments_list)
# For some akamai manifests we'll need to add a query to the fragment url
akamai_pv = xpath_text(doc, _add_ns('pv-2.0'))
self.report_destination(filename)
http_dl = HttpQuietDownloader(
self.ydl,
{
'continuedl': True,
'quiet': True,
'noprogress': True,
'ratelimit': self.params.get('ratelimit', None),
'test': self.params.get('test', False),
}
)
tmpfilename = self.temp_name(filename)
(dest_stream, tmpfilename) = sanitize_open(tmpfilename, 'wb')
write_flv_header(dest_stream)
write_metadata_tag(dest_stream, metadata)
# This dict stores the download progress, it's updated by the progress
# hook
state = {
'status': 'downloading',
'downloaded_bytes': 0,
'frag_index': 0,
'frag_count': total_frags,
'filename': filename,
'tmpfilename': tmpfilename,
}
start = time.time()
def frag_progress_hook(s):
if s['status'] not in ('downloading', 'finished'):
return
frag_total_bytes = s.get('total_bytes', 0)
if s['status'] == 'finished':
state['downloaded_bytes'] += frag_total_bytes
state['frag_index'] += 1
estimated_size = (
(state['downloaded_bytes'] + frag_total_bytes)
/ (state['frag_index'] + 1) * total_frags)
time_now = time.time()
state['total_bytes_estimate'] = estimated_size
state['elapsed'] = time_now - start
if s['status'] == 'finished':
progress = self.calc_percent(state['frag_index'], total_frags)
else:
frag_downloaded_bytes = s['downloaded_bytes']
frag_progress = self.calc_percent(frag_downloaded_bytes,
frag_total_bytes)
progress = self.calc_percent(state['frag_index'], total_frags)
progress += frag_progress / float(total_frags)
state['eta'] = self.calc_eta(
start, time_now, estimated_size, state['downloaded_bytes'] + frag_downloaded_bytes)
state['speed'] = s.get('speed')
self._hook_progress(state)
http_dl.add_progress_hook(frag_progress_hook)
frags_filenames = []
for (seg_i, frag_i) in fragments_list:
name = 'Seg%d-Frag%d' % (seg_i, frag_i)
url = base_url + name
if akamai_pv:
url += '?' + akamai_pv.strip(';')
frag_filename = '%s-%s' % (tmpfilename, name)
success = http_dl.download(frag_filename, {'url': url})
if not success:
return False
with open(frag_filename, 'rb') as down:
down_data = down.read()
reader = FlvReader(down_data)
while True:
_, box_type, box_data = reader.read_box_info()
if box_type == b'mdat':
dest_stream.write(box_data)
break
frags_filenames.append(frag_filename)
dest_stream.close()
elapsed = time.time() - start
self.try_rename(tmpfilename, filename)
for frag_file in frags_filenames:
os.remove(frag_file)
fsize = os.path.getsize(encodeFilename(filename))
self._hook_progress({
'downloaded_bytes': fsize,
'total_bytes': fsize,
'filename': filename,
'status': 'finished',
'elapsed': elapsed,
})
return True
|
py | b40ec0aaeed4fc8963e6373eb3a38fe93adc3069 | """
LanXin+ OpenAPI
LanXin+ OpenAPI Platform # noqa: E501
Generated by: https://openapi.lanxin.cn
"""
import re # noqa: F401
import sys # noqa: F401
from lanxinplus_openapi.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from lanxinplus_openapi.exceptions import ApiAttributeError
def lazy_import():
from lanxinplus_openapi.model.v1_org_extra_field_ids_fetch_data import V1OrgExtraFieldIdsFetchData
globals()['V1OrgExtraFieldIdsFetchData'] = V1OrgExtraFieldIdsFetchData
class V1OrgExtraFieldIdsFetchResponse(ModelNormal):
"""NOTE: This class is auto generated by LanXin+.
Ref: https://openapi.lanxin.cn
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'data': (V1OrgExtraFieldIdsFetchData,), # noqa: E501
'errCode': (int,), # noqa: E501
'errMsg': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'data': 'data', # noqa: E501
'errCode': 'errCode', # noqa: E501
'errMsg': 'errMsg', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""V1OrgExtraFieldIdsFetchResponse - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
data (V1OrgExtraFieldIdsFetchData): [optional] # noqa: E501
errCode (int): [optional] # noqa: E501
errMsg (str): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys:
# self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""V1OrgExtraFieldIdsFetchResponse - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
data (V1OrgExtraFieldIdsFetchData): [optional] # noqa: E501
errCode (int): [optional] # noqa: E501
errMsg (str): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys:
# self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
|
py | b40ec29febb80f3df345707f54f9e82780351bb0 | """
PPT_UI_RUN.py
User interface tools.
The files in this tool are a modified version of the PPTs tool presented here: https://github.com/lapig-ufg/PPTs
Authors: Guillaume Goodwin, Marina Ruiz Sánchez-Oro
Date: 03/02/2022
"""
################################################################################
################################################################################
"""Import Python packages"""
################################################################################
################################################################################
from PyQt5 import QtGui, QtPrintSupport , QtWidgets, QtCore
from PyQt5.QtWidgets import QApplication, QMessageBox, QWidget, QDesktopWidget, QFileDialog, QTextEdit
from PyQt5.QtCore import QCoreApplication
import sys
import os
import datetime
import time
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_GPM_WINDOW(object):
def setupUi(self, GPM_WINDOW):
End_Date = list(map(int,((datetime.datetime.now()).strftime('%Y-%m-%d')).split('-')))
Year = int(End_Date[0])
Month = int(End_Date[1])
Day = int(End_Date[2])
GPM_WINDOW.setObjectName("GPM_WINDOW")
GPM_WINDOW.setWindowModality(QtCore.Qt.NonModal)
GPM_WINDOW.setEnabled(True)
GPM_WINDOW.resize(350, 449)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(GPM_WINDOW.sizePolicy().hasHeightForWidth())
GPM_WINDOW.setSizePolicy(sizePolicy)
GPM_WINDOW.setMinimumSize(QtCore.QSize(350, 410))
GPM_WINDOW.setMaximumSize(QtCore.QSize(1920, 1080))
GPM_WINDOW.setSizeIncrement(QtCore.QSize(0, 0))
GPM_WINDOW.setBaseSize(QtCore.QSize(350, 350))
self.GPM_Window = QtWidgets.QWidget(GPM_WINDOW)
self.GPM_Window.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
self.GPM_Window.setObjectName("GPM_Window")
#SPATIAL SLICE
self.Global_Slice_BT = QtWidgets.QRadioButton(self.GPM_Window)
self.Global_Slice_BT.setGeometry(QtCore.QRect(10, 200, 82, 17))
self.Global_Slice_BT.setChecked(True)
self.Global_Slice_BT.setObjectName("Global_Slice_BT")
self.ScaleSlice = QtWidgets.QButtonGroup(GPM_WINDOW)
self.ScaleSlice.setObjectName("ScaleSlice")
self.ScaleSlice.addButton(self.Global_Slice_BT)
self.Regional_Slice_BT = QtWidgets.QRadioButton(self.GPM_Window)
self.Regional_Slice_BT.setGeometry(QtCore.QRect(10, 220, 82, 17))
self.Regional_Slice_BT.setChecked(False)
self.Regional_Slice_BT.setObjectName("Regional_Slice_BT")
##
self.Regional_Slice_BT.toggled.connect(lambda:self.CheckCheck(self.Regional_Slice_BT))
self.ScaleSlice.addButton(self.Regional_Slice_BT)
self.Mask_Insert_BT = QtWidgets.QToolButton(self.GPM_Window)
self.Mask_Insert_BT.setEnabled(False)
self.Mask_Insert_BT.setGeometry(QtCore.QRect(30, 260, 51, 21))
self.Mask_Insert_BT.setCheckable(False)
self.Mask_Insert_BT.setPopupMode(QtWidgets.QToolButton.DelayedPopup)
self.Mask_Insert_BT.setArrowType(QtCore.Qt.RightArrow)
self.Mask_Insert_BT.setObjectName("Mask_Insert_BT")
self.Mask_Insert_BT.clicked.connect(self.selectSlice)
#self.ScaleSlice.addButton(self.Mask_Insert_BT)
self.Mask_Insert_LB = QtWidgets.QLabel(self.GPM_Window)
self.Mask_Insert_LB.setGeometry(QtCore.QRect(30, 240, 111, 16))
self.Mask_Insert_LB.setObjectName("Mask_Insert_LB")
self.Spatial_Slice_LB = QtWidgets.QLabel(self.GPM_Window)
self.Spatial_Slice_LB.setGeometry(QtCore.QRect(10, 180, 181, 16))
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.Spatial_Slice_LB.setFont(font)
self.Spatial_Slice_LB.setObjectName("Spatial_Slice_LB")
self.MaskDir_TX = QtWidgets.QLineEdit(self.GPM_Window)
self.MaskDir_TX.setEnabled(False)
self.MaskDir_TX.setGeometry(QtCore.QRect(90, 260, 221, 20))
self.MaskDir_TX.setObjectName("MaskDir_TX")
#TEMPORAL SLICE
self.Start_Date_Cal = QtWidgets.QDateEdit(self.GPM_Window)
self.Start_Date_Cal.setGeometry(QtCore.QRect(70, 100, 81, 22))
self.Start_Date_Cal.setMinimumDateTime(QtCore.QDateTime(QtCore.QDate(1998, 1, 1), QtCore.QTime(0, 0, 0)))
self.Start_Date_Cal.setCalendarPopup(True)
self.Start_Date_Cal.setDate(QtCore.QDate(2014, 3, 12))
self.Start_Date_Cal.setObjectName("Start_Date_Cal")
self.D_M_Y_LB1 = QtWidgets.QLabel(self.GPM_Window)
self.D_M_Y_LB1.setGeometry(QtCore.QRect(70, 80, 81, 16))
self.D_M_Y_LB1.setObjectName("D_M_Y_LB1")
self.Start_Date_LB = QtWidgets.QLabel(self.GPM_Window)
self.Start_Date_LB.setGeometry(QtCore.QRect(10, 100, 61, 16))
self.Start_Date_LB.setObjectName("Start_Date_LB")
self.End_Date_Cal = QtWidgets.QDateEdit(self.GPM_Window)
self.End_Date_Cal.setGeometry(QtCore.QRect(70, 150, 81, 22))
self.End_Date_Cal.setProperty("showGroupSeparator", False)
self.End_Date_Cal.setCalendarPopup(True)
self.End_Date_Cal.setDate(QtCore.QDate(Year, Month, Day))
self.End_Date_Cal.setObjectName("End_Date_Cal")
self.D_M_Y_LB2 = QtWidgets.QLabel(self.GPM_Window)
self.D_M_Y_LB2.setGeometry(QtCore.QRect(70, 130, 81, 16))
self.D_M_Y_LB2.setObjectName("D_M_Y_LB2")
self.End_Date_LB = QtWidgets.QLabel(self.GPM_Window)
self.End_Date_LB.setGeometry(QtCore.QRect(10, 150, 51, 16))
self.End_Date_LB.setObjectName("End_Date_LB")
#PRODUCT TYPE
self.GPM_M_BT = QtWidgets.QRadioButton(self.GPM_Window)
self.GPM_M_BT.setGeometry(QtCore.QRect(10, 30, 82, 17))
self.GPM_M_BT.setChecked(True)
self.GPM_M_BT.setObjectName("GPM_M_BT")
self.Precipitation_Products = QtWidgets.QButtonGroup(GPM_WINDOW)
self.Precipitation_Products.setObjectName("Precipitation_Products")
self.Precipitation_Products.addButton(self.GPM_M_BT)
self.GPM_D_BT = QtWidgets.QRadioButton(self.GPM_Window)
self.GPM_D_BT.setGeometry(QtCore.QRect(90, 30, 82, 17))
self.GPM_D_BT.setObjectName("GPM_D_BT")
self.Precipitation_Products.addButton(self.GPM_D_BT)
self._TRMM_M_BT = QtWidgets.QRadioButton(self.GPM_Window)
self._TRMM_M_BT.setGeometry(QtCore.QRect(160, 30, 82, 17))
self._TRMM_M_BT.setObjectName("_TRMM_M_BT")
self.Precipitation_Products.addButton(self._TRMM_M_BT)
self.TRMM_D_BT = QtWidgets.QRadioButton(self.GPM_Window)
self.TRMM_D_BT.setGeometry(QtCore.QRect(250, 30, 82, 17))
self.TRMM_D_BT.setObjectName("TRMM_D_BT")
self.Precipitation_Products.addButton(self.TRMM_D_BT)
#PRODUCT CHECKS
self.GPM_M_BT.toggled.connect(lambda:self.CheckProd(self.GPM_M_BT))
self.GPM_D_BT.toggled.connect(lambda:self.CheckProd(self.GPM_D_BT))
self._TRMM_M_BT.toggled.connect(lambda:self.CheckProd(self._TRMM_M_BT))
self.TRMM_D_BT.toggled.connect(lambda:self.CheckProd(self.TRMM_D_BT))
self.PPT_TypeLb = QtWidgets.QLabel(self.GPM_Window)
self.PPT_TypeLb.setGeometry(QtCore.QRect(10, 10, 181, 16))
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.PPT_TypeLb.setFont(font)
self.PPT_TypeLb.setObjectName("PPT_TypeLb")
self.Date_Slice_LB = QtWidgets.QLabel(self.GPM_Window)
self.Date_Slice_LB.setGeometry(QtCore.QRect(10, 60, 181, 16))
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.Date_Slice_LB.setFont(font)
self.Date_Slice_LB.setObjectName("Date_Slice_LB")
self.DW_PC_DIR_LB = QtWidgets.QLabel(self.GPM_Window)
self.DW_PC_DIR_LB.setGeometry(QtCore.QRect(10, 290, 241, 16))
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.DW_PC_DIR_LB.setFont(font)
self.DW_PC_DIR_LB.setObjectName("DW_PC_DIR_LB")
self.OutDir_BT = QtWidgets.QToolButton(self.GPM_Window)
self.OutDir_BT.setGeometry(QtCore.QRect(30, 310, 51, 21))
self.OutDir_BT.setCheckable(False)
self.OutDir_BT.setPopupMode(QtWidgets.QToolButton.DelayedPopup)
self.OutDir_BT.setArrowType(QtCore.Qt.RightArrow)
self.OutDir_BT.setObjectName("OutDir_BT")
#self.myTextBox = QtWidgets.QTextEdit()
self.window = GPM_WINDOW
self.OutDir_BT.clicked.connect(self.selectOUT)
self.OutDir_TX = QtWidgets.QLineEdit(self.GPM_Window)
self.OutDir_TX.setGeometry(QtCore.QRect(90, 310, 221, 20))
self.OutDir_TX.setObjectName("OutDir_TX")
#PHOTOS
self.label = QtWidgets.QLabel(self.GPM_Window)
self.label.setEnabled(True)
self.label.setGeometry(QtCore.QRect(160, 60, 181, 121))
self.label.setText("")
self.label.setPixmap(QtGui.QPixmap("gpm_1.jpg"))
self.label.setScaledContents(True)
self.label.setObjectName("label")
#STATUSBAR
self.statusbar = QtWidgets.QStatusBar(GPM_WINDOW)
self.statusbar.setObjectName("statusbar")
#RUN EXIT BUTTONS
self.Run_BT = QtWidgets.QPushButton(self.GPM_Window)
self.Run_BT.setGeometry(QtCore.QRect(90, 360, 75, 23))
self.Run_BT.setObjectName("Run_BT")
self.Run_BT.clicked.connect(self.exec_Processing)
self.Exit_BT = QtWidgets.QPushButton(self.GPM_Window)
self.Exit_BT.setGeometry(QtCore.QRect(180, 360, 75, 23))
self.Exit_BT.setObjectName("Exit_BT")
self.Exit_BT.clicked.connect(QCoreApplication.instance().quit)
self.OP_BT = QtWidgets.QCheckBox(self.GPM_Window)
self.OP_BT.setGeometry(QtCore.QRect(10, 390, 81, 21))
self.OP_BT.setObjectName("OP_BT")
self.OP_LB = QtWidgets.QLabel(self.GPM_Window)
self.OP_LB.setGeometry(QtCore.QRect(10, 410, 271, 21))
self.OP_LB.setObjectName("OP_LB")
self.retranslateUi(GPM_WINDOW)
QtCore.QMetaObject.connectSlotsByName(GPM_WINDOW)
def retranslateUi(self, GPM_WINDOW,):
_translate = QtCore.QCoreApplication.translate
GPM_WINDOW.setWindowTitle(_translate("GPM_WINDOW", "Precipitation Processing Tools"))
self.Global_Slice_BT.setText(_translate("GPM_WINDOW", "Global"))
self.Regional_Slice_BT.setText(_translate("GPM_WINDOW", "Regional slice"))
self.Mask_Insert_BT.setText(_translate("GPM_WINDOW", "..."))
self.Mask_Insert_LB.setText(_translate("GPM_WINDOW", "Insert the slice mask:"))
self.D_M_Y_LB1.setText(_translate("GPM_WINDOW", "Day/Month/Year"))
self.Start_Date_LB.setText(_translate("GPM_WINDOW", "Start Date:"))
self.D_M_Y_LB2.setText(_translate("GPM_WINDOW", "Day/Month/Year"))
self.End_Date_LB.setText(_translate("GPM_WINDOW", "End Date:"))
self.GPM_M_BT.setText(_translate("GPM_WINDOW", "GPM Month"))
self.GPM_D_BT.setText(_translate("GPM_WINDOW", "GPM Day"))
self._TRMM_M_BT.setText(_translate("GPM_WINDOW", "TRMM Month"))
self.TRMM_D_BT.setText(_translate("GPM_WINDOW", "TRMM Day"))
self.PPT_TypeLb.setText(_translate("GPM_WINDOW", "Precipitation Product type:"))
self.Date_Slice_LB.setText(_translate("GPM_WINDOW", "Date slice:"))
self.Spatial_Slice_LB.setText(_translate("GPM_WINDOW", "Spatial slice:"))
self.DW_PC_DIR_LB.setText(_translate("GPM_WINDOW", "Download and Processing directory:"))
self.OutDir_BT.setText(_translate("GPM_WINDOW", "..."))
self.OutDir_TX.setText(_translate("GPM_WINDOW", ''))
self.MaskDir_TX.setText(_translate("GPM_WINDOW", ''))
self.Run_BT.setText(_translate("GPM_WINDOW", "Run"))
self.Exit_BT.setText(_translate("GPM_WINDOW", "Close"))
self.OP_BT.setText(_translate("GPM_WINDOW", "Only Process"))
        self.OP_LB.setText(_translate("GPM_WINDOW", "(Mark this only if you want to process already downloaded data)"))
def selectOUT(self):
outdir = QFileDialog.getExistingDirectory(self.window, 'Select the download and/or processing directory', os.getenv('HOME'))
outdir_txt = r'%s' % str(outdir);
self.OutDir_TX.setText(outdir_txt)
def selectSlice(self):
        dataMask = QFileDialog.getOpenFileName(self.window, 'Select the data you want to use as a mask (Shapefiles only)', os.getenv('HOME'), "ESRI Shapefile (*.shp)")
dataMask_txt = r'%s' % str(dataMask[0]);
self.MaskDir_TX.setText(dataMask_txt)
#return()
def CheckCheck(self,b):
if b.isChecked() == True:
self.Global_Slice_BT.setChecked(False)
self.Mask_Insert_BT.setEnabled(True)
self.Mask_Insert_BT.setCheckable(True)
self.MaskDir_TX.setEnabled(True)
else:
self.Global_Slice_BT.setChecked(True)
self.Mask_Insert_BT.setEnabled(False)
self.Mask_Insert_BT.setCheckable(False)
self.MaskDir_TX.setEnabled(False)
self.MaskDir_TX.setText('')
def CheckProd(self,a):
DateNow = list(map(int,((datetime.datetime.now()).strftime('%Y-%m-%d')).split('-')))
YearNow = int(DateNow[0])
MonthNow = int(DateNow[1])
DayNow = int(DateNow[2])
if a.isChecked() == True and a.text() == "GPM Month":
self.Start_Date_Cal.setDate(QtCore.QDate(2014, 3, 12))
self.End_Date_Cal.setDate(QtCore.QDate(YearNow, MonthNow, DayNow))
elif a.isChecked() == True and a.text() == "GPM Day":
self.Start_Date_Cal.setDate(QtCore.QDate(2014, 3, 12))
self.End_Date_Cal.setDate(QtCore.QDate(YearNow, MonthNow, DayNow))
elif a.isChecked() == True and a.text() == "TRMM Month":
self.Start_Date_Cal.setDate(QtCore.QDate(1998, 1, 1))
self.End_Date_Cal.setDate(QtCore.QDate(YearNow, MonthNow, DayNow))
elif a.isChecked() == True and a.text() == "TRMM Day":
self.Start_Date_Cal.setDate(QtCore.QDate(1998, 1, 1))
self.End_Date_Cal.setDate(QtCore.QDate(YearNow, MonthNow, DayNow))
else:
pass
def exec_Processing(self):
ProdTp = None
if self.GPM_M_BT.isChecked() == True and self.GPM_M_BT.text() == "GPM Month":
ProdTp = 'GPM_M'
elif self.GPM_D_BT.isChecked() == True and self.GPM_D_BT.text() == "GPM Day":
ProdTp = 'GPM_D'
elif self._TRMM_M_BT.isChecked() == True and self._TRMM_M_BT.text() == "TRMM Month":
ProdTp = 'TRMM_M'
elif self.TRMM_D_BT.isChecked() == True and self.TRMM_D_BT.text() == "TRMM Day":
ProdTp = 'TRMM_D'
else:
pass
OP_Info = ''
if self.OP_BT.isChecked() == True:
OP_Info = '--OP'
        StartDate = str((datetime.datetime.strptime(str(self.Start_Date_Cal.text()), '%d/%m/%Y')).strftime('%Y-%m-%d'))
        EndDate = str((datetime.datetime.strptime(str(self.End_Date_Cal.text()), '%d/%m/%Y')).strftime('%Y-%m-%d'))
        Download_Dir = r'%s' % str(self.OutDir_TX.text())
Slice_Dir = r'%s' % str(self.MaskDir_TX.text())
if Slice_Dir == '':
Slice_Dir = 'None'
#zz = (ProdTp,StartDate,EndDate,Donwload_Dir,Slice_Dir,OP_Info)
#print(zz);
        os.system('python Integration.py --ProdTP ' + ProdTp + ' --StartDate ' + StartDate + ' --EndDate ' + EndDate + ' --ProcessDir ' + Download_Dir + ' --SptSlc ' + Slice_Dir + ' ' + OP_Info)
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
Dialog = QtWidgets.QDialog()
ui = Ui_GPM_WINDOW()
ui.setupUi(Dialog)
Dialog.show()
sys.exit(app.exec_())
|
py | b40ec49982d8397ef918ad8e69b64e903c438b6b | # Generated by Django 2.1.2 on 2018-10-27 13:24
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Dapp',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=1000)),
('address', models.CharField(max_length=1000)),
('status', models.CharField(max_length=1000)),
('category', models.CharField(max_length=1000)),
('homepage', models.CharField(max_length=1000)),
('icon', models.CharField(max_length=1000)),
],
),
]
|
py | b40ec5d56ddc0e75f7f69798e2075fdd99725084 | # Copyright 2017 Huawei, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from six.moves.urllib import parse as urllib_parse
def get_pagination_options(limit=None, marker=None, sorts=None):
options = []
if limit:
options.append("limit=%d" % limit)
if marker:
options.append("marker=%s" % urllib_parse.quote(marker))
for sort in sorts or []:
options.append("sort=%s" % urllib_parse.quote(sort))
return "&".join(options)
def filtersdict_to_url(filters):
urls = []
for k, v in sorted(filters.items()):
url = "q.field=%s&q.op=eq&q.value=%s" % (k, v)
urls.append(url)
return '&'.join(urls)
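# Illustrative results of the two helpers above (argument values are assumed examples):
#   get_pagination_options(limit=10, marker="abc", sorts=["name:asc"])
#   -> "limit=10&marker=abc&sort=name%3Aasc"
#   filtersdict_to_url({"project_id": "123", "status": "active"})
#   -> "q.field=project_id&q.op=eq&q.value=123&q.field=status&q.op=eq&q.value=active"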
|
py | b40ec5e121b75f0ab4e28902cab8331128659319 | import time
import numpy as np
import tkinter as tk
from PIL import ImageTk, Image
np.random.seed(1)
PhotoImage = ImageTk.PhotoImage
unit = 50
height = 15
width = 15
class Env(tk.Tk):
def __init__(self):
super(Env, self).__init__()
        self.action_space = ['w', 's', 'a', 'd']  # up, down, left, right, following the W/S/A/D keyboard layout
self.actions_length = len(self.action_space)
self.height = height*unit
self.width = width*unit
self.title('codefair qlearning')
self.geometry(f'{self.height}x{self.width}')
self.shapes = self.load_images()
self.canvas = self.build_canvas()
def build_canvas(self):
canvas = tk.Canvas(self, bg="white", height=self.height, width=self.width)
for c in range(0, self.width, unit):
x0, y0, x1, y1 = c, 0, c, self.height
canvas.create_line(x0, y0, x1, y1)
for r in range(0, self.height, unit):
x0, y0, x1, y1 = 0, r, self.height, r
canvas.create_line(x0, y0, x1, y1)
# mark images to canvas
self.agent = canvas.create_image(50, 50, image=self.shapes[0])
self.virus = canvas.create_image(175, 175, image=self.shapes[1])
self.destination = canvas.create_image(275, 275, image=self.shapes[2])
canvas.pack()
return canvas
def load_images(self):
agent = PhotoImage(Image.open("./img/agent.png").resize((50, 50)))
virus = PhotoImage(Image.open("./img/virus.jpg").resize((50, 50)))
destination = PhotoImage(Image.open("./img/destination.png").resize((50, 50)))
return agent, virus, destination
    def coords_to_state(self, coords):
        # canvas pixel coordinates -> grid indices (each cell is `unit` pixels wide)
        x = int((coords[0] - unit / 2) / unit)
        y = int((coords[1] - unit / 2) / unit)
        return [x, y]
    def state_to_coords(self, state):
        # grid indices -> canvas pixel coordinates of the cell centre
        x = int(state[0] * unit + unit / 2)
        y = int(state[1] * unit + unit / 2)
        return [x, y]
def reset(self):
self.update()
time.sleep(.5)
x, y = self.canvas.coords(self.agent)
self.canvas.move(self.agent, unit/2 - x, unit/2 - y)
self.render()
return self.coords_to_state(self.canvas.coords(self.agent))
def step(self, action):
state = self.canvas.coords(self.agent)
base_action = np.array([0, 0])
self.render()
# actions
if action == 0: # w
if state[1] > unit:
base_action[1] -= unit
elif action == 1: # s
if state[1] < (height - 1) * unit:
base_action[1] += unit
elif action == 2: # a
if state[0] > unit:
base_action[0] -= unit
elif action == 3: # d
if state[0] < (width - 1) * unit:
base_action[0] += unit
self.canvas.move(self.agent, base_action[0], base_action[1])
self.canvas.tag_raise(self.agent)
next_state = self.canvas.coords(self.agent)
# reward
if next_state == self.canvas.coords(self.destination):
reward = 100
finish = True
elif next_state == self.canvas.coords(self.virus):
reward = -100
finish = True
else:
reward = 0
finish = False
next_state = self.coords_to_state(next_state)
return next_state, reward, finish
def render(self):
time.sleep(.03)
self.update() |
py | b40ec654addc7c1cd2307af18529fb4504f1207d | """
ASGI config for hotels_django project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hotels_django.settings')
application = get_asgi_application()
|
py | b40ec700a5373e862cdeecb0ed38c344b944e2c2 | # Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
"""Functions related to converting between Python and numpy types and ngraph types."""
import logging
from typing import List, Union
import numpy as np
from ngraph.exceptions import NgraphTypeError
from ngraph.impl import Node, Shape, Output
from ngraph.impl import Type as NgraphType
from ngraph.impl.op import Constant
log = logging.getLogger(__name__)
TensorShape = List[int]
NumericData = Union[int, float, np.ndarray]
NumericType = Union[type, np.dtype]
ScalarData = Union[int, float]
NodeInput = Union[Node, NumericData]
ngraph_to_numpy_types_map = [
(NgraphType.boolean, np.bool),
(NgraphType.f16, np.float16),
(NgraphType.f32, np.float32),
(NgraphType.f64, np.float64),
(NgraphType.i8, np.int8),
(NgraphType.i16, np.int16),
(NgraphType.i32, np.int32),
(NgraphType.i64, np.int64),
(NgraphType.u8, np.uint8),
(NgraphType.u16, np.uint16),
(NgraphType.u32, np.uint32),
(NgraphType.u64, np.uint64),
(NgraphType.bf16, np.uint16),
]
ngraph_to_numpy_types_str_map = [
("boolean", np.bool),
("f16", np.float16),
("f32", np.float32),
("f64", np.float64),
("i8", np.int8),
("i16", np.int16),
("i32", np.int32),
("i64", np.int64),
("u8", np.uint8),
("u16", np.uint16),
("u32", np.uint32),
("u64", np.uint64),
]
def get_element_type(data_type: NumericType) -> NgraphType:
"""Return an ngraph element type for a Python type or numpy.dtype."""
if data_type is int:
log.warning("Converting int type of undefined bitwidth to 32-bit ngraph integer.")
return NgraphType.i32
if data_type is float:
log.warning("Converting float type of undefined bitwidth to 32-bit ngraph float.")
return NgraphType.f32
ng_type = next(
(ng_type for (ng_type, np_type) in ngraph_to_numpy_types_map if np_type == data_type), None
)
if ng_type:
return ng_type
raise NgraphTypeError("Unidentified data type %s", data_type)
def get_element_type_str(data_type: NumericType) -> str:
"""Return an ngraph element type string representation for a Python type or numpy dtype."""
if data_type is int:
log.warning("Converting int type of undefined bitwidth to 32-bit ngraph integer.")
return "i32"
if data_type is float:
log.warning("Converting float type of undefined bitwidth to 32-bit ngraph float.")
return "f32"
ng_type = next(
(ng_type for (ng_type, np_type) in ngraph_to_numpy_types_str_map if np_type == data_type),
None,
)
if ng_type:
return ng_type
raise NgraphTypeError("Unidentified data type %s", data_type)
def get_dtype(ngraph_type: NgraphType) -> np.dtype:
"""Return a numpy.dtype for an ngraph element type."""
np_type = next(
(np_type for (ng_type, np_type) in ngraph_to_numpy_types_map if ng_type == ngraph_type),
None,
)
if np_type:
return np.dtype(np_type)
raise NgraphTypeError("Unidentified data type %s", ngraph_type)
def get_ndarray(data: NumericData) -> np.ndarray:
"""Wrap data into a numpy ndarray."""
if type(data) == np.ndarray:
return data
return np.array(data)
def get_shape(data: NumericData) -> TensorShape:
"""Return a shape of NumericData."""
if type(data) == np.ndarray:
return data.shape # type: ignore
elif type(data) == list:
return [len(data)] # type: ignore
return []
def make_constant_node(value: NumericData, dtype: NumericType = None) -> Constant:
"""Return an ngraph Constant node with the specified value."""
ndarray = get_ndarray(value)
if dtype:
element_type = get_element_type(dtype)
else:
element_type = get_element_type(ndarray.dtype)
return Constant(element_type, Shape(ndarray.shape), ndarray.flatten().tolist())
def as_node(input_value: NodeInput) -> Node:
"""Return input values as nodes. Scalars will be converted to Constant nodes."""
if issubclass(type(input_value), Node):
return input_value
if issubclass(type(input_value), Output):
return input_value
return make_constant_node(input_value)
def as_nodes(*input_values: NodeInput) -> List[Node]:
"""Return input values as nodes. Scalars will be converted to Constant nodes."""
return [as_node(input_value) for input_value in input_values]
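# Illustrative calls (assumed usage, shown as comments only):
#   get_element_type(np.float32)  # -> NgraphType.f32
#   get_dtype(NgraphType.i64)     # -> dtype('int64')
#   as_nodes(2.5, 3)              # scalars are wrapped into Constant nodes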
|
py | b40ec7ef43c3fdf1bdc11989633b51e7e0ae7288 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-07-12 11:29
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('testCon', '0007_auto_20160712_1057'),
]
operations = [
migrations.AddField(
model_name='student_info',
name='StuId',
field=models.CharField(default=1, max_length=20),
preserve_default=False,
),
]
|
py | b40ec8235a2442ae967edcd6507aede6e22d1157 | import shutil
import numpy as np
np.random.seed(111)
import argparse
import os
import time
import sys
import json
import matplotlib
from axelerate.networks.yolo.frontend import create_yolo, get_object_labels
from axelerate.networks.classifier.frontend_classifier import create_classifier, get_labels
from axelerate.networks.segnet.frontend_segnet import create_segnet
from axelerate.networks.common_utils.convert import Converter
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '4'
import tensorflow as tf
tf.get_logger().setLevel('ERROR')
argparser = argparse.ArgumentParser(
description='Train and validate YOLO_v2 model on any dataset')
argparser.add_argument(
'-c',
'--config',
default="configs/from_scratch.json",
help='path to configuration file')
def train_from_config(config,project_folder):
try:
matplotlib.use('Agg')
except:
pass
#added for compatibility with < 0.5.7 versions
try:
input_size = config['model']['input_size'][:]
except:
input_size = [config['model']['input_size'],config['model']['input_size']]
# Create the converter
converter = Converter(config['converter']['type'], config['model']['architecture'], config['train']['valid_image_folder'])
# Segmentation network
if config['model']['type']=='SegNet':
print('Segmentation')
# 1. Construct the model
segnet = create_segnet(config['model']['architecture'],
input_size,
config['model']['n_classes'],
config['weights']['backend'])
# 2. Load the pretrained weights (if any)
segnet.load_weights(config['weights']['full'], by_name=True)
# 3. actual training
model_layers, model_path = segnet.train(config['train']['train_image_folder'],
config['train']['train_annot_folder'],
config['train']['actual_epoch'],
project_folder,
config["train"]["batch_size"],
config["train"]["augmentation"],
config['train']['learning_rate'],
config['train']['train_times'],
config['train']['valid_times'],
config['train']['valid_image_folder'],
config['train']['valid_annot_folder'],
config['train']['first_trainable_layer'],
config['train']['ignore_zero_class'],
config['train']['valid_metric'])
# Classifier
if config['model']['type']=='Classifier':
print('Classifier')
if config['model']['labels']:
labels = config['model']['labels']
else:
labels = get_labels(config['train']['train_image_folder'])
# 1. Construct the model
classifier = create_classifier(config['model']['architecture'],
labels,
input_size,
config['model']['fully-connected'],
config['model']['dropout'],
config['weights']['backend'],
config['weights']['save_bottleneck'])
# 2. Load the pretrained weights (if any)
classifier.load_weights(config['weights']['full'], by_name=True)
# 3. actual training
model_layers, model_path = classifier.train(config['train']['train_image_folder'],
config['train']['actual_epoch'],
project_folder,
config["train"]["batch_size"],
config["train"]["augmentation"],
config['train']['learning_rate'],
config['train']['train_times'],
config['train']['valid_times'],
config['train']['valid_image_folder'],
config['train']['first_trainable_layer'],
config['train']['valid_metric'])
# Detector
if config['model']['type']=='Detector':
if config['train']['is_only_detect']:
labels = ["object"]
else:
if config['model']['labels']:
labels = config['model']['labels']
else:
labels = get_object_labels(config['train']['train_annot_folder'])
print(labels)
# 1. Construct the model
yolo = create_yolo(config['model']['architecture'],
labels,
input_size,
config['model']['anchors'],
config['model']['obj_thresh'],
config['model']['iou_thresh'],
config['model']['coord_scale'],
config['model']['object_scale'],
config['model']['no_object_scale'],
config['weights']['backend'])
# 2. Load the pretrained weights (if any)
yolo.load_weights(config['weights']['full'], by_name=True)
# 3. actual training
model_layers, model_path = yolo.train(config['train']['train_image_folder'],
config['train']['train_annot_folder'],
config['train']['actual_epoch'],
project_folder,
config["train"]["batch_size"],
config["train"]["augmentation"],
config['train']['learning_rate'],
config['train']['train_times'],
config['train']['valid_times'],
config['train']['valid_image_folder'],
config['train']['valid_annot_folder'],
config['train']['first_trainable_layer'],
config['train']['valid_metric'])
# 4 Convert the model
time.sleep(2)
converter.convert_model(model_path)
return model_path
def setup_training(config_file=None, config_dict=None):
"""make directory to save weights & its configuration """
if config_file:
with open(config_file) as config_buffer:
config = json.loads(config_buffer.read())
elif config_dict:
config = config_dict
else:
print('No config found')
sys.exit()
dirname = os.path.join("projects", config['train']['saved_folder'])
if os.path.isdir(dirname):
print("Project folder {} already exists. Creating a folder for new training session.".format(dirname))
else:
print("Project folder {} is created.".format(dirname, dirname))
os.makedirs(dirname)
return(train_from_config(config, dirname))
if __name__ == '__main__':
argparser = argparse.ArgumentParser(
description='Train and validate YOLO_v2 model on any dataset')
argparser.add_argument(
'-c',
'--config',
default="configs/classifer.json",
help='path to configuration file')
args = argparser.parse_args()
setup_training(config_file=args.config)
shutil.rmtree("logs", ignore_errors=True)
# python axelerate/train.py -c configs/raccoon_detector.json
|
py | b40ec93b17d9788d5179aeb870c7de5d8afe6999 | '''
Evaluate the value of an arithmetic expression in Reverse Polish Notation.
Valid operators are +, -, *, /. Each operand may be an integer or another expression.
Note:
- Division between two integers should truncate toward zero.
- The given RPN expression is always valid. That means the expression would
always evaluate to a result and there won't be any divide by zero operation.
Example 1:
Input: ["2", "1", "+", "3", "*"]
Output: 9
Explanation: ((2 + 1) * 3) = 9
Example 2:
Input: ["4", "13", "5", "/", "+"]
Output: 6
Explanation: (4 + (13 / 5)) = 6
Example 3:
Input: ["10", "6", "9", "3", "+", "-11", "*", "/", "*", "17", "+", "5", "+"]
Output: 22
Explanation:
((10 * (6 / ((9 + 3) * -11))) + 17) + 5
= ((10 * (6 / (12 * -11))) + 17) + 5
= ((10 * (6 / -132)) + 17) + 5
= ((10 * 0) + 17) + 5
= (0 + 17) + 5
= 17 + 5
= 22
'''
from typing import List
class Solution:
def evalRPN(self, tokens: List[str]) -> int:
stack = []
for elem in tokens:
if elem.isdigit() or elem[1:].isdigit(): # if elem is negative number, isdigit() will return false
stack.append(int(elem))
else:
second_num = stack.pop()
first_num = stack.pop()
if elem == "+":
tmp = first_num + second_num
elif elem == "-":
tmp = first_num - second_num
elif elem == "*":
tmp = first_num * second_num
else:
sign = -1 if first_num * second_num < 0 else 1
tmp = abs(first_num) // abs(second_num)
tmp *= sign
stack.append(tmp)
return stack[0]
print(Solution().evalRPN(["4","-2","/","2","-3","-","-"]))
print(Solution().evalRPN(["4", "13", "5", "/", "+"]))
print(Solution().evalRPN(["10", "6", "9", "3", "+", "-11", "*", "/", "*", "17", "+", "5", "+"]))
|
py | b40ec9468a2da07583b8039a1e43ee36e64b918c | import os
import math
import re
from werkzeug.security import generate_password_hash, check_password_hash
from flask import Flask, render_template, redirect, request, url_for, flash
from flask_pymongo import PyMongo, pymongo
from forms import RegistrationForm, LoginForm, RecipeForm
from bson.objectid import ObjectId
from flask import session
# App Config
app = Flask(__name__)
if os.path.exists('env.py'):
import env
app.config["MONGO_DB"] = os.environ.get('MONGO_DB')
app.config["MONGO_URI"] = os.environ.get('MONGO_URI')
app.config["SECRET_KEY"] = os.environ.get('SECRET_KEY')
app.config["RECAPTCHA_PUBLIC_KEY"] = os.environ.get('RECAPTCHA_PUBLIC_KEY')
app.config["RECAPTCHA_PRIVATE_KEY"] = os.environ.get('RECAPTCHA_PRIVATE_KEY')
mongo = PyMongo(app)
# Routes
# Home Route
@app.route('/')
def index():
"""
    Pagination adapted from Code with Harry and
    S. MuirHead (Recipe CookBook App - MIT License)
"""
page_limit = 6
current_page = int(request.args.get('current_page', 1))
total = mongo.db.tasks.count()
pages = range(1, int(math.ceil(total / page_limit)) + 1)
tasks = mongo.db.tasks.find().sort('_id', pymongo.ASCENDING).skip(
(current_page - 1) * page_limit).limit(page_limit)
return render_template(
'index.html',
tasks=tasks,
title='Home',
current_page=current_page,
pages=pages)
# View each recipe - adapted from Code Institute task lectures
@app.route('/task/<task_id>')
def task(task_id):
"""
Allows User to view the full
individual recipe
"""
task_count = mongo.db.tasks.count()
return render_template('recipe.html',
task_count=task_count,
task=mongo.db.tasks.find_one({'_id': ObjectId(task_id)}))
# Search
@app.route('/findtask')
def findtask():
"""
Allows user to perform case insensitive matches
for recipe names
"""
query = request.args.get('query')
# Partial Search and case insensitive search
results = mongo.db.tasks.find(
{"recipe_name": {"$regex": query, "$options": "i"}})
if results.count():
return render_template(
'search.html',
results=results,
query=query,
title="Search")
else:
flash('No results were found', 'info')
return redirect(url_for('index'))
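# Illustrative request for the route above (assumed data): GET /findtask?query=cake matches
# recipe_name values such as "Chocolate Cake" or "cake pops" via the case-insensitive partial regex.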
# Filters
"""
Filter for course only - adapted from Pretty Printed and
S. MuirHead (Recipe CookBook App - MIT License)
"""
@app.route('/filtercourses', methods=['GET', 'POST'])
def filtercourses():
"""
Allows user to filter through
courses
"""
# Request Post method
if request.method == "POST":
for i in request.form:
if i == "recipe_course":
# Filter through courses
filter_items = []
items = request.form.getlist("recipe_course")
my_key = request.form
# iterate through items and key
for item in items:
for key in my_key:
# Append key and item together
filter_items.append({key: item})
results = mongo.db.tasks.find(
{'$and': [{'$or': filter_items}]})
return render_template('filter.html', results=results, title="Filter")
@app.route('/filterallergens', methods=['GET', 'POST'])
def filterallergens():
"""
Allows user to filter through
allergens
"""
# Request Post method
if request.method == "POST":
for i in request.form:
if i == "allergen":
# Filter through allergens
filter_items = []
# Put list into variable items
items = request.form.getlist("allergen")
my_key = request.form
# iterate through items and key
for item in items:
for key in my_key:
# attach key with items
filter_items.append({key: item})
results = mongo.db.tasks.find(
{'$and': [{'$or': filter_items}]})
return render_template('filter.html', results=results, title="Filter")
# End of taken and adapated code for Filtering
# Add Recipe - adapted from Code Institute Task Lectures
@app.route('/create_task', methods=['GET', 'POST'])
def create_task():
"""Create a new recipe to db collection"""
if 'logged_in' not in session: # Check if its a logged in user
flash(
'Sorry, only logged in users can create recipes. Please register/login',
'info')
return redirect(url_for('index'))
form = RecipeForm(request.form) # Initialise the form
user = mongo.db.user.find_one({"name": session['username'].title()})
if form.validate_on_submit(): # Insert new recipe if form is submitted
tasks = mongo.db.tasks
tasks.insert_one({
'recipe_name': request.form['recipe_name'],
'recipe_image': request.form['recipe_image'],
'ingredients': request.form['ingredients'],
'serving_size': request.form['serving_size'],
'recipe_course': request.form['recipe_course'],
'allergen': request.form['allergen'],
'calories': request.form['calories'],
'description': request.form['description'],
'cooking_time': request.form['cooking_time'],
'instruction': request.form['instruction'],
'instruction1': request.form['instruction1'],
'instruction2': request.form['instruction2'],
'instruction3': request.form['instruction3'],
'instruction4': request.form['instruction4'],
'instruction5': request.form['instruction5'],
'instruction6': request.form['instruction6'],
'username': session['username'].title(),
})
flash('Your Recipe has been added successfully', 'success')
return redirect(url_for('index'))
return render_template('add_recipe.html', form=form, title="Add Recipe")
# Update Recipe - adapted from Code Institute Task Lectures
@app.route('/update_task/<task_id>', methods=['GET', 'POST'])
def update_task(task_id):
# Check if user is logged in
if 'logged_in' not in session: # Check if its a logged in user
flash(
'Sorry, only logged in users can edit their own recipes. Please login',
'info')
return redirect(url_for('index'))
user = mongo.db.user.find_one({"name": session['username'].title()})
the_task = mongo.db.tasks.find_one_or_404({'_id': ObjectId(task_id)})
form = RecipeForm()
# If user created then they can edit
if user['name'].title() == the_task['username'].title():
if request.method == 'GET':
form = RecipeForm(data=the_task)
return render_template(
'edit_recipe.html',
task=the_task,
form=form,
title='Edit Recipe')
if form.validate_on_submit():
task = mongo.db.tasks
task.update_one({
'_id': ObjectId(task_id),
}, {
'$set': {
'recipe_name': request.form['recipe_name'],
'recipe_image': request.form['recipe_image'],
'ingredients': request.form['ingredients'],
'serving_size': request.form['serving_size'],
'recipe_course': request.form['recipe_course'],
'allergen': request.form['allergen'],
'calories': request.form['calories'],
'description': request.form['description'],
'cooking_time': request.form['cooking_time'],
'instruction': request.form['instruction'],
'instruction1': request.form['instruction1'],
'instruction2': request.form['instruction2'],
'instruction3': request.form['instruction3'],
'instruction4': request.form['instruction4'],
'instruction5': request.form['instruction5'],
'instruction6': request.form['instruction6'],
}})
flash('Your Recipe has been updated', 'info')
return redirect(url_for('task', task_id=task_id))
flash('Sorry not your recipe to edit!', 'danger')
return redirect(url_for('task', task_id=task_id))
# Delete Recipe- adapted from Code Institute Task Lectures
@app.route('/delete_task/<task_id>')
def delete_task(task_id):
if 'logged_in' in session:
user = mongo.db.user.find_one({"name": session['username'].title()})
the_task = mongo.db.tasks.find_one({'_id': ObjectId(task_id)})
if user['name'].title() == the_task['username'].title():
task = mongo.db.tasks
task.delete_one({
'_id': ObjectId(task_id)
})
flash('Your recipe has been deleted', 'success')
return redirect(url_for('index'))
flash('Sorry this is not your recipe to delete', 'danger')
return redirect(url_for('task', task_id=task_id))
else:
flash('Only logged in users can delete recipes', 'info')
return redirect(url_for('index'))
# Upvotes
@app.route('/upvotes/<task_id>', methods=['POST'])
def upvotes(task_id):
"""
Allows User to upvote a specific
recipe
"""
mongo.db.tasks.update_one(
{'_id': ObjectId(task_id)},
{'$inc': {'upvotes': 1}})
return redirect(url_for('task', task_id=task_id))
# About
@app.route('/about')
def about():
"""
Allows a user to view about me
page
"""
return render_template('about.html')
# Login- Code adapted from Pretty printed video
@app.route('/login', methods=['GET', 'POST'])
def login():
"""
Function for handling the logging in of users
"""
# Check if user is logged in
if 'logged_in' in session:
return redirect(url_for('index'))
form = LoginForm()
# Check is form is valid and find user in database
if form.validate_on_submit():
user = mongo.db.user
logged_in_user = user.find_one(
{'name': request.form['username'].title()})
# Check is user - password is hashed and does the password match
if logged_in_user:
if check_password_hash(logged_in_user['pass'],
request.form['password']):
session['username'] = request.form['username']
session['logged_in'] = True
# If password matches redirect to index
flash('You are successfully logged in', 'success')
return redirect(url_for('index'))
# If not show message below and redirect to login
flash('Sorry incorrect password!', 'danger')
return redirect(url_for('login'))
"""
    If the form is invalid, or the username or password does not match,
    redirect back to the login page
"""
flash(
        'Sorry, your credentials are incorrect (username). Please check and try again',
'danger')
return render_template('login.html', form=form, title='Login')
# Register- Code adapted from Pretty printed video- PyMongo Login/Register
# - Debugging aided by Tutor Tim Nelson
@app.route('/register', methods=['GET', 'POST'])
def register():
""" Function for allowing users to register"""
form = RegistrationForm()
# Check if user is already registered
if request.method == 'GET':
if 'username' in session:
flash('You are already logged in!', 'success')
return redirect(url_for('index'))
return render_template('register.html', form=form)
if request.method == 'POST':
if form.validate_on_submit():
user = mongo.db.user
# check existing username
exist_user = user.find_one(
{'name': request.form.get('username').title()})
print(exist_user)
if exist_user is None:
"""
If new user insert username, password and email into
collection
"""
hash_pass = generate_password_hash(
request.form.get('password'))
user.insert_one({'name': request.form.get('username').title(),
'pass': hash_pass})
session['username'] = request.form.get('username')
flash(
'You are successfully registered, Please log in!',
'success')
return redirect(url_for('login'))
else:
flash(
'Sorry, username already taken. Please try another.',
'warning')
return redirect(
url_for(
'register',
form=form,
title='Register'))
else:
return redirect(url_for('register', form=form, title='Register'))
return redirect(url_for('register', form=form, title='Register'))
# Logout
@app.route('/logout')
def logout():
"""
Allowing users to logout
"""
session.clear()
flash('You are successfully logged out', 'success')
return redirect(url_for('index'))
# Error Handling - 404 and 500
# Error 404- adapted from Corey Schafer Flask Series
@app.errorhandler(404)
def page_not_found(e):
"""
Route for handling 404 error
"""
return render_template('404.html', title='Page not found'), 404
# Error 500- adapted from Corey Schafer Flask Series
@app.errorhandler(500)
def internal_server_error(e):
"""
Route for handling 500 error
"""
return render_template('500.html', title='Internal server error'), 500
if __name__ == '__main__':
app.run(host=os.environ.get('IP'),
port=int(os.environ.get('PORT')),
debug=False) |
py | b40ec949a30adc2b0f81f0eb758f2d28c5d05d0a | # ---------------------------------------------------------------------
# Zyxel.ZyNOS_EE.get_arp
# ---------------------------------------------------------------------
# Copyright (C) 2007-2017 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import re
# NOC modules
from noc.sa.interfaces.igetarp import IGetARP
from noc.core.script.base import BaseScript
class Script(BaseScript):
name = "Zyxel.ZyNOS_EE.get_arp"
interface = IGetARP
rx_arp = re.compile(
r"^(?P<ip>\d+\.\d+\.\d+\.\d+)\s+\S+\s+\d+\s+(?P<mac>\S+)\s+\d+\s+(?P<interface>\S+)",
re.MULTILINE,
)
def execute(self):
r = []
# Try SNMP first
if self.has_snmp():
try:
mac_ip = {}
for mac, ip in self.snmp.join_tables(
"1.3.6.1.2.1.4.22.1.2", "1.3.6.1.2.1.4.22.1.3", cached=True
): # IP-MIB
mac = ":".join(["%02x" % ord(c) for c in mac])
ip = ["%02x" % ord(c) for c in ip]
mac_ip[mac] = ".".join(str(int(c, 16)) for c in ip)
for i, mac in self.snmp.join_tables(
"1.3.6.1.2.1.4.22.1.1", "1.3.6.1.2.1.4.22.1.2", cached=True
): # IP-MIB
mac = ":".join(["%02x" % ord(c) for c in mac])
interface = self.snmp.get(
"1.3.6.1.2.1.2.2.1.1." + str(i), cached=True
) # IF-MIB
try:
r.append({"ip": mac_ip[mac], "mac": mac, "interface": interface})
except KeyError:
pass
return r
except self.snmp.TimeOutError:
pass
# Fallback to CLI
arp = self.cli("ip arp status")
for match in self.rx_arp.finditer(arp):
r.append(
{
"ip": match.group("ip"),
"mac": match.group("mac"),
"interface": match.group("interface"),
}
)
return r
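    # Illustrative CLI line that rx_arp is meant to match (made-up sample, not real device output):
    #   10.0.0.1        dynamic    300   00:11:22:33:44:55   1     enet0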
|
py | b40ec99c6d7ee788106fa3cff6a748d0e4ea5754 | """
Slightly modified version of:
https://github.com/pytorch/vision/blob/master/torchvision/transforms/transforms.py
"""
import numbers
import random
import bayescache.api.data as data
class RandomCrop(data.Augmentation):
"""Crop the given PIL Image at a random location.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made.
padding (int or sequence, optional): Optional padding on each border
of the image. Default is 0, i.e no padding. If a sequence of length
4 is provided, it is used to pad left, top, right, bottom borders
respectively.
pad_if_needed (boolean): It will pad the image if smaller than the
desired size to avoid raising an exception.
"""
def __init__(self, size, padding=0, padding_mode='constant', pad_if_needed=False, mode='x', tags=None):
super().__init__(mode, tags)
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
self.size = size
self.padding = padding
self.padding_mode = padding_mode
self.padding_mode_cv = data.mode_to_cv2(self.padding_mode)
self.pad_if_needed = pad_if_needed
@staticmethod
def get_params(img, output_size):
"""Get parameters for ``crop`` for a random crop.
Args:
img (PIL Image): Image to be cropped.
output_size (tuple): Expected output size of the crop.
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for random crop.
"""
w, h, *_ = img.shape
th, tw = output_size
if w == tw and h == th:
return 0, 0, h, w
i = random.randint(0, h - th)
j = random.randint(0, w - tw)
return i, j, th, tw
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be cropped.
Returns:
PIL Image: Cropped image.
"""
if self.padding > 0:
img = data.pad(img, self.padding, mode=self.padding_mode_cv)
        # pad the width if needed (img is an ndarray, so index shape rather than PIL's size)
        if self.pad_if_needed and img.shape[1] < self.size[1]:
            img = data.pad(img, (int((1 + self.size[1] - img.shape[1]) / 2), 0), mode=self.padding_mode_cv)
        # pad the height if needed
        if self.pad_if_needed and img.shape[0] < self.size[0]:
            img = data.pad(img, (0, int((1 + self.size[0] - img.shape[0]) / 2)), mode=self.padding_mode_cv)
i, j, h, w = self.get_params(img, self.size)
return data.crop(img, j, i, w, h)
def __repr__(self):
return self.__class__.__name__ + '(size={0}, padding={1})'.format(self.size, self.padding)
def create(width, height, padding=0, padding_mode='constant', mode='x', tags=None):
""" Vel factory function """
return RandomCrop(size=(width, height), padding=padding, padding_mode=padding_mode, mode=mode, tags=tags)
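# Minimal usage sketch (values and pipeline wiring are assumed, not part of the original module):
#   crop = create(width=224, height=224, padding=4)
#   cropped = crop(img)   # img: an HxWxC ndarray; result is a random 224x224 crop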
|
py | b40ec9d62c2508c7b5045be3730a37599c4fc33b | # Copyright 2020 H2O.ai, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import socket
from contextlib import closing
import subprocess
import platform
import uvicorn
import click
import os
from urllib.parse import urlparse
_localhost = '127.0.0.1'
def _scan_free_port(port: int = 8000):
while True:
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
if sock.connect_ex((_localhost, port)):
return port
port += 1
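# Illustrative behaviour (assumed): _scan_free_port(8000) returns the first port >= 8000
# that is not currently accepting connections on 127.0.0.1.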
@click.group()
def main():
pass
@main.command()
@click.argument('app')
@click.option("--no-reload", is_flag=True, default=False, help="Disable auto-reload.")
def run(app: str, no_reload: bool):
"""Run an app.
\b
Run app.py with auto reload:
$ wave run app
$ wave run app.py
\b
Run path/to/app.py with auto reload:
$ wave run path.to.app
$ wave run path/to/app.py
\b
Run path/to/app.py without auto reload:
$ wave run --no-reload path.to.app
$ wave run --no-reload path/to/app.py
"""
app_address = urlparse(os.environ.get('H2O_WAVE_APP_ADDRESS', f'http://{_localhost}:{_scan_free_port()}'))
host = app_address.hostname
port = app_address.port
addr = f'http://{host}:{port}'
os.environ['H2O_WAVE_INTERNAL_ADDRESS'] = addr # TODO deprecated
os.environ['H2O_WAVE_EXTERNAL_ADDRESS'] = addr # TODO deprecated
os.environ['H2O_WAVE_APP_ADDRESS'] = addr
# Make "python -m h2o_wave run" behave identical to "wave run":
# Insert cwd into path, otherwise uvicorn fails to locate the app module.
# uvicorn.main() does this before calling uvicorn.run().
sys.path.insert(0, '.')
# DevX: treat foo/bar/baz.py as foo.bar.baz
app_path, ext = os.path.splitext(app)
if ext.lower() == '.py':
app = app_path.replace(os.path.sep, '.')
# Try to start Wave daemon if not running or turned off.
try:
server_port = int(os.environ.get('H2O_WAVE_LISTEN', 10101))
server_not_running = _scan_free_port(server_port) == server_port
if os.environ.get('H2O_WAVE_NO_AUTOSTART', None) is None and server_not_running:
subprocess.Popen(['waved.exe' if 'Windows' in platform.system() else './waved'],
cwd=sys.exec_prefix, env=os.environ.copy(), shell=True)
finally:
uvicorn.run(f'{app}:main', host=_localhost, port=port, reload=not no_reload)
@main.command()
def ide():
uvicorn.run('h2o_wave.ide:ide', host=_localhost, port=10100)
|
py | b40eca966828847a4b5dbc95b77ca6271ddd25fb | """
WSGI config for trialscompendium project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
application = get_wsgi_application()
|
py | b40ecb019cd0372b8e324d765c6ff95ca9d7086e | import numpy as np
from ..morphology import dilation, square
from ..util import img_as_float
def find_boundaries(label_img):
boundaries = np.zeros(label_img.shape, dtype=np.bool)
boundaries[1:, :] += label_img[1:, :] != label_img[:-1, :]
boundaries[:, 1:] += label_img[:, 1:] != label_img[:, :-1]
return boundaries
def visualize_boundaries(img, label_img):
img = img_as_float(img, force_copy=True)
boundaries = find_boundaries(label_img)
outer_boundaries = dilation(boundaries.astype(np.uint8), square(2))
img[outer_boundaries != 0, :] = np.array([0, 0, 0]) # black
img[boundaries, :] = np.array([1, 1, 0]) # yellow
return img
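# Sketch of intended use (assumed inputs):
#   overlay = visualize_boundaries(rgb_image, label_img)
# label_img is an integer label image; region boundaries are drawn in yellow with a black outline.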
|
py | b40ecb3d4390a945cf6454092a9c86bf9a0ae2f8 | def sayhi(name):
print("hi",name)
def sayhello(n1,n2):
print("hello",n1,n2) |
py | b40ecb53f460dc6361d0d15b962011cdc1373069 | import pytest
from vyper.cli.vyper_compile import compile_files
def test_combined_json_keys(tmp_path):
bar_path = tmp_path.joinpath("bar.vy")
with bar_path.open("w") as fp:
fp.write("")
combined_keys = {
"bytecode",
"bytecode_runtime",
"abi",
"source_map",
"method_identifiers",
"userdoc",
"devdoc",
}
compile_data = compile_files([bar_path], ["combined_json"], root_folder=tmp_path)
assert set(compile_data.keys()) == {"bar.vy", "version"}
assert set(compile_data["bar.vy"].keys()) == combined_keys
def test_invalid_root_path():
with pytest.raises(FileNotFoundError):
compile_files([], [], root_folder="path/that/does/not/exist")
def test_evm_versions(tmp_path):
# should compile differently because of SELFBALANCE
code = """
@external
def foo() -> uint256:
return self.balance
"""
bar_path = tmp_path.joinpath("bar.vy")
with bar_path.open("w") as fp:
fp.write(code)
byzantium_bytecode = compile_files(
[bar_path], output_formats=["bytecode"], evm_version="byzantium"
)[str(bar_path)]["bytecode"]
istanbul_bytecode = compile_files(
[bar_path], output_formats=["bytecode"], evm_version="istanbul"
)[str(bar_path)]["bytecode"]
assert byzantium_bytecode != istanbul_bytecode
# SELFBALANCE opcode is 0x47
assert "47" not in byzantium_bytecode
assert "47" in istanbul_bytecode
|
py | b40eccb81981c1d6d0900ba64d40933bf96218a8 | # -*- coding: utf-8 -*-
import datetime
import urllib
from math import ceil
import scrapy
from scrapy import Selector
from spider.consts import DOWNLOADER_MIDDLEWARES_SQUID_PROXY_ON, MYSQL_ITEM_PIPELINES
from spider.items import SpiderLoaderItem, GameGeimuItem, GameDownloadItem
class Ali213Spider(scrapy.Spider):
name = "ali213"
custom_settings = {
'ITEM_PIPELINES': MYSQL_ITEM_PIPELINES,
'SHOW_SQL': False,
'DOWNLOADER_MIDDLEWARES': DOWNLOADER_MIDDLEWARES_SQUID_PROXY_ON,
# 'DOWNLOAD_DELAY': 1
}
allowed_domains = ['www.ali213.net']
batch_date = datetime.datetime.now().date()
default_data = {
}
default_data = urllib.parse.urlencode(default_data)
default_headers = {
'Cache-Control': 'max-age=0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'ja-JP,ja;q=0.9,en-US;q=0.8,en;q=0.7,zh-CN;q=0.6,zh;q=0.5,zh-TW;q=0.4',
'Cookie': 'UM_distinctid=163266add7e506-0cc0c8ebeaf0d5-f373567-1fa400-163266add7f8; bdshare_firstime=1525357966484; Hm_lvt_ef39e4f1e1037647abfbd15efdf8044f=1525357928,1525529010; Hm_lvt_90d3f2ca77e99acb3fad6f24d83a031d=1525590241; Hm_lpvt_90d3f2ca77e99acb3fad6f24d83a031d=1525590249; CNZZDATA680195=cnzz_eid%3D1598056172-1525353225-%26ntime%3D1525591806; Hm_lvt_2207c39aecfe7b9b0f144ab7f8316fad=1525357928,1525529010,1525592705; checkIMGCode=9185; CNZZDATA2573307=cnzz_eid%3D1431571239-1525353695-%26ntime%3D1525592465; Hm_lpvt_2207c39aecfe7b9b0f144ab7f8316fad=1525596098; Hm_lpvt_ef39e4f1e1037647abfbd15efdf8044f=1525596098',
'Referer': 'http://down.ali213.net/new/index_2.html',
'Upgrade-Insecure-Requests': 1,
'Connection': 'keep-alive',
'Host': 'down.ali213.net'
}
def start_requests(self):
yield scrapy.Request(
url='http://down.ali213.net/new/',
headers=self.default_headers, body=self.default_data, callback=self.get_final_url, dont_filter=True)
def get_final_url(self, response):
total_page = response.xpath('//div[@class="list_body_page"]//a[1]//span[@class="sec"]/text()').extract()[0].split("/")[1]
pages = int(total_page)
# pages=10
for page in range(1,pages+1):
url=''
if page==1:
url='http://down.ali213.net/new/index.html'
else:
url='http://down.ali213.net/new/index_'+str(page)+'.html'
yield scrapy.Request(
url=url,
headers=self.default_headers, body=self.default_data, callback=self.parse_basic_info, dont_filter=True)
def parse_basic_info(self, response):
contents=response.xpath('//div[@class="list_body"]//div[@class="list_body_contain"]//div[@class="list_body_con"]').extract()
for content in contents:
img=Selector(text=content).xpath('//div[@class="list_body_con"]//a[@class="list_body_con_img"]//img/@data-original').extract()[0]
name = Selector(text=content).xpath('//div[@class="list_body_con"]//a[@class="list_body_con_img"]//img/@alt').extract()[0]
zh_name = Selector(text=content).xpath('//div[@class="list_body_con"]//div[@class="list_body_con_con"]/a/text()').extract()[0]
url_tmp=Selector(text=content).xpath('//div[@class="list_body_con"]//a[@class="list_body_con_img"]/@href').extract()[0]
if not url_tmp.startswith('/'):
continue
url='http://down.ali213.net'+url_tmp
yield scrapy.Request(
url=url,
headers=self.default_headers, body=self.default_data, callback=self.parse_detail_info, meta={'img':img,'name':name,'zh_name':zh_name}, dont_filter=True)
def parse_detail_info(self, response):
url=response.url
img=response.meta['img']
name = response.meta['name']
zh_name=response.meta['zh_name']
en_name=''
if len(response.xpath('//div[@class="detail_body_down"]//div[@itemprop="alias"]/text()').extract())>0:
en_name = response.xpath('//div[@class="detail_body_down"]//div[@itemprop="alias"]/text()').extract()[0]
type=''
if len(response.xpath('//div[@class="detail_body_down"]//div[@class="newdown_l_con_con_info"][1]/a/text()').extract())>0:
type = response.xpath('//div[@class="detail_body_down"]//div[@class="newdown_l_con_con_info"][1]/a/text()').extract()[0]
creator_name=''
if len(response.xpath('//div[@class="detail_body_down"]//div[@class="newdown_l_con_con_info"][2]/text()').extract())>0:
creator_name = response.xpath('//div[@class="detail_body_down"]//div[@class="newdown_l_con_con_info"][2]/text()').extract()[0].split(':')[1]
publish_date = ''
if len(response.xpath('//div[@class="detail_body_down"]//div[@class="newdown_l_con_con_info"][3]//span[@itemprop="dateModified"]/text()').extract())>0:
publish_date = response.xpath('//div[@class="detail_body_down"]//div[@class="newdown_l_con_con_info"][3]//span[@itemprop="dateModified"]/text()').extract()[0]
size=''
if len(response.xpath('//div[@class="detail_body_down"]//div[@class="newdown_l_con_con_info"][4]//span[@itemprop="fileSize"]/text()').extract())>0:
size = response.xpath('//div[@class="detail_body_down"]//div[@class="newdown_l_con_con_info"][4]//span[@itemprop="fileSize"]/text()').extract()[0]
introduction=''
if len(response.xpath('//div[@class="detail_body_con_bb"]//div[@class="detail_body_con_bb_con"]').extract())>0:
introduction=response.xpath('//div[@class="detail_body_con_bb"]//div[@class="detail_body_con_bb_con"]').extract()[0]
item_geimu = SpiderLoaderItem(item=GameGeimuItem(image_urls=[img]), response=response)
item_geimu.add_value('batch_date', self.batch_date)
item_geimu.add_value('host', self.allowed_domains[0])
item_geimu.add_value('url', url)
item_geimu.add_value('img', img)
item_geimu.add_value('name', name)
item_geimu.add_value('zh_name', zh_name)
item_geimu.add_value('en_name', en_name)
item_geimu.add_value('creator_name', creator_name)
item_geimu.add_value('type', type)
item_geimu.add_value('publish_date', publish_date)
item_geimu.add_value('size', size)
item_geimu.add_value('introduction', introduction)
item_geimu.add_value('category', '游戏')
item_geimu.add_value('table_name', 'game_geimu')
yield item_geimu.load_item()
if len(response.xpath('//div[@class="detail_down_adress_con"]/div[@class="detail_down_adress_con_bottom"]//div[@class="detail_down_adress_con_bottom_left"]//script[2]/text()').extract())>0:
fake_down_url='http://www.soft50.com'+response.xpath('//div[@class="detail_down_adress_con"]/div[@class="detail_down_adress_con_bottom"]//div[@class="detail_down_adress_con_bottom_left"]//script[2]/text()').extract()[0][14:-2]
yield scrapy.Request(
url=fake_down_url,
headers=self.default_headers, body=self.default_data, callback=self.parse_fake_download_info,
meta={'game_url': url}, dont_filter=True)
def parse_fake_download_info(self, response):
if len(response.xpath('//div[@class="result1"]//a/@href'))>0:
fake_down_url=response.xpath('//div[@class="result1"]//a/@href').extract()[0]
yield scrapy.Request(
url=fake_down_url,
headers=self.default_headers, body=self.default_data, callback=self.parse_download_info,
meta={'game_url': response.meta['game_url']}, dont_filter=True)
def parse_download_info(self, response):
downloads = []
download01=''
if len(response.xpath('//a[@id="jsbtn"]/@data-id').extract())>0:
download01 = 'http://d.soft5566.com/setup_a'+str(response.xpath('//a[@id="jsbtn"]/@data-id').extract()[0])+'.exe'
downloads.append({'download_name':'极速下载','download_url':download01})
download02_url = response.xpath('//div[@class="n1_content"]//font/a/@href').extract()
download02_name = response.xpath('//div[@class="n1_content"]//font/a/text()').extract()
for idx in range(0,len(download02_url)):
downloads.append({'download_name': download02_name[idx], 'download_url': download02_url[idx]})
download03=''
if len(response.xpath('//div[@class="ed2k_content"]//ul[@class="content_part"]//span/a/@href').extract())>0:
download03 = response.xpath('//div[@class="ed2k_content"]//ul[@class="content_part"]//span/a/@href').extract()[0]
downloads.append({'download_name':'eD2K下载地址','download_url':download03})
for download in downloads:
download_name = download['download_name']
download_url = download['download_url']
item_download = SpiderLoaderItem(item=GameDownloadItem(), response=response)
item_download.add_value('batch_date', self.batch_date)
item_download.add_value('game_url', response.meta['game_url'])
item_download.add_value('download_name', download_name)
item_download.add_value('download_url', download_url)
item_download.add_value('table_name', 'game_download')
yield item_download.load_item() |
py | b40ecd310e7bcd913a7955f956d99d81151fa787 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['PrivateEndpointConnection']
class PrivateEndpointConnection(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
group_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
parent_name: Optional[pulumi.Input[str]] = None,
parent_type: Optional[pulumi.Input[str]] = None,
private_endpoint: Optional[pulumi.Input[pulumi.InputType['PrivateEndpointArgs']]] = None,
private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
private_link_service_connection_state: Optional[pulumi.Input[pulumi.InputType['ConnectionStateArgs']]] = None,
provisioning_state: Optional[pulumi.Input[Union[str, 'ResourceProvisioningState']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Create a PrivateEndpointConnection resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] group_ids: GroupIds from the private link service resource.
:param pulumi.Input[str] parent_name: The name of the parent resource (namely, either, the topic name or domain name).
:param pulumi.Input[str] parent_type: The type of the parent resource. This can be either \'topics\' or \'domains\'.
:param pulumi.Input[pulumi.InputType['PrivateEndpointArgs']] private_endpoint: The Private Endpoint resource for this Connection.
:param pulumi.Input[str] private_endpoint_connection_name: The name of the private endpoint connection connection.
:param pulumi.Input[pulumi.InputType['ConnectionStateArgs']] private_link_service_connection_state: Details about the state of the connection.
:param pulumi.Input[Union[str, 'ResourceProvisioningState']] provisioning_state: Provisioning state of the Private Endpoint Connection.
:param pulumi.Input[str] resource_group_name: The name of the resource group within the user's subscription.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['group_ids'] = group_ids
if parent_name is None and not opts.urn:
raise TypeError("Missing required property 'parent_name'")
__props__['parent_name'] = parent_name
if parent_type is None and not opts.urn:
raise TypeError("Missing required property 'parent_type'")
__props__['parent_type'] = parent_type
__props__['private_endpoint'] = private_endpoint
__props__['private_endpoint_connection_name'] = private_endpoint_connection_name
__props__['private_link_service_connection_state'] = private_link_service_connection_state
__props__['provisioning_state'] = provisioning_state
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['name'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:eventgrid:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:eventgrid/latest:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:eventgrid/v20200401preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:eventgrid/v20201015preview:PrivateEndpointConnection")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(PrivateEndpointConnection, __self__).__init__(
'azure-nextgen:eventgrid/v20200601:PrivateEndpointConnection',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'PrivateEndpointConnection':
"""
Get an existing PrivateEndpointConnection resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return PrivateEndpointConnection(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="groupIds")
def group_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
GroupIds from the private link service resource.
"""
return pulumi.get(self, "group_ids")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="privateEndpoint")
def private_endpoint(self) -> pulumi.Output[Optional['outputs.PrivateEndpointResponse']]:
"""
The Private Endpoint resource for this Connection.
"""
return pulumi.get(self, "private_endpoint")
@property
@pulumi.getter(name="privateLinkServiceConnectionState")
def private_link_service_connection_state(self) -> pulumi.Output[Optional['outputs.ConnectionStateResponse']]:
"""
Details about the state of the connection.
"""
return pulumi.get(self, "private_link_service_connection_state")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[Optional[str]]:
"""
Provisioning state of the Private Endpoint Connection.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Type of the resource.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
py | b40ecd73acaf25218bc147661a3a09e6da75a278 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""Test export of PyTorch operators using ONNX Runtime contrib ops."""
import torch
import onnxruntime
from onnxruntime.tools import pytorch_export_contrib_ops
import numpy as np
import unittest
import io
import copy
def ort_test_with_input(ort_sess, input, output, rtol, atol):
input, _ = torch.jit._flatten(input)
output, _ = torch.jit._flatten(output)
def to_numpy(tensor):
if tensor.requires_grad:
return tensor.detach().cpu().numpy()
else:
return tensor.cpu().numpy()
inputs = list(map(to_numpy, input))
outputs = list(map(to_numpy, output))
ort_inputs = dict((ort_sess.get_inputs()[i].name, input) for i, input in enumerate(inputs))
ort_outs = ort_sess.run(None, ort_inputs)
    # the number of outputs from PyTorch and ONNX Runtime should match
assert len(outputs) == len(ort_outs), "number of outputs differ"
# compare onnxruntime and PyTorch results
[np.testing.assert_allclose(out, ort_out, rtol=rtol, atol=atol) for out, ort_out in zip(outputs, ort_outs)]
# These set of tests verify ONNX model export and compares outputs between
# PyTorch and ORT.
class ONNXExporterTest(unittest.TestCase):
from torch.onnx.symbolic_helper import _export_onnx_opset_version
opset_version = _export_onnx_opset_version
keep_initializers_as_inputs = True # For IR version 3 type export.
def setUp(self):
torch.manual_seed(0)
pytorch_export_contrib_ops.register()
def run_test(self, model, input=None,
custom_opsets=None,
batch_size=2,
rtol=0.001, atol=1e-7,
do_constant_folding=True,
dynamic_axes=None, test_with_inputs=None,
input_names=None, output_names=None):
model.eval()
if input is None:
input = torch.randn(batch_size, 3, 224, 224, requires_grad=True)
with torch.no_grad():
if isinstance(input, torch.Tensor):
input = (input,)
# In-place operators will update input tensor data as well.
# Thus inputs are replicated before every forward call.
input_copy = copy.deepcopy(input)
output = model(*input_copy)
if isinstance(output, torch.Tensor):
output = (output,)
# export the model to ONNX
f = io.BytesIO()
torch.onnx.export(model, input_copy, f,
opset_version=self.opset_version,
do_constant_folding=do_constant_folding,
keep_initializers_as_inputs=self.keep_initializers_as_inputs,
dynamic_axes=dynamic_axes,
input_names=input_names, output_names=output_names,
custom_opsets=custom_opsets)
# compute onnxruntime output prediction
ort_sess = onnxruntime.InferenceSession(f.getvalue(),
providers=onnxruntime.get_available_providers())
input_copy = copy.deepcopy(input)
ort_test_with_input(ort_sess, input_copy, output, rtol, atol)
# if additional test inputs are provided run the onnx
# model with these inputs and check the outputs
if test_with_inputs is not None:
for test_input in test_with_inputs:
if isinstance(test_input, torch.Tensor):
test_input = (test_input,)
test_input_copy = copy.deepcopy(test_input)
output = model(*test_input_copy)
if isinstance(output, torch.Tensor):
output = (output,)
ort_test_with_input(ort_sess, test_input, output, rtol, atol)
def test_inverse(self):
class CustomInverse(torch.nn.Module):
def forward(self, x):
return torch.inverse(x) + x
x = torch.randn(2, 3, 3)
self.run_test(CustomInverse(), x, custom_opsets={"com.microsoft": 1})
def test_gelu(self):
model = torch.nn.GELU()
x = torch.randn(3, 3)
self.run_test(model, x, custom_opsets={"com.microsoft": 1})
def test_triu(self):
for i in range(-5, 5):
class Module(torch.nn.Module):
def forward(self, input):
return input.triu(diagonal=i)
model = Module()
x = torch.randn(5, 4, 7, dtype=torch.float32)
self.run_test(model, x, custom_opsets={"com.microsoft": 1})
x = torch.randn(5, 4, 0, dtype=torch.float32)
self.run_test(model, x, custom_opsets={"com.microsoft": 1})
x = torch.randn(5, 0, 0, dtype=torch.float32)
self.run_test(model, x, custom_opsets={"com.microsoft": 1})
for i in range(-5, 5):
class Module2D(torch.nn.Module):
def forward(self, input):
return input.triu(diagonal=i)
model = Module2D()
x = torch.randn(4, 7, dtype=torch.float32)
self.run_test(model, x, custom_opsets={"com.microsoft": 1})
x = torch.randn(0, 7, dtype=torch.float32)
self.run_test(model, x, custom_opsets={"com.microsoft": 1})
x = torch.randn(0, 0, dtype=torch.float32)
self.run_test(model, x, custom_opsets={"com.microsoft": 1})
def test_tril(self):
for i in range(-5, 5):
class Module(torch.nn.Module):
def forward(self, input):
return input.tril(diagonal=i)
model = Module()
x = torch.randn(5, 4, 7, dtype=torch.float32)
self.run_test(model, x, custom_opsets={"com.microsoft": 1})
x = torch.randn(5, 4, 0, dtype=torch.float32)
self.run_test(model, x, custom_opsets={"com.microsoft": 1})
x = torch.randn(5, 0, 0, dtype=torch.float32)
self.run_test(model, x, custom_opsets={"com.microsoft": 1})
for i in range(-5, 5):
class Module2D(torch.nn.Module):
def forward(self, input):
return input.tril(diagonal=i)
model = Module2D()
x = torch.randn(4, 7, dtype=torch.float32)
self.run_test(model, x, custom_opsets={"com.microsoft": 1})
x = torch.randn(0, 7, dtype=torch.float32)
self.run_test(model, x, custom_opsets={"com.microsoft": 1})
x = torch.randn(0, 0, dtype=torch.float32)
self.run_test(model, x, custom_opsets={"com.microsoft": 1})
# opset 9 tests, with keep_initializers_as_inputs=False for
# IR version 4 style export.
ONNXExporterTest_opset9_IRv4 = type(str("TestONNXRuntime_opset9_IRv4"),
(unittest.TestCase,),
dict(ONNXExporterTest.__dict__,
keep_initializers_as_inputs=False))
if __name__ == "__main__":
unittest.main()
|
py | b40ecda195a6509ce4dd481ba7c4f4a19aabbc0b | import os
from PIL import Image
d = os.path.dirname(__file__)
u = 0.45  # alpha offset: pixels whose normalized alpha is at or below u are flattened to pure white
for f, im in [ (k, Image.open(os.path.join(d, 'icon.transparent', k))) for k in os.listdir(os.path.join(d, 'icon.transparent')) if k.endswith('.png') ]:
    for x in range(im.width):
        for y in range(im.height):
            r, g, b, a = im.getpixel((x, y))
            if a < 255:
                # blend partially transparent pixels against white: shift the normalized
                # alpha down by u, clamp to [0, 1], then mix the RGB values with white
                p = min(1, max(0, (a / 255) - u))
q = 1 - p
im.putpixel((x, y), (int(p*r + q*255), int(p*g + q*255), int(p*b + q*255)))
im.save(os.path.join(d, 'icon', f))
im.close() |
py | b40ecdcaed758228b5fbea794ff4ed7d433c611b | /home/runner/.cache/pip/pool/0c/e2/15/65a5cdedb30e01e304ab320e6471a5e0e14ef8a1ab12a05407ce3c0cfa |
py | b40ece5360b5db6fe3e140cb4fb84e6c9353fc39 | #!/usr/bin/env python
import pandas as pd
import sys
import os
file = open(sys.argv[1], "rb")
df = pd.read_csv(file, sep=",", skiprows=4, header=True)
for i, col in enumerate(df.columns):
df.iloc[:,i] = df.iloc[:,i].str.replace('"','')
newfile = os.path.splitext(sys.argv[1])[0] + "_cleaned.csv"
df.to_csv(newfile, sep=',', header=True) |
py | b40eceb0afcd496a7c531654a9bc75dc37453f3e | import json
import os
import warnings
import cv2
import numpy as np
import scipy
from common import utils
from tensorflow import cast
from tensorflow.keras.utils import Sequence
class SegmentationDataset:
def __init__(self, images_dir=None, class_key='object', augmentation=None,
preprocess_transform=False, json_annotation_key='_via_img_metadata', **kwargs):
"""
Dataset class for VGG Image Annotator. Read images, apply augmentation and preprocessing transformations.
Args:
images_dir: (str): path to images folder
            class_key: (str): annotation attribute key that stores the class name for each polygon
            augmentation: (albumentations.Compose): data transformation pipeline
preprocess_transform: (albumentations.Compose): transformation of an image
json_annotation_key: (str): default key to extract annotations from .json.
By default, it is '_via_img_metadata' for VGG Image Annotator
**kwargs: additional processing configuration parameters
"""
super(SegmentationDataset, self).__init__()
self.kwargs = kwargs
self.class_key = class_key
self.json_annotation_key = json_annotation_key
if images_dir:
self.images_names = [x for x in os.listdir(images_dir) if '.json' not in x]
self.images_fps = [os.path.join(images_dir, image_id) for image_id in self.images_names]
# Find annotation file and make sure that folder contains only one annotation file
annot_file = [x for x in os.listdir(images_dir) if '.json' in x]
assert len(annot_file) == 1
annot_file = annot_file[0]
print(f'Found annotation file: {annot_file} in dataset path: {images_dir}')
if self.json_annotation_key:
self.annotation_dict = json.load(open(os.path.join(images_dir, annot_file)))[json_annotation_key]
else:
self.annotation_dict = json.load(open(os.path.join(images_dir, annot_file)))
# Make sure that keys in json are equal to images filenames
# Some versions of VIA may violate this rule
remapped_annotation_dict = {}
for k, v in self.annotation_dict.items():
remapped_annotation_dict.update({v['filename']: v})
self.annotation_dict.clear()
self.annotation_dict.update(remapped_annotation_dict)
else:
print('None passed to images_dir argument.\n',
                  'This means that the dataset class is a child of SegmentationDataset and its '
'behaviour differs from datasets created with VGG Image Annotator.\n',
'If it is not true, please, check your class arguments carefully.\n')
# Get class indexes from class_dict
self.classes_dict = self.kwargs['class_dict']
self.class_values = list(self.classes_dict.values())
self.augmentation = augmentation
self.preprocess_transform = preprocess_transform
self.backbone_shapes = utils.compute_backbone_shapes(self.kwargs)
self.anchors = utils.generate_pyramid_anchors(scales=self.kwargs['rpn_anchor_scales'],
ratios=self.kwargs['rpn_anchor_ratios'],
feature_shapes=self.backbone_shapes,
feature_strides=self.kwargs['backbone_strides'],
anchor_stride=self.kwargs['rpn_anchor_stride']
)
def get_points_from_annotation(self, annotation_key):
"""
Get polygon points for a segment. [[x1,y1], [x2, y2], ....[]]
Example:
{'filename': '250024424orig.jpeg',
'size': 164044,
'regions': [{'shape_attributes': {'name': 'polygon',
'all_points_x': [213, 199, 126, 140],
'all_points_y': [339, 404, 350, 298]},
'region_attributes': {'object': 'licence'}},
{'shape_attributes': {'name': 'polygon',
'all_points_x': [485, 468, 533, 593, 627, 644, 649, 623, 564, 520],
'all_points_y': [554, 677, 704, 683, 648, 599, 540, 504, 498, 518]},
'region_attributes': {'object': 'wheel'}}],
'file_attributes': {}}
The key for class names is 'object'
Args:
annotation_key: key to get info about polygons to make masks
Returns: polygon_data_list, class_id_list
"""
polygon_data_list = []
class_id_list = []
_region_list = self.annotation_dict[annotation_key]['regions']
# If there is more than one object described as polygons, find each class id for each polygon
        # If there is no information about classes in 'region_attributes', add class 1 as binary
for region in _region_list:
if 'all_points_x' not in region['shape_attributes'].keys():
print(f'\n[SegmentationDataset] Skipping incorrect observation:\n',
f"""annotation_key: {annotation_key}\n_region_list: {region['shape_attributes']}\n""")
continue
polygon_points = [[x, y] for x, y in zip(region['shape_attributes']['all_points_x'],
region['shape_attributes']['all_points_y']
)
]
polygon_data_list.append(np.array([polygon_points]))
            # If there are no key fields for classes, mark everything as class 1
if len(region['region_attributes'].keys()) == 0:
class_id_list.append(1)
else:
# In VGG Image Annotator there is an option to add attributes for polygons.
# We can write class_name to the specified attribute of a polygon
# For example, by default, attribute name which contains class name is 'object'
class_name = region['region_attributes'][self.class_key]
if len(class_name) == 0:
raise ValueError(f'Class name is empty. Full annotation: {_region_list}')
class_id_list.append(self.classes_dict[class_name])
return polygon_data_list, class_id_list
def create_mask(self, image, idx):
"""
Create mask image from VGG Image Annotator metadata
Args:
image: original image. numpy array,
idx: annotation key to get polygon info about mask
Returns: masks_array: A bool array of shape [height, width, instance count] with one mask per instance.
class_ids_array: class ids array for each mask
"""
annotation_key = self.images_names[idx] # Get image name as annotation key in annotation_dict
points_list, class_id_list = self.get_points_from_annotation(annotation_key)
mask_template = np.zeros(image.shape[:2]) # Create mask template with grayscale shape=(width, height)
instance_masks_list = []
# Generate one mask per instance
for points, class_id in zip(points_list, class_id_list):
instance_masks_list.append(cv2.fillPoly(mask_template, points, (class_id)))
        masks_array = np.stack(instance_masks_list, axis=2).astype(bool)  # (w, h, array index)
class_ids_array = np.array(class_id_list, dtype=np.int32)
return masks_array, class_ids_array
def load_image(self, image_id):
return cv2.imread(self.images_fps[image_id])
def resize_mask(self, mask, scale, padding, crop=None):
"""Resizes a mask using the given scale and padding.
Typically, you get the scale and padding from resize_image() to
ensure both, the image and the mask, are resized consistently.
scale: mask scaling factor
padding: Padding to add to the mask in the form
[(top, bottom), (left, right), (0, 0)]
"""
# Suppress warning from scipy 0.13.0, the output shape of zoom() is
# calculated with round() instead of int()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
mask = scipy.ndimage.zoom(mask, zoom=[scale, scale, 1], order=0)
if crop is not None:
y, x, h, w = crop
mask = mask[y:y + h, x:x + w]
else:
mask = np.pad(mask, padding, mode='constant', constant_values=0)
return mask
def __getitem__(self, id):
"""
Generate item
Args:
id: index of the image to read
        Returns: processed image, masks, class_ids, bboxes, image_meta, plus the
                 original image, masks, class_ids and bboxes
"""
image = self.load_image(id) # Read image
original_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # Image to RGB color space
original_image_shape = original_image.shape
if self.preprocess_transform:
image = self.preprocess_transform(original_image)
original_masks_array, class_ids_array = self.create_mask(image, id) # Create image masks from annotation
image, window, scale, padding, crop = utils.resize_image(
image,
min_dim=self.kwargs['image_min_dim'],
min_scale=self.kwargs['image_min_scale'],
max_dim=self.kwargs['image_max_dim'],
mode=self.kwargs['image_resize_mode'])
masks_array = self.resize_mask(original_masks_array, scale, padding, crop)
# Apply augmentation
_image_shape = image.shape
if self.augmentation:
masks_list = [masks_array[:, :, i].astype('float') for i in range(masks_array.shape[2])]
transformed = self.augmentation(image=image, masks=masks_list)
proc_image, proc_masks = transformed['image'], transformed['masks']
assert proc_image.shape == _image_shape
proc_masks = np.stack(proc_masks, axis=2)
else:
proc_image = image
proc_masks = masks_array
# Note that some boxes might be all zeros if the corresponding mask got cropped out.
# and here is to filter them out
_idx = np.sum(proc_masks, axis=(0, 1)) > 0
proc_masks = proc_masks[:, :, _idx]
proc_class_ids = class_ids_array[_idx]
_orig_idx = np.sum(original_masks_array, axis=(0, 1)) > 0
original_masks_array = original_masks_array[:, :, _orig_idx]
original_class_ids = class_ids_array[_orig_idx]
# Compute bboxes
bboxes = utils.extract_bboxes(proc_masks)
original_bboxes = utils.extract_bboxes(original_masks_array)
# Active classes
# Different datasets have different classes, so track the
# classes supported in the dataset of this image.
active_class_ids = np.zeros([len(self.classes_dict.keys())], dtype=np.int32)
# 1 for classes that are in the dataset of the image
# 0 for classes that are not in the dataset.
# The position of ones and zeros means the class index.
source_class_ids = list(
self.classes_dict.values()) # self.classes_dict['licence'] or list(self.classes_dict.values())
active_class_ids[source_class_ids] = 1
# Resize masks to smaller size to reduce memory usage
if self.kwargs['use_mini_masks']:
proc_masks = utils.minimize_mask(bboxes, proc_masks, self.kwargs['mini_mask_shape'])
# Image meta data
image_meta = utils.compose_image_meta(id, original_image_shape, window, scale, active_class_ids, self.kwargs)
return proc_image, proc_masks, proc_class_ids, bboxes, image_meta, \
original_image, original_masks_array, original_class_ids, original_bboxes
def __len__(self):
return len(self.images_names)
class DataLoader(Sequence):
"""Load data from dataset and form batches
Args:
dataset: Instance of Dataset class for image loading and preprocessing.
detection_targets: If True, generate detection targets (class IDs, bbox
deltas, and masks). Typically for debugging or visualizations because
in training detection targets are generated by DetectionTargetLayer.
shuffle: Boolean, if `True` shuffle image indexes each epoch.
seed: Seed for pseudo-random generator
name: DataLoader name
cast_output: Cast output to tensorflow.float32
return_original: Return original images in batch
"""
def __init__(self, dataset, detection_targets=False, shuffle=True, seed=42, name='dataloader',
cast_output=True, return_original=False, **kwargs):
self.seed = seed
np.random.seed(self.seed)
self.dataset = dataset
self.random_rois = kwargs['random_rois']
self.detection_targets = detection_targets
self.indexes = np.arange(len(self.dataset))
self.anchors = self.dataset.anchors
self.backbone_shapes = self.dataset.backbone_shapes
self.shuffle = shuffle
self.cast_output = cast_output
self.kwargs = kwargs
self.batch_size = self.kwargs['batch_size']
self.return_original = return_original
self.on_epoch_end()
self.name = name
self.steps_per_epoch = self.__len__() // self.batch_size
print(f'{self.name} DataLoader. Steps per epoch: {self.steps_per_epoch}')
def generate_batch(self, index):
"""
Args:
index: int to get an image
Returns: python list
'batch_images': tf.random.uniform(shape=(batch, 512, 512, 3), dtype=tf.float32),
'batch_images_meta': tf.random.uniform(shape=(batch, 14), dtype=tf.float32),
'batch_rpn_match': tf.random.uniform(shape=(batch, 65472, 1), dtype=tf.float32),
'batch_rpn_bbox': tf.random.uniform(shape=(batch, 256, 4), dtype=tf.float32),
'batch_gt_class_ids': tf.random.uniform(shape=(batch, 100), dtype=tf.float32),
'batch_gt_boxes': tf.random.uniform(shape=(batch, 100, 4), dtype=tf.float32),
'batch_gt_masks': tf.random.uniform(shape=(batch, 512, 512, 100), dtype=tf.float32),
"""
# Set batch size counter
gen_batch = 0
while gen_batch < self.batch_size:
image, gt_masks, gt_class_ids, gt_boxes, image_meta, \
original_image, original_masks_array, original_class_ids, original_bboxes = self.dataset[index]
# Skip images that have no instances. This can happen in cases
# where we train on a subset of classes and the image doesn't
# have any of the classes we care about.
if not np.any(gt_class_ids > 0):
index = min(index + 1, len(self.indexes) - 1)
continue
# RPN Targets
rpn_match, rpn_bbox = utils.build_rpn_targets(
anchors=self.anchors,
gt_class_ids=gt_class_ids,
gt_boxes=gt_boxes,
rpn_train_anchors_per_image=self.kwargs['rpn_train_anchors_per_image'],
rpn_bbox_std=self.kwargs['rpn_bbox_std_dev']
)
# Mask R-CNN Targets
if self.random_rois:
rpn_rois = utils.generate_random_rois(image.shape, self.random_rois, gt_boxes)
if self.detection_targets:
rois, mrcnn_class_ids, mrcnn_bbox, mrcnn_mask = utils.build_detection_targets(
rpn_rois=rpn_rois, gt_class_ids=gt_class_ids, gt_boxes=gt_boxes, gt_masks=gt_masks,
train_rois_per_image=self.kwargs['train_rois_per_image'],
roi_pos_ratio=self.kwargs['roi_pos_ratio'],
num_classes=len(self.dataset.classes_dict.keys()),
bbox_std=self.kwargs['bbox_std'],
use_mini_mask=self.kwargs['use_mini_mask'],
mask_shape=self.kwargs['mask_shape'],
image_shape=self.kwargs['image_shape']
)
# Init batch arrays
if gen_batch == 0:
batch_image_meta = np.zeros(
(self.batch_size,) + image_meta.shape, dtype=image_meta.dtype)
batch_rpn_match = np.zeros(
[self.batch_size, self.anchors.shape[0], 1], dtype=rpn_match.dtype)
batch_rpn_bbox = np.zeros(
[self.batch_size, self.kwargs['rpn_train_anchors_per_image'], 4], dtype=rpn_bbox.dtype)
batch_images = np.zeros(
(self.batch_size,) + image.shape, dtype=np.float32)
batch_gt_class_ids = np.zeros(
(self.batch_size, self.kwargs['max_gt_instances']), dtype=np.int32)
batch_gt_boxes = np.zeros(
(self.batch_size, self.kwargs['max_gt_instances'], 4), dtype=np.int32)
batch_gt_masks = np.zeros(
(self.batch_size, gt_masks.shape[0], gt_masks.shape[1],
self.kwargs['max_gt_instances']), dtype=gt_masks.dtype)
if self.random_rois:
batch_rpn_rois = np.zeros(
(self.batch_size, rpn_rois.shape[0], 4), dtype=rpn_rois.dtype)
if self.detection_targets:
batch_rois = np.zeros(
(self.batch_size,) + rois.shape, dtype=rois.dtype)
batch_mrcnn_class_ids = np.zeros(
(self.batch_size,) + mrcnn_class_ids.shape, dtype=mrcnn_class_ids.dtype)
batch_mrcnn_bbox = np.zeros(
(self.batch_size,) + mrcnn_bbox.shape, dtype=mrcnn_bbox.dtype)
batch_mrcnn_mask = np.zeros(
(self.batch_size,) + mrcnn_mask.shape, dtype=mrcnn_mask.dtype)
if self.return_original:
batch_original_imgs = []
batch_original_masks = []
batch_original_class_ids = []
batch_original_bboxes = []
# If more instances than fits in the array, sub-sample from them.
if gt_boxes.shape[0] > self.kwargs['max_gt_instances']:
ids = np.random.choice(
np.arange(gt_boxes.shape[0]), self.kwargs['max_gt_instances'], replace=False)
gt_class_ids = gt_class_ids[ids]
gt_boxes = gt_boxes[ids]
gt_masks = gt_masks[:, :, ids]
# Add to a batch
batch_image_meta[gen_batch] = image_meta
batch_rpn_match[gen_batch] = rpn_match[:, np.newaxis]
batch_rpn_bbox[gen_batch] = rpn_bbox
batch_images[gen_batch] = utils.normalize_image(image)
batch_gt_class_ids[gen_batch, :gt_class_ids.shape[0]] = gt_class_ids
batch_gt_boxes[gen_batch, :gt_boxes.shape[0]] = gt_boxes
batch_gt_masks[gen_batch, :, :, :gt_masks.shape[-1]] = gt_masks
if self.random_rois:
batch_rpn_rois[gen_batch] = rpn_rois
if self.detection_targets:
batch_rois[gen_batch] = rois
batch_mrcnn_class_ids[gen_batch] = mrcnn_class_ids
batch_mrcnn_bbox[gen_batch] = mrcnn_bbox
batch_mrcnn_mask[gen_batch] = mrcnn_mask
if self.return_original:
batch_original_imgs.append(original_image)
batch_original_masks.append(original_masks_array)
batch_original_class_ids.append(original_class_ids)
batch_original_bboxes.append(original_bboxes)
# Update info about batch size
gen_batch += 1
# Choose next index for the next image in batch or take the last image if one epoch is about to end.
index = min(index + 1, len(self.indexes) - 1)
inputs = [batch_images, batch_image_meta, batch_rpn_match, batch_rpn_bbox,
batch_gt_class_ids, batch_gt_boxes, batch_gt_masks]
outputs = []
if self.random_rois:
inputs.extend([batch_rpn_rois])
if self.detection_targets:
inputs.extend([batch_rois])
# Keras requires that output and targets have the same number of dimensions
batch_mrcnn_class_ids = np.expand_dims(batch_mrcnn_class_ids, -1)
outputs.extend([batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_mask])
if self.cast_output:
inputs = [cast(x, 'float32') for x in inputs]
outputs = [cast(x, 'float32') for x in outputs]
if self.return_original:
inputs.extend([batch_original_imgs, batch_original_masks, batch_original_class_ids, batch_original_bboxes])
return inputs, outputs
def __getitem__(self, i):
inputs, outputs = self.generate_batch(i)
return inputs, outputs
def __len__(self):
"""Denotes the number of batches per epoch"""
return int(np.floor(len(self.indexes) / self.batch_size))
def on_epoch_end(self):
"""
        Update the ordering of the data after each epoch
Returns: None
"""
self.indexes = np.arange(len(self.dataset))
if self.shuffle:
np.random.shuffle(self.indexes)
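

# Usage note (added, not part of the original module): both SegmentationDataset and
# DataLoader read their configuration from **kwargs. Keys referenced in the code above
# include 'class_dict', 'batch_size', 'random_rois', 'max_gt_instances',
# 'rpn_anchor_scales', 'rpn_anchor_ratios', 'rpn_anchor_stride', 'backbone_strides',
# 'rpn_train_anchors_per_image', 'rpn_bbox_std_dev', 'image_min_dim', 'image_max_dim',
# 'image_min_scale', 'image_resize_mode', 'use_mini_masks'/'use_mini_mask' and
# 'mini_mask_shape', among others. A rough wiring sketch (all values are placeholders):
#
#   config = dict(class_dict={'licence': 1}, batch_size=2, random_rois=0, ...)
#   train_dataset = SegmentationDataset(images_dir='path/to/train', **config)
#   train_loader = DataLoader(train_dataset, name='train', **config)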
|
py | b40ecf2b9ecbf85e04341d9bc16eb50500aa55bd | import numpy as np
# import sys
import math
import os, sys, platform
import astropy.units as u
from sunpy import map as smap
from astropy.coordinates import SkyCoord
from suncasa.io import ndfits
from . import gstools # initialization library - located either in the current directory or in the system path
from suncasa.utils import mstools
import lmfit
from astropy.time import Time
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.colorbar as colorbar
from suncasa.utils import mstools
from suncasa.utils import qlookplot as ql
from mpl_toolkits.axes_grid1 import make_axes_locatable
from tqdm import tqdm
from astropy.io import fits
import numpy.ma as ma
# name of the fast gyrosynchrotron codes shared library
if platform.system() == 'Linux' or platform.system() == 'Darwin':
libname = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'binaries/MWTransferArr.so')
if platform.system() == 'Windows':
libname = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'binaries/MWTransferArr64.dll')
def kev2k(eng):
return 11604525.00617 * eng
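

# Note (added): the constant above converts energy in keV to an equivalent temperature
# in kelvin via E = k_B * T; 1 keV corresponds to roughly 1.16e7 K.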
def ff_emission(em, T=1.e7, Z=1., mu=1.e10):
from astropy import constants as const
import astropy.units as u
    T = T * u.K
mu = mu * u.Hz
esu = const.e.esu
k_B = const.k_B.cgs
m_e = const.m_e.cgs
c = const.c.cgs
    bmax = (3 * k_B * T / m_e) ** 0.5 / 2.0 / np.pi / mu
bmin = Z * esu ** 2 / 3. / k_B / T
lnbb = np.log((bmax / bmin).value)
ka_mu = 1. / mu ** 2 / T ** 1.5 * (
Z ** 2 * esu ** 6 / c / np.sqrt(2. * np.pi * (m_e * k_B) ** 3)) * np.pi ** 2 / 4.0 * lnbb
# print(ka_mu, em)
opc = ka_mu * em
return T.value * (1 - np.exp(-opc.value))
def sfu2tb(freq, flux, area):
# frequency in Hz
# flux in sfu
# area: area of the radio source in arcsec^2
sfu2cgs = 1e-19
vc = 2.998e10
kb = 1.38065e-16
# sr = np.pi * (size[0] / 206265. / 2.) * (size[1] / 206265. / 2.)
sr = area / 206265. ** 2
Tb = flux * sfu2cgs * vc ** 2. / (2. * kb * freq ** 2. * sr)
return Tb
def tb2sfu(freq, tb, area):
# frequency in Hz
# brightness temperature in K
# area: area of the radio source in arcsec^2
sfu2cgs = 1e-19
vc = 2.998e10
kb = 1.38065e-16
# sr = np.pi * (size[0] / 206265. / 2.) * (size[1] / 206265. / 2.)
sr = area / 206265. ** 2
flux = tb / (sfu2cgs * vc ** 2. / (2. * kb * freq ** 2. * sr))
return flux
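

def _example_tb_flux_roundtrip():
    """Illustrative sanity check (added example, not part of the original module):
    tb2sfu and sfu2tb are inverses of each other for a fixed frequency and source
    area, so converting back and forth should recover the input. The numbers below
    are arbitrary placeholders."""
    freq = 5.0e9   # observing frequency in Hz (5 GHz)
    area = 100.0   # source area in arcsec^2
    tb = 1.0e7     # brightness temperature in K
    flux = tb2sfu(freq, tb, area)
    return np.isclose(sfu2tb(freq, flux, area), tb)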
def initspecplot(axes, cplts):
errobjs = []
for cpltidx, cplt in enumerate(cplts):
errobjs.append(axes.errorbar([], [], yerr=[], linestyle='', marker='o', mfc='none', mec=cplt, alpha=1.0))
axes.set_yscale("log")
axes.set_xscale("log")
axes.set_xlim([1, 20])
axes.set_ylim([0.1, 1000])
axes.set_xticks([1, 5, 10, 20])
axes.set_xticklabels([1, 5, 10, 20])
axes.set_xticks([1, 5, 10, 20])
axes.set_yticks([])
axes.set_yticks([0.01, 0.1, 1, 10, 100, 1000])
axes.set_ylabel('T$_b$ [MK]')
axes.set_xlabel('Frequency [GHz]')
x = np.linspace(1, 20, 10)
for ll in [-1, 0, 1, 2, 3, 4]:
y = 10. ** (-2 * np.log10(x) + ll)
axes.plot(x, y, 'k--', alpha=0.1)
# y2 = 10. ** (-4 * np.log10(x) + ll)
# y3 = 10. ** (-8 * np.log10(x) + ll)
# ax_eospec.plot(x, y, 'k--', x, y2, 'k:', x, y3, 'k-.', alpha=0.1)
return errobjs
def set_errorobj(xout, yout, errobj, yerr=None):
eospec, dummy, (errbar_eospec,) = errobj
eospec.set_data(xout, yout)
if yerr is not None:
yerr_top = yout + yerr
yerr_bot = yout - yerr
new_segments_y = [np.array([[x, yt], [x, yb]]) for x, yt, yb in zip(xout, yerr_top, yerr_bot)]
errbar_eospec.set_segments(new_segments_y)
def mwspec2min_1src(params, freqghz, tb=None, tb_err=None, arcsec2cm=0.725e8, showplt=False):
    # params are defined by lmfit.Parameters()
    '''
    params: parameters defined by lmfit.Parameters()
freqghz: frequencies in GHz
ssz: pixel size in arcsec
tb: reference brightness temperature in K
tb_err: uncertainties of reference brightness temperature in K
'''
from scipy import interpolate
GET_MW = gstools.initGET_MW(libname) # load the library
ssz = float(params['ssz'].value) # # source area in arcsec^2
depth = float(params['depth'].value) # total source depth in arcsec
Bmag = float(params['Bmag'].value) # magnetic field strength in G
Tth = float(params['Tth'].value) # thermal temperature in MK
nth = float(params['nth'].value) # thermal density in 1e10 cm^{-3}
nrlh = 10. ** float(params['lognrlh'].value) # total nonthermal density above 0.1 MeV
delta = float(params['delta'].value) # powerlaw index
theta = float(params['theta'].value) # viewing angle in degrees
Emin = float(params['Emin'].value) # low energy cutoff of nonthermal electrons in MeV
Emax = float(params['Emax'].value) # high energy cutoff of nonthermal electrons in MeV
E_hi = 0.1
    nrl = nrlh * (Emin ** (1. - delta) - Emax ** (1. - delta)) / (E_hi ** (1. - delta) - Emax ** (1. - delta))
Nf = 100 # number of frequencies
NSteps = 1 # number of nodes along the line-of-sight
N_E = 15 # number of energy nodes
N_mu = 15 # number of pitch-angle nodes
Lparms = np.zeros(11, dtype='int32') # array of dimensions etc.
Lparms[0] = NSteps
Lparms[1] = Nf
Lparms[2] = N_E
Lparms[3] = N_mu
Rparms = np.zeros(5, dtype='double') # array of global floating-point parameters
Rparms[0] = ssz * arcsec2cm ** 2 # Area, cm^2
# Rparms[0] = 1e20 # area, cm^2
Rparms[1] = 1e9 # starting frequency to calculate spectrum, Hz
Rparms[2] = 0.02 # logarithmic step in frequency
Rparms[3] = 12 # f^C
Rparms[4] = 12 # f^WH
ParmLocal = np.zeros(24, dtype='double') # array of voxel parameters - for a single voxel
ParmLocal[0] = depth * arcsec2cm / NSteps # voxel depth, cm
ParmLocal[1] = Tth * 1e6 # T_0, K
ParmLocal[2] = nth * 1e10 # n_0 - thermal electron density, cm^{-3}
ParmLocal[3] = Bmag # B - magnetic field, G
Parms = np.zeros((24, NSteps), dtype='double', order='F') # 2D array of input parameters - for multiple voxels
for i in range(NSteps):
Parms[:, i] = ParmLocal # most of the parameters are the same in all voxels
# if NSteps > 1:
# Parms[4, i] = 50.0 + 30.0 * i / (NSteps - 1) # the viewing angle varies from 50 to 80 degrees along the LOS
# else:
# Parms[4, i] = 50.0 # the viewing angle varies from 50 to 80 degrees along the LOS
Parms[4, i] = theta
# parameters of the electron distribution function
n_b = nrl # n_b - nonthermal electron density, cm^{-3}
mu_c = np.cos(np.pi * 70 / 180) # loss-cone boundary
dmu_c = 0.2 # Delta_mu
E_arr = np.logspace(np.log10(Emin), np.log10(Emax), N_E, dtype='double') # energy grid (logarithmically spaced)
mu_arr = np.linspace(-1.0, 1.0, N_mu, dtype='double') # pitch-angle grid
f0 = np.zeros((N_E, N_mu), dtype='double') # 2D distribution function array - for a single voxel
# computing the distribution function (equivalent to PLW & GLC)
A = n_b / (2.0 * np.pi) * (delta - 1.0) / (Emin ** (1.0 - delta) - Emax ** (1.0 - delta))
B = 0.5 / (mu_c + dmu_c * np.sqrt(np.pi) / 2 * math.erf((1.0 - mu_c) / dmu_c))
for i in range(N_E):
for j in range(N_mu):
amu = abs(mu_arr[j])
f0[i, j] = A * B * E_arr[i] ** (-delta) * (1.0 if amu < mu_c else np.exp(-((amu - mu_c) / dmu_c) ** 2))
f_arr = np.zeros((N_E, N_mu, NSteps), dtype='double',
order='F') # 3D distribution function array - for multiple voxels
for k in range(NSteps):
f_arr[:, :, k] = f0 # electron distribution function is the same in all voxels
RL = np.zeros((7, Nf), dtype='double', order='F') # input/output array
# calculating the emission for array distribution (array -> on)
res = GET_MW(Lparms, Rparms, Parms, E_arr, mu_arr, f_arr, RL)
if res:
# retrieving the results
f = RL[0]
I_L = RL[5]
I_R = RL[6]
if showplt:
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 1)
ax.plot(f, I_L + I_R)
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_title('Total intensity (array)')
ax.set_xlabel('Frequency, GHz')
ax.set_ylabel('Intensity, sfu')
flx_model = I_L + I_R
flx_model = np.nan_to_num(flx_model) + 1e-11
logf = np.log10(f)
logflx_model = np.log10(flx_model)
logfreqghz = np.log10(freqghz)
interpfunc = interpolate.interp1d(logf, logflx_model, kind='linear')
logmflx = interpfunc(logfreqghz)
mflx = 10. ** logmflx
mtb = sfu2tb(np.array(freqghz) * 1.e9, mflx, ssz)
else:
print("Calculation error!")
if tb is None:
return mtb
if tb_err is None:
# return mTb - Tb
return mtb - tb
# wt = 1./flx_err
# wt = 1./(Tb_err/Tb/np.log(10.))
# residual = np.abs((logmTb - np.log10(Tb))) * wt
# residual = np.abs((mflx - flx)) * wt
residual = (mtb - tb) / tb_err
return residual
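

def _example_fit_params(source_area_arcsec2=100.0):
    """Illustrative helper (added example, not part of the original module): build an
    lmfit.Parameters object with the parameter names that mwspec2min_1src expects.
    All numerical values are placeholders, not recommended defaults."""
    params = lmfit.Parameters()
    params.add('ssz', value=source_area_arcsec2, vary=False)  # source area in arcsec^2
    params.add('depth', value=20.0)      # total source depth in arcsec
    params.add('Bmag', value=200.0)      # magnetic field strength in G
    params.add('Tth', value=10.0)        # thermal temperature in MK
    params.add('nth', value=1.0)         # thermal density in 1e10 cm^-3
    params.add('lognrlh', value=5.0)     # log10 of nonthermal density above 0.1 MeV
    params.add('delta', value=4.0)       # power-law index
    params.add('theta', value=60.0)      # viewing angle in degrees
    params.add('Emin', value=0.1)        # low-energy cutoff in MeV
    params.add('Emax', value=10.0)       # high-energy cutoff in MeV
    return params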
class RegionSelector:
# def set_errorobj(self, xout, yout, errobj, yerr):
# eospec, dummy, (errbar_eospec,) = errobj
# eospec.set_data(xout, yout)
# if yerr is not None:
# yerr_top = yout + yerr
# yerr_bot = yout - yerr
# new_segments_y = [np.array([[x, yt], [x, yb]]) for x, yt, yb in zip(xout, yerr_top, yerr_bot)]
# errbar_eospec.set_segments(new_segments_y)
# return 1
def subdata(self, xs, ys, rfile):
rmap, rdata, rheader, ndim, npol_fits, stokaxis, rfreqs, rdelts = ndfits.read(rfile)
ny, nx = rmap.data.shape
tr_coord = rmap.top_right_coord
bl_coord = rmap.bottom_left_coord
x0 = bl_coord.Tx.to(u.arcsec).value
y0 = bl_coord.Ty.to(u.arcsec).value
x1 = tr_coord.Tx.to(u.arcsec).value
y1 = tr_coord.Ty.to(u.arcsec).value
dx = rmap.scale.axis1.to(u.arcsec / u.pix).value
dy = rmap.scale.axis2.to(u.arcsec / u.pix).value
mapx, mapy = np.linspace(x0, x1, nx) - dx / 2.0, np.linspace(y0, y1, ny) - dy / 2.0
xsmin = np.nanmin(xs)
xsmax = np.nanmax(xs)
ysmin = np.nanmin(ys)
ysmax = np.nanmax(ys)
if np.abs(xsmax - xsmin) < dx:
xsmax = xsmin + dx
if np.abs(ysmax - ysmin) < dy:
ysmax = ysmin + dy
xmask = np.logical_and(mapx >= xsmin, mapx <= xsmax)
nxnew = np.count_nonzero(xmask)
ymask = np.logical_and(mapy >= ysmin, mapy <= ysmax)
nynew = np.count_nonzero(ymask)
xmask = np.tile(xmask, ny).reshape(ny, nx)
ymask = np.tile(ymask, nx).reshape(nx, ny).transpose()
mask = xmask & ymask
# print(np.count_nonzero(mask))
self.npix = np.count_nonzero(mask)
self.area = self.npix * dx * dy
data = rdata[:, mask]
# print(rdata[:, :, mask])
# print(mask.shape, rdata.shape, data.shape)
data = np.squeeze(data)
# print(data.shape)
return data
def __init__(self, clkpnts, boxlines, eofiles, errobjs, cfreqs=None, rms=None, eofile_ref=None, errobj_ref=None,
wTmap=None, outspec_ff=None, scatter_gsfit=None,
get_peak=False, get_sum=False):
self.boxline = []
self.clkpnt = []
self.xs = list(clkpnts[0].get_xdata())
self.ys = list(clkpnts[0].get_ydata())
self.npix = None
self.area = None
self.xout = []
self.yout = []
self.xouterr = []
self.youterr = []
for errobj in errobjs:
eospec, dummy, (errbar_eospec,) = errobj
self.xout.append(eospec.get_xdata())
self.yout.append(eospec.get_ydata())
self.errobjs = errobjs
self.errobj_ref = errobj_ref
self.outspec_ff = outspec_ff
self.scatter_gsfit = scatter_gsfit
self.cfreqs = cfreqs
self.rms = rms
self.eofiles = eofiles
self.eofile_ref = eofile_ref
self.wTmap = wTmap
self.wT = None
self.em = None
self.get_peak = get_peak
self.get_sum = get_sum
self.tps = []
self.params = None
for idx, s in enumerate(clkpnts):
self.boxline.append(boxlines[idx])
self.clkpnt.append(s)
self.cid = s.figure.canvas.mpl_connect('button_press_event', self)
def __call__(self, event):
axes = [clkpnt.axes for clkpnt in self.clkpnt]
if self.clkpnt[0].figure.canvas.toolbar.mode == '':
if event.inaxes not in axes:
return
nxs = len(self.xs)
if event.button == 1:
if nxs < 2:
self.xs.append(event.xdata)
self.ys.append(event.ydata)
else:
self.xs = [event.xdata]
self.ys = [event.ydata]
elif event.button == 3:
if len(self.xs) > 0:
self.xs.pop()
self.ys.pop()
self.get_flux()
def get_flux(self):
if len(self.xs) > 0:
xs = np.array(self.xs, dtype=np.float64)
ys = np.array(self.ys, dtype=np.float64)
for clkpnt in self.clkpnt:
clkpnt.set_data(xs, ys)
else:
for clkpnt in self.clkpnt:
clkpnt.set_data([], [])
nxs = len(self.xs)
if nxs <= 1:
for line in self.boxline:
line.set_data([], [])
elif nxs == 2:
datas = []
# eofile = self.eofiles[0]
# rmap, rdata, rheader, ndim, npol_fits, stokaxis, rfreqs, rdelts = ndfits.read(eofile)
# data = self.subdata(xs, ys, eofile)
# datas.append(data)
for tidx, eofile in enumerate(self.eofiles):
data = self.subdata(xs, ys, eofile)
datas.append(data)
if self.eofile_ref is not None:
data_ref = self.subdata(xs, ys, self.eofile_ref)
if self.wTmap is not None:
datawT = self.subdata(xs, ys, self.wTmap)
if self.get_peak:
youts_outspec = []
for data in datas:
if data.ndim > 1:
youts_outspec.append(np.nanmax(data, axis=-1) / 1e6)
else:
youts_outspec.append(data / 1e6)
if self.eofile_ref is not None:
youts_outspec_ref = np.nanmax(data_ref[0, dd, :, :]) / 1e6
else:
youts_outspec = []
for data in datas:
if data.ndim > 1:
youts_outspec.append(np.nanmean(data, axis=-1) / 1e6)
else:
youts_outspec.append(data / 1e6)
if self.eofile_ref is not None:
if data.ndim > 1:
youts_outspec_ref = np.nanmean(data_ref, axis=-1) / 1e6
else:
youts_outspec_ref = data_ref / 1e6
self.tps = []
for data in datas:
if data.ndim > 1:
self.tps.append(np.nansum(data, axis=-1) / 1e6)
else:
self.tps.append(data / 1e6)
xout = self.cfreqs
for tidx, errobj in enumerate(self.errobjs):
set_errorobj(xout, youts_outspec[tidx], errobj, self.rms)
if self.eofile_ref is not None:
set_errorobj(xout, youts_outspec_ref, self.errobj_ref, self.rms)
if self.wTmap is not None:
print(datawT.shape)
wT = np.nanmean(datawT[..., 1]) * 1e6
em = np.nanmean(datawT[..., 0])
arcsec2cm = (self.wTmap[0].rsun_meters / self.wTmap[0].rsun_obs).to(u.cm / u.arcsec).value
# nele = 4.0e10
# depth = em / nele ** 2 / arcsec2cm
# print('Temperature: {:.1f} MK, EM: {:.2e} cm-5, depth: {:.1f} arcsec if nele is {:.2e} cm-3'.format(wT / 1e6, em, depth, nele))
depth = 20. ## arcsec
nele = np.sqrt(em / (depth * arcsec2cm))
print('Temperature: {:.1f} MK, EM: {:.2e} cm-5, nele: {:.2e} cm-3 if depth is {:.1f} arcsec'.format(
wT / 1e6, em, nele, depth))
self.wT = wT
self.em = em
yout_ff = np.array([ff_emission(em, T=wT, Z=1., mu=ll) for ll in xout * 1e9]) / 1.e6
self.outspec_ff.set_data(xout, yout_ff)
self.errobjs[0][0].figure.canvas.draw_idle()
for line in self.boxline:
line.set_data([xs[0], xs[1], xs[1], xs[0], xs[0]], [ys[0], ys[0], ys[1], ys[1], ys[0]])
clkpnt.figure.canvas.draw_idle()
class GStool:
# def get_showaia(self):
# return self._showaia
#
# def set_showaia(self, value):
# self._showaia = value
#
# showaia = property(fget=get_showaia, fset=set_showaia, doc="`Boolean`-like: Display AIA image or not")
def __init__(self, eofiles, aiafile=None, xycen=None, fov=None, freqghz_bound=[-1, 100], calpha=0.5,
clevels=np.array([0.3, 1.0]), opencontour=None):
self.aiafile = aiafile
self.eofiles = eofiles
self.xycen = xycen
self.fov = fov
self.calpha = calpha
self.clevels = clevels
self.freqghz_bound = freqghz_bound
self.opencontour = opencontour
self._showaia = False
rmap, rdata, rheader, ndim, npol_fits, stokaxis, rfreqs, rdelts = ndfits.read(eofiles[0])
self.bdinfo = bdinfo = ndfits.get_bdinfo(rfreqs, rdelts)
self.cfreqs = cfreqs = bdinfo['cfreqs']
self.cfreqs_all = cfreqs_all = bdinfo['cfreqs_all']
self.freq_dist = lambda fq: (fq - cfreqs_all[0]) / (cfreqs_all[-1] - cfreqs_all[0])
self.ntim = ntim = len(eofiles)
self.xlim = xlim = xycen[0] + np.array([-1, 1]) * 0.5 * fov[0]
self.ylim = ylim = xycen[1] + np.array([-1, 1]) * 0.5 * fov[1]
nspw = len(rfreqs)
eodate = Time(rmap.date.mjd + rmap.exposure_time.value / 2. / 24 / 3600, format='mjd')
ny, nx = rmap.data.shape
x0, x1 = (np.array([1, rmap.meta['NAXIS1']]) - rmap.meta['CRPIX1']) * rmap.meta['CDELT1'] + \
rmap.meta['CRVAL1']
y0, y1 = (np.array([1, rmap.meta['NAXIS2']]) - rmap.meta['CRPIX2']) * rmap.meta['CDELT2'] + \
rmap.meta['CRVAL2']
dx = rmap.meta['CDELT1']
dy = rmap.meta['CDELT2']
mapx, mapy = np.linspace(x0, x1, nx), np.linspace(y0, y1, ny)
fig = plt.figure(figsize=(15, 6))
self.fig = fig
grids = fig.add_gridspec(ncols=3, nrows=1, width_ratios=[1, 1, 0.6])
self.grids = grids
axs = []
axs.append(fig.add_subplot(grids[0, 0]))
axs.append(fig.add_subplot(grids[0, 1], sharex=axs[-1], sharey=axs[-1]))
axs.append(fig.add_subplot(grids[0, 2]))
if aiafile:
if os.path.exists(aiafile):
try:
aiacmap = plt.get_cmap('gray_r')
aiamap = smap.Map(aiafile)
ax = axs[0]
aiamap.plot(axes=ax, cmap=aiacmap)
ax = axs[1]
aiamap.plot(axes=ax, cmap=aiacmap)
self._showaia = True
except:
self._showaia = False
if self._showaia:
if self.opencontour is None:
self.opencontour = False
else:
if self.opencontour is None:
self.opencontour = True
## Plot EOVSA images as filled contour on top of the AIA image
icmap = plt.get_cmap('RdYlBu')
cts = []
## color map for spectra from the image series
tcmap = plt.get_cmap('turbo')
for s, sp in enumerate(rfreqs):
data = rdata[s, ...]
clvls = clevels * np.nanmax(data)
rcmap = [icmap(self.freq_dist(self.cfreqs[s]))] * len(clvls)
if self.opencontour:
cts.append(ax.contour(mapx, mapy, data, levels=clvls,
colors=rcmap,
alpha=calpha))
else:
cts.append(ax.contourf(mapx, mapy, data, levels=clvls,
colors=rcmap,
alpha=calpha))
ax.set_xlim(self.xlim)
ax.set_ylim(self.ylim)
for ax in axs[:2]:
ax.set_xlabel('Solar-X [arcsec]')
ax.set_ylabel('Solar-y [arcsec]')
ax.set_title('')
ax.text(0.02, 0.01,
' '.join(['AIA {:.0f} Å'.format(aiamap.wavelength.value),
aiamap.date.datetime.strftime('%Y-%m-%dT%H:%M:%S')]),
ha='left',
va='bottom',
color='k', transform=ax.transAxes)
ax.text(0.02, 0.05, ' '.join(['EOVSA ', eodate.datetime.strftime('%Y-%m-%dT%H:%M:%S')]), ha='left',
va='bottom',
color='k', transform=ax.transAxes)
divider = make_axes_locatable(axs[0])
cax = divider.append_axes("right", size="8%", pad=0.08)
cax.set_visible(False)
divider = make_axes_locatable(axs[1])
cax = divider.append_axes("right", size="8%", pad=0.08)
ticks, bounds, vmax, vmin, freqmask = ql.get_colorbar_params(bdinfo)
cb = colorbar.ColorbarBase(cax, norm=colors.Normalize(vmin=vmin, vmax=vmax), cmap=icmap,
orientation='vertical', boundaries=bounds, spacing='proportional',
ticks=ticks, format='%4.1f', alpha=calpha)
for fbd_lo, fbd_hi in freqmask:
if fbd_hi is not None:
cax.axhspan(fbd_lo, fbd_hi, hatch='//', edgecolor='k', facecolor='#BBBBBB')
plt.text(0.5, 1.05, 'MW', ha='center', va='bottom', transform=cax.transAxes, color='k', fontweight='normal')
plt.text(0.5, 1.01, '[GHz]', ha='center', va='bottom', transform=cax.transAxes, color='k',
fontweight='normal')
cax.xaxis.set_visible(False)
cax.tick_params(axis="y", pad=-20., length=0, colors='k', labelsize=7)
cax.axhline(vmin, xmin=1.0, xmax=1.2, color='k', clip_on=False)
cax.axhline(vmax, xmin=1.0, xmax=1.2, color='k', clip_on=False)
cax.text(1.25, 0.0, '{:.1f}'.format(vmin), fontsize=9, transform=cax.transAxes, va='center', ha='left')
cax.text(1.25, 1.0, '{:.1f}'.format(vmax), fontsize=9, transform=cax.transAxes, va='center', ha='left')
boxlines = []
clkpnts = []
for idx, ax in enumerate(axs[:2]):
if idx == 0:
c = 'g'
elif idx == 1:
c = 'b'
else:
c = 'k'
line, = ax.plot([], [], '-', c=c, alpha=1.0) # empty line
boxlines.append(line)
clkpnt, = ax.plot([], [], '+', c='white', alpha=0.7) # empty line
clkpnts.append(clkpnt)
if ntim < 2:
cplts = ['k']
else:
cplts = tcmap(np.linspace(0, 1, ntim))
self.cplts = cplts
self.ax_eospec = axs[-1]
errobjs = initspecplot(self.ax_eospec, cplts)
grids.tight_layout(fig)
self.region = RegionSelector(clkpnts, boxlines, eofiles, errobjs, cfreqs=cfreqs, rms=None, wTmap=None)
self.scatter_eospecs_fit = []
self.scatter_eospecs = []
def set_params(self, params):
ssz = self.region.area # source area in arcsec^2
params.add('ssz', value=ssz, vary=False) # pixel size in arcsec^2
self.params = params
def plot_components(self):
ti = 0
tb = self.region.errobjs[ti][0].get_ydata() * 1e6
tb_ma = ma.masked_less_equal(tb, 0)
freqghz = self.region.errobjs[0][0].get_xdata()
# freqghz_ma = ma.masked_outside(freqghz, 1.0, 15.0)
freqghz_ma = ma.masked_outside(freqghz, self.freqghz_bound[0], self.freqghz_bound[1])
mask_fit = np.logical_or(freqghz_ma.mask, tb_ma.mask)
freqghz_ma = ma.masked_array(freqghz, mask_fit)
tb_ma = ma.masked_array(tb, mask_fit)
# scatter_eospecs_fit.append(
# ax_spec.plot(freqghz_ma, tb_ma / 1.e6, marker='o', linestyle='', c=cplts[ti]))
# flx_rms = rms
tb_err = tb * 0.0
tb_err[:] = 1.e6
tb_err_ma = ma.masked_array(tb_err, tb_ma.mask)
if len(self.scatter_eospecs_fit) == 0:
for ti, cplt in enumerate(self.cplts):
self.scatter_eospecs_fit.append(
self.ax_eospec.errorbar(freqghz_ma, tb_ma / 1.e6, yerr=tb_err_ma / 1.e6, marker='.', ms=1,
linestyle='',
c=cplt))
else:
for ti, cplt in enumerate(self.cplts):
set_errorobj(freqghz_ma, tb_ma / 1.e6, self.scatter_eospecs_fit[ti], yerr=tb_err_ma / 1.e6)
def fit(self):
ti = 0
tb = self.region.errobjs[ti][0].get_ydata() * 1e6
tb_ma = ma.masked_less_equal(tb, 0)
freqghz = self.region.errobjs[0][0].get_xdata()
# freqghz_ma = ma.masked_outside(freqghz, 1.0, 15.0)
freqghz_ma = ma.masked_outside(freqghz, self.freqghz_bound[0], self.freqghz_bound[1])
mask_fit = np.logical_or(freqghz_ma.mask, tb_ma.mask)
freqghz_ma = ma.masked_array(freqghz, mask_fit)
tb_ma = ma.masked_array(tb, mask_fit)
# scatter_eospecs_fit.append(
# ax_spec.plot(freqghz_ma, tb_ma / 1.e6, marker='o', linestyle='', c=cplts[ti]))
# flx_rms = rms
tb_err = tb * 0.1
# tb_err[:] = 0.2e6
tb_err_ma = ma.masked_array(tb_err, tb_ma.mask)
if len(self.scatter_eospecs_fit) == 0:
for ti, cplt in enumerate(self.cplts):
self.scatter_eospecs_fit.append(
self.ax_eospec.errorbar(freqghz_ma, tb_ma / 1.e6, yerr=tb_err_ma / 1.e6, marker='.', ms=1,
linestyle='', c=cplt))
else:
for ti, cplt in enumerate(self.cplts):
set_errorobj(freqghz_ma, tb_ma / 1.e6, self.scatter_eospecs_fit[ti], yerr=tb_err_ma / 1.e6)
mini = lmfit.Minimizer(mwspec2min_1src, self.params, fcn_args=(freqghz_ma.compressed(),),
fcn_kws={'tb': tb_ma.compressed(), 'tb_err': tb_err_ma.compressed()},
nan_policy='omit')
method = 'nelder'
        # method = 'differential_evolution'
mi = mini.minimize(method=method)
print(method + ' minimization results')
print(lmfit.fit_report(mi.params))
tb_fit = mwspec2min_1src(mi.params, freqghz)
if len(self.scatter_eospecs) == 0:
for ti, cplt in enumerate(self.cplts):
self.scatter_eospecs.append(self.ax_eospec.plot(freqghz, tb_fit / 1.e6, linestyle='-', c=cplt))
else:
for ti, cplt in enumerate(self.cplts):
self.scatter_eospecs[ti][0].set_data(freqghz, tb_fit / 1.e6)
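# ---------------------------------------------------------------------------
# Minimal, self-contained sketch of the lmfit minimization pattern used in
# fit() above. It is illustrative only: the residual below is a toy power-law
# spectrum, not the real microwave spectral function mwspec2min_1src, and all
# parameter names and data values are made up for the example.
def _example_lmfit_fit():
    import numpy as np
    import lmfit
    def residual(params, freqghz, tb=None, tb_err=None):
        # toy model: Tb = amp * freq**index; return the model if no data given
        v = params.valuesdict()
        model = v['amp'] * freqghz ** v['index']
        if tb is None:
            return model
        return (model - tb) / tb_err
    freqghz = np.linspace(2.0, 15.0, 30)
    tb_true = 5e6 * freqghz ** -2.0
    tb_obs = tb_true * (1.0 + 0.05 * np.random.randn(freqghz.size))
    tb_err = 0.1 * tb_obs
    params = lmfit.Parameters()
    params.add('amp', value=1e6, min=0.0)
    params.add('index', value=-1.0)
    mini = lmfit.Minimizer(residual, params, fcn_args=(freqghz,),
                           fcn_kws={'tb': tb_obs, 'tb_err': tb_err},
                           nan_policy='omit')
    result = mini.minimize(method='nelder')
    print(lmfit.fit_report(result.params))
    return residual(result.params, freqghz)  # best-fit model spectrum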
|
py | b40ed01b7571928cc6964e81035ca91a7144e28d | from direct.directnotify import DirectNotifyGlobal
from direct.distributed.DistributedObjectAI import DistributedObjectAI
class DistributedPartyValentineDanceActivityAI(DistributedObjectAI):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedPartyValentineDanceActivityAI')
|
py | b40ed08cc8743cf44d9f9e8354bd3e0c81a10dfd | import pschitt.emission as em
import numpy as np
def test_verify_normalisation_solid_angle():
assert em.angular_profile.verify_normalisation_solid_angle(lambda x: 1. / np.pi ** 3)
def test_angular_profile_exp_peak():
assert em.angular_profile.exp_peak(0, 0.1, 1) == 0
assert em.angular_profile.exp_peak(0.1, 0.1, 1) == 1
def test_angular_profile_constant():
assert em.angular_profile.constant(np.pi * np.random.rand()) == 1
def test_angular_profile_heaviside():
assert em.angular_profile.heaviside(np.random.rand(), 1) == 1
assert em.angular_profile.heaviside(np.random.rand() + 1, 1) == 0
def test_angular_profile_lgdt06():
eta = np.random.rand()
assert em.angular_profile.lgdt06(np.random.rand()*eta, eta) == 1
assert em.angular_profile.lgdt06(eta, eta) == 1
assert em.angular_profile.lgdt06(2 * eta, eta) == 0.5 * np.exp(-0.25)
|
py | b40ed0ce83e47da6d6e7ef602c243e5a49d1acdb | import swissparlpy
# print all tables with their properties
overview = swissparlpy.get_overview()
for table, props in overview.items():
print(table)
for prop in props:
print(f' + {prop}')
print('')
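# Possible next step (illustrative, not part of the original snippet): once a
# table name is known from the overview, its rows can be fetched. The lines
# below assume swissparlpy's get_data() helper and the 'Party' table with a
# 'Language' filter; check the library documentation for your version.
# rows = swissparlpy.get_data('Party', Language='DE')
# print(len(rows), 'rows fetched')
# print(rows[0])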
|
py | b40ed24bfe8a4ab482764a15309d0f6b48530460 | import numpy as np
import tensorflow as tf
from tqdm import trange
from .algorithm_utils import Algorithm, TensorflowUtils
class RecurrentEBM(Algorithm, TensorflowUtils):
""" Recurrent Energy-Based Model implementation using TensorFlow.
The interface of the class is sklearn-like.
"""
def __init__(self, num_epochs=100, n_hidden=50, n_hidden_recurrent=100,
min_lr=1e-3, min_energy=None, batch_size=10,
seed: int=None, gpu: int=None):
Algorithm.__init__(self, __name__, 'Recurrent EBM', seed)
TensorflowUtils.__init__(self, seed, gpu)
self.num_epochs = num_epochs
self.n_hidden = n_hidden # Size of RBM's hidden layer
self.n_hidden_recurrent = n_hidden_recurrent # Size of RNN's hidden layer
self.min_lr = min_lr
self.min_energy = min_energy # Threshold for anomaly
self.batch_size = batch_size
# Placeholders
self.input_data = None
self.lr = None
self._batch_size = None
# Variables
self.W, self.Wuh, self.Wux, self.Wxu, self.Wuu, self.bu, \
self.u0, self.bh, self.bx, self.BH_t, self.BX_t = \
None, None, None, None, None, None, None, None, None, None, None
self.tvars = []
self.update = None
self.cost = None
self.tf_session = None
def fit(self, X):
X.interpolate(inplace=True)
X.bfill(inplace=True)
with self.device:
self._build_model(X.shape[1])
self.tf_session = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
self._initialize_tf()
self._train_model(X, self.batch_size)
def predict(self, X):
X.interpolate(inplace=True)
X.bfill(inplace=True)
with self.device:
scores = []
labels = []
for i in range(len(X)):
reconstruction_err = self.tf_session.run([self.cost],
feed_dict={self.input_data: X[i:i + 1],
self._batch_size: 1})
scores.append(reconstruction_err[0])
if self.min_energy is not None:
labels = np.where(scores >= self.min_energy)
scores = np.array(scores)
return (labels, scores) if self.min_energy is not None else scores
def _train_model(self, train_set, batch_size):
for epoch in trange(self.num_epochs):
costs = []
for i in range(0, len(train_set), batch_size):
x = train_set[i:i + batch_size]
if len(x) == batch_size:
alpha = self.min_lr # min(self.min_lr, 0.1 / float(i + 1))
_, C = self.tf_session.run([self.update, self.cost],
feed_dict={self.input_data: x, self.lr: alpha,
self._batch_size: batch_size})
costs.append(C)
self.logger.debug(f'Epoch: {epoch+1} Cost: {np.mean(costs)}')
def _initialize_tf(self):
init = tf.global_variables_initializer()
self.tf_session.run(init)
def _build_model(self, n_visible):
self.input_data, self.lr, self._batch_size = self._create_placeholders(n_visible)
self.W, self.Wuh, self.Wux, self.Wxu, self.Wuu, self.bu, \
self.u0, self.bh, self.bx, self.BH_t, self.BX_t = self._create_variables(n_visible)
def rnn_recurrence(u_tmin1, sl):
# Iterate through the data in the batch and generate the values of the RNN hidden nodes
sl = tf.reshape(sl, [1, n_visible])
u_t = tf.nn.softplus(self.bu + tf.matmul(sl, self.Wxu) + tf.matmul(u_tmin1, self.Wuu))
return u_t
def visible_bias_recurrence(bx_t, u_tmin1):
# Iterate through the values of the RNN hidden nodes and generate the values of the visible bias vectors
bx_t = tf.add(self.bx, tf.matmul(u_tmin1, self.Wux))
return bx_t
def hidden_bias_recurrence(bh_t, u_tmin1):
# Iterate through the values of the RNN hidden nodes and generate the values of the hidden bias vectors
bh_t = tf.add(self.bh, tf.matmul(u_tmin1, self.Wuh))
return bh_t
self.BH_t = tf.tile(self.BH_t, [self._batch_size, 1])
self.BX_t = tf.tile(self.BX_t, [self._batch_size, 1])
# Scan through the rnn and generate the value for each hidden node in the batch
u_t = tf.scan(rnn_recurrence, self.input_data, initializer=self.u0)
# Scan through the rnn and generate the visible and hidden biases for each RBM in the batch
self.BX_t = tf.reshape(tf.scan(visible_bias_recurrence, u_t, tf.zeros([1, n_visible], tf.float32)),
[n_visible, self._batch_size])
self.BH_t = tf.reshape(tf.scan(hidden_bias_recurrence, u_t, tf.zeros([1, self.n_hidden], tf.float32)),
[self.n_hidden, self._batch_size])
self.cost = self._run_ebm(self.input_data, self.W, self.BX_t, self.BH_t)
self.tvars = [self.W, self.Wuh, self.Wux, self.Wxu, self.Wuu, self.bu, self.u0, self.bh, self.bx]
opt_func = tf.train.AdamOptimizer(learning_rate=self.lr)
grads, _ = tf.clip_by_global_norm(tf.gradients(self.cost, self.tvars), 1)
self.update = opt_func.apply_gradients(zip(grads, self.tvars))
def _run_ebm(self, x, W, b_prime, b):
""" Runs EBM for time step and returns reconstruction error.
1-layer implementation, TODO: implement and test deep structure
"""
x = tf.transpose(x) # For batch processing
forward = tf.matmul(tf.transpose(W), x) + b
reconstruction = tf.matmul(W, tf.sigmoid(forward)) + b_prime
loss = tf.reduce_sum(tf.square(x - reconstruction))
return loss
def _create_placeholders(self, n_visible):
x = tf.placeholder(tf.float32, [None, n_visible], name='x_input')
lr = tf.placeholder(tf.float32)
batch_size = tf.placeholder(tf.int32)
return x, lr, batch_size
def _create_variables(self, n_visible):
W = tf.Variable(tf.random_normal([n_visible, self.n_hidden], stddev=0.01), name='W')
Wuh = tf.Variable(tf.random_normal([self.n_hidden_recurrent, self.n_hidden], stddev=0.01), name='Wuh')
Wux = tf.Variable(tf.random_normal([self.n_hidden_recurrent, n_visible], stddev=0.01), name='Wux')
Wxu = tf.Variable(tf.random_normal([n_visible, self.n_hidden_recurrent], stddev=0.01), name='Wxu')
Wuu = tf.Variable(tf.random_normal([self.n_hidden_recurrent, self.n_hidden_recurrent], stddev=0.01), name='Wuu')
bu = tf.Variable(tf.zeros([1, self.n_hidden_recurrent]), name='bu')
u0 = tf.Variable(tf.zeros([1, self.n_hidden_recurrent]), name='u0')
bh = tf.Variable(tf.zeros([1, self.n_hidden]), name='bh')
bx = tf.Variable(tf.zeros([1, n_visible]), name='bx')
BH_t = tf.Variable(tf.zeros([1, self.n_hidden]), name='BH_t')
BX_t = tf.Variable(tf.zeros([1, n_visible]), name='BX_t')
return W, Wuh, Wux, Wxu, Wuu, bu, u0, bh, bx, BH_t, BX_t
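# ---------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the original module). The interface
# is sklearn-like, so training and scoring are expected to look roughly like
# this; the random DataFrames, column names and hyperparameters below are
# made up purely for illustration.
def _example_recurrent_ebm_usage():
    import numpy as np
    import pandas as pd
    rng = np.random.RandomState(0)
    train = pd.DataFrame(rng.rand(200, 3), columns=['f0', 'f1', 'f2'])
    test = pd.DataFrame(rng.rand(50, 3), columns=['f0', 'f1', 'f2'])
    model = RecurrentEBM(num_epochs=5, batch_size=10)
    model.fit(train)              # learns the RNN-RBM parameters
    scores = model.predict(test)  # per-sample reconstruction energies
    return scores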
|
py | b40ed3b778128e604ff12750d464dfdc46562058 | SITE_ID = 1
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
INSTALLED_APPS = [
'queued_storage',
'queued_storage.tests',
]
TEST_RUNNER = 'discover_runner.DiscoverRunner'
SECRET_KEY = 'top_secret'
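# Illustrative invocation (an assumption, not part of the original settings):
# with this module importable as e.g. `test_settings`, the queued_storage
# test suite would typically be run with something like:
#   django-admin test queued_storage --settings=test_settings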
|
py | b40ed41259038f58ccfbb4406e9598581aaefd1b | from brownie import (
accounts, UbeswapV1Oracle, network
)
import json
import time
network.gas_limit(8000000)
def main():
deployer = accounts.load('dahlia_admin')
with open('scripts/dahlia_addresses.json', 'r') as f:
addr = json.load(f)
mainnet_addr = addr.get('mainnet')
ubeswap_oracle = UbeswapV1Oracle.at(mainnet_addr.get('ube_oracle'))
while True:
if ubeswap_oracle.workable():
ubeswap_oracle.work({'from': deployer})
print("work")
else:
print("no work")
time.sleep(60) |
py | b40ed5ac9a550e13d59fe0102e5238b44e1e93aa | import sqlite3
import_file_name = "data/pubmed/descriptor.txt"
cell_separator = "|||"
lines = []
insert_query = "INSERT INTO descriptor (pmid, descriptorid, descriptorterm) VALUES (?, ?, ?);"
db_connection = sqlite3.connect("sqlite/descriptor.sqlite")
db_cursor = db_connection.cursor()
db_cursor.execute("CREATE TABLE descriptor (pmid INTEGER, descriptorid TEXT, descriptorterm TEXT);")
db_cursor.execute("BEGIN TRANSACTION")
with open(import_file_name, 'r') as import_file:
for line in import_file:
cleaned_columns = [column.strip() for column in line.split(cell_separator)]
db_cursor.execute(insert_query, tuple(cleaned_columns))
db_connection.commit()
db_cursor.execute("VACUUM;")
db_cursor.close()
db_connection.close()
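# Illustrative follow-up (not in the original script): re-open the database
# and sanity-check the import with a row count and a sample row.
check_connection = sqlite3.connect("sqlite/descriptor.sqlite")
check_cursor = check_connection.cursor()
check_cursor.execute("SELECT COUNT(*) FROM descriptor;")
print("imported rows:", check_cursor.fetchone()[0])
check_cursor.execute("SELECT * FROM descriptor LIMIT 1;")
print("sample row:", check_cursor.fetchone())
check_connection.close()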
|
py | b40ed850aa4b8a4ee944de7adc349b3f422fa627 | #!/usr/bin/env python3.6
# -*- coding: utf-8 -*-
from .base import hash, hash_next
from typing import List, Set, Mapping
from task.util import utils
from tqdm import tqdm
import gzip
def make_hash(emb_path):
vocab = {}
max_tok_len = 0
max_word = None
with gzip.open(emb_path, mode='rt', compresslevel=6) as file:
word_size, dim = [int(i) for i in file.readline().rstrip().split()]
for id, line in tqdm(enumerate(file)):
head, *_ = line.split()
if ' ' in head.strip():
print(head)
continue
hash_id, tlen = hash(head)
vocab[hash_id] = id
if tlen > max_tok_len:
max_tok_len = tlen
max_word = head
print('word vector size = ', len(vocab))
print('{}, token_len={}'.format([w for t, w in utils.replace_entity(max_word)], max_tok_len))
return vocab, max_tok_len
def filter(dataset: List[str], vocab: Mapping[int, int], max_tok_len):
small = {}
for path in dataset:
with open(path) as file:
for line in tqdm(file):
items = line.strip().split()
if 0 < len(items) < 150:
items = [t.rsplit('#', maxsplit=1) for t in items]
tokens = [t[0] for t in items]
tags = [t[1] for t in items]
for s in range(len(tokens)):
hash_id = None
for l in range(min(max_tok_len, len(tokens) - s)):
hash_id = hash_next(hash_id, tokens[s+l])
line_id = vocab.get(hash_id)
if line_id is not None:
small[hash_id] = line_id
return small
def extract(big_emb:str, small_emb: str, small: Mapping[int, int]):
with gzip.open(big_emb, mode='rt', compresslevel=6) as reader,\
gzip.open(small_emb, mode='wt', compresslevel=6) as writer:
word_size, dim = [int(i) for i in reader.readline().rstrip().split()]
writer.write('{} {}\n'.format(len(small), dim))
for lid, line in tqdm(enumerate(reader)):
word, weights = line.rstrip().split(maxsplit=1)
hash_id, tlen = hash(word)
if hash_id in small:
writer.write('{}\t{}\t{}\n'.format(lid, word, weights))
if __name__ == '__main__':
big_path = 'wordvec/Tencent_AILab_ChineseEmbedding.txt.gz'
small_path = 'wordvec/Tencent_AILab_ChineseEmbedding.small.txt.gz'
big_vocab, max_len = make_hash(big_path)
small_vocab = filter(['./pos/data/std.train', './pos/data/std.valid', './pos/data/std.gold'], big_vocab, max_len)
extract(big_path, small_path, small_vocab)
|
py | b40ed8939b5501ebadb4b2d8da9c47dd46a388d7 | import pglet
from pglet import Dropdown, dropdown, Button, Text
with pglet.page("basic-dropdown") as page:
def button_clicked(e):
t.value = f"Dropdown value is: {dd.value}"
page.update()
t = Text()
b = Button(text='Submit', on_click=button_clicked)
dd = Dropdown(width=100, options=[
dropdown.Option('Red'),
dropdown.Option('Green'),
dropdown.Option('Blue')
])
page.add(dd, b, t)
input()
|
py | b40ed8b38293e07bfd2eb5f7272f21e666304bf6 | """
(C) Copyright 2021 IBM Corp.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Created on June 30, 2021
"""
import os
import pathlib
from typing import Any, Dict
from collections import OrderedDict
import pandas as pd
import numpy as np
from fuse.utils import set_seed
from fuse.eval.metrics.metrics_common import GroupAnalysis, CI, Filter
from fuse.eval.metrics.metrics_model_comparison import PairedBootstrap
from fuse.eval.metrics.classification.metrics_classification_common import MetricAUCPR, MetricAUCROC, MetricAccuracy, MetricConfusion, MetricConfusionMatrix, MetricBSS, MetricROCCurve
from fuse.eval.metrics.classification.metrics_model_comparison_common import MetricContingencyTable, MetricDelongsTest, MetricMcnemarsTest
from fuse.eval.metrics.classification.metrics_thresholding_common import MetricApplyThresholds
from fuse.eval.metrics.classification.metrics_calibration_common import MetricReliabilityDiagram, MetricECE, MetricFindTemperature, MetricApplyTemperature
from fuse.eval.evaluator import EvaluatorDefault
def example_0() -> Dict[str, Any]:
"""
Simple evaluation example for binary classification task
Input is a single dataframe pickle file including 3 columns: "id", "pred" (numpy arrays) and "target"
"""
# path to prediction and target files
dir_path = pathlib.Path(__file__).parent.resolve()
input_filename = os.path.join(dir_path, "inputs/example0.pickle")
# list of metrics
metrics = OrderedDict([
("auc", MetricAUCROC(pred="pred", target="target")),
])
# read files
data = input_filename
evaluator = EvaluatorDefault()
results = evaluator.eval(ids=None, data=data, metrics=metrics) # ids == None -> run evaluation on all available samples
return results
def example_1() -> Dict[str, Any]:
"""
Simple evaluation example
    Inputs are two .csv files: one containing predictions and one containing targets
    The predictions require simple pre-processing to create predictions in the format of a vector of probabilities
"""
# path to prediction and target files
dir_path = pathlib.Path(__file__).parent.resolve()
prediction_filename = os.path.join(dir_path, "inputs/example1_predictions.csv")
targets_filename = os.path.join(dir_path, "inputs/example1_targets.csv")
# define data
data = {"pred": prediction_filename, "target": targets_filename}
# pre collect function to change the format
def pre_collect_process(sample_dict: dict) -> dict:
# convert scores to numpy array
task1_pred = []
for cls_name in ("NoAT", "CanAT"):
task1_pred.append(sample_dict[f"pred.{cls_name}-score"])
task1_pred_array = np.array(task1_pred)
sample_dict['pred.array'] = task1_pred_array
return sample_dict
# list of metrics
metrics = OrderedDict([
("auc", MetricAUCROC(pred="pred.array", target="target.Task1-target",
pre_collect_process_func=pre_collect_process)),
])
# specify just the list you want to evaluate - predictions may contain more samples that will be ignored
task_target_df = pd.read_csv(targets_filename)
ids = list(task_target_df["id"])
evaluator = EvaluatorDefault()
results = evaluator.eval(ids=ids, data=data, metrics=metrics)
return results
def example_2():
"""
    Cross validation example - evaluating the entire dataset, built from a few folds at once
    Multiple inference files - each includes predictions of a different fold - binary predictions (single probability)
Single target file with labels for all the folds
"""
# path to prediction and target files
dir_path = pathlib.Path(__file__).parent.resolve()
prediction_fold0_filename = os.path.join(dir_path, "inputs/example2_predictions_fold0.csv")
prediction_fold1_filename = os.path.join(dir_path, "inputs/example2_predictions_fold1.csv")
targets_filename = os.path.join(dir_path, "inputs/example2_targets.csv")
# define data
data = {"pred": [prediction_fold0_filename, prediction_fold1_filename], "target": targets_filename}
# list of metrics
metrics = OrderedDict([
("auc", MetricAUCROC(pred="pred.CanAT-score", target="target.Task1-target")),
("auc_per_fold", GroupAnalysis(MetricAUCROC(pred="pred.CanAT-score", target="target.Task1-target"), group="pred.evaluator_fold"))
])
evaluator = EvaluatorDefault()
results = evaluator.eval(ids=None, data=data, metrics=metrics)
return results
def example_3():
"""
General group analysis example - compute the AUC for each group separately.
In this case the grouping is done according to gender
"""
data = {"pred": [0.1, 0.2, 0.6, 0.7, 0.8, 0.3, 0.6, 0.2, 0.7, 0.9],
"target": [0, 0, 1, 1, 1, 0, 0, 1, 1, 1],
"id": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
"gender": ['female', 'female', 'female', 'female', 'female', \
'male', 'male', 'male', 'male', 'male']
}
data = pd.DataFrame(data)
metrics = OrderedDict([
("auc_per_group", GroupAnalysis(MetricAUCROC(pred="pred", target="target"), group="gender"))
])
evaluator = EvaluatorDefault()
results = evaluator.eval(ids=None, data=data, metrics=metrics)
return results
def example_4() -> Dict[str, Any]:
"""
Simple evaluation example with Confidence Interval
    Inputs are two .csv files: one containing predictions and one containing targets
"""
# set seed
seed = 1234
# path to prediction and target files - reuse example1 inputs
dir_path = pathlib.Path(__file__).parent.resolve()
prediction_filename = os.path.join(dir_path, "inputs/example1_predictions.csv")
targets_filename = os.path.join(dir_path, "inputs/example1_targets.csv")
# define data
data = {"pred": prediction_filename, "target": targets_filename}
# list of metrics
metrics = OrderedDict([
("auc", CI(MetricAUCROC(pred="pred.CanAT-score", target="target.Task1-target"),
stratum="target.Task1-target",
rnd_seed=seed)),
])
evaluator = EvaluatorDefault()
results = evaluator.eval(ids=None, data=data, metrics=metrics)
return results
def example_5():
"""
Model comparison using paired bootstrap metric
Compare model a binary classification sensitivity to model b binary classification sensitivity
"""
# set seed
seed = 0
# define data
data = {
"id": [0, 1, 2, 3, 4],
"model_a_pred": [0.4, 0.3, 0.7, 0.8, 0.0],
"model_b_pred": [0.4, 0.3, 0.7, 0.2, 1.0],
"target": [0, 1, 1, 1, 0]
}
data_df = pd.DataFrame(data)
# list of metrics
metric_model_test = MetricConfusion(pred="results:metrics.apply_thresh_a.cls_pred", target="target")
metric_model_reference = MetricConfusion(pred="results:metrics.apply_thresh_b.cls_pred", target="target")
metrics = OrderedDict([
("apply_thresh_a", MetricApplyThresholds(pred="model_a_pred", operation_point=0.5)),
("apply_thresh_b", MetricApplyThresholds(pred="model_b_pred", operation_point=0.5)),
("compare_a_to_b", PairedBootstrap(metric_model_test, metric_model_reference,
stratum="target", metric_keys_to_compare=["sensitivity"],
rnd_seed=seed)),
])
# read files
evaluator = EvaluatorDefault()
results = evaluator.eval(ids=None, data=data_df, metrics=metrics)
return results
def example_6() -> Dict:
"""
Simple test of the DeLong's test implementation
Also "naively" test the multiclass mode (one vs. all) by simply extending the
binary classifier prediction vectors into a 2D matrix
For this simple example, DeLong's test was computed manually in this blog post by Rachel Draelos:
https://glassboxmedicine.com/2020/02/04/comparing-aucs-of-machine-learning-models-with-delongs-test/
"""
target = np.array([0, 0, 1, 1, 1])
pred1 = np.array([0.1, 0.2, 0.6, 0.7, 0.8])
pred2 = np.array([0.3, 0.6, 0.2, 0.7, 0.9])
# convert to Nx2 for multi-class generalization:
pred1 = np.stack((1-pred1, pred1), 1)
pred2 = np.stack((1-pred2, pred2), 1)
index = np.arange(0,len(target))
# convert to dataframes:
pred_df = pd.DataFrame(columns = ['pred1','pred2','id'])
pred_df["pred1"] = list(pred1)
pred_df["pred2"] = list(pred2)
pred_df["id"] = index
target_df = pd.DataFrame(columns = ['target', 'id'])
target_df["target"] = target
target_df["id"] = index
data = {"pred": pred_df, "target": target_df}
# list of metrics
metrics = OrderedDict([
("delongs_test", MetricDelongsTest(target="target.target", class_names=['negative', 'positive'], pred1="pred.pred1", pred2="pred.pred2")),
])
evaluator = EvaluatorDefault()
results = evaluator.eval(ids=None, data=data, metrics=metrics)
return results
def example_7() -> Dict:
"""
Another example for testing the DeLong's test implementation. This time in "binary classifier" mode
The sample data in this example was used in the above blog post and verified against an R implementation.
Three different sources: dataframe per model prediction and a target dataframe
"""
target = np.array([0,0,0,0,0,0,1,1,1,1,1,1,1])
pred1 = np.array([0.1,0.2,0.05,0.3,0.1,0.6,0.6,0.7,0.8,0.99,0.8,0.67,0.5])
pred2 = np.array([0.3,0.6,0.2,0.1,0.1,0.9,0.23,0.7,0.9,0.4,0.77,0.3,0.89])
ids = np.arange(0,len(target))
# convert to dataframes:
pred1_df = pd.DataFrame(columns = ['pred1','pred2','id'])
pred1_df["output"] = pred1
pred1_df["id"] = ids
pred2_df = pd.DataFrame(columns = ['pred1','pred2','id'])
pred2_df["output"] = pred2
pred2_df["id"] = ids
target_df = pd.DataFrame(columns = ['target', 'id'])
target_df["target"] = target
target_df["id"] = ids
data = {"pred1": pred1_df, "pred2": pred2_df, "target": target_df}
# list of metrics
metrics = OrderedDict([
("delongs_test", MetricDelongsTest(target="target.target", pred1="pred1.output", pred2="pred2.output")),
])
evaluator = EvaluatorDefault()
results = evaluator.eval(ids=None, data=data, metrics=metrics)
return results
def example_8():
"""
    Multiclass classification example: five-class evaluation with metrics AUC-ROC, AUC-PR, sensitivity, specificity and precision
Input: one .csv prediction file that requires processing to convert the predictions to numpy array and one target file.
"""
# path to prediction and target files
dir_path = pathlib.Path(__file__).parent.resolve()
prediction_filename = os.path.join(dir_path, "inputs/example7_predictions.csv")
targets_filename = os.path.join(dir_path, "inputs/example1_targets.csv") # same target file as in example1
data = {"target": targets_filename, "pred": prediction_filename}
class_names = ["B","LR","IR","HR","VHR"]
# pre collect function to change the format
def pre_collect_process(sample_dict: dict) -> dict:
# convert scores to numpy array
task2_pred = []
for cls_name in class_names:
task2_pred.append(sample_dict[f"pred.{cls_name}-score"])
task2_pred_array = np.array(task2_pred)
sample_dict['pred.output'] = task2_pred_array
sample_dict['pred.output_class'] = task2_pred_array.argmax()
return sample_dict
# list of metrics
metrics = OrderedDict([
("auc", MetricAUCROC(pred="pred.output", target="target.Task2-target", class_names=class_names, pre_collect_process_func=pre_collect_process)),
("auc_pr", MetricAUCPR(pred="pred.output", target="target.Task2-target", class_names=class_names, pre_collect_process_func=pre_collect_process)),
("confusion", MetricConfusion(pred="pred.output_class", target="target.Task2-target", class_names=class_names, pre_collect_process_func=pre_collect_process,
metrics=["sensitivity", "specificity", "precision"])), # default operation point is argmax
("accuracy", MetricAccuracy(pred="pred.output_class", target="target.Task2-target", pre_collect_process_func=pre_collect_process)), # default operation point is argmax
("confusion_matrix", MetricConfusionMatrix(cls_pred="pred.output_class", target="target.Task2-target", class_names=class_names, pre_collect_process_func=pre_collect_process)),
("bss", MetricBSS(pred="pred.output", target="target.Task2-target", pre_collect_process_func=pre_collect_process)),
("roc", MetricROCCurve(pred="pred.output", target="target.Task2-target", class_names=class_names, output_filename="roc.png", pre_collect_process_func=pre_collect_process))
])
# read files
evaluator = EvaluatorDefault()
results = evaluator.eval(ids=None, data=data, metrics=metrics) # ids == None -> run evaluation on all available samples
return results
def example_9():
"""
Classification example with single-process iterator.
This example requires fuse-med-ml-data and torchvision packages installed
"""
import torchvision
# set seed
set_seed(1234)
# Create dataset
torch_dataset = torchvision.datasets.MNIST('/tmp/mnist', download=True, train=False)
# define iterator
def data_iter():
for sample_index, (image, label) in enumerate(torch_dataset):
sample_dict = {}
sample_dict["id"] = sample_index
sample_dict["pred"] = np.random.randint(0, 10)
sample_dict["label"] = label
yield sample_dict
# list of metrics
metrics = OrderedDict([
("accuracy", MetricAccuracy(pred="pred", target="label")), # operation_point=None -> no need to convert pred from probabilities to class
])
evaluator = EvaluatorDefault()
results = evaluator.eval(ids=None, data=data_iter(), batch_size=5, metrics=metrics) # ids == None -> run evaluation on all available samples
return results
def example_10() -> Dict:
"""
Test of McNemar's test implementation
"""
# Toy example:
# The correct target (class) labels (optional)
ground_truth = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
# in case ground_truth is not provided (variable set to None),
# the significance test checks the similarity between model predictions only.
# Class labels predicted by model 1
cls_pred1 = [0, 1, 1, 1, 0, 1, 1, 0, 1, 1]
# Class labels predicted by model 2
cls_pred2 = [1, 1, 0, 1, 0, 1, 1, 0, 0, 1]
index = np.arange(0,len(cls_pred1))
# convert to dataframes:
df = pd.DataFrame(columns = ['cls_pred1','cls_pred2','id'])
df["cls_pred1"] = cls_pred1
df["cls_pred2"] = cls_pred2
df["ground_truth"] = ground_truth
df["id"] = index
data = {"data": df}
# list of metrics
metrics = OrderedDict([
("mcnemars_test", MetricMcnemarsTest(pred1="data.cls_pred1", pred2="data.cls_pred2", target="data.ground_truth")),
])
evaluator = EvaluatorDefault()
results = evaluator.eval(ids=None, data=data, metrics=metrics)
return results
def example_11() -> Dict:
"""
Sub group analysis example
"""
# define data
data = {
"id": [0, 1, 2, 3, 4],
"pred": [0, 1, 0, 0, 0],
"gender": ["male", "female", "female", "male", "female"],
"target": [0, 1, 1, 1, 0]
}
data_df = pd.DataFrame(data)
def pre_collect_process(sample_dict: dict) -> dict:
sample_dict["filter"] = sample_dict["gender"] != "male"
return sample_dict
acc = MetricAccuracy(pred="pred", target="target")
metrics = OrderedDict([
("accuracy", Filter(acc, "filter", pre_collect_process_func=pre_collect_process)), # operation_point=None -> no need to convert pred from probabilities to class
])
evaluator = EvaluatorDefault()
results = evaluator.eval(ids=None, data=data_df, metrics=metrics) # ids == None -> run evaluation on all available samples
return results
def example_12() -> Dict:
"""
Example of a metric pipeline which includes a per-sample metric/operation.
First, we apply a simple thresholding operation (per sample "metric"/operation) to generate class predictions.
Then, we apply the accuracy metric on the class predictions vs. targets.
"""
target = np.array([0,0,0,0,0,0,1,1,1,1,1,1,1])
pred = np.array([0.3,0.6,0.2,0.1,0.1,0.9,0.23,0.7,0.9,0.4,0.77,0.3,0.89])
ids = np.arange(0,len(target))
# convert to dataframes:
pred_df = pd.DataFrame(columns = ['pred','id'])
pred_df["pred"] = pred
pred_df["id"] = ids
target_df = pd.DataFrame(columns = ['target', 'id'])
target_df["target"] = target
target_df["id"] = ids
data = {"pred": pred_df, "target": target_df}
metrics = OrderedDict([
("apply_thresh", MetricApplyThresholds(pred="pred.pred", operation_point=0.5)),
("acc", MetricAccuracy(pred="results:metrics.apply_thresh.cls_pred", target="target.target"))
])
# read files
evaluator = EvaluatorDefault()
results = evaluator.eval(ids=None, data=data, metrics=metrics) # ids == None -> run evaluation on all available samples
return results
def example_13() -> Dict:
"""
Test reliability diagram and ECE metrics
We use multi-class input data as in example_7:
One .csv prediction file that requires processing to convert the predictions to numpy array and one target file.
We define a metric pipeline. First the reliability diagram is computed on the original predictions.
Then, we compute the expected calibration error (ECE). Then, we find an optimal "temperature" value to calibrate the logits.
Then, we apply this temperature scale on the original logits, to obtain calibrated ones.
Finally, we compute the ECE and reliability diagram for the calibrated predictions.
"""
# path to prediction and target files
dir_path = pathlib.Path(__file__).parent.resolve()
prediction_filename = os.path.join(dir_path, "inputs/example7_predictions.csv")
targets_filename = os.path.join(dir_path, "inputs/example1_targets.csv") # same target file as in example1
data = {"target": targets_filename, "pred": prediction_filename}
class_names = ["B","LR","IR","HR","VHR"]
num_bins = 10
num_quantiles = None
# pre collect function to change the format
def pre_collect_process(sample_dict: dict) -> dict:
# convert scores to numpy array
task2_pred = []
for cls_name in class_names:
task2_pred.append(sample_dict[f"pred.{cls_name}-score"])
task2_pred_array = np.array(task2_pred)
logit_array = np.log(task2_pred_array) # "make up" logits up to a constant
sample_dict['pred.output'] = task2_pred_array
sample_dict['pred.logits'] = logit_array
return sample_dict
# list of metrics
metrics = OrderedDict([
("reliability", MetricReliabilityDiagram(pred="pred.output", target="target.Task2-target", num_bins=num_bins, num_quantiles=num_quantiles, output_filename='reliability.png', pre_collect_process_func=pre_collect_process)),
("ece", MetricECE(pred="pred.output", target="target.Task2-target", num_bins=num_bins, num_quantiles=num_quantiles, pre_collect_process_func=pre_collect_process)),
("find_temperature", MetricFindTemperature(pred="pred.logits", target="target.Task2-target", pre_collect_process_func=pre_collect_process)),
("apply_temperature", MetricApplyTemperature(pred="pred.logits", temperature="results:metrics.find_temperature", pre_collect_process_func=pre_collect_process)),
("ece_calibrated", MetricECE(pred="results:metrics.apply_temperature", target="target.Task2-target", num_bins=num_bins, num_quantiles=num_quantiles, pre_collect_process_func=pre_collect_process)),
("reliability_calibrated", MetricReliabilityDiagram(pred="results:metrics.apply_temperature", target="target.Task2-target", num_bins=num_bins, num_quantiles=num_quantiles, output_filename='reliability_calibrated.png', pre_collect_process_func=pre_collect_process)),
])
# read files
evaluator = EvaluatorDefault()
results = evaluator.eval(ids=None, data=data, metrics=metrics) # ids == None -> run evaluation on all available samples
return results
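# ---------------------------------------------------------------------------
# Hypothetical runner (not part of the original file): the examples above are
# plain functions, so a minimal way to execute a few of them is shown below.
# Only the examples that build their data inline are called here; the others
# require the bundled inputs/*.csv and inputs/*.pickle files.
if __name__ == "__main__":
    for _example in (example_3, example_5, example_10, example_11, example_12):
        print(_example.__name__, "->", _example())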
|
py | b40ed8cf43fa54d8390cdf60e4bda1ac177d0e1d | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ResourceNavigationLinksOperations(object):
"""ResourceNavigationLinksOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2021_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
subnet_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ResourceNavigationLinksListResult"
"""Gets a list of resource navigation links for a subnet.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param subnet_name: The name of the subnet.
:type subnet_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ResourceNavigationLinksListResult, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2021_02_01.models.ResourceNavigationLinksListResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ResourceNavigationLinksListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ResourceNavigationLinksListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}/ResourceNavigationLinks'} # type: ignore
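# ---------------------------------------------------------------------------
# Illustrative client-side usage (not part of the generated file). The
# credential class, client attribute and result fields below are assumptions;
# consult the azure-identity / azure-mgmt-network documentation for the exact
# API of your SDK version.
#
#   from azure.identity import DefaultAzureCredential
#   from azure.mgmt.network import NetworkManagementClient
#
#   client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
#   links = client.resource_navigation_links.list(
#       resource_group_name="my-rg",
#       virtual_network_name="my-vnet",
#       subnet_name="my-subnet",
#   )
#   for link in links.value or []:
#       print(link.name, link.id)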
|
py | b40edac244115bb30c8caaf9779efb182f7d4a67 | import pytest
from ssg import boolean_expression
from xml.dom import expatbuilder
class PlatformFunction(boolean_expression.Function):
def as_cpe_lang_xml(self):
return '<cpe-lang:logical-test negate="' + ('true' if self.is_not() else 'false') + \
'" operator="' + ('OR' if self.is_or() else 'AND') + '">' + \
''.join([arg.as_cpe_lang_xml() for arg in self.args]) + "</cpe-lang:logical-test>"
class PlatformSymbol(boolean_expression.Symbol):
def as_cpe_lang_xml(self):
return '<cpe-lang:fact-ref name="cpe:/a:' + self.name + ':' + ':'.join([v for (op, v) in self.specs]) + '"/>'
class PlatformAlgebra(boolean_expression.Algebra):
def __init__(self):
super(PlatformAlgebra, self).__init__(symbol_cls=PlatformSymbol, function_cls=PlatformFunction)
@staticmethod
def as_cpe_lang_xml(expr):
s = '<cpe-lang:platform id="' + expr.as_id() + '">' + expr.as_cpe_lang_xml() + '</cpe-lang:platform>'
# A primitive but simple way to pretty-print an XML string
return expatbuilder.parseString(s, False).toprettyxml()
@pytest.fixture
def algebra():
return PlatformAlgebra()
@pytest.fixture
def expression1(algebra):
return algebra.parse(u'(oranges==2.0 | banana) and not ~apple + !pie', simplify=True)
def test_dyn():
alg = boolean_expression.Algebra(symbol_cls=PlatformSymbol, function_cls=PlatformFunction)
exp = alg.parse('not banana and not apple or anything')
assert str(exp) == '(~banana&~apple)|anything'
assert str(exp.simplify()) == 'anything|(~apple&~banana)'
def test_id(expression1):
assert str(expression1.as_id()) == 'apple_and_banana_or_oranges_eq_2.0_or_not_pie'
def test_cnf(algebra, expression1):
assert str(algebra.cnf(expression1)) == '(apple|~pie)&(banana|oranges==2.0|~pie)'
def test_dnf(algebra, expression1):
assert str(algebra.dnf(expression1)) == '(apple&banana)|(apple&oranges==2.0)|~pie'
def test_as_cpe_xml(algebra, expression1):
xml = algebra.as_cpe_lang_xml(algebra.dnf(expression1))
assert xml == """<?xml version="1.0" ?>
<cpe-lang:platform id="apple_and_banana_or_apple_and_oranges_eq_2.0_or_not_pie">
\t<cpe-lang:logical-test negate="false" operator="OR">
\t\t<cpe-lang:logical-test negate="false" operator="AND">
\t\t\t<cpe-lang:fact-ref name="cpe:/a:apple:"/>
\t\t\t<cpe-lang:fact-ref name="cpe:/a:banana:"/>
\t\t</cpe-lang:logical-test>
\t\t<cpe-lang:logical-test negate="false" operator="AND">
\t\t\t<cpe-lang:fact-ref name="cpe:/a:apple:"/>
\t\t\t<cpe-lang:fact-ref name="cpe:/a:oranges:2.0"/>
\t\t</cpe-lang:logical-test>
\t\t<cpe-lang:logical-test negate="true" operator="AND">
\t\t\t<cpe-lang:fact-ref name="cpe:/a:pie:"/>
\t\t</cpe-lang:logical-test>
\t</cpe-lang:logical-test>
</cpe-lang:platform>
"""
def test_underscores_and_dashes_in_name(algebra):
exp = algebra.parse(u'not_s390x_arch and dashed-name')
assert exp(**{'not_s390x_arch': True, 'dashed-name': True})
def test_evaluate_simple_boolean_ops(algebra):
exp = algebra.parse(u'(oranges | banana) and not not apple or !pie')
assert exp(**{'oranges': True, 'apple': True, 'pie': True})
assert not exp(**{'oranges': True, 'apple': False, 'pie': True})
def test_evaluate_simple_version_ops(algebra):
exp = algebra.parse(u'oranges==2')
assert exp(**{'oranges': '2'})
assert exp(**{'oranges': '2.0'})
assert exp(**{'oranges': '2.0.0'})
assert not exp(**{'oranges': '2.0.1'})
assert not exp(**{'oranges': '3.0'})
assert not exp(**{'oranges': True})
def test_evaluate_advanced_version_ops(algebra):
exp = algebra.parse(u'oranges>=1.0,<3.0 and oranges!=2.6')
assert exp(**{'oranges': '2'})
assert exp(**{'oranges': '2.9'})
assert exp(**{'oranges': '2.0.1'})
assert exp(**{'oranges': '2.9.0-rc'})
assert not exp(**{'oranges': '3.0'})
assert not exp(**{'oranges': '0.9.999'})
assert not exp(**{'oranges': '0.9.999_beta_2'})
assert not exp(**{'oranges': '2.6.0'})
|
py | b40edbe57b5a81a44fc24199ef42875b1b781f1f | """
This module is the geometrical part of the ToFu general package
It includes all functions and object classes necessary for tomography
on Tokamaks
"""
# Built-in
import os
import warnings
import copy
import inspect
# Common
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
# ToFu-specific
from tofu import __version__ as __version__
import tofu.pathfile as tfpf
import tofu.utils as utils
# test global import else relative
try:
import tofu.geom._def as _def
import tofu.geom._GG as _GG
import tofu.geom._comp as _comp
import tofu.geom._comp_solidangles as _comp_solidangles
import tofu.geom._plot as _plot
except Exception:
from . import _def as _def
from . import _GG as _GG
from . import _comp as _comp
from . import _comp_solidangles
from . import _plot as _plot
__all__ = [
"PlasmaDomain",
"Ves",
"PFC",
"CoilPF",
"CoilCS",
"Config",
"Rays",
"CamLOS1D",
"CamLOS2D",
]
_arrayorder = "C"
_Clock = False
_Type = "Tor"
# rotate / translate instance
_UPDATE_EXTENT = True
_RETURN_COPY = False
# Parallelization
_NUM_THREADS = 10
_PHITHETAPROJ_NPHI = 2000
_PHITHETAPROJ_NTHETA = 1000
_RES = 0.005
_DREFLECT = {"specular": 0, "diffusive": 1, "ccube": 2}
# Saving
_COMMENT = '#'
"""
###############################################################################
###############################################################################
Ves class and functions
###############################################################################
"""
class Struct(utils.ToFuObject):
""" A class defining a Linear or Toroidal vaccum vessel (i.e. a 2D polygon
representing a cross-section and assumed to be linearly or toroidally
invariant)
A Ves object is mostly defined by a close 2D polygon, which can be
understood as a poloidal cross-section in (R,Z) cylindrical coordinates
if Type='Tor' (toroidal shape) or as a straight cross-section through a
cylinder in (Y,Z) cartesian coordinates if Type='Lin' (linear shape).
Attributes such as the surface, the angular volume (if Type='Tor') or the
center of mass are automatically computed.
The instance is identified thanks to an attribute Id (which is itself a
tofu.ID class object) which contains informations on the specific instance
(name, Type...).
Parameters
----------
Id : str / tfpf.ID
A name string or a pre-built tfpf.ID class to be used to identify this
particular instance, if a string is provided, it is fed to tfpf.ID()
Poly : np.ndarray
An array (2,N) or (N,2) defining the contour of the vacuum vessel in a
cross-section, if not closed, will be closed automatically
Type : str
Flag indicating whether the vessel will be a torus ('Tor') or a linear
device ('Lin')
Lim : list / np.ndarray
Array or list of len=2 indicating the limits of the linear device
volume on the x axis
Sino_RefPt : None / np.ndarray
Array specifying a reference point for computing the sinogram (i.e.
impact parameter), if None automatically set to the (surfacic) center
of mass of the cross-section
Sino_NP : int
Number of points in [0,2*pi] to be used to plot the vessel sinogram
        envelope
Clock : bool
Flag indicating whether the input polygon should be made clockwise
(True) or counter-clockwise (False)
arrayorder: str
Flag indicating whether the attributes of type=np.ndarray (e.g.: Poly)
should be made C-contiguous ('C') or Fortran-contiguous ('F')
Exp : None / str
Flag indicating which experiment the object corresponds to, allowed
values are in [None,'AUG','MISTRAL','JET','ITER','TCV','TS','Misc']
shot : None / int
Shot number from which this Ves is usable (in case of change of
geometry)
SavePath : None / str
If provided, forces the default saving path of the object to the
provided value
Returns
-------
Ves : Ves object
The created Ves object, with all necessary computed attributes and
methods
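    Examples
    --------
    A minimal, purely illustrative instantiation (the polygon, names and
    keyword values below are made up, not taken from any real machine)::
        import numpy as np
        poly = np.array([[1.5, 2.5, 2.5, 1.5],     # R coordinates
                         [-1.0, -1.0, 1.0, 1.0]])  # Z coordinates
        ves = Ves(Poly=poly, Name='Example', Exp='Misc', shot=0, Type='Tor')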
"""
# __metaclass__ = ABCMeta
# Fixed (class-wise) dictionary of default properties
_ddef = {
"Id": {
"shot": 0,
"include": [
"Mod",
"Cls",
"Exp",
"Diag",
"Name",
"shot",
"version",
],
},
"dgeom": {"Type": "Tor", "Lim": [], "arrayorder": "C"},
"dsino": {},
"dphys": {},
"dreflect": {"Type": "specular"},
"dmisc": {"color": "k"},
}
_dplot = {
"cross": {
"Elt": "P",
"dP": {"color": "k", "lw": 2},
"dI": {"color": "k", "ls": "--", "marker": "x", "ms": 8, "mew": 2},
"dBs": {
"color": "b",
"ls": "--",
"marker": "x",
"ms": 8,
"mew": 2,
},
"dBv": {
"color": "g",
"ls": "--",
"marker": "x",
"ms": 8,
"mew": 2,
},
"dVect": {"color": "r", "scale": 10},
},
"hor": {
"Elt": "P",
"dP": {"color": "k", "lw": 2},
"dI": {"color": "k", "ls": "--"},
"dBs": {"color": "b", "ls": "--"},
"dBv": {"color": "g", "ls": "--"},
"Nstep": 50,
},
"3d": {
"Elt": "P",
"dP": {
"color": (0.8, 0.8, 0.8, 1.0),
"rstride": 1,
"cstride": 1,
"linewidth": 0.0,
"antialiased": False,
},
"Lim": None,
"Nstep": 50,
},
}
_DREFLECT_DTYPES = {"specular": 0, "diffusive": 1, "ccube": 2}
    # Does not exist before Python 3.6 !!!
def __init_subclass__(cls, color="k", **kwdargs):
# Python 2
super(Struct, cls).__init_subclass__(**kwdargs)
# Python 3
# super().__init_subclass__(**kwdargs)
cls._ddef = copy.deepcopy(Struct._ddef)
cls._dplot = copy.deepcopy(Struct._dplot)
cls._set_color_ddef(cls._color)
@classmethod
def _set_color_ddef(cls, color):
cls._ddef['dmisc']['color'] = mpl.colors.to_rgba(color)
def __init__(
self,
Poly=None,
Type=None,
Lim=None,
pos=None,
extent=None,
Id=None,
Name=None,
Exp=None,
shot=None,
sino_RefPt=None,
sino_nP=_def.TorNP,
Clock=False,
arrayorder='C',
fromdict=None,
sep=None,
SavePath=os.path.abspath('./'),
SavePath_Include=tfpf.defInclude,
color=None,
nturns=None,
superconducting=None,
active=None,
temperature_nominal=None,
mag_field_max=None,
current_lim_max=None,
):
# Create a dplot at instance level
self._dplot = copy.deepcopy(self.__class__._dplot)
kwdargs = locals()
del kwdargs["self"]
# super()
super(Struct, self).__init__(**kwdargs)
def _reset(self):
# super()
super(Struct, self)._reset()
self._dgeom = dict.fromkeys(self._get_keys_dgeom())
self._dsino = dict.fromkeys(self._get_keys_dsino())
self._dphys = dict.fromkeys(self._get_keys_dphys())
self._dreflect = dict.fromkeys(self._get_keys_dreflect())
self._dmisc = dict.fromkeys(self._get_keys_dmisc())
# self._dplot = copy.deepcopy(self.__class__._ddef['dplot'])
@classmethod
def _checkformat_inputs_Id(
cls,
Id=None,
Name=None,
Exp=None,
shot=None,
Type=None,
include=None,
**kwdargs
):
if Id is not None:
assert isinstance(Id, utils.ID)
Name, Exp, shot, Type = Id.Name, Id.Exp, Id.shot, Id.Type
if shot is None:
shot = cls._ddef["Id"]["shot"]
if Type is None:
Type = cls._ddef["dgeom"]["Type"]
if include is None:
include = cls._ddef["Id"]["include"]
dins = {
"Name": {"var": Name, "cls": str},
"Exp": {"var": Exp, "cls": str},
"shot": {"var": shot, "cls": int},
"Type": {"var": Type, "in": ["Tor", "Lin"]},
"include": {"var": include, "listof": str},
}
dins, err, msg = cls._check_InputsGeneric(dins)
if err:
raise Exception(msg)
kwdargs.update(
{
"Name": Name,
"Exp": Exp,
"shot": shot,
"Type": Type,
"include": include,
}
)
return kwdargs
###########
# Get largs
###########
@staticmethod
def _get_largs_dgeom(sino=True):
largs = [
"Poly",
"Lim",
"pos",
"extent",
"Clock",
"arrayorder",
]
if sino:
lsino = Struct._get_largs_dsino()
largs += ["sino_{0}".format(s) for s in lsino]
return largs
@staticmethod
def _get_largs_dsino():
largs = ["RefPt", "nP"]
return largs
@staticmethod
def _get_largs_dphys():
largs = ["lSymbols"]
return largs
@staticmethod
def _get_largs_dreflect():
largs = ["Types", "coefs_reflect"]
return largs
@staticmethod
def _get_largs_dmisc():
largs = ["color"]
return largs
###########
# Get check and format inputs
###########
@staticmethod
def _checkformat_Lim(Lim, Type="Tor"):
if Lim is None:
Lim = np.array([], dtype=float)
else:
assert hasattr(Lim, "__iter__")
Lim = np.asarray(Lim, dtype=float)
assert Lim.ndim in [1, 2]
if Lim.ndim == 1:
assert Lim.size in [0, 2]
if Lim.size == 2:
Lim = Lim.reshape((2, 1))
else:
if Lim.shape[0] != 2:
Lim = Lim.T
if Type == "Lin":
if not np.all(Lim[0, :] < Lim[1, :]):
msg = "All provided Lim must be increasing !"
raise Exception(msg)
else:
Lim = np.arctan2(np.sin(Lim), np.cos(Lim))
assert np.all(~np.isnan(Lim))
return Lim
@staticmethod
def _checkformat_posextent(pos, extent, Type="Tor"):
lC = [pos is None, extent is None]
if any(lC):
if not all(lC):
msg = ""
raise Exception(msg)
pos = np.array([], dtype=float)
extent = np.array([], dtype=float)
else:
lfloat = [int, float, np.int64, np.float64]
assert type(pos) in lfloat or hasattr(pos, "__iter__")
if type(pos) in lfloat:
pos = np.array([pos], dtype=float)
else:
pos = np.asarray(pos, dtype=float).ravel()
if Type == "Tor":
pos = np.arctan2(np.sin(pos), np.cos(pos))
assert type(extent) in lfloat or hasattr(extent, "__iter__")
if type(extent) in lfloat:
extent = float(extent)
else:
extent = np.asarray(extent, dtype=float).ravel()
assert extent.size == pos.size
if not np.all(extent > 0.0):
msg = "All provided extent values must be >0 !"
raise Exception(msg)
if Type == "Tor":
if not np.all(extent < 2.0 * np.pi):
msg = "Provided extent must be in ]0;2pi[ (radians)!"
raise Exception(msg)
assert np.all(~np.isnan(pos)) and np.all(~np.isnan(extent))
return pos, extent
@staticmethod
def _get_LimFromPosExtent(pos, extent, Type="Tor"):
if pos.size > 0:
Lim = pos[np.newaxis, :] + np.array([[-0.5], [0.5]]) * extent
if Type == "Tor":
Lim = np.arctan2(np.sin(Lim), np.cos(Lim))
else:
Lim = np.asarray([], dtype=float)
return Lim
@staticmethod
def _get_PosExtentFromLim(Lim, Type="Tor"):
if Lim.size > 0:
pos, extent = np.mean(Lim, axis=0), Lim[1, :] - Lim[0, :]
if Type == "Tor":
ind = Lim[0, :] > Lim[1, :]
pos[ind] = pos[ind] + np.pi
extent[ind] = 2.0 * np.pi + extent[ind]
pos = np.arctan2(np.sin(pos), np.cos(pos))
assert np.all(extent > 0.0)
if np.std(extent) < np.mean(extent) * 1.0e-9:
extent = np.mean(extent)
else:
pos = np.array([], dtype=float)
extent = np.array([], dtype=float)
return pos, extent
@classmethod
def _checkformat_inputs_dgeom(
cls,
Poly=None,
Lim=None,
pos=None,
extent=None,
Type=None,
Clock=False,
arrayorder=None,
):
if arrayorder is None:
arrayorder = Struct._ddef["dgeom"]["arrayorder"]
if Type is None:
Type = Struct._ddef["dgeom"]["Type"]
dins = {
"Poly": {
"var": Poly,
"iter2array": float,
"ndim": 2,
"inshape": 2,
},
"Clock": {"var": Clock, "cls": bool},
"arrayorder": {"var": arrayorder, "in": ["C", "F"]},
"Type": {"var": Type, "in": ["Tor", "Lin"]},
}
dins, err, msg = cls._check_InputsGeneric(dins, tab=0)
if err:
raise Exception(msg)
Poly = dins["Poly"]["var"]
if Poly.shape[0] != 2:
Poly = Poly.T
# --------------------------------------
        # Eliminate any double identical point
        # Treat closed polygons separately (no warning)
if np.sum((Poly[:, 0] - Poly[:, -1])**2) < 1.e-12:
Poly = Poly[:, :-1]
# Treat other points
ind = np.sum(np.diff(np.concatenate((Poly, Poly[:, 0:1]), axis=1),
axis=1) ** 2, axis=0) < 1.0e-12
if np.any(ind):
npts = Poly.shape[1]
Poly = Poly[:, ~ind]
msg = (
"%s instance: double identical points in Poly\n" % cls.__name__
)
msg += " => %s points removed\n" % ind.sum()
msg += " => Poly goes from %s to %s points" % (
npts,
Poly.shape[1],
)
warnings.warn(msg)
ind = np.sum(np.diff(np.concatenate((Poly, Poly[:, 0:1]), axis=1),
axis=1) ** 2, axis=0) < 1.0e-12
assert not np.any(ind), ind
lC = [Lim is None, pos is None]
if not any(lC):
msg = "Please provide either Lim xor pos/extent pair!\n"
msg += "Lim should be an array of limits\n"
msg += (
"pos should be an array of centers and extent a float / array"
)
raise Exception(msg)
if all(lC):
pos = np.asarray([], dtype=float)
extent = np.asarray([], dtype=float)
# Lim = np.asarray([],dtype=float)
elif lC[0]:
pos, extent = cls._checkformat_posextent(pos, extent, Type)
# Lim = cls._get_LimFromPosExtent(pos, extent, Type)
else:
Lim = cls._checkformat_Lim(Lim, Type)
pos, extent = cls._get_PosExtentFromLim(Lim, Type)
return Poly, pos, extent, Type, arrayorder
def _checkformat_inputs_dsino(self, RefPt=None, nP=None):
assert type(nP) is int and nP > 0
assert RefPt is None or hasattr(RefPt, "__iter__")
if RefPt is None:
RefPt = self._dgeom["BaryS"]
RefPt = np.asarray(RefPt, dtype=float).flatten()
assert RefPt.size == 2, "RefPt must be of size=2 !"
return RefPt
@staticmethod
def _checkformat_inputs_dphys(lSymbols=None):
if lSymbols is not None:
assert type(lSymbols) in [list, str]
if type(lSymbols) is list:
assert all([type(ss) is str for ss in lSymbols])
else:
lSymbols = [lSymbols]
lSymbols = np.asarray(lSymbols, dtype=str)
return lSymbols
def _checkformat_inputs_dreflect(self, Types=None, coefs_reflect=None):
if Types is None:
Types = self._ddef["dreflect"]["Type"]
assert type(Types) in [str, np.ndarray]
if type(Types) is str:
assert Types in self._DREFLECT_DTYPES.keys()
Types = np.full(
(self.nseg + 2,), self._DREFLECT_DTYPES[Types], dtype=int
)
else:
Types = Types.astype(int).ravel()
assert Types.shape == (self.nseg + 2,)
Typesu = np.unique(Types)
lc = np.array([Typesu == vv
for vv in self._DREFLECT_DTYPES.values()])
assert np.all(np.any(lc, axis=0))
assert coefs_reflect is None
return Types, coefs_reflect
@classmethod
def _checkformat_inputs_dmisc(cls, color=None):
if color is None:
color = mpl.colors.to_rgba(cls._ddef["dmisc"]["color"])
assert mpl.colors.is_color_like(color)
return tuple(np.array(mpl.colors.to_rgba(color), dtype=float))
###########
# Get keys of dictionnaries
###########
@staticmethod
def _get_keys_dgeom():
lk = [
"Poly",
"pos",
"extent",
"noccur",
"Multi",
"nP",
"P1Max",
"P1Min",
"P2Max",
"P2Min",
"BaryP",
"BaryL",
"BaryS",
"BaryV",
"Surf",
"VolAng",
"Vect",
"VIn",
"circ-C",
"circ-r",
"Clock",
"arrayorder",
"move",
"move_param",
"move_kwdargs",
]
return lk
@staticmethod
def _get_keys_dsino():
lk = ["RefPt", "nP", "EnvTheta", "EnvMinMax"]
return lk
@staticmethod
def _get_keys_dphys():
lk = ["lSymbols"]
return lk
@staticmethod
def _get_keys_dreflect():
lk = ["Types", "coefs_reflect"]
return lk
@staticmethod
def _get_keys_dmisc():
lk = ["color"]
return lk
###########
# _init
###########
def _init(
self,
Poly=None,
Type=_Type,
Lim=None,
pos=None,
extent=None,
Clock=_Clock,
arrayorder=_arrayorder,
sino_RefPt=None,
sino_nP=_def.TorNP,
color=None,
**kwdargs
):
allkwds = dict(locals(), **kwdargs)
largs = self._get_largs_dgeom(sino=True)
kwdgeom = self._extract_kwdargs(allkwds, largs)
largs = self._get_largs_dphys()
kwdphys = self._extract_kwdargs(allkwds, largs)
largs = self._get_largs_dreflect()
kwdreflect = self._extract_kwdargs(allkwds, largs)
largs = self._get_largs_dmisc()
kwdmisc = self._extract_kwdargs(allkwds, largs)
self._set_dgeom(**kwdgeom)
self.set_dphys(**kwdphys)
self.set_dreflect(**kwdreflect)
self._set_dmisc(**kwdmisc)
self._dstrip["strip"] = 0
###########
# set dictionaries
###########
def _set_dgeom(
self,
Poly=None,
Lim=None,
pos=None,
extent=None,
Clock=False,
arrayorder="C",
sino_RefPt=None,
sino_nP=_def.TorNP,
sino=True,
):
out = self._checkformat_inputs_dgeom(
Poly=Poly,
Lim=Lim,
pos=pos,
extent=extent,
Type=self.Id.Type,
Clock=Clock,
)
Poly, pos, extent, Type, arrayorder = out
dgeom = _comp._Struct_set_Poly(
Poly,
pos=pos,
extent=extent,
arrayorder=arrayorder,
Type=self.Id.Type,
Clock=Clock,
)
dgeom["arrayorder"] = arrayorder
self._dgeom.update(dgeom)
if sino:
self.set_dsino(sino_RefPt, nP=sino_nP)
def set_dsino(self, RefPt=None, nP=_def.TorNP):
RefPt = self._checkformat_inputs_dsino(RefPt=RefPt, nP=nP)
EnvTheta, EnvMinMax = _GG.Sino_ImpactEnv(
RefPt, self.Poly_closed, NP=nP, Test=False
)
self._dsino = {
"RefPt": RefPt,
"nP": nP,
"EnvTheta": EnvTheta,
"EnvMinMax": EnvMinMax,
}
def set_dphys(self, lSymbols=None):
lSymbols = self._checkformat_inputs_dphys(lSymbols)
self._dphys["lSymbols"] = lSymbols
def set_dreflect(self, Types=None, coefs_reflect=None):
Types, coefs_reflect = self._checkformat_inputs_dreflect(
Types=Types, coefs_reflect=coefs_reflect
)
self._dreflect["Types"] = Types
self._dreflect["coefs_reflect"] = coefs_reflect
def _set_color(self, color=None):
color = self._checkformat_inputs_dmisc(color=color)
self._dmisc["color"] = color
self._dplot["cross"]["dP"]["color"] = color
self._dplot["hor"]["dP"]["color"] = color
self._dplot["3d"]["dP"]["color"] = color
def _set_dmisc(self, color=None):
self._set_color(color)
###########
# strip dictionaries
###########
def _strip_dgeom(
self,
lkeep=["Poly", "pos", "extent", "Clock", "arrayorder",
"move", "move_param", "move_kwdargs"]
):
utils.ToFuObject._strip_dict(self._dgeom, lkeep=lkeep)
def _strip_dsino(self, lkeep=["RefPt", "nP"]):
utils.ToFuObject._strip_dict(self._dsino, lkeep=lkeep)
def _strip_dphys(self, lkeep=["lSymbols"]):
utils.ToFuObject._strip_dict(self._dphys, lkeep=lkeep)
def _strip_dreflect(self, lkeep=["Types", "coefs_reflect"]):
utils.ToFuObject._strip_dict(self._dreflect, lkeep=lkeep)
def _strip_dmisc(self, lkeep=["color"]):
utils.ToFuObject._strip_dict(self._dmisc, lkeep=lkeep)
###########
# rebuild dictionaries
###########
def _rebuild_dgeom(
self,
lkeep=["Poly", "pos", "extent", "Clock", "arrayorder"]
):
reset = utils.ToFuObject._test_Rebuild(self._dgeom, lkeep=lkeep)
if reset:
utils.ToFuObject._check_Fields4Rebuild(
self._dgeom, lkeep=lkeep, dname="dgeom"
)
self._set_dgeom(
self.Poly,
pos=self.pos,
extent=self.extent,
Clock=self.dgeom["Clock"],
arrayorder=self.dgeom["arrayorder"],
sino=False,
)
def _rebuild_dsino(self, lkeep=["RefPt", "nP"]):
reset = utils.ToFuObject._test_Rebuild(self._dsino, lkeep=lkeep)
if reset:
utils.ToFuObject._check_Fields4Rebuild(
self._dsino, lkeep=lkeep, dname="dsino"
)
self.set_dsino(RefPt=self.dsino["RefPt"], nP=self.dsino["nP"])
def _rebuild_dphys(self, lkeep=["lSymbols"]):
reset = utils.ToFuObject._test_Rebuild(self._dphys, lkeep=lkeep)
if reset:
utils.ToFuObject._check_Fields4Rebuild(
self._dphys, lkeep=lkeep, dname="dphys"
)
self.set_dphys(lSymbols=self.dphys["lSymbols"])
def _rebuild_dreflect(self, lkeep=["Types", "coefs_reflect"]):
reset = utils.ToFuObject._test_Rebuild(self._dreflect, lkeep=lkeep)
if reset:
utils.ToFuObject._check_Fields4Rebuild(
self._dreflect, lkeep=lkeep, dname="dreflect"
)
self.set_dreflect(
Types=self.dreflect["Types"],
coefs_reflect=self.dreflect["coefs_reflect"]
)
def _rebuild_dmisc(self, lkeep=["color"]):
reset = utils.ToFuObject._test_Rebuild(self._dmisc, lkeep=lkeep)
if reset:
utils.ToFuObject._check_Fields4Rebuild(
self._dmisc, lkeep=lkeep, dname="dmisc"
)
self._set_dmisc(color=self.dmisc["color"])
###########
# _strip and get/from dict
###########
@classmethod
def _strip_init(cls):
cls._dstrip["allowed"] = [0, 1, 2]
nMax = max(cls._dstrip["allowed"])
doc = """
1: Remove dsino expendables
2: Remove also dgeom, dphys, dreflect and dmisc expendables"""
doc = utils.ToFuObjectBase.strip.__doc__.format(doc, nMax)
cls.strip.__doc__ = doc
def strip(self, strip=0):
# super()
super(Struct, self).strip(strip=strip)
def _strip(self, strip=0):
if strip == 0:
self._rebuild_dgeom()
self._rebuild_dsino()
self._rebuild_dphys()
self._rebuild_dreflect()
self._rebuild_dmisc()
elif strip == 1:
self._strip_dsino()
self._rebuild_dgeom()
self._rebuild_dphys()
self._rebuild_dreflect()
self._rebuild_dmisc()
else:
self._strip_dsino()
self._strip_dgeom()
self._strip_dphys()
self._strip_dreflect()
self._strip_dmisc()
def _to_dict(self):
dout = {
"dgeom": {"dict": self.dgeom, "lexcept": None},
"dsino": {"dict": self.dsino, "lexcept": None},
"dphys": {"dict": self.dphys, "lexcept": None},
"dreflect": {"dict": self.dreflect, "lexcept": None},
"dmisc": {"dict": self.dmisc, "lexcept": None},
"dplot": {"dict": self._dplot, "lexcept": None},
}
return dout
def _from_dict(self, fd):
self._dgeom.update(**fd["dgeom"])
self._dsino.update(**fd["dsino"])
self._dphys.update(**fd["dphys"])
self._dreflect.update(**fd["dreflect"])
self._dmisc.update(**fd["dmisc"])
if "dplot" in fd.keys():
self._dplot.update(**fd["dplot"])
###########
# Properties
###########
@property
def Type(self):
"""Return the type of structure """
return self._Id.Type
@property
def dgeom(self):
return self._dgeom
@property
def Poly(self):
"""Return the polygon defining the structure cross-section"""
return self._dgeom["Poly"]
@property
def Poly_closed(self):
""" Returned the closed polygon """
return np.hstack((self._dgeom["Poly"], self._dgeom["Poly"][:, 0:1]))
@property
def nseg(self):
""" Retunr the number of segmnents constituting the closed polygon """
return self._dgeom["Poly"].shape[1]
@property
def pos(self):
return self._dgeom["pos"]
@property
def extent(self):
if hasattr(self._dgeom["extent"], "__iter__"):
extent = self._dgeom["extent"]
else:
extent = np.full(self._dgeom["pos"].shape, self._dgeom["extent"])
return extent
@property
def noccur(self):
return self._dgeom["noccur"]
@property
def Lim(self):
Lim = self._get_LimFromPosExtent(
self._dgeom["pos"], self._dgeom["extent"], Type=self.Id.Type
)
return Lim.T
@property
def dsino(self):
return self._dsino
@property
def dphys(self):
return self._dphys
@property
def dreflect(self):
return self._dreflect
@property
def dmisc(self):
return self._dmisc
###########
# public methods
###########
def get_summary(
self,
sep=" ",
line="-",
just="l",
table_sep=None,
verb=True,
return_=False,
):
""" Summary description of the object content """
# -----------------------
# Build detailed view
col0 = [
"class",
"Name",
"SaveName",
"nP",
"noccur",
]
ar0 = [
self._Id.Cls,
self._Id.Name,
self._Id.SaveName,
str(self._dgeom["nP"]),
str(self._dgeom["noccur"]),
]
if self._dgeom["move"] is not None:
col0 += ['move', 'param']
ar0 += [self._dgeom["move"],
str(round(self._dgeom["move_param"], ndigits=4))]
col0.append('color')
cstr = ('('
+ ', '.join(['{:4.2}'.format(cc)
for cc in self._dmisc["color"]])
+ ')')
ar0.append(cstr)
return self._get_summary(
[ar0],
[col0],
sep=sep,
line=line,
table_sep=table_sep,
verb=verb,
return_=return_,
)
###########
# public methods for movement
###########
def _update_or_copy(self, poly,
pos=None,
update_extent=None,
return_copy=None, name=None):
if update_extent is None:
update_extent = _UPDATE_EXTENT
if return_copy is None:
return_copy = _RETURN_COPY
extent = self.extent
if update_extent is True:
if extent is not None:
ratio = np.nanmin(poly[0, :]) / np.nanmin(self.Poly[0, :])
extent = extent*ratio
if pos is None:
pos = self.pos
if return_copy is True:
if name is None:
name = self.Id.Name + 'copy'
return self.__class__(Poly=poly,
extent=extent, pos=pos,
sino_RefPt=self._dsino['RefPt'],
sino_nP=self._dsino['nP'],
color=self._dmisc['color'],
Exp=self.Id.Exp,
Name=name,
shot=self.Id.shot,
SavePath=self.Id.SavePath,
Type=self.Id.Type)
else:
self._set_dgeom(poly, pos=pos, extent=extent,
sino_RefPt=self._dsino['RefPt'],
sino_nP=self._dsino['nP'])
def translate_in_cross_section(self, distance=None, direction_rz=None,
update_extent=None,
return_copy=None, name=None):
""" Translate the structure in the poloidal plane """
poly = self._translate_pts_poloidal_plane_2D(
pts_rz=self.Poly,
direction_rz=direction_rz, distance=distance)
return self._update_or_copy(poly, update_extent=update_extent,
return_copy=return_copy, name=name)
def rotate_in_cross_section(self, angle=None, axis_rz=None,
update_extent=True,
return_copy=None, name=None):
""" Rotate the structure in the poloidal plane """
poly = self._rotate_pts_vectors_in_poloidal_plane_2D(
pts_rz=self.Poly,
axis_rz=axis_rz, angle=angle)
return self._update_or_copy(poly, update_extent=update_extent,
return_copy=return_copy, name=name)
def rotate_around_torusaxis(self, angle=None,
return_copy=None, name=None):
""" Rotate the structure in the poloidal plane """
if self.Id.Type != 'Tor':
msg = "Movement only available for Tor configurations!"
raise Exception(msg)
pos = self.pos
if pos is not None:
pos = pos + angle
return self._update_or_copy(self.Poly, pos=pos,
update_extent=False,
return_copy=return_copy, name=name)
def set_move(self, move=None, param=None, **kwdargs):
""" Set the default movement parameters
A default movement can be set for the instance, it can be any of the
pre-implemented movement (rotations or translations)
This default movement is the one that will be called when using
self.move()
Specify the type of movement via the name of the method (passed as a
str to move)
Specify, for the geometry of the instance at the time of defining this
default movement, the current value of the associated movement
parameter (angle / distance). This is used to set an arbitrary
difference for user who want to use absolute position values
The desired incremental movement to be performed when calling self.move
will be deduced by substracting the stored param value to the provided
param value. Just set the current param value to 0 if you don't care
about a custom absolute reference.
kwdargs must be a parameters relevant to the chosen method (axis,
direction...)
e.g.:
self.set_move(move='rotate_around_3daxis',
param=0.,
axis=([0.,0.,0.], [1.,0.,0.]))
self.set_move(move='translate_3d',
param=0.,
direction=[0.,1.,0.])
"""
move, param, kwdargs = self._checkformat_set_move(move, param, kwdargs)
self._dgeom['move'] = move
self._dgeom['move_param'] = param
if isinstance(kwdargs, dict) and len(kwdargs) == 0:
kwdargs = None
self._dgeom['move_kwdargs'] = kwdargs
def move(self, param):
""" Set new position to desired param according to default movement
Can only be used if default movement was set before
See self.set_move()
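Example (a minimal sketch; struct denotes an existing Struct subclass
instance for which self.set_move() was called beforehand):
>>> struct.move(0.1)   # move to param=0.1 (angle or distance)
>>> struct.move(0.)    # move back to the stored reference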
"""
param = self._move(param, dictname='_dgeom')
self._dgeom['move_param'] = param
###########
# Other public methods
###########
def set_color(self, col):
self._set_color(col)
def get_color(self):
return self._dmisc["color"]
def isInside(self, pts, In="(X,Y,Z)"):
""" Return an array of booleans indicating whether each point lies
inside the Struct volume
Tests for each point whether it lies inside the Struct object.
The points coordinates can be provided in 2D or 3D
You must specify which coordinate system is used with 'In' kwdarg.
An array of boolean flags is returned.
Parameters
----------
pts : np.ndarray
(2,N) or (3,N) array, coordinates of the points to be tested
In : str
Flag indicating the coordinate system in which pts are provided
e.g.: '(X,Y,Z)' or '(R,Z)'
Returns
-------
ind : np.ndarray
(N,) array of booleans, True if a point is inside the volume
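Example (a minimal sketch; ves denotes an existing Ves instance and
pts a (3,N) array of cartesian coordinates):
>>> ind = ves.isInside(pts, In='(X,Y,Z)')
>>> pts_inside = pts[:, ind]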
"""
if self._dgeom["noccur"] > 0:
ind = _GG._Ves_isInside(
pts,
self.Poly,
ves_lims=np.ascontiguousarray(self.Lim),
nlim=self._dgeom["noccur"],
ves_type=self.Id.Type,
in_format=In,
test=True,
)
else:
ind = _GG._Ves_isInside(
pts,
self.Poly,
ves_lims=None,
nlim=0,
ves_type=self.Id.Type,
in_format=In,
test=True,
)
return ind
def get_InsideConvexPoly(
self,
RelOff=_def.TorRelOff,
ZLim="Def",
Spline=True,
Splprms=_def.TorSplprms,
NP=_def.TorInsideNP,
Plot=False,
Test=True,
):
""" Return a polygon that is a smaller and smoothed approximation of
Ves.Poly, useful for excluding the divertor region in a Tokamak
For some uses, it can be practical to approximate the polygon defining
the Ves object (which can be non-convex, like with a divertor), by a
simpler, slightly smaller and convex polygon.
This method provides a fast solution for computing such a proxy.
Parameters
----------
RelOff : float
Fraction by which an homothetic polygon should be reduced
(1.-RelOff)*(Poly-BaryS)
ZLim : None / str / tuple
Flag indicating what limits shall be put to the height of the
polygon (used for excluding divertor)
Spline : bool
Flag indicating whether the reduced and truncated polygon shall be
smoothed by 2D b-spline curves
Splprms : list
List of 3 parameters to be used for the smoothing
[weights,smoothness,b-spline order], fed to
scipy.interpolate.splprep()
NP : int
Number of points to be used to define the smoothed polygon
Plot : bool
Flag indicating whether the result shall be plotted for visual
inspection
Test : bool
Flag indicating whether the inputs should be tested for conformity
Returns
-------
Poly : np.ndarray
(2,N) polygon resulting from homothetic transform, truncating and
optional smoothing
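Example (a minimal sketch; ves denotes an existing Ves instance):
>>> poly = ves.get_InsideConvexPoly(RelOff=0.1, Spline=True)
>>> poly.shape[0]
2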
"""
return _comp._Ves_get_InsideConvexPoly(
self.Poly_closed,
self.dgeom["P2Min"],
self.dgeom["P2Max"],
self.dgeom["BaryS"],
RelOff=RelOff,
ZLim=ZLim,
Spline=Spline,
Splprms=Splprms,
NP=NP,
Plot=Plot,
Test=Test,
)
def get_sampleEdge(
self,
res=None,
domain=None,
resMode=None,
offsetIn=0.0,
):
""" Sample the polygon edges, with resolution res
Sample each segment of the 2D polygon
Sampling can be limited to a domain
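Example (a minimal sketch; ves denotes an existing Ves instance, the
returned values are those of _comp._Ves_get_sampleEdge()):
>>> out = ves.get_sampleEdge(res=0.02, resMode='abs')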
"""
if res is None:
res = _RES
return _comp._Ves_get_sampleEdge(
self.Poly_closed,
res=res,
domain=domain,
resMode=resMode,
offsetIn=offsetIn,
VIn=self.dgeom["VIn"],
margin=1.0e-9,
)
def get_sampleCross(
self,
res=None,
domain=None,
resMode=None,
ind=None,
mode="flat",
):
""" Sample, with resolution res, the 2D cross-section
The sampling domain can be limited by domain or ind
Depending on the value of mode, the method returns:
- 'flat': (tuned for integrals computing)
pts : (2,npts) array of points coordinates
dS : (npts,) array of surfaces
ind : (npts,) array of integer indices
reseff: (2,) array of effective resolution (R and Z)
- 'imshow' : (tuned for imshow plotting)
pts : (2,n1,n2) array of points coordinates
x1 : (n1,) vector of unique x1 coordinates
x2 : (n2,) vector of unique x2 coordinates
extent : the extent to be fed to mpl.pyplot.imshow()
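Example (a minimal sketch; ves denotes an existing Ves instance):
>>> pts, dS, ind, reseff = ves.get_sampleCross(res=0.02, mode='flat')
>>> pts, x1, x2, extent = ves.get_sampleCross(res=0.02, mode='imshow')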
"""
if res is None:
res = _RES
args = [
self.Poly_closed,
self.dgeom["P1Min"][0],
self.dgeom["P1Max"][0],
self.dgeom["P2Min"][1],
self.dgeom["P2Max"][1],
]
kwdargs = dict(
res=res, domain=domain, resMode=resMode, ind=ind,
margin=1.0e-9, mode=mode
)
return _comp._Ves_get_sampleCross(*args, **kwdargs)
def get_sampleS(
self,
res=None,
domain=None,
resMode=None,
ind=None,
offsetIn=0.0,
returnas="(X,Y,Z)",
Ind=None,
):
""" Sample, with resolution res, the surface defined by domain or ind
An optional offset perpendicular to the surface can be used
(offsetIn>0 => inwards)
Parameters
----------
res : float / list of 2 floats
Desired resolution of the surfacic sample
float : same resolution for all directions of the sample
list : [dl,dXPhi] where:
dl : res. along polygon contours (cross-section)
dXPhi : res. along axis (toroidal/linear direction)
domain : None / list of 3 lists of 2 floats
Limits of the domain in which the sample should be computed
None : whole surface of the object
list : [D1, D2, D3], where Di is a len()=2 list
(increasing floats, setting limits along coordinate i)
[DR, DZ, DPhi]: in toroidal geometry (self.Id.Type=='Tor')
[DX, DY, DZ] : in linear geometry (self.Id.Type=='Lin')
resMode : str
Flag, specifies if res is absolute or relative to element sizes
'abs' : res is an absolute distance
'rel' : if res=0.1, each polygon segment is divided in 10,
as is the toroidal/linear length
ind : None / np.ndarray of int
If provided, DS is ignored and the sample points corresponding to
the provided indices are returned
Example (assuming obj is a Ves object)
> # We create a 5x5 cm2 sample of the whole surface
> pts, dS, ind, reseff = obj.get_sampleS(0.05)
> # Perform operations, save only the points indices
> # (save space)
> ...
> # Retrieve the points from their indices (requires same res)
> pts2, dS2, ind2, reseff2 = obj.get_sampleS(0.05, ind=ind)
> np.allclose(pts,pts2)
True
offsetIn: float
Offset distance from the actual surface of the object
Inwards if positive
Useful to avoid numerical errors
returnas: str
Flag indicating the coordinate system of returned points
e.g. : '(X,Y,Z)' or '(R,Z,Phi)'
Ind : None / iterable of ints
Array of indices of the entities to be considered
(only when multiple entities, i.e.: self.nLim>1)
Returns
-------
pts : np.ndarray / list of np.ndarrays
Sample points coordinates, as a (3,N) array.
A list is returned if the object has multiple entities
dS : np.ndarray / list of np.ndarrays
The surface (in m^2) associated to each point
ind : np.ndarray / list of np.ndarrays
The index of each point
reseff : np.ndarray / list of np.ndarrays
Effective resolution in both directions after sample computation
"""
if Ind is not None:
assert self.dgeom["Multi"]
if res is None:
res = _RES
kwdargs = dict(
res=res,
domain=domain,
resMode=resMode,
ind=ind,
offsetIn=offsetIn,
VIn=self.dgeom["VIn"],
VType=self.Id.Type,
VLim=np.ascontiguousarray(self.Lim),
nVLim=self.noccur,
returnas=returnas,
margin=1.0e-9,
Multi=self.dgeom["Multi"],
Ind=Ind,
)
return _comp._Ves_get_sampleS(self.Poly, **kwdargs)
def get_sampleV(
self,
res=None,
domain=None,
resMode=None,
ind=None,
returnas="(X,Y,Z)",
algo="new",
num_threads=48
):
""" Sample, with resolution res, the volume defined by domain or ind
The 3D volume is sampled in:
- the whole volume (domain=None and ind=None)
- a sub-domain defined by bounds on each coordinates (domain)
- a pre-computed subdomain stored in indices (ind)
The coordinates of the center of each volume element are returned as
pts in chosen coordinates (returnas)
For a torus, the elementary volume is kept constant, meaning that the
toroidal angular step is decreased as R increases
Parameters
----------
res : float / list of 3 floats
Desired resolution of the volumic sample
float : same resolution for all directions of the sample
list : [dYR, dZ, dXPhi] where:
dYR : res. along in radial / Y direction
dZ : res. along Z direction
dXPhi : res. along axis (toroidal/linear direction)
domain : None / list of 3 lists of 2 floats
Limits of the domain in which the sample should be computed
None : whole surface of the object
list : [D1, D2, D3], where Di is a len()=2 list
(increasing floats, setting limits along coordinate i)
[DR, DZ, DPhi]: in toroidal geometry (self.Id.Type=='Tor')
[DX, DY, DZ] : in linear geometry (self.Id.Type=='Lin')
resMode : str
Flag, specifies if res is absolute or relative to element sizes
'abs' : res is an absolute distance
'rel' : if res=0.1, each polygon segment is divided in 10,
as is the toroidal/linear length
ind : None / np.ndarray of int
If provided, DS is ignored and the sample points corresponding to
the provided indices are returned
Example (assuming obj is a Ves object)
> # We create a 5x5x5 cm3 sample of the whole volume
> pts, dV, ind, reseff = obj.get_sampleV(0.05)
> # Perform operations, save only the points indices
> # (save space)
> ...
> # Retrieve the points from their indices (requires same res)
> pts2, dV2, ind2, reseff2 = obj.get_sampleV(0.05, ind=ind)
> np.allclose(pts,pts2)
True
returnas: str
Flag indicating the coordinate system of returned points
e.g. : '(X,Y,Z)' or '(R,Z,Phi)'
Ind : None / iterable of ints
Array of indices of the entities to be considered
(only when multiple entities, i.e.: self.nLim>1)
Returns
-------
pts : np.ndarray / list of np.ndarrays
Sample points coordinates, as a (3,N) array.
A list is returned if the object has multiple entities
dV : np.ndarray / list of np.ndarrays
The volume (in m^3) associated to each point
ind : np.ndarray / list of np.ndarrays
The index of each point
reseff : np.ndarray / list of np.ndarrays
Effective resolution in both directions after sample computation
"""
args = [
self.Poly,
self.dgeom["P1Min"][0],
self.dgeom["P1Max"][0],
self.dgeom["P2Min"][1],
self.dgeom["P2Max"][1],
]
kwdargs = dict(
res=res,
domain=domain,
resMode=resMode,
ind=ind,
VType=self.Id.Type,
VLim=self.Lim,
returnas=returnas,
margin=1.0e-9,
algo=algo,
num_threads=num_threads
)
return _comp._Ves_get_sampleV(*args, **kwdargs)
def _get_phithetaproj(self, refpt=None):
# Prepare ax
if refpt is None:
msg = "Please provide refpt (R,Z)"
raise Exception(msg)
refpt = np.atleast_1d(np.squeeze(refpt))
assert refpt.shape == (2,)
return _comp._Struct_get_phithetaproj(
refpt, self.Poly, self.Lim, self.noccur
)
def _get_phithetaproj_dist(
self, refpt=None, ntheta=None, nphi=None, theta=None, phi=None
):
# Prepare ax
if refpt is None:
msg = "Please provide refpt (R,Z)"
raise Exception(msg)
refpt = np.atleast_1d(np.squeeze(refpt))
assert refpt.shape == (2,)
# Prepare theta and phi
if theta is None and ntheta is None:
ntheta = _PHITHETAPROJ_NTHETA
lc = [ntheta is None, theta is None]
if np.sum(lc) != 1:
msg = "Please provide either ntheta xor a theta vector !"
raise Exception(msg)
if theta is None:
theta = np.linspace(-np.pi, np.pi, ntheta, endpoint=True)
if phi is None and nphi is None:
nphi = _PHITHETAPROJ_NPHI
lc = [nphi is None, phi is None]
if np.sum(lc) != 1:
msg = "Please provide either nphi xor a phi vector !"
raise Exception(msg)
if phi is None:
phi = np.linspace(-np.pi, np.pi, nphi, endpoint=True)
# Get limits
out = _comp._Struct_get_phithetaproj(
refpt, self.Poly_closed, self.Lim, self.noccur
)
nDphi, Dphi, nDtheta, Dtheta = out
# format inputs
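# (wrap theta and phi into [-pi, pi] via arctan2(sin, cos))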
theta = np.atleast_1d(np.ravel(theta))
theta = np.arctan2(np.sin(theta), np.cos(theta))
phi = np.atleast_1d(np.ravel(phi))
phi = np.arctan2(np.sin(phi), np.cos(phi))
ntheta, nphi = theta.size, phi.size
dist = np.full((ntheta, nphi), np.nan)
# Get dist
dist_theta, indphi = _comp._get_phithetaproj_dist(
self.Poly_closed,
refpt,
Dtheta,
nDtheta,
Dphi,
nDphi,
theta,
phi,
ntheta,
nphi,
self.noccur,
)
dist[:, indphi] = dist_theta[:, None]
return dist, nDphi, Dphi, nDtheta, Dtheta
@staticmethod
def _get_reflections_ufromTypes(u, vperp, Types):
indspec = Types == 0
inddiff = Types == 1
indcorn = Types == 2
# Get reflected unit vectors
u2 = np.full(u.shape, np.nan)
if np.any(np.logical_or(indspec, inddiff)):
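# Tangential unit vector in the plane of incidence, built from the
# double cross product (vperp x u) x vperp = u - (u.vperp)*vperp,
# then normalized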
vpar = np.array(
[
vperp[1, :] * u[2, :] - vperp[2, :] * u[1, :],
vperp[2, :] * u[0, :] - vperp[0, :] * u[2, :],
vperp[0, :] * u[1, :] - vperp[1, :] * u[0, :],
]
)
vpar = np.array(
[
vpar[1, :] * vperp[2, :] - vpar[2, :] * vperp[1, :],
vpar[2, :] * vperp[0, :] - vpar[0, :] * vperp[2, :],
vpar[0, :] * vperp[1, :] - vpar[1, :] * vperp[0, :],
]
)
vpar = vpar / np.sqrt(np.sum(vpar ** 2, axis=0))[None, :]
if np.any(indspec):
# Compute u2 for specular
sca = np.sum(
u[:, indspec] * vperp[:, indspec], axis=0, keepdims=True
)
sca2 = np.sum(
u[:, indspec] * vpar[:, indspec], axis=0, keepdims=True
)
assert np.all(sca <= 0.0) and np.all(sca >= -1.0)
assert np.all(sca2 >= 0.0) and np.all(sca2 <= 1.0)
u2[:, indspec] = (
-sca * vperp[:, indspec] + sca2 * vpar[:, indspec]
)
if np.any(inddiff):
# Compute u2 for diffusive
sca = 2.0 * (np.random.random((1, inddiff.sum())) - 0.5)
u2[:, inddiff] = (
np.sqrt(1.0 - sca**2) * vperp[:, inddiff]
+ sca * vpar[:, inddiff]
)
if np.any(indcorn):
u2[:, indcorn] = -u[:, indcorn]
return u2
def get_reflections(self, indout2, u=None, vperp=None):
""" Return the reflected unit vectors from input unit vectors and vperp
The reflected unit vector depends on the incoming LOS (u),
the local normal unit vector (vperp), and the polygon segment hit
(indout2)
Future releases: dependence on lambda
Also return per-LOS reflection Types (0: specular, 1: diffusive, 2: corner cube)
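Example (a minimal sketch; indout2, u and vperp are assumed to come
from a prior ray-tracing / LOS computation step):
>>> Types, u2 = struct.get_reflections(indout2, u=u, vperp=vperp)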
"""
# Get per-LOS reflection Types and associated indices
Types = self._dreflect["Types"][indout2]
u2 = None
if u is not None:
assert vperp is not None
u2 = self._get_reflections_ufromTypes(u, vperp, Types)
return Types, u2
def plot(
self,
lax=None,
proj="all",
element="PIBsBvV",
dP=None,
dI=_def.TorId,
dBs=_def.TorBsd,
dBv=_def.TorBvd,
dVect=_def.TorVind,
dIHor=_def.TorITord,
dBsHor=_def.TorBsTord,
dBvHor=_def.TorBvTord,
Lim=None,
Nstep=_def.TorNTheta,
dLeg=_def.TorLegd,
indices=True,
draw=True,
fs=None,
wintit=None,
Test=True,
):
""" Plot the polygon defining the vessel, in chosen projection
Generic method for plotting the Ves object
The projections to be plotted, the elements to plot can be specified
Dictionaries of properties for each elements can also be specified
If an ax is not provided a default one is created.
Parameters
----------
lax : list or plt.Axes
The axes to be used for plotting
Provide a list of 2 axes if proj='All'
If None a new figure with axes is created
proj : str
Flag specifying the kind of projection
- 'Cross' : cross-section projection
- 'Hor' : horizontal projection
- 'All' : both
- '3d' : a 3d matplotlib plot
element : str
Flag specifying which elements to plot
Each capital letter corresponds to an element:
* 'P': polygon
* 'I': point used as a reference for impact parameters
* 'Bs': (surfacic) center of mass
* 'Bv': (volumic) center of mass for Tor type
* 'V': vector pointing inward perpendicular to each segment
dP : dict / None
Dict of properties for plotting the polygon
Fed to plt.Axes.plot() or plt.plot_surface() if proj='3d'
dI : dict / None
Dict of properties for plotting point 'I' in Cross-section
projection
dIHor : dict / None
Dict of properties for plotting point 'I' in horizontal projection
dBs : dict / None
Dict of properties for plotting point 'Bs' in Cross-section
projection
dBsHor : dict / None
Dict of properties for plotting point 'Bs' in horizontal projection
dBv : dict / None
Dict of properties for plotting point 'Bv' in Cross-section
projection
dBvHor : dict / None
Dict of properties for plotting point 'Bv' in horizontal projection
dVect : dict / None
Dict of properties for plotting point 'V' in cross-section
projection
dLeg : dict / None
Dict of properties for plotting the legend, fed to plt.legend()
The legend is not plotted if None
Lim : list or tuple
Array of a lower and upper limit of angle (rad.) or length for
plotting the '3d' proj
Nstep : int
Number of points for sampling in ignorable coordinate (toroidal
angle or length)
draw : bool
Flag indicating whether the fig.canvas.draw() shall be called
automatically
a4 : bool
Flag indicating whether the figure should be plotted in a4
dimensions for printing
Test : bool
Flag indicating whether the inputs should be tested for conformity
Returns
-------
lax : list / plt.Axes
Handles of the axes used for plotting (list if several axes were
used)
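Example (a minimal sketch; ves denotes an existing Ves instance):
>>> lax = ves.plot(proj='all', element='P', indices=False)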
"""
kwdargs = locals()
lout = ["self"]
for k in lout:
del kwdargs[k]
return _plot.Struct_plot(self, **kwdargs)
def plot_sino(
self,
ax=None,
Ang=_def.LOSImpAng,
AngUnit=_def.LOSImpAngUnit,
Sketch=True,
dP=None,
dLeg=_def.TorLegd,
draw=True,
fs=None,
wintit=None,
Test=True,
):
""" Plot the sinogram of the vessel polygon, by computing its envelopp
in a cross-section, can also plot a 3D version of it.
The envelop of the polygon is computed using self.Sino_RefPt as a
reference point in projection space,
and plotted using the provided dictionary of properties.
Optionally a small sketch can be included illustrating how the angle
and the impact parameter are defined (if the axes are not provided).
Parameters
----------
proj : str
Flag indicating whether to plot a classic sinogram ('Cross') from
the vessel cross-section (assuming 2D)
or an extended 3D version '3d' of it with additional angle
ax : None or plt.Axes
The axes on which the plot should be done, if None a new figure
and axes is created
Ang : str
Flag indicating which angle to use for the impact parameter, the
angle of the line itself (xi) or of its impact parameter (theta)
AngUnit : str
Flag for the angle units to be displayed, 'rad' for radians or
'deg' for degrees
Sketch : bool
Flag indicating whether a small sketch showing the definitions of
angles 'theta' and 'xi' should be included or not
Pdict : dict
Dictionary of properties used for plotting the polygon envelope,
fed to plt.plot() if proj='Cross' and to plt.plot_surface()
if proj='3d'
LegDict : None or dict
Dictionary of properties used for plotting the legend, fed to
plt.legend(), the legend is not plotted if None
draw : bool
Flag indicating whether the fig.canvas.draw() shall be called
automatically
a4 : bool
Flag indicating whether the figure should be plotted in a4
dimensions for printing
Test : bool
Flag indicating whether the inputs shall be tested for conformity
Returns
-------
ax : plt.Axes
The axes used to plot
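Example (a minimal sketch; ves denotes an existing Ves instance, with
self.set_dsino() called beforehand on an example reference point):
>>> ves.set_dsino([2.4, 0.])
>>> ax = ves.plot_sino(Ang='theta', AngUnit='rad')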
"""
if Test:
msg = "The impact parameters must be set ! (self.set_dsino())"
assert not self.dsino["RefPt"] is None, msg
# Only plot cross sino, from version 1.4.0
dP = _def.TorPFilld if dP is None else dP
ax = _plot.Plot_Impact_PolProjPoly(
self,
ax=ax,
Ang=Ang,
AngUnit=AngUnit,
Sketch=Sketch,
Leg=self.Id.NameLTX,
dP=dP,
dLeg=dLeg,
draw=False,
fs=fs,
wintit=wintit,
Test=Test,
)
# else:
# Pdict = _def.TorP3DFilld if Pdict is None else Pdict
# ax = _plot.Plot_Impact_3DPoly(self, ax=ax, Ang=Ang, AngUnit=AngUnit,
# Pdict=Pdict, dLeg=LegDict, draw=False,
# fs=fs, wintit=wintit, Test=Test)
if draw:
ax.figure.canvas.draw()
return ax
def save_to_txt(
self,
path="./",
name=None,
fmt=None,
include=["Mod", "Cls", "Exp", "Name"],
fmt_num="%.18e",
delimiter=None,
footer="",
encoding=None,
verb=True,
return_pfe=False,
):
""" Save the basic geometrical attributes only (polygon and pos/extent)
The attributes are saved to a txt file with chosen encoding
Useful for easily sharing input with non-python users
BEWARE: doesn't save all attributes !!!
Only saves the basic geometrical inputs !!!
Not equivalent to full tofu save (using self.save()) !!!
The saving convention is:
* data is saved on 2 columns
* The first line gives 2 numbers: nP, no
- nP = Number of points in the polygon
(i.e.: the number of following lines describing the polygon)
- no = Number of occurrences (toroidal if in toroidal geometry)
(i.e.: the nb. of pos/extent lines after the first nP lines)
* Hence, the data is a 2D array of shape (1 + nP + no, 2)
* The two columns of the nP lines describing the polygon represent:
- 1st: R (resp. Y) coordinate of polygon points
- 2nd: Z (resp. Z) coordinate of polygon points
* The two columns of the no lines representing the occurrences are:
- 1st: pos, the tor. angle (resp. X) center of occurrences
- 2nd: extent, the tor. angle (resp. X) extension of occurrences
Hence, the polygon and pos/extent of the object can be retrieved with:
>>> import numpy as np
>>> out = np.loadtxt(filename)
>>> nP, no = out[0,:]
>>> poly = out[1:1+nP,:]
>>> pos, extent = out[1+nP:,0], out[1+nP:,1]
All parameters apart from path, name and include are fed to
numpy.savetxt()
Parameters
----------
path: None / str
The path where to save the file
If None -> self.Id.SavePath
name: None / str
The name to use for the saved file
If None -> self.Id.SaveName(include)
include: list
List of attributes to be used to build the default saving name
Fed to tf.utils.ID.generate_SaveName()
Recommended: ['Mod','Cls','Exp','Name']
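Example (a minimal sketch; ves denotes an existing Ves instance):
>>> pfe = ves.save_to_txt(path='./', fmt='txt', return_pfe=True)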
"""
# Check inputs
if name is None:
name = self.Id.generate_SaveName(include)
if path is None:
path = self.Id.SavePath
path = os.path.abspath(path)
if fmt is None:
fmt = 'txt'
lfmtok = ['txt', 'csv']
if fmt not in lfmtok:
msg = ("The only accpedted formats are: {}".format(lfmtok))
raise Exception(msg)
pfe = os.path.join(path, '{}.{}'.format(name, fmt))
if delimiter is None:
if fmt == 'txt':
delimiter = ' '
else:
delimiter = ', '
nPno = np.r_[self.Poly.shape[1], self.noccur]
poly = self.Poly.T
posext = np.vstack((self.pos, self.extent)).T
out = np.vstack((nPno, poly, posext))
# default standards
newline = "\n"
comments = _COMMENT
header = " Cls = {}\n Exp = {}\n Name = {}".format(
self.__class__.__name__,
self.Id.Exp,
self.Id.Name,
)
kwds = dict(
fmt=fmt_num,
delimiter=delimiter,
newline=newline,
header=header,
footer=footer,
comments=comments,
)
if "encoding" in inspect.signature(np.savetxt).parameters:
kwds["encoding"] = encoding
np.savetxt(pfe, out, **kwds)
if verb:
print("save_to_txt in:\n", pfe)
if return_pfe:
return pfe
@classmethod
def from_txt(
cls,
pfe,
returnas='object',
Exp=None,
Name=None,
shot=None,
Type=None,
color=None,
SavePath=os.path.abspath("./"),
delimiter=None,
comments=None,
warn=None,
):
""" Return the polygon and pos/extent stored in a .txt or .csv file
The file must have been generated by method save_to_txt() (resp. csv)
All arguments apart from pfe and returnas are:
- fed to the relevant tofu.geom.Struct subclass to instantiate it
- used only if returnas = 'object'
Parameters
----------
pfe: str
Unique string containing the path and file name to be read
The file must be formatted as if generated by self.save_to_txt():
- Must contain a (N,2) array
- Line 0 must contain 2 integers:
- npts : the nb. of points of the polygon
- noccur : the nb. of occurrences (=0 if axisymmetric)
- Hence the number of lines should be N = npts + noccur + 1
- Lines 1:npts+1 contain the polygon points
- Lines npts+1: contain positions and extent of each occurrence
returnas: str
Flag indicating whether to return:
- 'dict' : a dictionary of np.ndarrays
- 'object': a tofu.geom.Struct subclass, using the other kwdargs
warn: None / bool
Whether to raise a warning if the formatting of the file is
suspicious
Return
------
obj: tf.geom.Struct subclass instance / dict
Depending on the value of returnas, obj can be:
- An instance of the relevant tofu.geom.Struct subclass
- A dict with keys 'poly', 'pos' and 'extent'
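Example (a minimal sketch; 'firstwall.txt' is a hypothetical file
produced by save_to_txt()):
>>> pfc = PFC.from_txt('firstwall.txt', Exp='Dummy', Name='FirstWall')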
"""
# Check inputs
if returnas not in [object, 'object', dict, 'dict']:
msg = ("Arg returnas must be either:"
+ "\t- 'object': return {} instance\n".format(cls.__name__)
+ "\t- 'dict' : return a dict with polygon, pos and extent")
raise Exception(msg)
if pfe[-4:] not in ['.txt', '.csv']:
msg = ("Only accepts .txt and .csv files (fed to np.loadtxt) !\n"
+ "\t file: {}".format(pfe))
raise Exception(msg)
if warn is None:
warn = True
if delimiter is None:
if pfe.endswith('.csv'):
delimiter = ', '
else:
delimiter = None
if comments is None:
comments = _COMMENT
# Extract polygon from file and check
oo = np.loadtxt(pfe, delimiter=delimiter, comments=comments)
if not (oo.ndim == 2 and oo.shape[1] == 2):
msg = ("The file should contain a (N,2) array !\n"
+ " \t file : {}\n".format(pfe)
+ "\t shape: {0}".format(oo.shape))
raise Exception(msg)
c0 = oo[0, 0] == int(oo[0, 0]) and oo[0, 1] == int(oo[0, 1])
if not c0:
# assume noccur = 0
npts, noccur = oo.shape[0], 0
poly = oo
else:
c1 = oo.shape == (oo[0, 0] + oo[0, 1] + 1, 2)
if c1 is True:
npts, noccur = int(oo[0, 0]), int(oo[0, 1])
poly = oo[1:1 + npts, :]
else:
npts, noccur = oo.shape[0], 0
poly = oo
if warn is True:
sha = (oo[0, 0] + oo[0, 1] + 1, 2)
shastr = '({0} + {1} + 1, 2)'.format(oo[0, 0], oo[0, 1])
msg = ("The shape of the array is not as expected!\n"
+ "\tfile: {}\n".format(pfe)
+ "\tExpected shape: {0} = {1}".format(sha, shastr)
+ "\tObserved shape: {0}".format(oo.shape))
warnings.warn(msg)
if noccur > 0:
pos, extent = oo[1 + npts:, 0], oo[1 + npts:, 1]
else:
pos, extent = None, None
# Try reading Exp and Name if not provided
lc = [ss for ss, vv in [('Exp', Exp), ('Name', Name)] if vv is None]
if len(lc) > 0:
dparam = utils.from_txt_extract_params(pfe, lc)
if 'Exp' in lc:
Exp = dparam['Exp']
if 'Name' in lc:
Name = dparam['Name']
# Return
if returnas in [dict, 'dict']:
return {'Name': Name, 'Exp': Exp, 'Cls': cls,
"poly": poly, "pos": pos, "extent": extent}
else:
SavePath = os.path.abspath(SavePath)
obj = cls(
Name=Name,
Exp=Exp,
shot=shot,
Type=Type,
Poly=poly,
pos=pos,
extent=extent,
SavePath=SavePath,
color=color,
)
return obj
def save_to_imas(
self,
shot=None,
run=None,
refshot=None,
refrun=None,
occ=None,
user=None,
database=None,
version=None,
dryrun=False,
verb=True,
description_2d=None,
unit=0,
):
import tofu.imas2tofu as _tfimas
_tfimas._save_to_imas(
self,
tfversion=__version__,
shot=shot,
run=run,
refshot=refshot,
refrun=refrun,
user=user,
database=database,
version=version,
dryrun=dryrun,
verb=verb,
description_2d=description_2d,
unit=unit,
)
"""
###############################################################################
###############################################################################
Effective Struct subclasses
###############################################################################
"""
class StructIn(Struct):
_color = "k"
_InOut = "in"
@classmethod
def _set_color_ddef(cls, color):
# super
color = mpl.colors.to_rgba(color)
cls._ddef["dmisc"]["color"] = color
cls._dplot["cross"]["dP"]["color"] = cls._ddef["dmisc"]["color"]
cls._dplot["hor"]["dP"]["color"] = cls._ddef["dmisc"]["color"]
cls._dplot["3d"]["dP"]["color"] = cls._ddef["dmisc"]["color"]
@staticmethod
def _checkformat_inputs_dgeom(
Poly=None,
Lim=None,
pos=None,
extent=None,
Type=None,
Clock=False,
arrayorder=None,
):
kwdargs = locals()
# super
out = Struct._checkformat_inputs_dgeom(**kwdargs)
Poly, pos, extent, Type, arrayorder = out
if Type == "Tor":
msg = "StructIn subclasses cannot have noccur>0 if Type='Tor'!"
assert pos.size == 0, msg
return out
class StructOut(Struct):
_color = (0.8, 0.8, 0.8, 0.8)
_InOut = "out"
@classmethod
def _set_color_ddef(cls, color):
color = mpl.colors.to_rgba(color)
cls._ddef["dmisc"]["color"] = color
cls._dplot["cross"]["dP"] = {"fc": color, "ec": "k", "linewidth": 1}
cls._dplot["hor"]["dP"] = {"fc": color, "ec": "none"}
cls._dplot["3d"]["dP"]["color"] = color
def _set_color(self, color=None):
color = self._checkformat_inputs_dmisc(color=color)
self._dmisc["color"] = color
self._dplot["cross"]["dP"]["fc"] = color
self._dplot["hor"]["dP"]["fc"] = color
self._dplot["3d"]["dP"]["color"] = color
def get_sampleV(self, *args, **kwdargs):
msg = "StructOut subclasses cannot use get_sampleV()!"
raise Exception(msg)
class PlasmaDomain(StructIn):
_color = (0.8, 0.8, 0.8, 1.0)
class Ves(StructIn):
_color = "k"
class PFC(StructOut):
_color = (0.8, 0.8, 0.8, 0.8)
class CoilPF(StructOut):
_color = "r"
def __init__(
self,
nturns=None,
superconducting=None,
active=None,
temperature_nominal=None,
mag_field_max=None,
current_lim_max=None,
**kwdargs
):
# super()
super(CoilPF, self).__init__(
nturns=nturns,
superconducting=superconducting,
active=active,
**kwdargs,
)
def _reset(self):
# super()
super(CoilPF, self)._reset()
self._dmag = dict.fromkeys(self._get_keys_dmag())
self._dmag["nI"] = 0
###########
# Get largs
###########
@staticmethod
def _get_largs_dmag():
largs = ["nturns", "superconducting", "active"]
return largs
###########
# Get check and format inputs
###########
@classmethod
def _checkformat_inputs_dmag(
cls,
nturns=None,
superconducting=None,
temperature_nominal=None,
mag_field_max=None,
current_lim_max=None,
active=None,
):
dins = {
"nturns": {"var": nturns, "NoneOrFloatPos": None},
"superconducting": {"var": superconducting, "NoneOrCls": bool},
"active": {"var": active, "NoneOrCls": bool},
"temperature_nominal": {"var": temperature_nominal,
"NoneOrFloatPos": None},
"mag_field_max": {"var": mag_field_max,
"NoneOrFloatPos": None},
"current_lim_max": {"var": current_lim_max,
"NoneOrFloatPos": None},
}
dins, err, msg = cls._check_InputsGeneric(dins, tab=0)
if err:
raise Exception(msg)
return [dins[dd]['var']
for dd in ['nturns', 'superconducting', 'active']]
###########
# Get keys of dictionaries
###########
@staticmethod
def _get_keys_dmag():
lk = ["nturns", "superconducting", "active", "current", "nI"]
return lk
###########
# _init
###########
def _init(self, nturns=None, superconducting=None, active=None, **kwdargs):
super(CoilPF, self)._init(**kwdargs)
self.set_dmag(
nturns=nturns, superconducting=superconducting, active=active
)
###########
# set dictionaries
###########
def set_dmag(self, superconducting=None, nturns=None, active=None):
out = self._checkformat_inputs_dmag(
nturns=nturns, active=active, superconducting=superconducting
)
self._dmag.update(
{
"nturns": out[0],
"superconducting": out[1],
"active": out[2],
}
)
###########
# strip dictionaries
###########
def _strip_dmag(self, lkeep=["nturns", "superconducting", "active"]):
utils.ToFuObject._strip_dict(self._dmag, lkeep=lkeep)
self._dmag["nI"] = 0
###########
# rebuild dictionaries
###########
def _rebuild_dmag(self, lkeep=["nturns", "superconducting", "active"]):
self.set_dmag(
nturns=self.nturns,
active=self._dmag["active"],
superconducting=self._dmag["superconducting"],
)
###########
# _strip and get/from dict
###########
@classmethod
def _strip_init(cls):
cls._dstrip["allowed"] = [0, 1, 2]
nMax = max(cls._dstrip["allowed"])
doc = """
1: Remove dsino and dmag expendables
2: Remove also dgeom, dphys and dmisc expendables"""
doc = utils.ToFuObjectBase.strip.__doc__.format(doc, nMax)
cls.strip.__doc__ = doc
def strip(self, strip=0):
super(CoilPF, self).strip(strip=strip)
def _strip(self, strip=0):
out = super(CoilPF, self)._strip(strip=strip)
if strip == 0:
self._rebuild_dmag()
else:
self._strip_dmag()
return out
def _to_dict(self):
dout = super(CoilPF, self)._to_dict()
dout.update({"dmag": {"dict": self.dmag, "lexcept": None}})
return dout
def _from_dict(self, fd):
super(CoilPF, self)._from_dict(fd)
self._dmag.update(**fd["dmag"])
###########
# Properties
###########
@property
def dmag(self):
return self._dmag
@property
def nturns(self):
return self._dmag["nturns"]
@property
def current(self):
return self._dmag["current"]
###########
# public methods
###########
def get_summary(
self,
sep=" ",
line="-",
just="l",
table_sep=None,
verb=True,
return_=False,
):
""" Summary description of the object content """
# -----------------------
# Build detailed view
col0 = [
"class",
"Name",
"SaveName",
"nP",
"noccur",
"nturns",
"active",
"superconducting",
]
ar0 = [
self._Id.Cls,
self._Id.Name,
self._Id.SaveName,
str(self._dgeom["nP"]),
str(self._dgeom["noccur"]),
str(self._dmag['nturns']),
str(self._dmag['active']),
str(self._dmag['superconducting']),
]
if self._dgeom["move"] is not None:
col0 += ['move', 'param']
ar0 += [self._dgeom["move"],
str(round(self._dgeom["move_param"], ndigits=4))]
col0.append('color')
cstr = ('('
+ ', '.join(['{:4.2}'.format(cc)
for cc in self._dmisc["color"]])
+ ')')
ar0.append(cstr)
return self._get_summary(
[ar0],
[col0],
sep=sep,
line=line,
table_sep=table_sep,
verb=verb,
return_=return_,
)
def set_current(self, current=None):
""" Set the current circulating on the coil (A) """
C0 = current is None
C1 = type(current) in [int, float, np.int64, np.float64]
C2 = type(current) in [list, tuple, np.ndarray]
msg = "Arg current must be None, a float or an 1D np.ndarray !"
assert C0 or C1 or C2, msg
if C1:
current = np.array([current], dtype=float)
elif C2:
current = np.asarray(current, dtype=float).ravel()
self._dmag["current"] = current
if C0:
self._dmag["nI"] = 0
else:
self._dmag["nI"] = current.size
class CoilCS(CoilPF):
pass
"""
###############################################################################
###############################################################################
Overall Config object
###############################################################################
"""
class Config(utils.ToFuObject):
# Special dict subclass with attr-like value access
# Fixed (class-wise) dictionary of default properties
_ddef = {'Id': {'shot': 0, 'Type': 'Tor', 'Exp': 'Dummy',
'include': ['Mod', 'Cls', 'Exp',
'Name', 'shot', 'version']},
'dStruct': {'order': ['PlasmaDomain',
'Ves',
'PFC',
'CoilPF',
'CoilCS'],
'dextraprop': {'visible': True}}}
_lclsstr = ['PlasmaDomain', 'Ves', 'PFC', 'CoilPF', 'CoilCS']
def __init__(self, lStruct=None, Lim=None, dextraprop=None,
Id=None, Name=None, Exp=None, shot=None, Type=None,
SavePath=os.path.abspath('./'),
SavePath_Include=tfpf.defInclude,
fromdict=None, sep=None):
kwdargs = locals()
del kwdargs["self"]
super(Config, self).__init__(**kwdargs)
def _reset(self):
super(Config, self)._reset()
self._dStruct = dict.fromkeys(self._get_keys_dStruct())
self._dextraprop = dict.fromkeys(self._get_keys_dextraprop())
self._dsino = dict.fromkeys(self._get_keys_dsino())
@classmethod
def _checkformat_inputs_Id(
cls,
Id=None,
Name=None,
Type=None,
Exp=None,
shot=None,
include=None,
**kwdargs
):
if Id is not None:
assert isinstance(Id, utils.ID)
Name, shot = Id.Name, Id.shot
if Type is None:
Type = cls._ddef["Id"]["Type"]
if Exp is None:
Exp = cls._ddef["Id"]["Exp"]
if shot is None:
shot = cls._ddef["Id"]["shot"]
if include is None:
include = cls._ddef["Id"]["include"]
dins = {
"Name": {"var": Name, "cls": str},
"Type": {"var": Type, "in": ["Tor", "Lin"]},
"Exp": {"var": Exp, "cls": str},
"shot": {"var": shot, "cls": int},
"include": {"var": include, "listof": str},
}
dins, err, msg = cls._check_InputsGeneric(dins, tab=0)
if err:
raise Exception(msg)
kwdargs.update(
{
"Name": Name,
"Type": Type,
"Exp": Exp,
"include": include,
"shot": shot,
}
)
return kwdargs
###########
# Get largs
###########
@staticmethod
def _get_largs_dStruct():
largs = ["lStruct", "Lim"]
return largs
@staticmethod
def _get_largs_dextraprop():
largs = ["dextraprop"]
return largs
@staticmethod
def _get_largs_dsino():
largs = ["RefPt", "nP"]
return largs
###########
# Get check and format inputs
###########
def _checkformat_inputs_Struct(self, struct, err=True):
msgi = None
c0 = issubclass(struct.__class__, Struct)
if not c0:
msgi = "\n\t- Not a struct subclass: {}".format(type(struct))
else:
c1 = struct.Id.Exp == self.Id.Exp
c2 = struct.Id.Type == self.Id.Type
c3 = struct.Id.Name.isidentifier()
c4 = c3 and '_' not in struct.Id.Name
if not (c0 and c1 and c2 and c3):
msgi = "\n\t- {0} :".format(struct.Id.SaveName)
if not c1:
msgi += "\n\tExp: {0}".format(struct.Id.Exp)
if not c2:
msgi += "\n\tType: {0}".format(struct.Id.Type)
if not c3:
msgi += "\n\tName: {0}".format(struct.Id.Name)
if msgi is not None and err is True:
msg = "Non-conform struct:" + msgi
raise Exception(msg)
return msgi
@staticmethod
def _errmsg_dStruct(lStruct):
ls = ["tf.geom.{}".format(ss)
for ss in ["PlasmaDomain", "Ves", "PFC", "CoilPF", "CoilCS"]]
msg = ("Arg lStruct must be "
+ "a tofu.geom.Struct subclass or list of such!\n"
+ "Valid subclasses include:\n\t- "
+ "\n\t- ".join(ls)
+ "\nYou provided: {}".format(type(lStruct)))
return msg
def _checkformat_inputs_dStruct(self, lStruct=None, Lim=None):
c0 = lStruct is not None
c1 = ((isinstance(lStruct, list) or isinstance(lStruct, tuple))
and all([issubclass(ss.__class__, Struct) for ss in lStruct]))
c2 = issubclass(lStruct.__class__, Struct)
if not (c0 and (c1 or c2)):
raise Exception(self._errmsg_dStruct(lStruct))
if c1 and isinstance(lStruct, tuple):
lStruct = list(lStruct)
elif c2:
lStruct = [lStruct]
msg = ""
for ss in lStruct:
msgi = self._checkformat_inputs_Struct(ss, err=False)
if msgi is not None:
msg += msgi
if msg != "":
msg = "The following objects have non-confrom Id:" + msg
msg += "\n => Expected values are:"
msg += "\n Exp: {0}".format(self.Id.Exp)
msg += "\n Type: {0}".format(self.Id.Type)
msg += "\n Name: a valid identifier, without '_'"
msg += " (check str.isidentifier())"
raise Exception(msg)
if Lim is None:
if not self.Id.Type == "Tor":
msg = "Issue with tf.geom.Config {0}:".format(self.Id.Name)
msg += "\n If input Lim is None, Type should be 'Tor':"
msg += "\n Type = {0}".format(self.Id.Type)
msg += "\n Lim = {0}".format(str(Lim))
raise Exception(msg)
nLim = 0
else:
if not self.Id.Type == "Lin":
msg = "Issue with tf.geom.Config {0}:".format(self.Id.Name)
msg = " If input Lim!=None, Type should be 'Lin':"
msg += "\n Type = {0}".format(self.Id.Type)
msg += "\n Lim = {0}".format(str(Lim))
raise Exception(msg)
Lim = np.asarray(Lim, dtype=float).ravel()
assert Lim.size == 2 and Lim[0] < Lim[1]
Lim = Lim.reshape((1, 2))
nLim = 1
return lStruct, Lim, nLim
def _checkformat_inputs_extraval(
self, extraval, key="", multi=True, size=None
):
lsimple = [bool, float, int, np.int_, np.float_]
C0 = type(extraval) in lsimple
C1 = isinstance(extraval, np.ndarray)
C2 = isinstance(extraval, dict)
if multi:
assert C0 or C1 or C2, str(type(extraval))
else:
assert C0, str(type(extraval))
if multi and C1:
size = self._dStruct["nObj"] if size is None else size
C = extraval.shape == ((self._dStruct["nObj"],))
if not C:
msg = "The value for %s has wrong shape!" % key
msg += "\n Expected: ({0},)".format(self._dStruct["nObj"])
msg += "\n Got: {0}".format(str(extraval.shape))
raise Exception(msg)
C = np.ndarray
elif multi and C2:
msg0 = "If an extra attribute is provided as a dict,"
msg0 += " it should have the same structure as self.dStruct !"
lk = sorted(self._dStruct["lCls"])
# removing empty dict first
extraval = {k0: v0 for k0, v0 in extraval.items() if len(v0) > 0}
c = lk == sorted(extraval.keys())
if not c:
msg = "\nThe value for %s has wrong keys !" % key
msg += "\n expected : " + str(lk)
msg += "\n received : " + str(sorted(extraval.keys()))
raise Exception(msg0 + msg)
c = [isinstance(extraval[k], dict) for k in lk]
if not all(c):
msg = (
"\nThe value for %s shall be a dict of nested dict !" % key
)
msg += "\n "
msg += "\n ".join(
[
"{0} : {1}".format(lk[ii], c[ii])
for ii in range(0, len(lk))
]
)
raise Exception(msg0 + msg)
c = [
(k, sorted(v.keys()), sorted(self.dStruct["dObj"][k].keys()))
for k, v in extraval.items()
]
if not all([cc[1] == cc[2] for cc in c]):
lc = [
(cc[0], str(cc[1]), str(cc[2]))
for cc in c
if cc[1] != cc[2]
]
msg = "\nThe value for %s has wrong nested dict !" % key
msg += "\n - " + "\n - ".join(
[" ".join(cc) for cc in lc]
)
raise Exception(msg0 + msg)
for k in lk:
for kk, v in extraval[k].items():
if not type(v) in lsimple:
msg = "\n type(%s[%s][%s])" % (key, k, kk)
msg += " = %s" % str(type(v))
msg += " should be in %s" % str(lsimple)
raise Exception(msg)
C = dict
elif C0:
C = int
return C
def _checkformat_inputs_dextraprop(self, dextraprop=None):
if dextraprop is None:
dextraprop = self._ddef["dStruct"]["dextraprop"]
if dextraprop is None:
dextraprop = {}
assert isinstance(dextraprop, dict)
dC = {}
for k in dextraprop.keys():
dC[k] = self._checkformat_inputs_extraval(dextraprop[k], key=k)
return dextraprop, dC
def _checkformat_inputs_dsino(self, RefPt=None, nP=None):
assert type(nP) is int and nP > 0
assert hasattr(RefPt, "__iter__")
RefPt = np.asarray(RefPt, dtype=float).flatten()
assert RefPt.size == 2, "RefPt must be of size=2 !"
return RefPt
###########
# Get keys of dictionaries
###########
@staticmethod
def _get_keys_dStruct():
lk = ["dObj", "Lim", "nLim", "nObj", "lorder", "lCls"]
return lk
@staticmethod
def _get_keys_dextraprop():
lk = ["lprop"]
return lk
@staticmethod
def _get_keys_dsino():
lk = ["RefPt", "nP"]
return lk
###########
# _init
###########
def _init(self, lStruct=None, Lim=None, dextraprop=None, **kwdargs):
largs = self._get_largs_dStruct()
kwdStruct = self._extract_kwdargs(locals(), largs)
largs = self._get_largs_dextraprop()
kwdextraprop = self._extract_kwdargs(locals(), largs)
self._set_dStruct(**kwdStruct)
self._set_dextraprop(**kwdextraprop)
self._dynamicattr()
self._dstrip["strip"] = 0
###########
# set dictionaries
###########
def _set_dStruct(self, lStruct=None, Lim=None):
lStruct, Lim, nLim = self._checkformat_inputs_dStruct(
lStruct=lStruct, Lim=Lim
)
self._dStruct.update({"Lim": Lim, "nLim": nLim})
self._set_dlObj(lStruct, din=self._dStruct)
def _set_dextraprop(self, dextraprop=None):
dextraprop, dC = self._checkformat_inputs_dextraprop(dextraprop)
self._dextraprop["lprop"] = sorted(list(dextraprop.keys()))
# Init dict
lCls = self._dStruct["lCls"]
for pp in dextraprop.keys():
dp = "d" + pp
dd = dict.fromkeys(lCls, {})
for k in lCls:
dd[k] = dict.fromkeys(self._dStruct["dObj"][k].keys())
self._dextraprop.update({dp: dd})
# Populate
for pp in dextraprop.keys():
self._set_extraprop(pp, dextraprop[pp])
def add_extraprop(self, key, val):
assert type(key) is str
d, dC = self._checkformat_inputs_dextraprop({key: val})
self._dextraprop["lprop"] = sorted(
set(self.dextraprop["lprop"] + [key])
)
# Init dict
lCls = self._dStruct["lCls"]
dp = "d" + key
dd = dict.fromkeys(lCls, {})
for k in lCls:
dd[k] = dict.fromkeys(self._dStruct["dObj"][k].keys())
self._dextraprop.update({dp: dd})
# Populate
self._set_extraprop(key, val)
self._dynamicattr()
def _set_extraprop(self, pp, val, k0=None, k1=None):
assert not (k0 is None and k1 is not None)
dp = "d" + pp
if k0 is None and k1 is None:
C = self._checkformat_inputs_extraval(val, pp)
if C is int:
for k0 in self._dStruct["dObj"].keys():
for k1 in self._dextraprop[dp][k0].keys():
self._dextraprop[dp][k0][k1] = val
elif C is np.ndarray:
ii = 0
for k in self._dStruct["lorder"]:
k0, k1 = k.split("_")
self._dextraprop[dp][k0][k1] = val[ii]
ii += 1
else:
for k0 in self._dStruct["dObj"].keys():
if k0 in self._dextraprop[dp].keys():
for k1 in self._dextraprop[dp][k0].keys():
self._dextraprop[dp][k0][k1] = val[k0][k1]
elif k1 is None:
size = len(self._dextraprop[dp][k0].keys())
C = self._checkformat_inputs_extraval(val, pp, size=size)
assert C in [int, np.ndarray]
if C is int:
for k1 in self._dextraprop[dp][k0].keys():
self._dextraprop[dp][k0][k1] = val
elif C is np.ndarray:
ii = 0
for k in self._dStruct["lorder"]:
kk, k1 = k.split("_")
if k0 == kk:
self._dextraprop[dp][k0][k1] = val[ii]
ii += 1
else:
C = self._checkformat_inputs_extraval(val, pp, multi=False)
assert C is int
self._dextraprop[dp][k0][k1] = val
def _get_extraprop(self, pp, k0=None, k1=None):
assert not (k0 is None and k1 is not None)
dp = "d" + pp
if k0 is None and k1 is None:
k0, k1 = self._dStruct["lorder"][0].split('_')
val = np.zeros((self._dStruct["nObj"],),
dtype=type(self._dextraprop[dp][k0][k1]))
ii = 0
for k in self._dStruct["lorder"]:
k0, k1 = k.split("_")
val[ii] = self._dextraprop[dp][k0][k1]
ii += 1
elif k1 is None:
k1 = list(self._dStruct["dObj"][k0].keys())[0]
val = np.zeros((len(self._dStruct["dObj"][k0].keys()),),
dtype=type(self._dextraprop[dp][k0][k1]))
ii = 0
for k in self._dStruct["lorder"]:
k, k1 = k.split("_")
if k0 == k:
val[ii] = self._dextraprop[dp][k0][k1]
ii += 1
else:
val = self._dextraprop[dp][k0][k1]
return val
def _set_color(self, k0, val):
for k1 in self._dStruct["dObj"][k0].keys():
self._dStruct["dObj"][k0][k1].set_color(val)
def _dynamicattr(self):
# get (key, val) pairs
# Purge
for k in self._ddef['dStruct']['order']:
if hasattr(self, k):
delattr(self, k)
# exec("del self.{0}".format(k))
# Set
for k in self._dStruct["dObj"].keys():
if len(self._dStruct["dObj"][k]) == 0:
continue
# Find a way to programmatically add dynamic properties to the
# instances, like visible
# In the meantime use simple functions
lset = ["set_%s" % pp for pp in self._dextraprop["lprop"]]
lget = ["get_%s" % pp for pp in self._dextraprop["lprop"]]
if not type(list(self._dStruct["dObj"][k].values())[0]) is str:
for kk in self._dStruct["dObj"][k].keys():
for pp in self._dextraprop["lprop"]:
setattr(
self._dStruct["dObj"][k][kk],
"set_%s" % pp,
lambda val, pk=pp, k0=k, k1=kk: (
self._set_extraprop(pk, val, k0, k1)
),
)
setattr(
self._dStruct["dObj"][k][kk],
"get_%s" % pp,
lambda pk=pp, k0=k, k1=kk: self._get_extraprop(
pk, k0, k1
),
)
dd = utils.Dictattr(
["set_color"] + lset + lget, self._dStruct["dObj"][k]
)
for pp in self._dextraprop["lprop"]:
setattr(
dd,
"set_%s" % pp,
lambda val, pk=pp, k0=k: self._set_extraprop(
pk, val, k0
),
)
setattr(
dd,
"get_%s" % pp,
lambda pk=pp, k0=k: self._get_extraprop(pk, k0),
)
setattr(
dd, "set_color", lambda col, k0=k: self._set_color(k0, col)
)
setattr(self, k, dd)
for pp in self._dextraprop["lprop"]:
setattr(
self,
"set_%s" % pp,
lambda val, pk=pp: self._set_extraprop(pk, val),
)
setattr(self, "get_%s" % pp, lambda pk=pp: self._get_extraprop(pk))
def set_dsino(self, RefPt, nP=_def.TorNP):
RefPt = self._checkformat_inputs_dsino(RefPt=RefPt, nP=nP)
for k in self._dStruct["dObj"].keys():
for kk in self._dStruct["dObj"][k].keys():
self._dStruct["dObj"][k][kk].set_dsino(RefPt=RefPt, nP=nP)
self._dsino = {"RefPt": RefPt, "nP": nP}
###########
# strip dictionaries
###########
def _strip_dStruct(self, strip=0, force=False, verb=True):
if self._dstrip["strip"] == strip:
return
if self._dstrip["strip"] > strip:
# Reload if necessary
if self._dstrip["strip"] == 3:
for k in self._dStruct["dObj"].keys():
for kk in self._dStruct["dObj"][k].keys():
pfe = self._dStruct["dObj"][k][kk]
try:
self._dStruct["dObj"][k][kk] = utils.load(
pfe, verb=verb
)
except Exception as err:
msg = str(err)
msg += "\n k = {0}".format(str(k))
msg += "\n kk = {0}".format(str(kk))
msg += "\n type(pfe) = {0}".format(
str(type(pfe))
)
msg += "\n self._dstrip['strip'] = {0}".format(
self._dstrip["strip"]
)
msg += "\n strip = {0}".format(strip)
raise Exception(msg)
for k in self._dStruct["dObj"].keys():
for kk in self._dStruct["dObj"][k].keys():
self._dStruct["dObj"][k][kk].strip(strip=strip)
lkeep = self._get_keys_dStruct()
reset = utils.ToFuObject._test_Rebuild(self._dStruct, lkeep=lkeep)
if reset:
utils.ToFuObject._check_Fields4Rebuild(
self._dStruct, lkeep=lkeep, dname="dStruct"
)
self._set_dStruct(lStruct=self.lStruct, Lim=self._dStruct["Lim"])
self._dynamicattr()
else:
if strip in [1, 2]:
for k in self._dStruct["lCls"]:
for kk, v in self._dStruct["dObj"][k].items():
self._dStruct["dObj"][k][kk].strip(strip=strip)
lkeep = self._get_keys_dStruct()
elif strip == 3:
for k in self._dStruct["lCls"]:
for kk, v in self._dStruct["dObj"][k].items():
path, name = v.Id.SavePath, v.Id.SaveName
# --- Check !
lf = os.listdir(path)
lf = [
ff
for ff in lf
if all([s in ff for s in [name, ".npz"]])
]
exist = len(lf) == 1
# ----------
pathfile = os.path.join(path, name) + ".npz"
if not exist:
msg = """BEWARE:
You are about to delete the Struct objects
Only the path/name to saved objects will be
kept
But it appears that the following object has no
saved file where specified (obj.Id.SavePath)
Thus it won't be possible to retrieve it
(unless available in the current console)"""
msg += "\n - {0}".format(pathfile)
if force:
warnings.warn(msg)
else:
raise Exception(msg)
self._dStruct["dObj"][k][kk] = pathfile
self._dynamicattr()
lkeep = self._get_keys_dStruct()
utils.ToFuObject._strip_dict(self._dStruct, lkeep=lkeep)
def _strip_dextraprop(self, strip=0):
lkeep = list(self._dextraprop.keys())
utils.ToFuObject._strip_dict(self._dextraprop, lkeep=lkeep)
def _strip_dsino(self, lkeep=["RefPt", "nP"]):
for k in self._dStruct["dObj"].keys():
for kk in self._dStruct["dObj"][k].keys():
self._dStruct["dObj"][k][kk]._strip_dsino(lkeep=lkeep)
###########
# _strip and get/from dict
###########
@classmethod
def _strip_init(cls):
cls._dstrip["allowed"] = [0, 1, 2, 3]
nMax = max(cls._dstrip["allowed"])
doc = """
1: apply strip(1) to objects in self.lStruct
2: apply strip(2) to objects in self.lStruct
3: replace objects in self.lStruct by SavePath + SaveName"""
doc = utils.ToFuObjectBase.strip.__doc__.format(doc, nMax)
cls.strip.__doc__ = doc
def strip(self, strip=0, force=False, verb=True):
# super()
super(Config, self).strip(strip=strip, force=force, verb=verb)
def _strip(self, strip=0, force=False, verb=True):
self._strip_dStruct(strip=strip, force=force, verb=verb)
# self._strip_dextraprop()
# self._strip_dsino()
def _to_dict(self):
dout = {
"dStruct": {"dict": self.dStruct, "lexcept": None},
"dextraprop": {"dict": self._dextraprop, "lexcept": None},
"dsino": {"dict": self.dsino, "lexcept": None},
}
return dout
@classmethod
def _checkformat_fromdict_dStruct(cls, dStruct):
if dStruct["lorder"] is None:
return None
for clsn in dStruct["lorder"]:
c, n = clsn.split("_")
if type(dStruct["dObj"][c][n]) is dict:
dStruct["dObj"][c][n] = eval(c).__call__(
fromdict=dStruct["dObj"][c][n]
)
lC = [
issubclass(dStruct["dObj"][c][n].__class__, Struct),
type(dStruct["dObj"][c][n]) is str,
]
assert any(lC)
def _from_dict(self, fd):
self._checkformat_fromdict_dStruct(fd["dStruct"])
self._dStruct.update(**fd["dStruct"])
self._dextraprop.update(**fd["dextraprop"])
self._dsino.update(**fd["dsino"])
self._dynamicattr()
###########
# SOLEDGE3X
###########
@staticmethod
def _from_SOLEDGE_extract_dict(pfe=None):
# Check input
c0 = (isinstance(pfe, str)
and os.path.isfile(pfe)
and pfe[-4:] == '.mat')
if not c0:
msg = ("Arg pfe must be a valid .mat file!\n"
+ "\t- provided: {}".format(pfe))
raise Exception(msg)
pfe = os.path.abspath(pfe)
# Open file
import scipy.io as scpio
dout = scpio.loadmat(pfe)
# Check conformity of content
lk = ['Nwalls', 'coord', 'type']
lk0 = [kk for kk, vv in dout.items()
if kk == 'walls' and isinstance(vv, np.ndarray)]
c0 = (len(lk0) == 1
and len(dout['walls']) == 1
and sorted(dout['walls'][0].dtype.names) == lk
and len(dout['walls'][0][0]) == len(lk))
if not c0:
msg = ("Non-conform .mat file content from SOLEDGE3X:\n"
+ "\t- file: {}\n".format(pfe)
+ "\t- Expected:\n"
+ "\t\t- a unique matlab structure 'walls' with 3 fields\n"
+ "\t\t\t- Nwalls: int\n"
+ "\t\t\t- coord: 1xn struct\n"
+ "\t\t\t- type: 1xn double\n"
+ "Provided:\n"
+ "\t- variables: {}\n".format(lk0)
+ "\t- 1x{} struct with {} fields".format(
len(dout[lk0[0]]),
len(dout[lk0[0]][0].dtype)))
raise Exception(msg)
out = dout['walls'][0][0]
# Get inside fields 'type', 'Nwalls', 'coord'
di0 = {kk: dout['walls'][0].dtype.names.index(kk) for kk in lk}
dout = {'type': out[di0['type']].ravel(),
'Nwalls': out[di0['Nwalls']][0, 0]}
out = out[di0['coord']][0]
c0 = (sorted(out.dtype.names) == ['Rwall', 'Zwall']
and len(out) == dout['type'].size
and all([len(oo) == 2 for oo in out]))
if not c0:
msg = ("Field {} not conform:\n".format('coord')
+ "\t- expected: 1x{} struct ".format(dout['type'].size)
+ "with fields ('Rwall', 'Zwall')\n"
+ "\t- provided: 1x{} struct ".format(len(out))
+ "with fields {}".format(out.dtype.names))
raise Exception(msg)
dout['coord'] = [np.array([out[ii][0].ravel(), out[ii][1].ravel()])
for ii in range(dout['type'].size)]
return dout
@classmethod
def from_SOLEDGE3X(cls, pfe=None,
Name=None, Exp=None):
# Check input and extract dict from file
dout = cls._from_SOLEDGE_extract_dict(pfe)
npoly = len(dout['type'])
# Prepare lStruct
lcls = [Ves if dout['type'][ii] == 1 else PFC for ii in range(npoly)]
lnames = ['Soledge3X{:02.0f}'.format(ii) for ii in range(npoly)]
lS = [lcls[ii](Poly=dout['coord'][ii],
Type='Tor',
Name=lnames[ii],
pos=None,
Exp=Exp)
for ii in range(npoly)]
return cls(lStruct=lS, Exp=Exp, Name=Name)
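    # Illustrative usage sketch (hypothetical file name, Name and Exp): build
    # a Config from a SOLEDGE3X wall description stored in a .mat file.
    #   >>> conf = Config.from_SOLEDGE3X('walls.mat', Name='FromS3X', Exp='Demo')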
def _to_SOLEDGE3X_get_data(self,
type_extraprop=None,
matlab_version=None, matlab_platform=None):
head = None
# Check inputs
if not (matlab_version is None or isinstance(matlab_version, str)):
msg = ("Arg matlab_version must be provided as a str!\n"
+ "\t- example: '5.0'\n"
+ "\t- provided: {}".format(matlab_version))
raise Exception(msg)
# useful ? to be deprecated ?
if matlab_platform is None:
out = os.popen('which matlab').read()
keypath = os.path.join('bin', 'matlab')
if keypath in out:
path = os.path.join(out[:out.index(keypath)], 'etc')
lf = [ff for ff in os.listdir(path)
if os.path.isdir(os.path.join(path, ff))]
if len(lf) == 1:
matlab_platform = lf[0].upper()
else:
msg = ("Couldn't get matlab_platform from 'which matlab'\n"
+ " => Please provide the matlab platform\n"
+ " Should be in {}/../etc".format(out))
warnings.warn(msg)
if not (matlab_platform is None or isinstance(matlab_platform, str)):
msg = ("Arg matlab_platform must be provided as a str!\n"
+ "\t- example: 'GLNXA64'\n"
+ "\t- provided: {}".format(matlab_platform))
raise Exception(msg)
if matlab_version is not None and matlab_platform is not None:
import datetime as dtm
now = dtm.datetime.now().strftime('%a %b %d %H:%M:%S %Y')
head = ('MATLAB {} MAT-file, '.format(matlab_version)
+ 'Platform: {}, '.format(matlab_platform)
+ 'Created on: {}'.format(now))
# Build walls
nwall = np.array([[self.nStruct]], dtype=int)
# typ (from extraprop if any, else from Ves / Struct)
if type_extraprop is not None:
typ = np.array([self._get_extraprop(type_extraprop)], dtype=int)
else:
typ = np.array([[1 if ss._InOut == 'in' else -1
for ss in self.lStruct]], dtype=int)
# Get coord
coord = np.array([np.array([
(ss.Poly[0:1, :].T, ss.Poly[1:2, :].T) for ss in self.lStruct],
dtype=[('Rwall', 'O'), ('Zwall', 'O')])],
dtype=[('Rwall', 'O'), ('Zwall', 'O')])
# put together
dout = {'walls': np.array([[
(nwall, coord, typ)]],
dtype=[('Nwalls', 'O'), ('coord', 'O'), ('type', 'O')])}
        # Optionally set header and version
if head is not None:
dout['__header__'] = head.encode()
return dout
def to_SOLEDGE3X(self, name=None, path=None, verb=None,
type_extraprop=None,
matlab_version=None, matlab_platform=None):
# Check inputs
if verb is None:
verb = True
if name is None:
name = self.Id.SaveName
if not isinstance(name, str):
msg = ("Arg name must be a str!\n"
+ "\t- provided: {}".format(name))
raise Exception(msg)
if name[-4:] != '.mat':
name = name + '.mat'
if path is None:
path = os.path.abspath('.')
if not os.path.isdir(path):
msg = ("Provided path is not a valid dir!\n"
+ "\t- path: {}".format(path))
raise Exception(msg)
path = os.path.abspath(path)
pfe = os.path.join(path, name)
# Get data in proper shape
dout = self._to_SOLEDGE3X_get_data(type_extraprop=type_extraprop,
matlab_version=matlab_version,
matlab_platform=matlab_platform)
# save
import scipy.io as scpio
scpio.savemat(pfe, dout)
if verb is True:
print("Saved in:\n\t{}".format(pfe))
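    # Illustrative usage sketch (hypothetical file name and path): export the
    # Config back to a SOLEDGE3X-compatible .mat file.
    #   >>> config.to_SOLEDGE3X(name='walls_out.mat', path='/tmp', verb=True)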
###########
# Properties
###########
@property
def dStruct(self):
return self._dStruct
@property
def nStruct(self):
return self._dStruct["nObj"]
@property
def lStruct(self):
""" Return the list of Struct that was used for creation
As tofu objects or SavePath+SaveNames (according to strip status)
"""
lStruct = []
for k in self._dStruct["lorder"]:
k0, k1 = k.split("_")
lStruct.append(self._dStruct["dObj"][k0][k1])
return lStruct
@property
def lStructIn(self):
""" Return the list of StructIn contained in self.lStruct
As tofu objects or SavePath+SaveNames (according to strip status)
"""
lStruct = []
for k in self._dStruct["lorder"]:
k0, k1 = k.split("_")
if type(self._dStruct["dObj"][k0][k1]) is str:
if any(
[
ss in self._dStruct["dObj"][k0][k1]
for ss in ["Ves", "PlasmaDomain"]
]
):
lStruct.append(self._dStruct["dObj"][k0][k1])
elif issubclass(self._dStruct["dObj"][k0][k1].__class__, StructIn):
lStruct.append(self._dStruct["dObj"][k0][k1])
return lStruct
@property
def Lim(self):
return self._dStruct["Lim"]
@property
def nLim(self):
return self._dStruct["nLim"]
@property
def dextraprop(self):
return self._dextraprop
@property
def dsino(self):
return self._dsino
###########
# public methods
###########
def add_Struct(
self,
struct=None,
Cls=None,
Name=None,
Poly=None,
shot=None,
Lim=None,
Type=None,
dextraprop=None,
):
""" Add a Struct instance to the config
An already existing Struct subclass instance can be added
Or it will be created from the (Cls,Name,Poly,Lim) keyword args
"""
# Check inputs
C0a = struct is None
C1a = all([ss is None for ss in [Cls, Name, Poly, Lim, Type]])
if not np.sum([C0a, C1a]) == 1:
msg = "Provide either:"
msg += "\n - struct: a Struct subclass instance"
msg += "\n - the keyword args to create one"
msg += "\n (Cls,Name,Poly,Lim,Type)\n"
            msg += "\n You provided:"
msg += "\n - struct: {0}, {1}".format(str(struct), type(struct))
raise Exception(msg)
# Create struct if not provided
if C0a:
if not (type(Cls) is str or issubclass(Cls, Struct)):
msg = "Cls must be either:"
msg += "\n - a Struct subclass"
msg += "\n - the str Name of it (e.g.: 'PFC','CoilPF',...)"
raise Exception(msg)
if type(Cls) is str:
Cls = eval("%s" % Cls)
# Preformat Lim and Type
if Lim is None:
Lim = self.Lim
if Type is None:
Type = self.Id.Type
# Create instance
struct = Cls(
Poly=Poly,
Name=Name,
Lim=Lim,
Type=Type,
shot=shot,
Exp=self.Id.Exp,
)
C0b = issubclass(struct.__class__, Struct)
assert C0b, "struct must be a Struct subclass instance !"
# Prepare dextraprop
dextra = self.dextraprop
lk = sorted([k[1:] for k in dextra.keys() if k != "lprop"])
if dextraprop is None:
if dextra not in [None, {}]:
msg = (
"The current Config instance has the following extraprop:"
)
msg += "\n - " + "\n - ".join(lk)
msg += "\n => Please specify a dextraprop for struct !"
msg += "\n (using the same keys !)"
raise Exception(msg)
else:
assert isinstance(dextraprop, dict)
assert all([k in lk for k in dextraprop.keys()])
assert all([k in dextraprop.keys() for k in lk])
dx = {}
for k in lk:
dk = "d" + k
dx[k] = {}
for k0 in dextra[dk].keys():
dx[k][k0] = {}
for k1 in dextra[dk][k0].keys():
dx[k][k0][k1] = dextra[dk][k0][k1]
if struct.Id.Cls not in dx[k].keys():
dx[k][struct.Id.Cls] = {struct.Id.Name: dextraprop[k]}
else:
dx[k][struct.Id.Cls][struct.Id.Name] = dextraprop[k]
# Set self.lStruct
lS = self.lStruct + [struct]
self._init(lStruct=lS, Lim=self.Lim, dextraprop=dx)
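    # Illustrative usage sketch (hypothetical polygon and name): add a PFC from
    # a (2, N) polygon array; a dextraprop dict is required if the Config
    # already defines extra properties.
    #   >>> import numpy as np
    #   >>> poly = np.array([[2.0, 3.0, 3.0, 2.0], [-1.0, -1.0, 1.0, 1.0]])
    #   >>> config.add_Struct(Cls='PFC', Name='NewTile', Poly=poly)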
def remove_Struct(self, Cls=None, Name=None):
# Check inputs
assert type(Cls) is str
assert type(Name) is str
C0 = Cls in self._dStruct["lCls"]
if not C0:
msg = "The Cls must be a class existing in self.dStruct['lCls']:"
msg += "\n [{0}]".format(", ".join(self._dStruct["lCls"]))
raise Exception(msg)
C0 = Name in self._dStruct["dObj"][Cls].keys()
if not C0:
ln = self.dStruct["dObj"][Cls].keys()
msg = "The Name must match an instance in"
msg += " self.dStruct['dObj'][{0}].keys():".format(Cls)
msg += "\n [{0}]".format(", ".join(ln))
raise Exception(msg)
# Create list
lS = self.lStruct
if not Cls + "_" + Name in self._dStruct["lorder"]:
msg = "The desired instance is not in self.dStruct['lorder'] !"
lord = ", ".join(self.dStruct["lorder"])
msg += "\n lorder = [{0}]".format(lord)
msg += "\n Cls_Name = {0}".format(Cls + "_" + Name)
raise Exception(msg)
ind = self._dStruct["lorder"].index(Cls + "_" + Name)
del lS[ind]
# Important : also remove from dict ! (no reset() !)
del self._dStruct["dObj"][Cls][Name]
# Prepare dextraprop
dextra = self.dextraprop
dx = {}
for k in dextra.keys():
if k == "lprop":
continue
dx[k[1:]] = {}
for cc in dextra[k].keys():
dx[k[1:]][cc] = dict(dextra[k][cc])
del dx[k[1:]][Cls][Name]
# remove Cls if empty
if len(dx[k[1:]][Cls]) == 0:
del dx[k[1:]][Cls]
# remove empty parts
if len(dx[k[1:]]) == 0:
del dx[k[1:]]
self._init(lStruct=lS, Lim=self.Lim, dextraprop=dx)
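    # Illustrative usage sketch (hypothetical names): remove a Struct by class
    # and Name; both must already exist in self.dStruct.
    #   >>> config.remove_Struct(Cls='PFC', Name='NewTile')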
def get_color(self):
""" Return the array of rgba colors (same order as lStruct) """
col = np.full((self._dStruct["nObj"], 4), np.nan)
ii = 0
for k in self._dStruct["lorder"]:
k0, k1 = k.split("_")
col[ii, :] = self._dStruct["dObj"][k0][k1].get_color()
ii += 1
return col
def set_colors_random(self, cmap=plt.cm.Accent):
ii = 0
ncol = len(cmap.colors)
for k in self._dStruct["lorder"]:
k0, k1 = k.split("_")
if self._dStruct["dObj"][k0][k1]._InOut == "in":
col = "k"
elif "lh" in k1.lower():
col = (1.0, 0.0, 0.0)
elif "ic" in k1.lower():
col = (1.0, 0.5, 0.5)
elif "div" in k1.lower():
col = (0.0, 1.0, 0.0)
elif "bump" in k1.lower():
col = (0.0, 0.0, 1.0)
else:
col = cmap.colors[ii % ncol]
ii += 1
self._dStruct["dObj"][k0][k1].set_color(col)
def get_summary(
self,
sep=" ",
line="-",
just="l",
table_sep=None,
verb=True,
return_=False,
):
""" Summary description of the object content """
# -----------------------
# Build overview
col0 = ["tot. Struct", "tot. occur", "tot. points"]
noccur = np.sum([max(1, ss._dgeom["noccur"]) for ss in self.lStruct])
npts = np.sum([ss._dgeom["nP"] for ss in self.lStruct])
ar0 = [(self.nStruct, noccur, npts)]
# -----------------------
# Build detailed view
col1 = [
"class",
"Name",
"SaveName",
"nP",
"noccur",
"move",
"color",
] + self._dextraprop["lprop"]
d = self._dStruct["dObj"]
ar1 = []
for k in self._ddef["dStruct"]["order"]:
if k not in d.keys():
continue
otemp = self._dStruct["dObj"][k]
for kk in d[k].keys():
lu = [
k,
otemp[kk]._Id._dall["Name"],
otemp[kk]._Id._dall["SaveName"],
str(otemp[kk]._dgeom["nP"]),
str(otemp[kk]._dgeom["noccur"]),
str(otemp[kk]._dgeom["move"]),
('(' + ', '.join(['{:4.2}'.format(cc)
for cc in otemp[kk]._dmisc["color"]])
+ ')'),
]
for pp in self._dextraprop["lprop"]:
lu.append(self._dextraprop["d" + pp][k][kk])
ar1.append(lu)
return self._get_summary(
[ar0, ar1],
[col0, col1],
sep=sep,
line=line,
table_sep=table_sep,
verb=verb,
return_=return_,
)
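    # Illustrative usage sketch: print the overview and per-Struct tables of an
    # existing Config instance `config` (assumed to exist).
    #   >>> config.get_summary(verb=True)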
def get_reflections(self, indout, u=None, vperp=None):
# Get global Types array
lS = self.lStruct
# Version only usable when indout returns npts+1 and npts+2 instead of
# -1 and -2
# ls = [ss._dreflect['Types'].size for ss in lS]
# Types = np.empty((len(lS), np.max(ls)), dtype=int)
# for ii,ss in enumerate(lS):
# Types[ii,:ls[ii]] = ss._dreflect['Types']
# # Deduce Types
# Types = Types[indout[0,:], indout[2,:]]
iu = np.unique(indout[0, :])
Types = np.empty((indout.shape[1],), dtype=int)
for ii in iu:
ind = indout[0, :] == ii
Types[ind] = lS[ii]._dreflect["Types"][indout[2, ind]]
# Deduce u2
u2 = None
if u is not None:
assert vperp is not None
u2 = Struct._get_reflections_ufromTypes(u, vperp, Types)
return Types, u2
def _get_phithetaproj_dist(
self, refpt=None, ntheta=None, nphi=None, theta=None, phi=None
):
# Prepare repf
if refpt is None:
refpt = self.dsino["RefPt"]
if refpt is None:
msg = "Please provide refpt (R,Z)"
raise Exception(msg)
refpt = np.atleast_1d(np.squeeze(refpt))
assert refpt.shape == (2,)
# Prepare theta and phi
if theta is None and ntheta is None:
ntheta = _PHITHETAPROJ_NTHETA
lc = [ntheta is None, theta is None]
if np.sum(lc) != 1:
msg = "Please provide either ntheta xor a theta vector !"
raise Exception(msg)
if theta is None:
theta = np.linspace(-np.pi, np.pi, ntheta, endpoint=True)
if phi is None and nphi is None:
nphi = _PHITHETAPROJ_NPHI
lc = [nphi is None, phi is None]
if np.sum(lc) != 1:
msg = "Please provide either nphi xor a phi vector !"
raise Exception(msg)
if phi is None:
phi = np.linspace(-np.pi, np.pi, nphi, endpoint=True)
# format inputs
theta = np.atleast_1d(np.ravel(theta))
theta = np.arctan2(np.sin(theta), np.cos(theta))
phi = np.atleast_1d(np.ravel(phi))
phi = np.arctan2(np.sin(phi), np.cos(phi))
ntheta, nphi = theta.size, phi.size
# Get limits
lS = self.lStruct
dist = np.full((ntheta, nphi), np.inf)
indStruct = np.zeros((ntheta, nphi), dtype=int)
for ii in range(0, self.nStruct):
out = _comp._Struct_get_phithetaproj(
refpt, lS[ii].Poly_closed, lS[ii].Lim, lS[ii].noccur
)
nDphi, Dphi, nDtheta, Dtheta = out
# Get dist
dist_theta, indphi = _comp._get_phithetaproj_dist(
lS[ii].Poly_closed,
refpt,
Dtheta,
nDtheta,
Dphi,
nDphi,
theta,
phi,
ntheta,
nphi,
lS[ii].noccur,
)
ind = np.zeros((ntheta, nphi), dtype=bool)
indok = ~np.isnan(dist_theta)
ind[indok, :] = indphi[None, :]
ind[ind] = (
dist_theta[indok, None] < dist[indok, :][:, indphi]
).ravel()
dist[ind] = (np.broadcast_to(dist_theta, (nphi, ntheta)).T)[ind]
indStruct[ind] = ii
dist[np.isinf(dist)] = np.nan
return dist, indStruct
def plot_phithetaproj_dist(self, refpt=None, ntheta=None, nphi=None,
theta=None, phi=None, cmap=None, invertx=None,
ax=None, fs=None, tit=None, wintit=None,
draw=None):
dist, indStruct = self._get_phithetaproj_dist(refpt=refpt,
ntheta=ntheta, nphi=nphi,
theta=theta, phi=phi)
return _plot.Config_phithetaproj_dist(self, refpt, dist, indStruct,
cmap=cmap, ax=ax, fs=fs,
tit=tit, wintit=wintit,
invertx=invertx, draw=draw)
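    # Illustrative usage sketch (hypothetical reference point): map the distance
    # to the first wall as seen from refpt = (R, Z) in (phi, theta) projection.
    #   >>> out = config.plot_phithetaproj_dist(refpt=[2.4, 0.], ntheta=90, nphi=180)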
def isInside(self, pts, In="(X,Y,Z)", log="any"):
""" Return a 2D array of bool
Equivalent to applying isInside to each Struct
Check self.lStruct[0].isInside? for details
Arg log determines how Struct with multiple Limits are treated
- 'all' : True only if pts belong to all elements
- 'any' : True if pts belong to any element
"""
msg = "Arg pts must be a 1D or 2D np.ndarray !"
assert isinstance(pts, np.ndarray) and pts.ndim in [1, 2], msg
msg = "Arg log must be in ['any','all']"
assert log in ["any", "all"], msg
if pts.ndim == 1:
msg = "Arg pts must contain the coordinates of a point !"
assert pts.size in [2, 3], msg
pts = pts.reshape((pts.size, 1)).astype(float)
else:
msg = "Arg pts must contain the coordinates of points !"
assert pts.shape[0] in [2, 3], pts
nP = pts.shape[1]
ind = np.zeros((self._dStruct["nObj"], nP), dtype=bool)
lStruct = self.lStruct
for ii in range(0, self._dStruct["nObj"]):
if lStruct[ii].noccur > 0:
indi = _GG._Ves_isInside(
np.ascontiguousarray(pts),
np.ascontiguousarray(lStruct[ii].Poly),
ves_lims=np.ascontiguousarray(lStruct[ii].Lim),
nlim=lStruct[ii].noccur,
ves_type=lStruct[ii].Id.Type,
in_format=In,
test=True,
)
else:
indi = _GG._Ves_isInside(
np.ascontiguousarray(pts),
np.ascontiguousarray(lStruct[ii].Poly),
ves_lims=None,
nlim=0,
ves_type=lStruct[ii].Id.Type,
in_format=In,
test=True,
)
if lStruct[ii].noccur > 1:
if log == "any":
indi = np.any(indi, axis=0)
else:
indi = np.all(indi, axis=0)
ind[ii, :] = indi
return ind
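    # Illustrative usage sketch (hypothetical points): test which structures
    # contain each point; the result is a (nStruct, nPts) boolean array.
    #   >>> import numpy as np
    #   >>> pts = np.array([[2.5, 10.0], [0.0, 0.0], [0.1, 0.0]])   # (X, Y, Z)
    #   >>> ind = config.isInside(pts, In='(X,Y,Z)', log='any')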
# TBF
def fdistfromwall(self, r, z, phi):
""" Return a callable (function) for detecting trajectory collisions
with wall
The function is continuous wrt time and space
It takes into account all Struct in Config, including non-axisymmetric
ones
        It is designed for iterative root-finding algorithms and is thus called
for a unique position
"""
# LM: ... function NOT finished (TBF)
# LM: ... since we are in devel this is too dangerous to keep
# LM: ... commenting and raising warning
# isin = [ss._InOut == "in" for ss in self.lStruct]
# inside = self.isInside(np.r_[r, z, phi], In="(R,Z,Phi)", log="any")
# distRZ, indStruct = self._get_phithetaproj_dist(
# refpt=np.r_[r, z], ntheta=ntheta, nphi=nphi, theta=theta, phi=phi
# )
# lSlim = [ss for ss in self.lStruct if ss.noccur > 0]
# distPhi = r * np.min([np.min(np.abs(phi - ss.Lim)) for ss in lSlim])
# if inside:
# return min(distRZ, distPhi)
# else:
# return -min(distRZ, distPhi)
warnings.warn("FUNCTION NOT DEFINED")
return
# Method handling reflections
def _reflect_Types(self, indout=None, Type=None, nRays=None):
""" Return an array indicating the Type of reflection for each LOS
Return a (nRays,) np.ndarray of int indices, each index corresponds to:
- 0: specular reflections
- 1: diffusive reflections
- 2: ccube reflections (corner cube)
If indout is provided, the Types are computed according to the
information stored in each corresponding Struct
If Type is provided, the Type is forced (user-defined) for all LOS
"""
if Type is not None:
assert Type in ["specular", "diffusive", "ccube"]
Types = np.full((nRays,), _DREFLECT[Type], dtype=int)
else:
Types = self.get_reflections(indout)[0]
return Types
def _reflect_geom(self, u=None, vperp=None, indout=None, Type=None):
assert u.shape == vperp.shape and u.shape[0] == 3
if indout is not None:
assert indout.shape == (3, u.shape[1])
        # Get Types of reflection for each Ray
Types = self._reflect_Types(indout=indout, Type=Type, nRays=u.shape[1])
# Deduce u2
u2 = Struct._get_reflections_ufromTypes(u, vperp, Types)
return u2, Types
def plot(
self,
lax=None,
proj=None,
element="P",
dLeg=_def.TorLegd,
indices=False,
Lim=None,
Nstep=None,
draw=True,
fs=None,
wintit=None,
tit=None,
Test=True,
):
assert tit in [None, False] or isinstance(tit, str)
vis = self.get_visible()
lStruct, lS = self.lStruct, []
for ii in range(0, self._dStruct["nObj"]):
if vis[ii]:
lS.append(lStruct[ii])
if tit is None:
tit = self.Id.Name
lax = _plot.Struct_plot(
lS,
lax=lax,
proj=proj,
element=element,
Lim=Lim,
Nstep=Nstep,
dLeg=dLeg,
draw=draw,
fs=fs,
indices=indices,
wintit=wintit,
tit=tit,
Test=Test,
)
return lax
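    # Illustrative usage sketch: plot all visible structures of an existing
    # Config instance `config` (assumed), with their indices annotated.
    #   >>> lax = config.plot(element='P', indices=True)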
def plot_sino(
self,
ax=None,
dP=None,
Ang=_def.LOSImpAng,
AngUnit=_def.LOSImpAngUnit,
Sketch=True,
dLeg=_def.TorLegd,
draw=True,
fs=None,
wintit=None,
tit=None,
Test=True,
):
msg = "Set the sino params before plotting !"
msg += "\n => run self.set_sino(...)"
assert self.dsino["RefPt"] is not None, msg
assert tit in [None, False] or isinstance(tit, str)
# Check uniformity of sinogram parameters
for ss in self.lStruct:
msg = "{0} {1} has different".format(ss.Id.Cls, ss.Id.Name)
msgf = "\n => run self.set_sino(...)"
msg0 = msg + " sino RefPt" + msgf
assert np.allclose(self.dsino["RefPt"], ss.dsino["RefPt"]), msg0
msg1 = msg + " sino nP" + msgf
assert self.dsino["nP"] == ss.dsino["nP"], msg1
if tit is None:
tit = self.Id.Name
vis = self.get_visible()
lS = self.lStruct
lS = [lS[ii] for ii in range(0, self._dStruct["nObj"]) if vis[ii]]
ax = _plot.Plot_Impact_PolProjPoly(
lS,
ax=ax,
Ang=Ang,
AngUnit=AngUnit,
Sketch=Sketch,
dP=dP,
dLeg=dLeg,
draw=draw,
fs=fs,
tit=tit,
wintit=wintit,
Test=Test,
)
return ax
@classmethod
def from_svg(
cls,
pfe,
res=None,
point_ref1=None,
point_ref2=None,
length_ref=None,
r0=None,
z0=None,
scale=None,
Exp=None,
Name=None,
shot=None,
Type=None,
SavePath=os.path.abspath("./"),
verb=None,
returnas=None,
):
""" Build a config from a svg file (Inkscape)
The svg shall have only:
- closed polygons (possibly inc. Bezier curves)
- an optional unique 2-points straight line (non-closed)
used for auto-scaling
        If Bezier curves are included, they will be discretized according to
resolution parameter res (absolute maximum tolerated distance between
points)
All closed polygons will be interpreted as:
- a Ves instance if it has no fill color
- a PFC instance if it has a fill color
        The names are derived from the Inkscape object ids
The coordinates are extracted from the svg
They can be rescaled either:
- automatically:
scaling computed from the unique straight line
and from the corresponding 2 points real-life coordinates
provided by the user as 2 iterables (list, arrays or tuples)
of len() = 2 (point_ref1 and point_ref2)
Alternatively a single point (point_ref1) and the length_ref
of the line can be provided
- forcefully:
the origin (r0, z0) and a common scaling factor (scale) are
provided by the user
        The resulting Config instance must have a Name and be associated with an
experiment (Exp).
"""
# Check inputs
if returnas is None:
returnas = object
if returnas not in [object, dict]:
            msg = (
                "Arg returnas must be either:\n"
+ "\t- 'object': return Config instance\n"
+ "\t- 'dict' : return a dict with polygon, cls, color"
)
raise Exception(msg)
# Extract polygon from file and check
dpath = _comp.get_paths_from_svg(
pfe=pfe, res=res,
point_ref1=point_ref1, point_ref2=point_ref2,
length_ref=length_ref,
r0=r0, z0=z0, scale=scale,
verb=verb,
)
if len(dpath) == 0:
msg = "No Struct found in {}".format(pfe)
raise Exception(msg)
if returnas is dict:
return dpath
else:
derr = {}
lstruct = []
for k0, v0 in dpath.items():
# get class
clss = eval(v0['cls'])
# Instanciate
try:
lstruct.append(
clss(
Name=k0, Poly=v0['poly'],
color=v0['color'], Exp=Exp,
)
)
except Exception as err:
derr[k0] = str(err)
# Raise error if any
if len(derr) > 0:
lerr = [
'\n\t- {}: {}'.format(k0, v0) for k0, v0 in derr.items()
]
msg = (
"\nThe following Struct could not be created:\n"
+ '\n'.join(lerr)
)
warnings.warn(msg)
SavePath = os.path.abspath(SavePath)
return cls(
Name=Name,
Exp=Exp,
shot=shot,
Type=Type,
lStruct=lstruct,
SavePath=SavePath,
)
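    # Illustrative usage sketch (hypothetical svg file and scaling): build a
    # Config from an Inkscape drawing, forcing the origin and scale instead of
    # relying on the auto-scaling reference line.
    #   >>> conf = Config.from_svg('machine.svg', Name='FromSVG', Exp='Demo',
    #   ...                        r0=0., z0=0., scale=1.e-3)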
def save_to_imas(
self,
shot=None,
run=None,
refshot=None,
refrun=None,
user=None,
database=None,
version=None,
occ=None,
dryrun=False,
verb=True,
description_2d=None,
):
import tofu.imas2tofu as _tfimas
_tfimas._save_to_imas(
self,
tfversion=__version__,
shot=shot,
run=run,
refshot=refshot,
refrun=refrun,
user=user,
database=database,
version=version,
occ=occ,
dryrun=dryrun,
verb=verb,
description_2d=description_2d,
)
def get_kwdargs_LOS_isVis(self):
lS = self.lStruct
# -- Getting "vessels" or IN structures -------------------------------
lSIn = [ss for ss in lS if ss._InOut == "in"]
if len(lSIn) == 0:
msg = "self.config must have at least a StructIn subclass !"
            raise Exception(msg)
elif len(lSIn) > 1:
S = lSIn[np.argmin([ss.dgeom["Surf"] for ss in lSIn])]
else:
S = lSIn[0]
# ... and its poly, limts, type, etc.
VPoly = S.Poly_closed
VVIn = S.dgeom["VIn"]
if np.size(np.shape(S.Lim)) > 1:
Lim = np.asarray([S.Lim[0][0], S.Lim[0][1]])
else:
Lim = S.Lim
VType = self.Id.Type
# -- Getting OUT structures -------------------------------------------
lS = [ss for ss in lS if ss._InOut == "out"]
if len(lS) == 0:
lSLim, lSnLim = None, None
num_lim_structs, num_tot_structs = 0, 0
lSPolyx, lSPolyy = None, None
lSVInx, lSVIny = None, None
lsnvert = None
else:
# Lims
lSLim = [ss.Lim for ss in lS]
lSnLim = np.array([ss.noccur for ss in lS])
# Nb of structures and of structures inc. Lims (toroidal occurence)
num_lim_structs = len(lS)
num_tot_structs = int(np.sum([max(1, ss.noccur) for ss in lS]))
# build concatenated C-contiguous arrays of x and y coordinates
lSPolyx = np.concatenate([ss.Poly_closed[0, :] for ss in lS])
lSPolyy = np.concatenate([ss.Poly_closed[1, :] for ss in lS])
lSVInx = np.concatenate([ss.dgeom['VIn'][0, :] for ss in lS])
lSVIny = np.concatenate([ss.dgeom['VIn'][1, :] for ss in lS])
# lsnvert = cumulated number of points in the poly of each Struct
lsnvert = np.cumsum([
ss.Poly_closed[0].size for ss in lS],
dtype=int,
)
# Now setting keyword arguments:
dkwd = dict(
ves_poly=VPoly,
ves_norm=VVIn,
ves_lims=Lim,
nstruct_tot=num_tot_structs,
nstruct_lim=num_lim_structs,
lstruct_polyx=lSPolyx,
lstruct_polyy=lSPolyy,
lstruct_lims=lSLim,
lstruct_nlim=lSnLim,
lstruct_normx=lSVInx,
lstruct_normy=lSVIny,
lnvert=lsnvert,
ves_type=VType,
rmin=-1,
forbid=True,
eps_uz=1.0e-6,
eps_vz=1.0e-9,
eps_a=1.0e-9,
eps_b=1.0e-9,
eps_plane=1.0e-9,
test=True,
)
return dkwd
def calc_solidangle_particle(
self,
pts=None,
part_traj=None,
part_radius=None,
approx=None,
aniso=None,
block=None,
):
""" Compute the solid angle subtended by a particle along a trajectory
        The particle has radius part_radius and trajectory part_traj
        It is observed from pts (array of points)
        Takes into account blocking of the field of view by structural elements
        part_traj and pts are (3, N) and (3, M) arrays of cartesian coordinates
approx = True => use approximation
aniso = True => return also unit vector of emission
block = True consider LOS collisions (with Ves, Struct...)
if block:
config used for LOS collisions
Parameters
----------
        part_traj: np.ndarray
Array of (3, N) pts coordinates (X, Y, Z) representing the particle
positions
pts: np.ndarray
Array of (3, M) pts coordinates (X, Y, Z) representing points from
which the particle is observed
        part_radius: float / np.ndarray
            Unique or multiple values for the radius of the spherical particle
            if multiple, part_radius is a np.ndarray of shape (N,)
approx: None / bool
Flag indicating whether to compute the solid angle using a
1st-order series development (in which case the solid angle becomes
proportional to the radius of the particle, see Notes_Upgrades/)
aniso: None / bool
Flag indicating whether to consider anisotropic emissivity,
meaning the routine must also compute and return the unit vector
directing the flux from each pts to each position on the trajectory
block: None / bool
Flag indicating whether to check for vignetting by structural
elements provided by config
        Returns
-------
sang: np.ndarray
(N, M) Array of floats, solid angles
"""
return _comp_solidangles.calc_solidangle_particle(
pts=pts,
part_traj=part_traj,
part_radius=part_radius,
config=self,
approx=approx,
aniso=aniso,
block=block,
)
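    # Illustrative usage sketch (hypothetical coordinates): solid angle subtended
    # by a 1 mm particle at one trajectory point, seen from one observation
    # point, with vignetting by the structures enabled.
    #   >>> import numpy as np
    #   >>> part_traj = np.array([[2.5], [0.0], [0.0]])   # (X, Y, Z), shape (3, 1)
    #   >>> pts = np.array([[3.0], [0.0], [0.2]])         # (X, Y, Z), shape (3, 1)
    #   >>> sang = config.calc_solidangle_particle(pts=pts, part_traj=part_traj,
    #   ...                                        part_radius=1.e-3, block=True)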
def calc_solidangle_particle_integrated(
self,
part_traj=None,
part_radius=None,
approx=None,
block=None,
resolution=None,
DR=None,
DZ=None,
DPhi=None,
plot=None,
vmin=None,
vmax=None,
scale=None,
fs=None,
dmargin=None,
returnax=None,
):
""" Compute the integrated solid angle map subtended by particles
Integrates the solid angle toroidally on a volume sampling of Config
        The particles have radius part_radius and trajectory part_traj
        The sampling volume is controlled by resolution, DR, DZ and DPhi
        Takes into account blocking of the field of view by structural elements
        part_traj is a (3, N) array of cartesian coordinates
approx = True => use approximation
block = True consider LOS collisions (with Ves, Struct...)
if block:
config used for LOS collisions
Parameters
----------
        part_traj: np.ndarray
            Array of (3, N) pts coordinates (X, Y, Z) representing the particle
            positions
        part_radius: float / np.ndarray
            Unique or multiple values for the radius of the spherical particle
            if multiple, part_radius is a np.ndarray of shape (N,)
approx: None / bool
Flag indicating whether to compute the solid angle using a
1st-order series development (in which case the solid angle becomes
proportional to the radius of the particle, see Notes_Upgrades/)
block: None / bool
Flag indicating whether to check for vignetting by structural
elements provided by config
        Returns
        -------
        ptsRZ, sang, indices, reseff: np.ndarray
            Sampled (R, Z) points, integrated solid angles, indices and
            effective resolution (the dict of plotting axes is also returned
            if plot and returnax are True)
"""
if plot is None:
plot = True
if returnax is None:
returnax = True
# -------------------
# Compute
(
ptsRZ, sang, indices, reseff,
) = _comp_solidangles.calc_solidangle_particle_integ(
part_traj=part_traj,
part_radius=part_radius,
config=self,
resolution=resolution,
DR=DR,
DZ=DZ,
DPhi=DPhi,
block=block,
approx=approx,
)
if plot is False:
return ptsRZ, sang, indices, reseff
# -------------------
# plot
dax = _plot.Config_plot_solidangle_map_particle(
config=self,
part_traj=part_traj,
part_radius=part_radius,
ptsRZ=ptsRZ,
sang=sang,
indices=indices,
reseff=reseff,
vmin=vmin,
vmax=vmax,
scale=scale,
fs=fs,
dmargin=dmargin,
)
if returnax is True:
return ptsRZ, sang, indices, reseff, dax
else:
return ptsRZ, sang, indices, reseff
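    # Illustrative usage sketch (hypothetical particle and resolution): compute
    # the toroidally-integrated solid angle map on a volume sampling of the
    # Config, without plotting.
    #   >>> import numpy as np
    #   >>> part_traj = np.array([[2.5], [0.0], [0.0]])   # (X, Y, Z), shape (3, 1)
    #   >>> ptsRZ, sang, indices, reseff = config.calc_solidangle_particle_integrated(
    #   ...     part_traj=part_traj, part_radius=1.e-3, resolution=0.05, plot=False)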
"""
###############################################################################
###############################################################################
Rays-derived classes and functions
###############################################################################
"""
class Rays(utils.ToFuObject):
""" Parent class of rays (ray-tracing), LOS, CamLOS1D and CamLOS2D
Focused on optimizing the computation time for many rays.
Each ray is defined by a starting point (D) and a unit vector(u).
If a vessel (Ves) and structural elements (LStruct) are provided,
the intersection points are automatically computed.
    Methods for plotting and computing synthetic signals are provided.
Parameters
----------
Id : str / :class:`~tofu.pathfile.ID`
A name string or a :class:`~tofu.pathfile.ID` to identify this
instance,
if a string is provided, it is fed to :class:`~tofu.pathfile.ID`
Du : iterable
        Iterable of len=2, containing 2 np.ndarrays representing, for N rays:
- Ds: a (3,N) array of the (X,Y,Z) coordinates of starting points
- us: a (3,N) array of the (X,Y,Z) coordinates of the unit vectors
Ves : None / :class:`~tofu.geom.Ves`
A :class:`~tofu.geom.Ves` instance to be associated to the rays
LStruct: None / :class:`~tofu.geom.Struct` / list
A :class:`~tofu.geom.Struct` instance or list of such, for obstructions
Sino_RefPt : None / np.ndarray
Iterable of len=2 with the coordinates of the sinogram reference point
- (R,Z) coordinates if the vessel is of Type 'Tor'
- (Y,Z) coordinates if the vessel is of Type 'Lin'
Exp : None / str
Experiment to which the LOS belongs:
- if both Exp and Ves are provided: Exp==Ves.Id.Exp
- if Ves is provided but not Exp: Ves.Id.Exp is used
Diag : None / str
Diagnostic to which the LOS belongs
shot : None / int
Shot number from which this LOS is valid
SavePath : None / str
If provided, default saving path of the object
"""
# Fixed (class-wise) dictionary of default properties
_ddef = {
"Id": {
"shot": 0,
"include": [
"Mod",
"Cls",
"Exp",
"Diag",
"Name",
"shot",
"version",
],
},
"dgeom": {"Type": "Tor", "Lim": [], "arrayorder": "C"},
"dsino": {},
"dmisc": {"color": "k"},
}
_dplot = {
"cross": {
"Elt": "P",
"dP": {"color": "k", "lw": 2},
"dI": {"color": "k", "ls": "--", "m": "x", "ms": 8, "mew": 2},
"dBs": {"color": "b", "ls": "--", "m": "x", "ms": 8, "mew": 2},
"dBv": {"color": "g", "ls": "--", "m": "x", "ms": 8, "mew": 2},
"dVect": {"color": "r", "scale": 10},
},
"hor": {
"Elt": "P",
"dP": {"color": "k", "lw": 2},
"dI": {"color": "k", "ls": "--"},
"dBs": {"color": "b", "ls": "--"},
"dBv": {"color": "g", "ls": "--"},
"Nstep": 50,
},
"3d": {
"Elt": "P",
"dP": {
"color": (0.8, 0.8, 0.8, 1.0),
"rstride": 1,
"cstride": 1,
"linewidth": 0.0,
"antialiased": False,
},
"Lim": None,
"Nstep": 50,
},
}
_dcases = {
"A": {"type": tuple, "lk": []},
"B": {"type": dict, "lk": ["D", "u"]},
"C": {"type": dict, "lk": ["D", "pinhole"]},
"D": {"type": dict, "lk": ["pinhole", "F", "nIn", "e1", "x1"]},
"E": {"type": dict, "lk": ["pinhole", "F", "nIn", "e1", "l1", "n1"]},
"F": {"type": dict, "lk": ["pinhole", "F", "angles", "x1"]},
"G": {"type": dict, "lk": ["pinhole", "F", "angles", "l1", "n1"]},
}
_method = "optimized"
    # Does not exist before Python 3.6 !!!
def __init_subclass__(cls, color="k", **kwdargs):
# Python 2
super(Rays, cls).__init_subclass__(**kwdargs)
# Python 3
# super().__init_subclass__(**kwdargs)
cls._ddef = copy.deepcopy(Rays._ddef)
cls._dplot = copy.deepcopy(Rays._dplot)
cls._set_color_ddef(color)
if cls._is2D():
cls._dcases["D"]["lk"] += ["e2", "x2"]
cls._dcases["E"]["lk"] += ["e2", "l2", "n2"]
cls._dcases["F"]["lk"] += ["x2"]
cls._dcases["G"]["lk"] += ["l2", "n2"]
@classmethod
def _set_color_ddef(cls, color):
cls._ddef['dmisc']['color'] = mpl.colors.to_rgba(color)
def __init__(self, dgeom=None, strict=None,
lOptics=None, Etendues=None, Surfaces=None,
config=None, dchans=None, dX12='geom',
Id=None, Name=None, Exp=None, shot=None, Diag=None,
sino_RefPt=None, fromdict=None, sep=None, method='optimized',
SavePath=os.path.abspath('./'), color=None):
# Create a dplot at instance level
self._dplot = copy.deepcopy(self.__class__._dplot)
# Extra-early fix for Exp
# Workflow to be cleaned up later ?
if Exp is None and config is not None:
Exp = config.Id.Exp
kwdargs = locals()
del kwdargs["self"]
# super()
super(Rays, self).__init__(**kwdargs)
def _reset(self):
# super()
super(Rays, self)._reset()
self._dgeom = dict.fromkeys(self._get_keys_dgeom())
if self._is2D():
self._dX12 = dict.fromkeys(self._get_keys_dX12())
self._dOptics = dict.fromkeys(self._get_keys_dOptics())
self._dconfig = dict.fromkeys(self._get_keys_dconfig())
self._dsino = dict.fromkeys(self._get_keys_dsino())
self._dchans = dict.fromkeys(self._get_keys_dchans())
self._dmisc = dict.fromkeys(self._get_keys_dmisc())
# self._dplot = copy.deepcopy(self.__class__._ddef['dplot'])
@classmethod
def _checkformat_inputs_Id(
cls,
Id=None,
Name=None,
Exp=None,
shot=None,
Diag=None,
include=None,
**kwdargs
):
if Id is not None:
assert isinstance(Id, utils.ID)
Name, Exp, shot, Diag = Id.Name, Id.Exp, Id.shot, Id.Diag
if shot is None:
shot = cls._ddef["Id"]["shot"]
if include is None:
include = cls._ddef["Id"]["include"]
dins = {
"Name": {"var": Name, "cls": str},
"Exp": {"var": Exp, "cls": str},
"Diag": {"var": Diag, "cls": str},
"shot": {"var": shot, "cls": int},
"include": {"var": include, "listof": str},
}
dins, err, msg = cls._check_InputsGeneric(dins, tab=0)
if err:
raise Exception(msg)
kwdargs.update(
{
"Name": Name,
"Exp": Exp,
"shot": shot,
"Diag": Diag,
"include": include,
}
)
return kwdargs
###########
# Get largs
###########
@staticmethod
def _get_largs_dgeom(sino=True):
largs = ["dgeom", 'strict', "Etendues", "Surfaces"]
if sino:
lsino = Rays._get_largs_dsino()
largs += ["sino_{0}".format(s) for s in lsino]
return largs
@staticmethod
def _get_largs_dX12():
largs = ["dX12"]
return largs
@staticmethod
def _get_largs_dOptics():
largs = ["lOptics"]
return largs
@staticmethod
def _get_largs_dconfig():
largs = ["config", "strict"]
return largs
@staticmethod
def _get_largs_dsino():
largs = ["RefPt"]
return largs
@staticmethod
def _get_largs_dchans():
largs = ["dchans"]
return largs
@staticmethod
def _get_largs_dmisc():
largs = ["color"]
return largs
###########
# Get check and format inputs
###########
def _checkformat_inputs_dES(self, val=None):
if val is not None:
C0 = type(val) in [int, float, np.int64, np.float64]
C1 = hasattr(val, "__iter__")
assert C0 or C1
if C0:
val = np.asarray([val], dtype=float)
else:
val = np.asarray(val, dtype=float).ravel()
assert val.size == self._dgeom["nRays"]
return val
def _checkformat_inputs_dgeom(self, dgeom=None):
assert dgeom is not None
assert isinstance(dgeom, tuple) or isinstance(dgeom, dict)
lC = [k for k in self._dcases.keys()
if (isinstance(dgeom, self._dcases[k]['type'])
and all([kk in dgeom.keys() # noqa
for kk in self._dcases[k]['lk']]))]
if not len(lC) == 1:
            lstr = [str(v['lk']) for v in self._dcases.values()]
            msg = "Arg dgeom must be either:\n"
            msg += " - dict with keys:\n"
            msg += "\n - " + "\n - ".join(lstr)
            msg += "\n - tuple of len()==2 containing (D,u)"
raise Exception(msg)
case = lC[0]
def _checkformat_Du(arr, name):
arr = np.asarray(arr, dtype=float)
msg = f"Arg {name} must be an iterable convertible into either:"
msg += "\n - a 1D np.ndarray of size=3"
msg += "\n - a 2D np.ndarray of shape (3,N)"
if arr.ndim not in [1, 2]:
msg += f"\nProvided arr.shape: {arr.shape}"
raise Exception(msg)
if arr.ndim == 1:
assert arr.size == 3, msg
arr = arr.reshape((3, 1))
else:
assert 3 in arr.shape, msg
if arr.shape[0] != 3:
arr = arr.T
arr = np.ascontiguousarray(arr)
return arr
if case in ["A", "B"]:
D = dgeom[0] if case == "A" else dgeom["D"]
u = dgeom[1] if case == "A" else dgeom["u"]
D = _checkformat_Du(D, "D")
u = _checkformat_Du(u, "u")
# Normalize u
u = u / np.sqrt(np.sum(u ** 2, axis=0))[np.newaxis, :]
nD, nu = D.shape[1], u.shape[1]
C0 = nD == 1 and nu > 1
C1 = nD > 1 and nu == 1
C2 = nD == nu
msg = "The number of rays is ambiguous from D and u shapes !"
assert C0 or C1 or C2, msg
nRays = max(nD, nu)
dgeom = {"D": D, "u": u, "isImage": False}
elif case == 'C':
D = _checkformat_Du(dgeom['D'], 'D')
dins = {'pinhole': {'var': dgeom['pinhole'], 'vectnd': 3}}
dins, err, msg = self._check_InputsGeneric(dins)
if err:
raise Exception(msg)
pinhole = dins["pinhole"]["var"]
dgeom = {"D": D, "pinhole": pinhole, "isImage": False}
nRays = D.shape[1]
else:
dins = {
"pinhole": {"var": dgeom["pinhole"], "vectnd": 3},
"F": {"var": dgeom["F"], "int2float": None},
}
if case in ["D", "E"]:
dins["nIn"] = {"var": dgeom["nIn"], "unitvectnd": 3}
dins["e1"] = {"var": dgeom["e1"], "unitvectnd": 3}
if "e2" in dgeom.keys():
dins["e2"] = {"var": dgeom["e2"], "unitvectnd": 3}
else:
dins["angles"] = {"var": dgeom["angles"], "vectnd": 3}
if case in ["D", "F"]:
dins["x1"] = {"var": dgeom["x1"], "vectnd": None}
                if "x2" in dgeom.keys():
dins["x2"] = {"var": dgeom["x2"], "vectnd": None}
else:
dins["l1"] = {"var": dgeom["l1"], "int2float": None}
dins["n1"] = {"var": dgeom["n1"], "float2int": None}
if "l2" in dgeom.keys():
dins["l2"] = {"var": dgeom["l2"], "int2float": None}
dins["n2"] = {"var": dgeom["n2"], "float2int": None}
dins, err, msg = self._check_InputsGeneric(dins)
if err:
raise Exception(msg)
dgeom = {"dX12": {}}
for k in dins.keys():
if k == "pinhole":
dgeom[k] = dins[k]["var"]
else:
dgeom["dX12"][k] = dins[k]["var"]
if case in ["E", "G"]:
x1 = dgeom["dX12"]["l1"] * np.linspace(
                    -0.5, 0.5, dgeom["dX12"]["n1"], endpoint=True
)
dgeom["dX12"]["x1"] = x1
if self._is2D():
x2 = dgeom["dX12"]["l2"] * np.linspace(
                        -0.5, 0.5, dgeom["dX12"]["n2"], endpoint=True
)
dgeom["dX12"]["x2"] = x2
if self._is2D():
nRays = dgeom["dX12"]["n1"] * dgeom["dX12"]["n2"]
ind1, ind2, indr = self._get_ind12r_n12(
n1=dgeom["dX12"]["n1"], n2=dgeom["dX12"]["n2"]
)
dgeom["dX12"]["ind1"] = ind1
dgeom["dX12"]["ind2"] = ind2
dgeom["dX12"]["indr"] = indr
dgeom["isImage"] = True
else:
nRays = dgeom["dX12"]["n1"]
dgeom["isImage"] = False
dgeom.update({"case": case, "nRays": nRays})
return dgeom
def _checkformat_dX12(self, dX12=None):
lc = [
dX12 is None,
dX12 == "geom" or dX12 == {"from": "geom"},
isinstance(dX12, dict),
]
if not np.sum(lc) == 1:
msg = "dX12 must be either:\n"
msg += " - None\n"
msg += " - 'geom' : will be derived from the 3D geometry\n"
msg += " - dict : containing {'x1' : array of coords.,\n"
msg += " 'x2' : array of coords.,\n"
msg += " 'ind1': array of int indices,\n"
msg += " 'ind2': array of int indices}"
raise Exception(msg)
if lc[1]:
ls = self._get_keys_dX12()
c0 = isinstance(self._dgeom["dX12"], dict)
c1 = c0 and all([ss in self._dgeom["dX12"].keys() for ss in ls])
c2 = c1 and all([self._dgeom["dX12"][ss] is not None for ss in ls])
if not c2:
msg = "dX12 is not provided as input (dX12 = None)\n"
msg += " => self._dgeom['dX12'] (computed) used as fallback\n"
msg += " - It should have non-None keys: %s\n" % str(
list(ls)
)
msg += " - it is:\n%s" % str(self._dgeom["dX12"])
raise Exception(msg)
dX12 = {"from": "geom"}
if lc[2]:
ls = ["x1", "x2", "ind1", "ind2"]
assert all([ss in dX12.keys() for ss in ls])
x1 = np.asarray(dX12["x1"]).ravel()
x2 = np.asarray(dX12["x2"]).ravel()
n1, n2 = x1.size, x2.size
ind1, ind2, indr = self._get_ind12r_n12(
ind1=dX12["ind1"], ind2=dX12["ind2"], n1=n1, n2=n2
)
dX12 = {
"x1": x1,
"x2": x2,
"n1": n1,
"n2": n2,
"ind1": ind1,
"ind2": ind2,
"indr": indr,
"from": "self",
}
return dX12
@staticmethod
def _checkformat_dOptics(lOptics=None):
if lOptics is None:
lOptics = []
assert type(lOptics) is list
lcls = ["Apert3D", "Cryst2D"]
nOptics = len(lOptics)
for ii in range(0, nOptics):
assert lOptics[ii].__class__.__name__ in lcls
return lOptics
@staticmethod
def _checkformat_inputs_dconfig(config=None):
# Check config has proper class
if not isinstance(config, Config):
msg = ("Arg config must be a Config instance!\n"
+ "\t- expected: {}".format(str(Config))
+ "\t- provided: {}".format(str(config.__class__)))
raise Exception(msg)
# Check all structures
lS = config.lStruct
lC = [hasattr(ss, "_InOut") and ss._InOut in ["in", "out"]
for ss in lS]
if not all(lC):
msg = "All Struct in config must have self._InOut in ['in','out']"
raise Exception(msg)
# Check there is at least one struct which is a subclass of StructIn
lSIn = [ss for ss in lS if ss._InOut == "in"]
if len(lSIn) == 0:
lclsnames = [
f'\t- {ss.Id.Name}, {ss.Id.Cls}, {ss._InOut}' for ss in lS
]
msg = (
f"Config {config.Id.Name} is missing a StructIn!\n"
+ "\n".join(lclsnames)
)
raise Exception(msg)
# Add 'compute' parameter if not present
if "compute" not in config._dextraprop["lprop"]:
config = config.copy()
config.add_extraprop("compute", True)
return config
def _checkformat_inputs_dsino(self, RefPt=None):
assert RefPt is None or hasattr(RefPt, "__iter__")
if RefPt is not None:
RefPt = np.asarray(RefPt, dtype=float).flatten()
assert RefPt.size == 2, "RefPt must be of size=2 !"
return RefPt
def _checkformat_inputs_dchans(self, dchans=None):
assert dchans is None or isinstance(dchans, dict)
if dchans is None:
dchans = {}
for k in dchans.keys():
arr = np.asarray(dchans[k]).ravel()
assert arr.size == self._dgeom["nRays"]
dchans[k] = arr
return dchans
@classmethod
def _checkformat_inputs_dmisc(cls, color=None):
if color is None:
color = mpl.colors.to_rgba(cls._ddef["dmisc"]["color"])
assert mpl.colors.is_color_like(color)
return tuple(mpl.colors.to_rgba(color))
###########
    # Get keys of dictionaries
###########
@staticmethod
def _get_keys_dgeom():
lk = [
"D",
"u",
"pinhole",
"nRays",
"kIn",
"kOut",
"PkIn",
"PkOut",
"vperp",
"indout",
"indStruct",
"kRMin",
"PRMin",
"RMin",
"isImage",
"Etendues",
"Surfaces",
"dX12",
"dreflect",
"move",
"move_param",
"move_kwdargs",
]
return lk
@staticmethod
def _get_keys_dX12():
lk = ["x1", "x2", "n1", "n2", "ind1", "ind2", "indr"]
return lk
@staticmethod
def _get_keys_dOptics():
lk = ["lorder", "lCls", "nObj", "dObj"]
return lk
@staticmethod
def _get_keys_dsino():
lk = ["RefPt", "k", "pts", "theta", "p", "phi"]
return lk
@staticmethod
def _get_keys_dconfig():
lk = ["config"]
return lk
@staticmethod
def _get_keys_dchans():
lk = []
return lk
@staticmethod
def _get_keys_dmisc():
lk = ["color"]
return lk
###########
# _init
###########
def _init(
self,
dgeom=None,
config=None,
Etendues=None,
Surfaces=None,
sino_RefPt=None,
dchans=None,
method="optimized",
**kwargs
):
if method is not None:
self._method = method
kwdargs = locals()
kwdargs.update(**kwargs)
largs = self._get_largs_dgeom(sino=True)
kwdgeom = self._extract_kwdargs(kwdargs, largs)
largs = self._get_largs_dconfig()
kwdconfig = self._extract_kwdargs(kwdargs, largs)
largs = self._get_largs_dchans()
kwdchans = self._extract_kwdargs(kwdargs, largs)
largs = self._get_largs_dOptics()
kwdOptics = self._extract_kwdargs(kwdargs, largs)
largs = self._get_largs_dmisc()
kwdmisc = self._extract_kwdargs(kwdargs, largs)
self.set_dconfig(calcdgeom=False, **kwdconfig)
self._set_dgeom(sino=True, **kwdgeom)
if self._is2D():
kwdX12 = self._extract_kwdargs(kwdargs, self._get_largs_dX12())
self.set_dX12(**kwdX12)
self._set_dOptics(**kwdOptics)
self.set_dchans(**kwdchans)
self._set_dmisc(**kwdmisc)
self._dstrip["strip"] = 0
###########
# set dictionaries
###########
def set_dconfig(self, config=None, strict=None, calcdgeom=True):
config = self._checkformat_inputs_dconfig(config)
self._dconfig["Config"] = config.copy()
if calcdgeom:
self.compute_dgeom(strict=strict)
def _update_dgeom_from_TransRotFoc(self, val, key="x"):
# To be finished for 1.4.1
raise Exception("Not coded yet !")
# assert False, "Not implemented yet, for future versions"
# if key in ['x','y','z']:
# if key == 'x':
# trans = np.r_[val,0.,0.]
# elif key == 'y':
# trans = np.r_[0.,val,0.]
# else:
# trans = np.r_[0.,0.,val]
# if self._dgeom['pinhole'] is not None:
# self._dgeom['pinhole'] += trans
# self._dgeom['D'] += trans[:,np.newaxis]
# if key in ['nIn','e1','e2']:
# if key == 'nIn':
# e1 = (np.cos(val)*self._dgeom['dX12']['e1']
# + np.sin(val)*self._dgeom['dX12']['e2'])
# e2 = (np.cos(val)*self._dgeom['dX12']['e2']
# - np.sin(val)*self._dgeom['dX12']['e1'])
# self._dgeom['dX12']['e1'] = e1
# self._dgeom['dX12']['e2'] = e2
# elif key == 'e1':
# nIn = (np.cos(val)*self._dgeom['dX12']['nIn']
# + np.sin(val)*self._dgeom['dX12']['e2'])
# e2 = (np.cos(val)*self._dgeom['dX12']['e2']
# - np.sin(val)*self._dgeom['dX12']['nIn'])
# self._dgeom['dX12']['nIn'] = nIn
# self._dgeom['dX12']['e2'] = e2
# else:
# nIn = (np.cos(val)*self._dgeom['dX12']['nIn']
# + np.sin(val)*self._dgeom['dX12']['e1'])
# e1 = (np.cos(val)*self._dgeom['dX12']['e1']
# - np.sin(val)*self._dgeom['dX12']['nIn'])
# self._dgeom['dX12']['nIn'] = nIn
# self._dgeom['dX12']['e1'] = e1
# if key == 'F':
# self._dgeom['F'] += val
@classmethod
def _get_x12_fromflat(cls, X12):
x1, x2 = np.unique(X12[0, :]), np.unique(X12[1, :])
n1, n2 = x1.size, x2.size
if n1 * n2 != X12.shape[1]:
tol = np.linalg.norm(np.diff(X12[:, :2], axis=1)) / 100.0
tolmag = int(np.log10(tol)) - 1
x1 = np.unique(np.round(X12[0, :], -tolmag))
x2 = np.unique(np.round(X12[1, :], -tolmag))
ind1 = np.digitize(X12[0, :], 0.5 * (x1[1:] + x1[:-1]))
ind2 = np.digitize(X12[1, :], 0.5 * (x2[1:] + x2[:-1]))
ind1u, ind2u = np.unique(ind1), np.unique(ind2)
x1 = np.unique([np.mean(X12[0, ind1 == ii]) for ii in ind1u])
x2 = np.unique([np.mean(X12[1, ind2 == ii]) for ii in ind2u])
n1, n2 = x1.size, x2.size
if n1 * n2 != X12.shape[1]:
                msg = "The provided X12 array does not seem to correspond to "
                msg += "an n1 x n2 2D matrix, even within tolerance\n"
msg += " n1*n2 = %s x %s = %s\n" % (
str(n1),
str(n2),
str(n1 * n2),
)
msg += " X12.shape = %s" % str(X12.shape)
raise Exception(msg)
ind1 = np.digitize(X12[0, :], 0.5 * (x1[1:] + x1[:-1]))
ind2 = np.digitize(X12[1, :], 0.5 * (x2[1:] + x2[:-1]))
ind1, ind2, indr = cls._get_ind12r_n12(
ind1=ind1, ind2=ind2, n1=n1, n2=n2
)
return x1, x2, n1, n2, ind1, ind2, indr
def _complete_dX12(self, dgeom):
# Test if unique starting point
if dgeom["case"] in ["A", "B", "C"]:
# Test if pinhole
if dgeom['D'].shape[1] == 1 and dgeom['nRays'] > 1:
dgeom['pinhole'] = dgeom['D'].ravel()
elif dgeom['case'] in ['A', 'B']:
u = dgeom['u'][:, 0:1]
sca2 = np.sum(dgeom['u'][:, 1:]*u, axis=0)**2
if np.all(sca2 < 1.0 - 1.e-9):
DDb = dgeom['D'][:, 1:]-dgeom['D'][:, 0:1]
k = np.sum(DDb*(u - np.sqrt(sca2)*dgeom['u'][:, 1:]),
axis=0)
k = k / (1.0-sca2)
if k[0] > 0 and np.allclose(k, k[0], atol=1.e-3,
rtol=1.e-6):
pinhole = dgeom['D'][:, 0] + k[0]*u[:, 0]
dgeom['pinhole'] = pinhole
if np.any(np.isnan(dgeom['D'])):
msg = ("Some LOS have nan as starting point !\n"
+ "The geometry may not be provided !")
raise Exception(msg)
# Test if all D are on a common plane or line
va = dgeom["D"] - dgeom["D"][:, 0:1]
            # criterion of unique D
crit = np.sqrt(np.sum(va ** 2, axis=0))
if np.sum(crit) < 1.0e-9:
if self._is2D():
msg = "2D camera but dgeom cannot be obtained !\n"
msg += " crit = %s\n" % str(crit)
msg += " dgeom = %s" % str(dgeom)
raise Exception(msg)
return dgeom
# To avoid ||v0|| = 0
if crit[1] > 1.0e-12:
# Take first one by default to ensure square grid for CamLOS2D
ind0 = 1
else:
ind0 = np.nanargmax(crit)
v0 = va[:, ind0]
v0 = v0 / np.linalg.norm(v0)
indok = np.nonzero(crit > 1.0e-12)[0]
van = np.full(va.shape, np.nan)
van[:, indok] = va[:, indok] / crit[None, indok]
vect2 = (
(van[1, :] * v0[2] - van[2, :] * v0[1]) ** 2
+ (van[2, :] * v0[0] - van[0, :] * v0[2]) ** 2
+ (van[0, :] * v0[1] - van[1, :] * v0[0]) ** 2
)
# Don't forget that vect2[0] is nan
if np.all(vect2[indok] < 1.0e-9):
# All D are aligned
e1 = v0
x1 = np.sum(va * e1[:, np.newaxis], axis=0)
if dgeom["pinhole"] is not None:
kref = -np.sum((dgeom["D"][:, 0] - dgeom["pinhole"]) * e1)
x1 = x1 - kref
# l1 = np.nanmax(x1) - np.nanmin(x1)
if dgeom["dX12"] is None:
dgeom["dX12"] = {}
dgeom["dX12"].update({"e1": e1, "x1": x1, "n1": x1.size})
elif self._is2D():
ind = np.nanargmax(vect2)
v1 = van[:, ind]
nn = np.cross(v0, v1)
nn = nn / np.linalg.norm(nn)
scaabs = np.abs(np.sum(nn[:, np.newaxis] * va, axis=0))
if np.all(scaabs < 1.0e-9):
# All D are in a common plane, but not aligned
# check nIn orientation
sca = np.sum(self.u * nn[:, np.newaxis], axis=0)
lc = [np.all(sca >= 0.0), np.all(sca <= 0.0)]
assert any(lc)
nIn = nn if lc[0] else -nn
e1 = v0
e2 = v1
if np.sum(np.cross(e1, nIn) * e2) < 0.0:
e2 = -e2
if np.abs(e1[2]) > np.abs(e2[2]):
# Try to set e2 closer to ez if possible
e1, e2 = -e2, e1
if dgeom["dX12"] is None:
dgeom["dX12"] = {}
dgeom["dX12"].update({"nIn": nIn, "e1": e1, "e2": e2})
# Test binning
if dgeom["pinhole"] is not None:
k1ref = -np.sum(
(dgeom["D"][:, 0] - dgeom["pinhole"]) * e1
)
k2ref = -np.sum(
(dgeom["D"][:, 0] - dgeom["pinhole"]) * e2
)
else:
k1ref, k2ref = 0.0, 0.0
x12 = np.array(
[
np.sum(va * e1[:, np.newaxis], axis=0) - k1ref,
np.sum(va * e2[:, np.newaxis], axis=0) - k2ref,
]
)
try:
out_loc = self._get_x12_fromflat(x12)
x1, x2, n1, n2, ind1, ind2, indr = out_loc
dgeom["dX12"].update(
{
"x1": x1,
"x2": x2,
"n1": n1,
"n2": n2,
"ind1": ind1,
"ind2": ind2,
"indr": indr,
}
)
dgeom["isImage"] = True
except Exception as err:
msg = str(err)
msg += "\n nIn = %s" % str(nIn)
msg += "\n e1 = %s" % str(e1)
msg += "\n e2 = %s" % str(e2)
msg += "\n k1ref, k2ref = %s, %s" % (
str(k1ref),
str(k2ref),
)
msg += "\n va = %s" % str(va)
msg += "\n x12 = %s" % str(x12)
warnings.warn(msg)
else:
if dgeom["case"] in ["F", "G"]:
# Get unit vectors from angles
msg = "Not implemented yet, angles will be available for 1.4.1"
raise Exception(msg)
# Get D and x12 from x1, x2
x12 = np.array(
[
dgeom["dX12"]["x1"][dgeom["dX12"]["ind1"]],
dgeom["dX12"]["x2"][dgeom["dX12"]["ind2"]],
]
)
D = dgeom["pinhole"] - dgeom["F"] * dgeom["dX12"]["nIn"]
D = (
D[:, np.newaxis]
+ x12[0, :] * dgeom["dX12"]["e1"]
+ x12[1, :] * dgeom["dX12"]["e2"]
)
dgeom["D"] = D
return dgeom
def _prepare_inputs_kInOut(self, D=None, u=None, indStruct=None):
# Prepare input: D, u
if D is None:
D = np.ascontiguousarray(self.D)
else:
D = np.ascontiguousarray(D)
if u is None:
u = np.ascontiguousarray(self.u)
else:
u = np.ascontiguousarray(u)
assert D.shape == u.shape
# Get reference: lS
if indStruct is None:
indIn, indOut = self.get_indStruct_computeInOut(unique_In=True)
indStruct = np.r_[indIn, indOut]
else:
indIn = [
ii for ii in indStruct
if self.config.lStruct[ii]._InOut == "in"
]
if len(indIn) > 1:
ind = np.argmin([
self.config.lStruct[ii].dgeom['Surf']
for ii in indIn
])
indStruct = [ii for ii in indStruct
if ii not in indIn or ii == ind]
indIn = [indIn[ind]]
indOut = [
ii for ii in indStruct
if self.config.lStruct[ii]._InOut == "out"
]
if len(indIn) == 0:
msg = "self.config must have at least a StructIn subclass !"
raise Exception(msg)
S = self.config.lStruct[indIn[0]]
VPoly = S.Poly_closed
VVIn = S.dgeom["VIn"]
largs = [D, u, VPoly, VVIn]
lS = [self.config.lStruct[ii] for ii in indOut]
if self._method == "ref":
Lim = S.Lim
nLim = S.noccur
VType = self.config.Id.Type
lSPoly, lSVIn, lSLim, lSnLim = [], [], [], []
for ss in lS:
lSPoly.append(ss.Poly_closed)
lSVIn.append(ss.dgeom["VIn"])
lSLim.append(ss.Lim)
lSnLim.append(ss.noccur)
dkwd = dict(
Lim=Lim,
nLim=nLim,
LSPoly=lSPoly,
LSLim=lSLim,
lSnLim=lSnLim,
LSVIn=lSVIn,
VType=VType,
RMin=None,
Forbid=True,
EpsUz=1.0e-6,
EpsVz=1.0e-9,
EpsA=1.0e-9,
EpsB=1.0e-9,
EpsPlane=1.0e-9,
Test=True,
)
elif self._method == "optimized":
if np.size(np.shape(S.Lim)) > 1:
Lim = np.asarray([S.Lim[0][0], S.Lim[0][1]])
else:
Lim = S.Lim
nLim = S.noccur
VType = self.config.Id.Type
lSPolyx, lSVInx = [], []
lSPolyy, lSVIny = [], []
lSLim, lSnLim = [], []
lsnvert = []
num_tot_structs = 0
num_lim_structs = 0
for ss in lS:
lp = ss.Poly_closed[0]
[lSPolyx.append(item) for item in lp]
lp = ss.Poly_closed[1]
[lSPolyy.append(item) for item in lp]
lp = ss.dgeom["VIn"][0]
[lSVInx.append(item) for item in lp]
lp = ss.dgeom["VIn"][1]
[lSVIny.append(item) for item in lp]
lSLim.append(ss.Lim)
lSnLim.append(ss.noccur)
if len(lsnvert) == 0:
lsnvert.append(len(ss.Poly_closed[0]))
else:
lsnvert.append(
len(ss.Poly_closed[0]) + lsnvert[num_lim_structs - 1]
)
num_lim_structs += 1
if ss.Lim is None or len(ss.Lim) == 0:
num_tot_structs += 1
else:
num_tot_structs += len(ss.Lim)
lsnvert = np.asarray(lsnvert, dtype=int)
lSPolyx = np.asarray(lSPolyx)
lSPolyy = np.asarray(lSPolyy)
lSVInx = np.asarray(lSVInx)
lSVIny = np.asarray(lSVIny)
dkwd = dict(ves_lims=Lim,
nstruct_tot=num_tot_structs,
nstruct_lim=num_lim_structs,
lstruct_polyx=lSPolyx,
lstruct_polyy=lSPolyy,
lstruct_lims=lSLim,
lstruct_nlim=np.asarray(lSnLim, dtype=int),
lstruct_normx=lSVInx,
lstruct_normy=lSVIny,
lnvert=lsnvert,
ves_type=VType,
rmin=-1, forbid=True, eps_uz=1.e-6, eps_vz=1.e-9,
eps_a=1.e-9, eps_b=1.e-9, eps_plane=1.e-9, test=True)
return indStruct, largs, dkwd
def _compute_kInOut(self, largs=None, dkwd=None, indStruct=None):
# Prepare inputs
if largs is None:
indStruct, largs, dkwd = self._prepare_inputs_kInOut(
indStruct=indStruct
)
else:
assert dkwd is not None
assert indStruct is not None
if self._method == "ref":
# call the dedicated function
out = _GG.SLOW_LOS_Calc_PInOut_VesStruct(*largs, **dkwd)
# Currently computes and returns too many things
PIn, POut, kIn, kOut, VperpIn, vperp, IIn, indout = out
elif self._method == "optimized":
# call the dedicated function
out = _GG.LOS_Calc_PInOut_VesStruct(*largs, **dkwd)
# Currently computes and returns too many things
kIn, kOut, vperp, indout = out
else:
pass
# Make sure indices refer to lStruct
indout[0, :] = indStruct[indout[0, :]]
return kIn, kOut, vperp, indout, indStruct
    def compute_dgeom(self, extra=True, strict=None, show_debug_plot=True):
        """ Compute dictionary of geometrical attributes (dgeom)
Parameters
----------
show_debug_plot: bool
In case some lines of sight have no visibility inside the tokamak,
            they will be considered invalid. tofu will issue a warning with
            their indices and, if show_debug_plot is True, try to plot a 3d
            figure to help understand why these LOS have no visibility
"""
# check inputs
if strict is None:
strict = True
# Can only be computed if config if provided
if self._dconfig["Config"] is None:
msg = "Attribute dgeom cannot be computed without a config!"
warnings.warn(msg)
return
# dX12
if self._dgeom["nRays"] > 1 and strict is True:
self._dgeom = self._complete_dX12(self._dgeom)
# Perform computation of kIn and kOut
kIn, kOut, vperp, indout, indStruct = self._compute_kInOut()
# Check for LOS that have no visibility inside the plasma domain (nan)
ind = np.isnan(kIn)
kIn[ind] = 0.0
ind = np.isnan(kOut) | np.isinf(kOut)
if np.any(ind):
msg = ("Some LOS have no visibility inside the plasma domain!\n"
+ "Nb. of LOS concerned: {} / {}\n".format(ind.sum(),
kOut.size)
+ "Indices of LOS ok:\n"
+ repr((~ind).nonzero()[0])
+ "\nIndices of LOS with no visibility:\n"
+ repr(ind.nonzero()[0]))
if show_debug_plot is True:
PIn = self.D[:, ind] + kIn[None, ind] * self.u[:, ind]
POut = self.D[:, ind] + kOut[None, ind] * self.u[:, ind]
msg2 = ("\n\tD = {}\n".format(self.D[:, ind])
+ "\tu = {}\n".format(self.u[:, ind])
+ "\tPIn = {}\n".format(PIn)
+ "\tPOut = {}".format(POut))
warnings.warn(msg2)
# plot 3d debug figure
# _plot._LOS_calc_InOutPolProj_Debug(
# self.config,
# self.D[:, ind],
# self.u[:, ind],
# PIn,
# POut,
# nptstot=kOut.size,
# Lim=[np.pi / 4.0, 2.0 * np.pi / 4],
# Nstep=50,
# )
kOut[ind] = np.nan
if strict is True:
raise Exception(msg)
else:
warnings.warn(msg)
# Handle particular cases with kIn > kOut
ind = np.zeros(kIn.shape, dtype=bool)
ind[~np.isnan(kOut)] = True
ind[ind] = kIn[ind] > kOut[ind]
kIn[ind] = 0.0
# Update dgeom
dd = {
"kIn": kIn,
"kOut": kOut,
"vperp": vperp,
"indout": indout,
"indStruct": indStruct,
}
self._dgeom.update(dd)
# Run extra computations
if extra:
self._compute_dgeom_kRMin()
self._compute_dgeom_extra1()
def _compute_dgeom_kRMin(self):
# Get RMin if Type is Tor
if self.config.Id.Type == "Tor":
kRMin = np.atleast_1d(
_comp.LOS_PRMin(
self.D, self.u, kOut=self.kOut, Eps=1.0e-12, squeeze=True
)
)
else:
kRMin = None
self._dgeom.update({"kRMin": kRMin})
def _compute_dgeom_extra1(self):
if self._dgeom["kRMin"] is not None:
PRMin = self.D + self._dgeom["kRMin"][None, :] * self.u
RMin = np.hypot(PRMin[0, :], PRMin[1, :])
else:
PRMin, RMin = None, None
PkIn = self.D + self._dgeom["kIn"][np.newaxis, :] * self.u
PkOut = self.D + self._dgeom["kOut"][np.newaxis, :] * self.u
dd = {"PkIn": PkIn, "PkOut": PkOut, "PRMin": PRMin, "RMin": RMin}
self._dgeom.update(dd)
def _compute_dgeom_extra2D(self):
if "2d" not in self.Id.Cls.lower():
return
D, u = self.D, self.u
C = np.nanmean(D, axis=1)
CD0 = D[:, :-1] - C[:, np.newaxis]
CD1 = D[:, 1:] - C[:, np.newaxis]
cross = np.array(
[
CD1[1, 1:] * CD0[2, :-1] - CD1[2, 1:] * CD0[1, :-1],
CD1[2, 1:] * CD0[0, :-1] - CD1[0, 1:] * CD0[2, :-1],
CD1[0, 1:] * CD0[1, :-1] - CD1[1, 1:] * CD0[0, :-1],
]
)
crossn2 = np.sum(cross ** 2, axis=0)
if np.all(np.abs(crossn2) < 1.0e-12):
msg = "Is %s really a 2D camera ? (LOS aligned?)" % self.Id.Name
warnings.warn(msg)
cross = cross[:, np.nanargmax(crossn2)]
cross = cross / np.linalg.norm(cross)
nIn = cross if np.sum(cross * np.nanmean(u, axis=1)) > 0.0 else -cross
# Find most relevant e1 (for pixels alignment), without a priori info
D0D = D - D[:, 0][:, np.newaxis]
dist = np.sqrt(np.sum(D0D ** 2, axis=0))
dd = np.min(dist[1:])
e1 = (D[:, 1] - D[:, 0]) / np.linalg.norm(D[:, 1] - D[:, 0])
crossbis = np.sqrt(
(D0D[1, :] * e1[2] - D0D[2, :] * e1[1]) ** 2
+ (D0D[2, :] * e1[0] - D0D[0, :] * e1[2]) ** 2
+ (D0D[0, :] * e1[1] - D0D[1, :] * e1[0]) ** 2
)
D0D = D0D[:, crossbis < dd / 3.0]
sca = np.sum(D0D * e1[:, np.newaxis], axis=0)
e1 = D0D[:, np.argmax(np.abs(sca))]
try:
import tofu.geom.utils as geom_utils
except Exception:
from . import utils as geom_utils
nIn, e1, e2 = geom_utils.get_nIne1e2(C, nIn=nIn, e1=e1)
if np.abs(np.abs(nIn[2]) - 1.0) > 1.0e-12:
if np.abs(e1[2]) > np.abs(e2[2]):
e1, e2 = e2, e1
e2 = e2 if e2[2] > 0.0 else -e2
self._dgeom.update({"C": C, "nIn": nIn, "e1": e1, "e2": e2})
def set_Etendues(self, val):
val = self._checkformat_inputs_dES(val)
self._dgeom["Etendues"] = val
def set_Surfaces(self, val):
val = self._checkformat_inputs_dES(val)
self._dgeom["Surfaces"] = val
def _set_dgeom(
self,
dgeom=None,
Etendues=None,
Surfaces=None,
sino_RefPt=None,
extra=True,
strict=None,
sino=True,
):
dgeom = self._checkformat_inputs_dgeom(dgeom=dgeom)
self._dgeom.update(dgeom)
self.compute_dgeom(extra=extra, strict=strict)
self.set_Etendues(Etendues)
self.set_Surfaces(Surfaces)
if sino:
self.set_dsino(sino_RefPt)
def set_dX12(self, dX12=None):
dX12 = self._checkformat_dX12(dX12)
self._dX12.update(dX12)
def _compute_dsino_extra(self):
if self._dsino["k"] is not None:
pts = self.D + self._dsino["k"][np.newaxis, :] * self.u
R = np.hypot(pts[0, :], pts[1, :])
DR = R - self._dsino["RefPt"][0]
DZ = pts[2, :] - self._dsino["RefPt"][1]
p = np.hypot(DR, DZ)
theta = np.arctan2(DZ, DR)
ind = theta < 0
p[ind] = -p[ind]
theta[ind] = -theta[ind]
phipts = np.arctan2(pts[1, :], pts[0, :])
etheta = np.array(
[
np.cos(phipts) * np.cos(theta),
np.sin(phipts) * np.cos(theta),
np.sin(theta),
]
)
phi = np.arccos(np.abs(np.sum(etheta * self.u, axis=0)))
dd = {"pts": pts, "p": p, "theta": theta, "phi": phi}
self._dsino.update(dd)
def set_dsino(self, RefPt=None, extra=True):
RefPt = self._checkformat_inputs_dsino(RefPt=RefPt)
self._dsino.update({"RefPt": RefPt})
VType = self.config.Id.Type
if RefPt is not None:
self._dconfig["Config"].set_dsino(RefPt=RefPt)
kOut = np.copy(self._dgeom["kOut"])
kOut[np.isnan(kOut)] = np.inf
try:
out = _GG.LOS_sino(
self.D, self.u, RefPt, kOut, Mode="LOS", VType=VType
)
Pt, k, r, Theta, p, theta, Phi = out
self._dsino.update({"k": k})
except Exception as err:
msg = str(err)
msg += "\nError while computing sinogram !"
raise Exception(msg)
if extra:
self._compute_dsino_extra()
def _set_dOptics(self, lOptics=None):
lOptics = self._checkformat_dOptics(lOptics=lOptics)
self._set_dlObj(lOptics, din=self._dOptics)
def set_dchans(self, dchans=None):
dchans = self._checkformat_inputs_dchans(dchans)
self._dchans = dchans
def _set_color(self, color=None):
color = self._checkformat_inputs_dmisc(color=color)
self._dmisc["color"] = color
self._dplot["cross"]["dP"]["color"] = color
self._dplot["hor"]["dP"]["color"] = color
self._dplot["3d"]["dP"]["color"] = color
def _set_dmisc(self, color=None):
self._set_color(color)
###########
# Reflections
###########
def get_reflections_as_cam(self, Type=None, Name=None, nb=None):
""" Return a camera made of reflected LOS
Reflected LOS can be of 3 types:
- 'speculiar': standard mirror-like reflection
- 'diffusive': random reflection
- 'ccube': corner-cube reflection (the ray goes back the way it came)
As opposed to self.add_reflections(), the reflected rays are
returned as an independent camera (CamLOS1D)
"""
# Check inputs
if nb is None:
nb = 1
nb = int(nb)
assert nb > 0
if Name is None:
Name = self.Id.Name + "_Reflect%s" % str(Type)
clas = Rays if self.__class__.__name__ == 'Rays' else CamLOS1D
# Run first iteration
Types = np.full((nb, self.nRays), 0, dtype=int)
Ds = self.D + (self._dgeom["kOut"][None, :] - 1.0e-12) * self.u
us, Types[0, :] = self.config._reflect_geom(
u=self.u,
vperp=self._dgeom["vperp"],
indout=self._dgeom["indout"],
Type=Type,
)
lcam = [
clas(
dgeom=(Ds, us),
config=self.config,
Exp=self.Id.Exp,
Diag=self.Id.Diag,
Name=Name,
shot=self.Id.shot,
)
]
if nb == 1:
return lcam[0], Types[0, :]
indStruct, largs, dkwd = self._prepare_inputs_kInOut(
D=Ds, u=us, indStruct=self._dgeom["indStruct"]
)
outi = self._compute_kInOut(
largs=largs, dkwd=dkwd, indStruct=indStruct
)
kouts, vperps, indouts = outi[1:-1]
# Run other iterations
for ii in range(1, nb):
Ds = Ds + (kouts[None, :] - 1.0e-12) * us
us, Types[ii, :] = self.config._reflect_geom(
u=us, vperp=vperps, indout=indouts, Type=Type
)
outi = self._compute_kInOut(
largs=[Ds, us, largs[2], largs[3]],
dkwd=dkwd,
indStruct=indStruct,
)
kouts, vperps, indouts = outi[1:-1]
lcam.append(
clas(
dgeom=(Ds, us),
config=self.config,
Exp=self.Id.Exp,
Diag=self.Id.Diag,
Name=Name,
shot=self.Id.shot,
)
)
return lcam, Types
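# Illustrative usage of get_reflections_as_cam() (editor's hedged sketch,
# not part of the original API docs; `cam` is a hypothetical CamLOS1D
# instance built on a valid Config):
#
#     cam_refl, types = cam.get_reflections_as_cam(nb=1)
#     cam_refl.plot(proj='all')   # reflected rays as a standalone camera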
def add_reflections(self, Type=None, nb=None):
""" Add relfected LOS to the camera
Reflected LOS can be of 3 types:
- 'speculiar': standard mirror-like reflection
- 'diffusive': random reflection
- 'ccube': corner-cube reflection (the ray goes back the way it came)
As opposed to self.get_reflections_as_cam(), the reflected rays are
stored in the camera object
"""
# Check inputs
if nb is None:
nb = 1
nb = int(nb)
assert nb > 0
# Prepare output
nRays = self.nRays
Types = np.full((nRays, nb), 0, dtype=int)
Ds = np.full((3, nRays, nb), np.nan, dtype=float)
us = np.full((3, nRays, nb), np.nan, dtype=float)
kouts = np.full((nRays, nb), np.nan, dtype=float)
indouts = np.full((3, nRays, nb), 0, dtype=int)
vperps = np.full((3, nRays, nb), np.nan, dtype=float)
# Run first iteration
Ds[:, :, 0] = (
self.D + (self._dgeom["kOut"][None, :] - 1.0e-12) * self.u
)
us[:, :, 0], Types[:, 0] = self.config._reflect_geom(
u=self.u,
vperp=self._dgeom["vperp"],
indout=self._dgeom["indout"],
Type=Type,
)
indStruct, largs, dkwd = self._prepare_inputs_kInOut(
D=Ds[:, :, 0], u=us[:, :, 0], indStruct=self._dgeom["indStruct"]
)
outi = self._compute_kInOut(
largs=largs, dkwd=dkwd, indStruct=indStruct
)
kouts[:, 0], vperps[:, :, 0], indouts[:, :, 0] = outi[1:-1]
# Run other iterations
for ii in range(1, nb):
Dsi = (
Ds[:, :, ii - 1]
+ (kouts[None, :, ii - 1] - 1.0e-12) * us[:, :, ii - 1]
)
usi, Types[:, ii] = self.config._reflect_geom(
u=us[:, :, ii - 1],
vperp=vperps[:, :, ii - 1],
indout=indouts[:, :, ii - 1],
Type=Type,
)
outi = self._compute_kInOut(
largs=[Dsi, usi, largs[2], largs[3]],
dkwd=dkwd,
indStruct=indStruct,
)
kouts[:, ii], vperps[:, :, ii], indouts[:, :, ii] = outi[1:-1]
Ds[:, :, ii], us[:, :, ii] = Dsi, usi
self._dgeom["dreflect"] = {
"nb": nb,
"Type": Type,
"Types": Types,
"Ds": Ds,
"us": us,
"kouts": kouts,
"indouts": indouts,
}
###########
# strip dictionaries
###########
def _strip_dgeom(self, strip=0):
if self._dstrip["strip"] == strip:
return
if strip < self._dstrip["strip"]:
# Reload
if self._dstrip["strip"] == 1:
self._compute_dgeom_extra1()
elif self._dstrip["strip"] >= 2 and strip == 1:
self._compute_dgeom_kRMin()
elif self._dstrip["strip"] >= 2 and strip == 0:
self._compute_dgeom_kRMin()
self._compute_dgeom_extra1()
else:
# strip
if strip == 1:
lkeep = [
"D",
"u",
"pinhole",
"nRays",
"kIn",
"kOut",
"vperp",
"indout",
"indStruct",
"kRMin",
"Etendues",
"Surfaces",
"isImage",
"dX12",
"dreflect",
"move",
"move_param",
"move_kwdargs",
]
utils.ToFuObject._strip_dict(self._dgeom, lkeep=lkeep)
elif self._dstrip["strip"] <= 1 and strip >= 2:
lkeep = [
"D",
"u",
"pinhole",
"nRays",
"kIn",
"kOut",
"vperp",
"indout",
"indStruct",
"Etendues",
"Surfaces",
"isImage",
"dX12",
"dreflect",
"move",
"move_param",
"move_kwdargs",
]
utils.ToFuObject._strip_dict(self._dgeom, lkeep=lkeep)
def _strip_dconfig(self, strip=0, force=False, verb=True):
if self._dstrip["strip"] == strip:
return
if strip < self._dstrip["strip"]:
if self._dstrip["strip"] == 4:
pfe = self._dconfig["Config"]
try:
self._dconfig["Config"] = utils.load(pfe, verb=verb)
except Exception as err:
msg = str(err)
msg += "\n type(pfe) = {0}".format(str(type(pfe)))
msg += "\n self._dstrip['strip'] = {0}".format(
self._dstrip["strip"]
)
msg += "\n strip = {0}".format(strip)
raise Exception(msg)
self._dconfig["Config"].strip(strip, verb=verb)
else:
if strip == 4:
path, name = self.config.Id.SavePath, self.config.Id.SaveName
# --- Check !
lf = os.listdir(path)
lf = [
ff for ff in lf if all([s in ff for s in [name, ".npz"]])
]
exist = len(lf) == 1
# ----------
pathfile = os.path.join(path, name) + ".npz"
if not exist:
msg = """BEWARE:
You are about to delete the Config object
Only the path/name of the saved object will be kept
But it appears that the following object has no
saved file where specified (obj.Id.SavePath)
Thus it won't be possible to retrieve it
(unless it is still available in the current console):"""
msg += "\n - {0}".format(pathfile)
if force:
warnings.warn(msg)
else:
raise Exception(msg)
self._dconfig["Config"] = pathfile
else:
self._dconfig["Config"].strip(strip, verb=verb)
def _strip_dsino(self, strip=0):
if self._dstrip["strip"] == strip:
return
if strip < self._dstrip["strip"]:
if strip <= 1 and self._dsino["k"] is not None:
self._compute_dsino_extra()
else:
if self._dstrip["strip"] <= 1:
utils.ToFuObject._strip_dict(self._dsino, lkeep=["RefPt", "k"])
def _strip_dmisc(self, lkeep=["color"]):
utils.ToFuObject._strip_dict(self._dmisc, lkeep=lkeep)
###########
# _strip and get/from dict
###########
@classmethod
def _strip_init(cls):
cls._dstrip["allowed"] = [0, 1, 2, 3, 4]
nMax = max(cls._dstrip["allowed"])
doc = """
1: dgeom w/o pts + config.strip(1)
2: dgeom w/o pts + config.strip(2) + dsino empty
3: dgeom w/o pts + config.strip(3) + dsino empty
4: dgeom w/o pts + config=pathfile + dsino empty
"""
doc = utils.ToFuObjectBase.strip.__doc__.format(doc, nMax)
cls.strip.__doc__ = doc
def strip(self, strip=0, verb=True):
# super()
super(Rays, self).strip(strip=strip, verb=verb)
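# Illustrative usage of the strip levels documented above (editor's hedged
# sketch; `cam` is a hypothetical instance). Stripping removes derived data
# to lighten the object; strip(0) reloads / recomputes what was stripped:
#
#     cam.strip(2)    # drop extra geometry data + empty dsino
#     cam.strip(0)    # recompute the stripped attributes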
def _strip(self, strip=0, verb=True):
self._strip_dconfig(strip=strip, verb=verb)
self._strip_dgeom(strip=strip)
self._strip_dsino(strip=strip)
def _to_dict(self):
dout = {
"dconfig": {"dict": self._dconfig, "lexcept": None},
"dgeom": {"dict": self.dgeom, "lexcept": None},
"dchans": {"dict": self.dchans, "lexcept": None},
"dsino": {"dict": self.dsino, "lexcept": None},
}
if self._is2D():
dout["dX12"] = {"dict": self._dX12, "lexcept": None}
return dout
@classmethod
def _checkformat_fromdict_dconfig(cls, dconfig):
if dconfig["Config"] is None:
return None
if type(dconfig["Config"]) is dict:
dconfig["Config"] = Config(fromdict=dconfig["Config"])
lC = [
isinstance(dconfig["Config"], Config),
type(dconfig["Config"]) is str,
]
assert any(lC)
def _from_dict(self, fd):
self._checkformat_fromdict_dconfig(fd["dconfig"])
self._dconfig.update(**fd["dconfig"])
self._dgeom.update(**fd["dgeom"])
self._dsino.update(**fd["dsino"])
if "dchans" in fd.keys():
self._dchans.update(**fd["dchans"])
if self._is2D():
self._dX12.update(**fd["dX12"])
###########
# properties
###########
@property
def dgeom(self):
return self._dgeom
@property
def dchans(self):
return self._dchans
@property
def dsino(self):
return self._dsino
@property
def lOptics(self):
return [self._dOptics['dobj'][k0][k1]
for (k0, k1) in map(lambda x: str.split(x, '_'),
self._dOptics['lorder'])]
@property
def isPinhole(self):
c0 = "pinhole" in self._dgeom.keys()
return c0 and self._dgeom["pinhole"] is not None
@property
def isInPoloidalPlane(self):
phiD = np.arctan2(self.D[1, :], self.D[0, :])
if self.nRays > 1 and not np.allclose(phiD[0], phiD[1:]):
return False
phiD = phiD[0]
ephi = np.array([-np.sin(phiD), np.cos(phiD), 0.])[:, None]
return np.allclose(np.sum(self.u*ephi, axis=0), 0.)
@property
def nRays(self):
return self._dgeom["nRays"]
@property
def D(self):
if self._dgeom["D"].shape[1] < self._dgeom["nRays"]:
D = np.tile(self._dgeom["D"], self._dgeom["nRays"])
else:
D = self._dgeom["D"]
return D
@property
def u(self):
if self._dgeom['u'] is not None \
and self._dgeom['u'].shape[1] == self._dgeom['nRays']:
u = self._dgeom['u']
elif self.isPinhole:
u = self._dgeom['pinhole'][:, None] - self._dgeom['D']
u = u / np.sqrt(np.sum(u**2, axis=0))[None, :]
elif self._dgeom['u'].shape[1] < self._dgeom['nRays']:
u = np.tile(self._dgeom['u'], self._dgeom['nRays'])
return u
@property
def pinhole(self):
if self._dgeom["pinhole"] is None:
msg = "This is not a pinhole camera => pinhole is None"
warnings.warn(msg)
return self._dgeom["pinhole"]
@property
def config(self):
return self._dconfig["Config"]
@property
def Etendues(self):
if self._dgeom["Etendues"] is None:
E = None
elif self._dgeom["Etendues"].size == self._dgeom["nRays"]:
E = self._dgeom["Etendues"]
elif self._dgeom["Etendues"].size == 1:
E = np.repeat(self._dgeom["Etendues"], self._dgeom["nRays"])
else:
msg = "Stored Etendues is not conform !"
raise Exception(msg)
return E
@property
def Surfaces(self):
if self._dgeom["Surfaces"] is None:
S = None
elif self._dgeom["Surfaces"].size == self._dgeom["nRays"]:
S = self._dgeom["Surfaces"]
elif self._dgeom["Surfaces"].size == 1:
S = np.repeat(self._dgeom["Surfaces"], self._dgeom["nRays"])
else:
msg = "Stored Surfaces not conform !"
raise Exception(msg)
return S
@property
def kIn(self):
return self._dgeom["kIn"]
@property
def kOut(self):
return self._dgeom["kOut"]
@property
def kMin(self):
if self.isPinhole:
kMin = self._dgeom["pinhole"][:, np.newaxis] - self._dgeom["D"]
kMin = np.sqrt(np.sum(kMin ** 2, axis=0))
else:
kMin = 0.0
return kMin
@classmethod
def _is2D(cls):
c0 = "2d" in cls.__name__.lower()
return c0
@classmethod
def _isLOS(cls):
c0 = "los" in cls.__name__.lower()
return c0
###########
# Movement methods
###########
def _update_or_copy(self, D, u, pinhole=None,
return_copy=None,
name=None, diag=None, dchans=None):
if return_copy is None:
return_copy = _RETURN_COPY
if self.isPinhole is True:
dgeom = {'pinhole': pinhole,
'D': D}
else:
dgeom = (D, u)
if return_copy is True:
if name is None:
name = self.Id.Name + 'copy'
if diag is None:
diag = self.Id.Diag
if dchans is None:
dchans = self.dchans
return self.__class__(dgeom=dgeom,
lOptics=self.lOptics,
Etendues=self.Etendues,
Surfaces=self.Surfaces,
config=self.config,
sino_RefPt=self._dsino['RefPt'],
color=self._dmisc['color'],
dchans=dchans,
Exp=self.Id.Exp,
Diag=diag,
Name=name,
shot=self.Id.shot,
SavePath=self.Id.SavePath)
else:
dgeom0 = ((self.D, self.pinhole)
if self.isPinhole is True else (self.D, self.u))
try:
self._set_dgeom(dgeom=dgeom,
Etendues=self.Etendues,
Surfaces=self.Surfaces,
sino_RefPt=self._dsino['RefPt'],
extra=True,
sino=True)
except Exception as err:
# Make sure instance does not move
self._set_dgeom(dgeom=dgeom0,
Etendues=self.Etendues,
Surfaces=self.Surfaces,
sino_RefPt=self._dsino['RefPt'],
extra=True,
sino=True)
msg = (str(err)
+ "\nAn exception occured during updating\n"
+ " => instance unmoved")
raise Exception(msg)
def _rotate_DPinholeu(self, func, **kwdargs):
pinhole, u = None, None
if self.isPinhole is True:
D = np.concatenate((self.D, self._dgeom['pinhole'][:, None]),
axis=1)
D = func(pts=D, **kwdargs)
D, pinhole = D[:, :-1], D[:, -1]
elif 'rotate' in func.__name__:
D, u = func(pts=self.D, vect=self.u, **kwdargs)
else:
D = func(pts=self.D, **kwdargs)
u = self.u
return D, pinhole, u
def translate_in_cross_section(self, distance=None, direction_rz=None,
phi=None,
return_copy=None,
diag=None, name=None, dchans=None):
""" Translate the instance in the cross-section """
if phi is None:
if self.isInPoloidalPlane:
phi = np.arctan2(*self.D[1::-1, 0])
elif self.isPinhole:
phi = np.arctan2(*self._dgeom['pinhole'][1::-1])
else:
msg = ("Instance not associated to a specific poloidal plane\n"
+ "\tPlease specify which poloidal plane (phi) to use")
raise Exception(msg)
D, pinhole, u = self._rotate_DPinholeu(
self._translate_pts_poloidal_plane,
phi=phi, direction_rz=direction_rz, distance=distance)
return self._update_or_copy(D, u, pinhole,
return_copy=return_copy,
diag=diag, name=name, dchans=dchans)
def translate_3d(self, distance=None, direction=None,
return_copy=None,
diag=None, name=None, dchans=None):
""" Translate the instance in provided direction """
D, pinhole, u = self._rotate_DPinholeu(
self._translate_pts_3d,
direction=direction, distance=distance)
return self._update_or_copy(D, u, pinhole,
return_copy=return_copy,
diag=diag, name=name, dchans=dchans)
def rotate_in_cross_section(self, angle=None, axis_rz=None,
phi=None,
return_copy=None,
diag=None, name=None, dchans=None):
""" Rotate the instance in the cross-section """
if phi is None:
if self.isInPoloidalPlane:
phi = np.arctan2(*self.D[1::-1, 0])
elif self.isPinhole:
phi = np.arctan2(*self._dgeom['pinhole'][1::-1])
else:
msg = ("Camera not associated to a specific poloidal plane\n"
+ "\tPlease specify which poloidal plane (phi) to use")
raise Exception(msg)
D, pinhole, u = self._rotate_DPinholeu(
self._rotate_pts_vectors_in_poloidal_plane,
axis_rz=axis_rz, angle=angle, phi=phi)
return self._update_or_copy(D, u, pinhole,
return_copy=return_copy,
diag=diag, name=name, dchans=dchans)
def rotate_around_torusaxis(self, angle=None,
return_copy=None,
diag=None, name=None, dchans=None):
""" Rotate the instance around the torus axis """
if self.config is not None and self.config.Id.Type != 'Tor':
msg = "Movement only available for Tor configurations!"
raise Exception(msg)
D, pinhole, u = self._rotate_DPinholeu(
self._rotate_pts_vectors_around_torusaxis,
angle=angle)
return self._update_or_copy(D, u, pinhole,
return_copy=return_copy,
diag=diag, name=name, dchans=dchans)
def rotate_around_3daxis(self, angle=None, axis=None,
return_copy=None,
diag=None, name=None, dchans=None):
""" Rotate the instance around the provided 3d axis """
D, pinhole, u = self._rotate_DPinholeu(
self._rotate_pts_vectors_around_3daxis,
axis=axis, angle=angle)
return self._update_or_copy(D, u, pinhole,
return_copy=return_copy,
diag=diag, name=name, dchans=dchans)
def set_move(self, move=None, param=None, **kwdargs):
""" Set the default movement parameters
A default movement can be set for the instance; it can be any of the
pre-implemented movements (rotations or translations)
This default movement is the one that will be called when using
self.move()
Specify the type of movement via the name of the method (passed as a
str to move)
Specify, for the geometry of the instance at the time of defining this
default movement, the current value of the associated movement
parameter (angle / distance). This is used to set an arbitrary
reference for users who want to use absolute position values
The desired incremental movement to be performed when calling self.move
will be deduced by subtracting the stored param value from the provided
param value. Just set the current param value to 0 if you don't care
about a custom absolute reference.
kwdargs must be the parameters relevant to the chosen method (axis,
direction, ...)
e.g.:
self.set_move(move='rotate_around_3daxis',
param=0.,
axis=([0.,0.,0.], [1.,0.,0.]))
self.set_move(move='translate_3d',
param=0.,
direction=[0.,1.,0.])
"""
move, param, kwdargs = self._checkformat_set_move(move, param, kwdargs)
self._dgeom['move'] = move
self._dgeom['move_param'] = param
if isinstance(kwdargs, dict) and len(kwdargs) == 0:
kwdargs = None
self._dgeom['move_kwdargs'] = kwdargs
def move(self, param):
""" Set new position to desired param according to default movement
Can only be used if default movement was set before
See self.set_move()
"""
param = self._move(param, dictname='_dgeom')
self._dgeom['move_param'] = param
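# Illustrative usage of the default-movement mechanism (editor's hedged
# sketch; `cam` is a hypothetical instance and the param values are
# arbitrary):
#
#     cam.set_move(move='rotate_around_torusaxis', param=0.)
#     cam.move(np.pi/8.)   # move to absolute param value pi/8
#     cam.move(np.pi/4.)   # increment deduced from the stored param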
###########
# public methods
###########
def get_indStruct_computeInOut(self, unique_In=None):
""" The indices of structures with compute = True
The indidces refer to self.config.lStruct
- The first array corresponds to Struct of type In
- The second array corresponds to Struct of type Out
"""
if unique_In is None:
unique_In = False
compute = self.config.get_compute()
indIn = np.array([
ii for ii, ss in enumerate(self.config.lStruct)
if compute[ii] and ss._InOut == "in"
], dtype=int)
if unique_In is True and indIn.size > 1:
iind = np.argmin([
self.config.lStruct[ii].dgeom['Surf'] for ii in indIn
])
indIn = np.r_[indIn[iind]]
indOut = np.array([
ii for ii, ss in enumerate(self.config.lStruct)
if compute[ii] and ss._InOut == "out"
], dtype=int)
return indIn, indOut
def _check_indch(self, ind, out=int):
if ind is not None:
ind = np.asarray(ind)
assert ind.ndim == 1
assert ind.dtype in [np.int64, np.bool_, int]
if ind.dtype == np.bool_:
assert ind.size == self.nRays
if out is int:
indch = ind.nonzero()[0]
else:
indch = ind
else:
assert np.max(ind) < self.nRays
if out is bool:
indch = np.zeros((self.nRays,), dtype=bool)
indch[ind] = True
else:
indch = ind
else:
if out is int:
indch = np.arange(0, self.nRays)
elif out is bool:
indch = np.ones((self.nRays,), dtype=bool)
return indch
def select(self, key=None, val=None, touch=None, log="any", out=int):
""" Return the indices of the rays matching selection criteria
The criterion can be of two types:
- a key found in self.dchans, with a matching value
- a touch tuple (indicating which element in self.config is touched
by the desired rays)
Parameters
----------
key : None / str
A key to be found in self.dchans
val : int / str / float / list of such
The value to be matched
If a list of values is provided, the behaviour depends on log
log : str
A flag indicating which behaviour to use when val is a list
- any : Returns indices of rays matching any value in val
- all : Returns indices of rays matching all values in val
- not : Returns indices of rays matching none of the values in val
touch: None / str / int / tuple
Used if key is None
Tuple that can be of len()=1, 2 or 3
Tuple indicating you want the rays that are touching some specific
elements of self.config:
- touch[0] : str / int or list of such
str : a 'Cls_Name' string indicating the element
int : the index of the element in self.config.lStruct
- touch[1] : int / list of int
Indices of the desired segments on the polygon
(i.e.: of the cross-section polygon of the above element)
- touch[2] : int / list of int
Indices, if relevant, of the toroidal / linear unit
Only relevant when the element has noccur>1
In this case only log='not' has an effect
out : str
Flag indicating whether to return:
- bool : a (nRays,) boolean array of indices
- int : a (N,) array of int indices (N=number of matching
rays)
Returns
-------
ind : np.ndarray
The array of matching rays
"""
assert out in [int, bool]
assert log in ["any", "all", "not"]
C = [key is None, touch is None]
assert np.sum(C) >= 1
if np.sum(C) == 2:
ind = np.ones((self.nRays,), dtype=bool)
else:
if key is not None:
assert type(key) is str and key in self._dchans.keys()
ltypes = [str, int, float, np.int64, np.float64]
C0 = type(val) in ltypes
C1 = type(val) in [list, tuple, np.ndarray]
assert C0 or C1
if C0:
val = [val]
else:
assert all([type(vv) in ltypes for vv in val])
ind = np.vstack([self._dchans[key] == ii for ii in val])
if log == "any":
ind = np.any(ind, axis=0)
elif log == "all":
ind = np.all(ind, axis=0)
else:
ind = ~np.any(ind, axis=0)
elif touch is not None:
lint = [int, np.int64]
larr = [list, tuple, np.ndarray]
touch = [touch] if not type(touch) is list else touch
assert len(touch) in [1, 2, 3]
def _check_touch(tt):
cS = type(tt) is str and len(tt.split("_")) == 2
c0 = type(tt) in lint
c1 = type(tt) in larr and len(tt) >= 0
c1 = c1 and all([type(t) in lint for t in tt])
return cS, c0, c1
for ii in range(0, 3 - len(touch)):
touch.append([])
ntouch = len(touch)
assert ntouch == 3
for ii in range(0, ntouch):
cS, c0, c1 = _check_touch(touch[ii])
if not (cS or c0 or c1):
msg = "Provided touch is not valid:\n" % touch
msg += " - Provided: %s\n" % str(touch)
msg += "Please provide either:\n"
msg += " - str in the form 'Cls_Name'\n"
msg += " - int (index)\n"
msg += " - array of int indices"
raise Exception(msg)
if cS:
k0, k1 = touch[ii].split("_")
lS = self.config.lStruct
ind = [
jj
for jj, ss in enumerate(lS)
if ss.Id.Cls == k0 and ss.Id.Name == k1
]
assert len(ind) == 1
touch[ii] = [ind[0]]
elif c0:
touch[ii] = [touch[ii]]
# Common part
ind = np.zeros((ntouch, self.nRays), dtype=bool)
for i in range(0, ntouch):
if len(touch[i]) == 0:
ind[i, :] = True
else:
for n in range(0, len(touch[i])):
ind[i, :] = np.logical_or(
ind[i, :],
self._dgeom["indout"][i, :] == touch[i][n],
)
ind = np.all(ind, axis=0)
if log == "not":
ind[:] = ~ind
if out is int:
ind = ind.nonzero()[0]
return ind
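# Illustrative usage of select() (editor's hedged sketch; `cam`, the
# 'Ves_V0' structure name and the dchans key 'ang' are hypothetical):
#
#     ind = cam.select(touch='Ves_V0', out=bool)    # rays touching Ves_V0
#     ind = cam.select(key='ang', val=[0., 5.], log='any', out=int)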
def get_subset(self, indch=None, Name=None):
""" Return an instance which is a sub-set of the camera
The subset is the same camera but with only the LOS selected by indch
It can be assigned a new Name (str), or the same one (True)
"""
if indch is None:
return self
else:
indch = self._check_indch(indch)
dd = self.to_dict()
sep = [kk for kk in dd.keys()
if all([ss in kk for ss in ['dId', 'dall', 'Name']])][0]
sep = sep[3]
# Name
assert Name in [None, True] or type(Name) is str
if Name is True:
pass
elif type(Name) is str:
dd[sep.join(['dId', 'dall', 'Name'])] = Name
elif Name is None:
dd[sep.join(['dId', 'dall', 'Name'])] += "-subset"
# Resize all np.ndarrays
for kk in dd.keys():
vv = dd[kk]
c0 = isinstance(vv, np.ndarray) and self.nRays in vv.shape
if c0:
if vv.ndim == 1:
dd[kk] = vv[indch]
elif vv.ndim == 2 and vv.shape[1] == self.nRays:
dd[kk] = vv[:, indch]
dd[sep.join(['dgeom', 'nRays'])] = (
dd[sep.join(['dgeom', 'D'])].shape[1])
# Recreate from dict
obj = self.__class__(fromdict=dd)
return obj
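# Illustrative usage of get_subset() (editor's hedged sketch; `cam` is a
# hypothetical instance):
#
#     sub = cam.get_subset(indch=[0, 1, 2], Name='first3')
#     sub.nRays    # -> 3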
def _get_plotL(
self,
reflections=True,
Lplot=None,
proj=None,
ind=None,
return_pts=False,
multi=False,
):
""" Get the (R,Z) coordinates of the cross-section projections """
# Check inputs
if Lplot is None:
Lplot = 'tot'
if proj is None:
proj = 'All'
# Compute
ind = self._check_indch(ind)
if ind.size > 0:
us = self.u[:, ind]
kOuts = np.atleast_1d(self.kOut[ind])[:, None]
if Lplot.lower() == "tot":
Ds = self.D[:, ind]
else:
Ds = self.D[:, ind] + self.kIn[None, ind] * us
kOuts = kOuts - np.atleast_1d(self.kIn[ind])[:, None]
if ind.size == 1:
Ds, us = Ds[:, None], us[:, None]
Ds, us = Ds[:, :, None], us[:, :, None]
# kRMin = None
# Add reflections ?
c0 = (
reflections
and self._dgeom.get("dreflect") is not None
and self._dgeom["dreflect"].get("us") is not None
)
if c0:
Dsadd = self._dgeom["dreflect"]["Ds"][:, ind, :]
usadd = self._dgeom["dreflect"]["us"][:, ind, :]
kOutsadd = self._dgeom["dreflect"]["kouts"][ind, :]
if ind.size == 1:
Dsadd, usadd = Dsadd[:, None, :], usadd[:, None, :]
kOutsadd = kOutsadd[None, :]
Ds = np.concatenate((Ds, Dsadd), axis=-1)
us = np.concatenate((us, usadd), axis=-1)
kOuts = np.concatenate((kOuts, kOutsadd), axis=-1)
# if self.config.Id.Type == "Tor":
# kRMin = _comp.LOS_PRMin(
# Ds, us, kOut=kOuts, Eps=1.0e-12, squeeze=False
# )
# elif self.config.Id.Type == "Tor":
# kRMin = self._dgeom["kRMin"][ind][:, None]
out = _comp.LOS_CrossProj(
self.config.Id.Type,
Ds,
us,
kOuts,
proj=proj,
return_pts=return_pts,
multi=multi,
)
else:
out = None
return out
def get_sample(
self,
res=None,
resMode="abs",
DL=None,
method="sum",
ind=None,
pts=False,
compact=True,
num_threads=_NUM_THREADS,
Test=True,
):
""" Return a linear sampling of the LOS
The LOS is sampled into a series of points and segment lengths
The resolution (segment length) is <= res
The sampling can be done according to different methods
It is possible to sample only a subset of the LOS
Parameters
----------
res: float
Desired resolution
resMode: str
Flag indicating res should be understood as:
- 'abs': an absolute distance in meters
- 'rel': a relative distance (fraction of the LOS length)
DL: None / iterable
The fraction [L1;L2] of the LOS that should be sampled, where
L1 and L2 are distances from the starting point of the LOS (LOS.D)
DL can be an iterable of len()==2 (identical to all los), or a
(2,nlos) array
method: str
Flag indicating which to use for sampling:
- 'sum': the LOS is sampled into N segments of equal length,
where N is the smallest int such that:
* segment length <= resolution(res,resMode)
The points returned are the center of each segment
- 'simps': the LOS is sampled into N segments of equal length,
where N is the smallest int such that:
* segment length <= resolution(res,resMode)
* N is even
The points returned are the edges of each segment
- 'romb': the LOS is sampled into N segments of equal length,
where N is the smallest int such that:
* segment length <= resolution(res,resMode)
* N = 2^k + 1
The points returned are the edges of each segment
ind: None / iterable of int
indices of the LOS to be sampled
pts: bool
Flag indicating whether to return only the abscissa parameter k
(False) or the 3D pts coordinates (True)
compact: bool
Flag indicating whether to return the sampled pts of all LOS in a
single concatenated array (True) or split into
a list of nlos arrays (False)
Returns
-------
k: np.ndarray
if pts == False:
A (npts,) array of the abscissa parameters
(i.e.: point distances from the LOS starting points)
The 3D cartesian coordinates can then be recovered as D + k*u
if pts == True:
A (3,npts) array of the sampled points 3D cartesian coordinates
reseff: np.ndarray
A (nlos,) array of the effective resolution (<= res input), as an
absolute distance
ind: np.ndarray
A (nlos-1,) array of integer indices (where to split k to separate
the points of each los). e.g.: lk = np.split(k,ind)
"""
if res is None:
res = _RES
ind = self._check_indch(ind)
# preload k
kIn = self.kIn
kOut = self.kOut
# Preformat DL
if DL is None:
DL = np.array([kIn[ind], kOut[ind]])
elif np.asarray(DL).size == 2:
DL = np.tile(np.asarray(DL).ravel(), (len(ind), 1)).T
DL = np.ascontiguousarray(DL).astype(float)
assert type(DL) is np.ndarray and DL.ndim == 2
assert DL.shape == (2, len(ind)), "Arg DL has wrong shape !"
# Check consistency of limits
ii = DL[0, :] < kIn[ind]
DL[0, ii] = kIn[ind][ii]
ii[:] = DL[0, :] >= kOut[ind]
DL[0, ii] = kOut[ind][ii]
ii[:] = DL[1, :] > kOut[ind]
DL[1, ii] = kOut[ind][ii]
ii[:] = DL[1, :] <= kIn[ind]
DL[1, ii] = kIn[ind][ii]
# Preformat Ds, us
Ds, us = self.D[:, ind], self.u[:, ind]
if len(ind) == 1:
Ds, us = Ds.reshape((3, 1)), us.reshape((3, 1))
Ds, us = np.ascontiguousarray(Ds), np.ascontiguousarray(us)
# Launch # NB : find a way to exclude cases with DL[0,:]>=DL[1,:] !!
# Todo : reverse in _GG : make compact default for faster computation !
nlos = Ds.shape[1]
k, reseff, lind = _GG.LOS_get_sample(
nlos,
res,
DL,
dmethod=resMode,
method=method,
num_threads=num_threads,
Test=Test,
)
if pts:
nbrep = np.r_[lind[0], np.diff(lind), k.size - lind[-1]]
k = np.repeat(Ds, nbrep, axis=1) + k[None, :] * np.repeat(
us, nbrep, axis=1
)
if not compact:
k = np.split(k, lind, axis=-1)
return k, reseff, lind
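# Illustrative usage of get_sample() (editor's hedged sketch; `cam` is a
# hypothetical instance and the resolution value is arbitrary):
#
#     k, reseff, ind = cam.get_sample(res=0.01, resMode='abs',
#                                     method='sum', pts=True)
#     lpts = np.split(k, ind, axis=-1)   # one (3, npts_i) array per LOS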
def _kInOut_Isoflux_inputs(self, lPoly, lVIn=None):
if self._method == "ref":
D, u = np.ascontiguousarray(self.D), np.ascontiguousarray(self.u)
Lim = self.config.Lim
nLim = self.config.nLim
Type = self.config.Id.Type
largs = [D, u, lPoly[0], lVIn[0]]
dkwd = dict(Lim=Lim, nLim=nLim, VType=Type)
elif self._method == "optimized":
D = np.ascontiguousarray(self.D)
u = np.ascontiguousarray(self.u)
if np.size(self.config.Lim) == 0 or self.config.Lim is None:
Lim = np.array([])
else:
Lim = np.asarray(self.config.Lim)
if np.size(np.shape(Lim)) > 1:
# in case self.config.Lim = [[L0, L1]]
Lim = np.asarray([Lim[0][0], Lim[0][1]])
nLim = self.config.nLim
Type = self.config.Id.Type
largs = [D, u, lPoly[0], lVIn[0]]
dkwd = dict(ves_lims=Lim, ves_type=Type)
else:
# To be adjusted later
pass
return largs, dkwd
def _kInOut_Isoflux_inputs_usr(self, lPoly, lVIn=None):
c0 = type(lPoly) in [np.ndarray, list, tuple]
# Check lPoly
if c0 and type(lPoly) is np.ndarray:
c0 = c0 and lPoly.ndim in [2, 3]
if c0 and lPoly.ndim == 2:
c0 = c0 and lPoly.shape[0] == 2
if c0:
lPoly = [np.ascontiguousarray(lPoly)]
elif c0:
c0 = c0 and lPoly.shape[1] == 2
if c0:
lPoly = np.ascontiguousarray(lPoly)
elif c0:
lPoly = [np.ascontiguousarray(pp) for pp in lPoly]
c0 = all([pp.ndim == 2 and pp.shape[0] == 2 for pp in lPoly])
if not c0:
msg = "Arg lPoly must be either:\n"
msg += " - a (2,N) np.ndarray (signle polygon of N points)\n"
msg += " - a list of M polygons, each a (2,Ni) np.ndarray\n"
msg += " - where Ni is the number of pts of each polygon\n"
msg += " - a (M,2,N) np.ndarray where:\n"
msg += " - M is the number of polygons\n"
msg += " - N is the (common) number of points per polygon\n"
raise Exception(msg)
nPoly = len(lPoly)
# Check anti-clockwise and closed
if type(lPoly) is list:
for ii in range(nPoly):
# Check closed and anti-clockwise
if not np.allclose(lPoly[ii][:, 0], lPoly[ii][:, -1]):
lPoly[ii] = np.concatenate(
(lPoly[ii], lPoly[ii][:, 0:1]), axis=-1
)
try:
if _GG.Poly_isClockwise(lPoly[ii]):
lPoly[ii] = lPoly[ii][:, ::-1]
except Exception as excp:
print("For structure ", ii, " : ", excp)
else:
# Check closed and anti-clockwise
d = np.sum((lPoly[:, :, 0]-lPoly[:, :, -1])**2, axis=1)
if np.allclose(d, 0.):
pass
elif np.all(d > 0.):
lPoly = np.concatenate((lPoly, lPoly[:, :, 0:1]), axis=-1)
else:
msg = "All poly in lPoly should be closed or all non-closed!"
raise Exception(msg)
for ii in range(nPoly):
try:
if _GG.Poly_isClockwise(lPoly[ii]):
lPoly[ii] = lPoly[ii][:, ::-1]
except Exception as excp:
print("For structure ", ii, " : ", excp)
# Check lVIn
if lVIn is None:
lVIn = []
for pp in lPoly:
vIn = np.diff(pp, axis=1)
vIn = vIn/(np.sqrt(np.sum(vIn**2, axis=0))[None, :])
vIn = np.ascontiguousarray([-vIn[1, :], vIn[0, :]])
lVIn.append(vIn)
else:
c0 = type(lVIn) in [np.ndarray, list, tuple]
if c0 and type(lVIn) is np.ndarray and lVIn.ndim == 2:
c0 = c0 and lVIn.shape == (2, lPoly[0].shape[1]-1)
if c0:
lVIn = [np.ascontiguousarray(lVIn)]
elif c0 and type(lVIn) is np.ndarray:
c0 = c0 and lVIn.shape == (nPoly, 2, lPoly.shape[-1]-1)
if c0:
lVIn = np.ascontiguousarray(lVIn)
elif c0:
c0 = c0 and len(lVIn) == nPoly
if c0:
c0 = c0 and all([vv.shape == (2, pp.shape[1]-1)
for vv, pp in zip(lVIn, lPoly)])
if c0:
lVIn = [np.ascontiguousarray(vv) for vv in lVIn]
# Check normalization and direction
for ii in range(0, nPoly):
lVIn[ii] = (lVIn[ii]
/ np.sqrt(np.sum(lVIn[ii]**2, axis=0))[None, :])
vect = np.diff(lPoly[ii], axis=1)
vect = vect / np.sqrt(np.sum(vect**2, axis=0))[None, :]
det = vect[0, :]*lVIn[ii][1, :] - vect[1, :]*lVIn[ii][0, :]
if not np.allclose(np.abs(det), 1.):
msg = "Each lVIn must be perp. to each lPoly segment !"
raise Exception(msg)
ind = np.abs(det+1) < 1.e-12
lVIn[ii][:, ind] = -lVIn[ii][:, ind]
return nPoly, lPoly, lVIn
def calc_kInkOut_Isoflux(self, lPoly, lVIn=None, Lim=None,
kInOut=True):
""" Calculate the intersection points of each ray with each isoflux
The isofluxes are provided as a list of 2D closed polygons
The intersections are the inward and outward intersections
They are returned as two np.ndarrays: kIn and kOut
Each array contains the length parameter along the ray for each isoflux
Parameters
----------
Returns
-------
"""
# Preformat input
nPoly, lPoly, lVIn = self._kInOut_Isoflux_inputs_usr(lPoly, lVIn=lVIn)
# Prepare output
kIn = np.full((nPoly, self.nRays), np.nan)
kOut = np.full((nPoly, self.nRays), np.nan)
# Compute intersections
assert(self._method in ['ref', 'optimized'])
if self._method == 'ref':
for ii in range(0, nPoly):
largs, dkwd = self._kInOut_Isoflux_inputs([lPoly[ii]],
lVIn=[lVIn[ii]])
out = _GG.SLOW_LOS_Calc_PInOut_VesStruct(*largs, **dkwd)
# PIn, POut, kin, kout, VperpIn, vperp, IIn, indout = out[]
kIn[ii, :], kOut[ii, :] = out[2], out[3]
elif self._method == "optimized":
for ii in range(0, nPoly):
largs, dkwd = self._kInOut_Isoflux_inputs([lPoly[ii]],
lVIn=[lVIn[ii]])
out = _GG.LOS_Calc_PInOut_VesStruct(*largs, **dkwd)[:2]
kIn[ii, :], kOut[ii, :] = out
if kInOut:
indok = ~np.isnan(kIn)
ind = np.zeros((nPoly, self.nRays), dtype=bool)
kInref = np.tile(self.kIn, (nPoly, 1))
kOutref = np.tile(self.kOut, (nPoly, 1))
ind[indok] = (kIn[indok] < kInref[indok])
ind[indok] = ind[indok] | (kIn[indok] > kOutref[indok])
kIn[ind] = np.nan
ind[:] = False
indok[:] = ~np.isnan(kOut)
ind[indok] = (kOut[indok] < kInref[indok]) | (
kOut[indok] > kOutref[indok]
)
kOut[ind] = np.nan
return kIn, kOut
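# Illustrative usage of calc_kInkOut_Isoflux() (editor's hedged sketch;
# the circular isoflux polygon below is arbitrary and `cam` is a
# hypothetical instance):
#
#     theta = np.linspace(0., 2.*np.pi, 100)
#     poly = np.array([2.4 + 0.4*np.cos(theta), 0.4*np.sin(theta)])
#     kIn, kOut = cam.calc_kInkOut_Isoflux([poly])
#     length = kOut - kIn        # in-isoflux length of each LOS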
def calc_length_in_isoflux(self, lPoly, lVIn=None, Lim=None, kInOut=True):
""" Return the length of each LOS inside each isoflux
Uses self.calc_kInkOut_Isoflux() to compute the linear abscissa (k) of
the entry points (kIn) and exit points (kOut) for each LOS
The isofluxes must be provided as a list of polygons
The length is returned as a (nPoly, nLOS) 2d array
"""
kIn, kOut = self.calc_kInkOut_Isoflux(lPoly, lVIn=lVIn, Lim=Lim,
kInOut=kInOut)
return kOut-kIn
def calc_min_geom_radius(self, axis):
""" Return the minimum geom. radius of each LOS, from an arbitrary axis
The axis must be provided as a (R,Z) iterable
Uses self.set_dsino()
Returns:
-------
p: np.ndarray
(nLOS,) array of minimal radius (or impact parameter)
theta: np.ndarray
(nLOS,) array of associated theta with respect to axis
pts: np.ndarray
(3,nLOS) array of (X,Y,Z) coordinates of associated points on LOS
"""
self.set_dsino(RefPt=axis, extra=True)
p, theta, pts = self.dsino['p'], self.dsino['theta'], self.dsino['pts']
return p, theta, pts
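# Illustrative usage of calc_min_geom_radius() (editor's hedged sketch;
# the (R, Z) axis value and `cam` are hypothetical):
#
#     p, theta, pts = cam.calc_min_geom_radius([2.4, 0.])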
def calc_min_rho_from_Plasma2D(self, plasma, t=None, log='min',
res=None, resMode='abs', method='sum',
quant=None, ref1d=None, ref2d=None,
interp_t=None, interp_space=None,
fill_value=np.nan, pts=False, Test=True):
""" Return the min/max value of scalar field quant for each LOS
Typically used to get the minimal normalized minor radius
But can be used for any quantity available in plasma if:
- it is a 2d profile
- it is a 1d profile that can be interpolated on a 2d mesh
Currently samples each LOS with the desired resolution and returns the
absolute min/max interpolated value (and associated point)
See self.get_sample() for details on sampling arguments:
- res, resMode, method
See Plasma2D.interp_pts2profile() for details on interpolation args:
- t, quant, q2dref, q1dref, interp_t, interp_space, fill_value
Returns:
--------
val: np.ndarray
(nt, nLOS) array of min/max values
pts: np.ndarray
(nt, nLOS, 3) array of (X,Y,Z) coordinates of associated points
Only returned if pts = True
t: np.ndarray
(nt,) array of time steps at which the interpolations were made
"""
assert log in ['min', 'max']
assert isinstance(pts, bool)
# Sample LOS
ptsi, reseff, lind = self.get_sample(res=res, resMode=resMode, DL=None,
method=method, ind=None,
pts=True, compact=True, Test=True)
# Interpolate values
val, t = plasma.interp_pts2profile(
pts=ptsi, t=t, quant=quant, ref1d=ref1d, ref2d=ref2d,
interp_t=interp_t, interp_space=interp_space,
fill_value=fill_value)
# Separate val per LOS and compute min / max
func = np.nanmin if log == 'min' else np.nanmax
if pts:
funcarg = np.nanargmin if log == 'min' else np.nanargmax
if pts:
nt = t.size
pts = np.full((3, self.nRays, nt), np.nan)
vals = np.full((nt, self.nRays), np.nan)
# indt = np.arange(0, nt)
lind = np.r_[0, lind, ptsi.shape[1]]
for ii in range(self.nRays):
indok = ~np.all(np.isnan(val[:, lind[ii]:lind[ii+1]]), axis=1)
if np.any(indok):
vals[indok, ii] = func(val[indok, lind[ii]:lind[ii+1]],
axis=1)
ind = funcarg(val[indok, lind[ii]:lind[ii+1]], axis=1)
pts[:, ii, indok] = ptsi[:, lind[ii]:lind[ii+1]][:, ind]
pts = pts.T
else:
pts = None
vals = np.column_stack([func(vv, axis=1)
for vv in np.split(val, lind, axis=-1)])
return vals, pts, t
def get_inspector(self, ff):
out = inspect.signature(ff)
pars = out.parameters.values()
na = np.sum([(pp.kind == pp.POSITIONAL_OR_KEYWORD
and pp.default is pp.empty) for pp in pars])
kw = [pp.name for pp in pars if (pp.kind == pp.POSITIONAL_OR_KEYWORD
and pp.default is not pp.empty)]
return na, kw
def check_ff(self, ff, t=None, ani=None):
# Initialization of function wrapper
wrapped_ff = ff
# Define unique error message giving all info in a concise way
# Optionally add an error-specific line afterwards
msg = ("User-defined emissivity function ff must:\n"
+ "\t- be a callable (function)\n"
+ "\t- take only one positional arg "
+ "and at least one keyword arg:\n"
+ "\t\t - ff(pts, t=None), where:\n"
+ "\t\t\t - pts is a (3, npts) of (x, y, z) coordinates\n"
+ "\t\t\t - t can be None / scalar / iterable of len(t) = nt\n"
+ "\t- Always return a 2d (nt, npts) np.ndarray, where:\n"
+ "\t\t - nt = len(t) if t is an iterable\n"
+ "\t\t - nt = 1 if t is None or scalar\n"
+ "\t\t - npts is the number of pts (pts.shape[1])\n\n"
+ "\t- Optionally, ff can take an extra keyword arg:\n"
+ "\t\t - ff(pts, vect=None, t=None), where:\n"
+ "\t\t\t - vect is a (3, npts) np.ndarray\n"
+ "\t\t\t - vect contains the (x, y, z) coordinates "
+ "of the units vectors of the photon emission directions"
+ "for each pts. Present only for anisotropic emissivity, "
+ "unless specifically indicated otherwise "
+ "(with ani=False in LOS_calc_signal).\n"
+ "\t\t\tDoes not affect the outpout shape (still (nt, npts))")
# .. Checking basic definition of function ..........................
if not hasattr(ff, '__call__'):
msg += "\n\n => ff must be a callable (function)!"
raise Exception(msg)
npos_args, kw = self.get_inspector(ff)
if npos_args != 1:
msg += "\n\n => ff must take only 1 positional arg: ff(pts)!"
raise Exception(msg)
if 't' not in kw:
msg += "\n\n => ff must have kwarg 't=None' for time vector!"
raise Exception(msg)
# .. Checking time vector .........................................
ltypeok = [int, float, np.int64, np.float64]
is_t_type_valid = (type(t) in ltypeok or hasattr(t, '__iter__'))
if not (t is None or is_t_type_valid):
msg += "\n\n => t must be None, scalar or iterable !"
raise Exception(msg)
nt = len(t) if hasattr(t, '__iter__') else 1
# .. Test anisotropic case .......................................
if ani is None:
is_ani = ('vect' in kw)
else:
assert isinstance(ani, bool)
is_ani = ani
# .. Testing outputs ...............................................
test_pts = np.array([[1, 2], [3, 4], [5, 6]])
npts = test_pts.shape[1]
if is_ani:
vect = np.ones(test_pts.shape)
try:
out = ff(test_pts, vect=vect, t=t)
except Exception:
msg += "\n\n => ff must take ff(pts, vect=vect, t=t) !"
raise Exception(msg)
else:
try:
out = ff(test_pts, t=t)
except Exception:
msg += "\n\n => ff must take a ff(pts, t=t) !"
raise Exception(msg)
if not (isinstance(out, np.ndarray) and (out.shape == (nt, npts)
or out.shape == (npts,))):
msg += "\n\n => wrong output (always 2d np.ndarray) !"
raise Exception(msg)
if nt == 1 and out.shape == (npts,):
def wrapped_ff(*args, **kwargs):
res_ff = ff(*args, **kwargs)
return np.reshape(res_ff, (1, -1))
return is_ani, wrapped_ff
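# Illustrative user-defined emissivity satisfying the check_ff()
# requirements above (editor's hedged sketch; the gaussian profile and
# its parameters are arbitrary):
#
#     def emiss(pts, t=None, vect=None):
#         R = np.hypot(pts[0, :], pts[1, :])
#         val = np.exp(-((R - 2.4)**2 + pts[2, :]**2) / 0.2**2)
#         nt = len(t) if hasattr(t, '__iter__') else 1
#         return np.tile(val, (nt, 1))    # always (nt, npts)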
def _calc_signal_preformat(self, ind=None, DL=None, t=None,
out=object, Brightness=True):
msg = "Arg out must be in [object,np.ndarray]"
assert out in [object, np.ndarray], msg
assert type(Brightness) is bool, "Arg Brightness must be a bool !"
if Brightness is False and self.Etendues is None:
msg = "Etendue must be set if Brightness is False !"
raise Exception(msg)
# Preformat ind
ind = self._check_indch(ind)
# Preformat DL
kIn, kOut = self.kIn, self.kOut
if DL is None:
DL = np.array([kIn[ind], kOut[ind]])
elif np.asarray(DL).size == 2:
DL = np.tile(np.asarray(DL).ravel()[:, np.newaxis], len(ind))
DL = np.ascontiguousarray(DL).astype(float)
assert type(DL) is np.ndarray and DL.ndim == 2
assert DL.shape == (2, len(ind)), "Arg DL has wrong shape !"
# check limits
ii = DL[0, :] < kIn[ind]
DL[0, ii] = kIn[ind][ii]
ii[:] = DL[0, :] >= kOut[ind]
DL[0, ii] = kOut[ind][ii]
ii[:] = DL[1, :] > kOut[ind]
DL[1, ii] = kOut[ind][ii]
ii[:] = DL[1, :] <= kIn[ind]
DL[1, ii] = kIn[ind][ii]
# Preformat Ds, us and Etendue
Ds, us = self.D[:, ind], self.u[:, ind]
E = None
if Brightness is False:
E = self.Etendues
if E.size == self.nRays:
E = E[ind]
# Preformat signal
if len(ind) == 1:
Ds, us = Ds.reshape((3, 1)), us.reshape((3, 1))
indok = ~(
np.any(np.isnan(DL), axis=0)
| np.any(np.isinf(DL), axis=0)
| ((DL[1, :] - DL[0, :]) <= 0.0)
)
if np.any(indok):
Ds, us, DL = Ds[:, indok], us[:, indok], DL[:, indok]
if indok.sum() == 1:
Ds, us = Ds.reshape((3, 1)), us.reshape((3, 1))
DL = DL.reshape((2, 1))
Ds, us = np.ascontiguousarray(Ds), np.ascontiguousarray(us)
DL = np.ascontiguousarray(DL)
else:
Ds, us, DL = None, None, None
return indok, Ds, us, DL, E
def _calc_signal_postformat(
self,
sig,
Brightness=True,
dataname=None,
t=None,
E=None,
units=None,
plot=True,
out=object,
fs=None,
dmargin=None,
wintit=None,
invert=True,
draw=True,
connect=True,
):
if Brightness is False:
if dataname is None:
dataname = r"LOS-integral x Etendue"
if E is None or np.all(np.isnan(E)):
msg = "Cannot use etendue, it was not set properly !"
raise Exception(msg)
if t is None or len(t) == 1 or E.size == 1:
sig = sig * E
else:
sig = sig * E[np.newaxis, :]
if units is None:
units = r"origin x $m^3.sr$"
else:
if dataname is None:
dataname = r"LOS-integral"
if units is None:
units = r"origin x m"
if plot or out in [object, "object"]:
kwdargs = dict(
data=sig,
t=t,
lCam=self,
Name=self.Id.Name,
dlabels={"data": {"units": units, "name": dataname}},
Exp=self.Id.Exp,
Diag=self.Id.Diag,
)
import tofu.data as tfd
if self._is2D():
osig = tfd.DataCam2D(**kwdargs)
else:
osig = tfd.DataCam1D(**kwdargs)
if plot:
_ = osig.plot(
fs=fs,
dmargin=dmargin,
wintit=wintit,
invert=invert,
draw=draw,
connect=connect,
)
if out in [object, "object"]:
return osig, units
else:
return sig, units
def calc_signal(
self,
func,
t=None,
ani=None,
fkwdargs={},
Brightness=True,
res=None,
DL=None,
resMode="abs",
method="sum",
minimize="calls",
num_threads=16,
reflections=True,
coefs=None,
coefs_reflect=None,
ind=None,
returnas=object,
plot=True,
dataname=None,
fs=None,
dmargin=None,
wintit=None,
invert=True,
units=None,
draw=True,
connect=True,
newcalc=True,
):
""" Return the line-integrated emissivity
Beware, by default, Brightness=True and it is only a line-integral !
Indeed, to get the received power, you need an estimate of the Etendue
(previously set using self.set_Etendues()) and use Brightness=False.
Hence, if Brightness=True and if
the emissivity is provided in W/m3 (resp. W/m3/sr),
=> the method returns W/m2 (resp. W/m2/sr)
The line is sampled using :meth:`~tofu.geom.LOS.get_sample`
Except for func, the arguments are common to :meth:`~tofu.geom.LOS.get_sample`
Parameters
----------
func : callable
The user-provided emissivity function
Shall take at least:
func(pts, t=None, vect=None)
where:
- pts : (3,N) np.ndarray, (X,Y,Z) coordinates of points
- t : None / (nt,) np.ndarray, time vector
- vect: None / (3,N) np.ndarray, unit direction vectors (X,Y,Z)
Should return at least:
- val : (N,) np.ndarray, local emissivity values
method : string, the integral can be computed using 3 different methods
- 'sum': A numpy.sum() on the local values (x segments) DEFAULT
- 'simps': using :meth:`scipy.integrate.simps`
- 'romb': using :meth:`scipy.integrate.romb`
minimize : string, method to minimize for computation optimization
- "calls": minimal number of calls to `func` (default)
- "memory": slowest method, to use only if "out of memory" error
- "hybrid": mix of before-mentioned methods.
Returns
-------
sig : np.ndarray
The computed signal, a 1d or 2d array depending on whether a time
vector was provided.
units: str
Units of the result
"""
# Format input
indok, Ds, us, DL, E = self._calc_signal_preformat(
ind=ind, DL=DL, out=returnas, Brightness=Brightness
)
if Ds is None:
return None
if res is None:
res = _RES
# Launch # NB : find a way to exclude cases with DL[0,:]>=DL[1,:] !!
# Exclude Rays not seeing the plasma
if newcalc:
ani, func = self.check_ff(func, t=t, ani=ani)
s = _GG.LOS_calc_signal(
func,
Ds,
us,
res,
DL,
dmethod=resMode,
method=method,
ani=ani,
t=t,
fkwdargs=fkwdargs,
minimize=minimize,
num_threads=num_threads,
Test=True,
)
c0 = (
reflections
and self._dgeom["dreflect"] is not None
and self._dgeom["dreflect"].get("nb", 0) > 0
)
if c0:
if coefs_reflect is None:
coefs_reflect = 1.0
for ii in range(self._dgeom["dreflect"]["nb"]):
Dsi = np.ascontiguousarray(
self._dgeom["dreflect"]["Ds"][:, :, ii]
)
usi = np.ascontiguousarray(
self._dgeom["dreflect"]["us"][:, :, ii]
)
s += coefs_reflect * _GG.LOS_calc_signal(
func,
Dsi,
usi,
res,
DL,
dmethod=resMode,
method=method,
ani=ani,
t=t,
fkwdargs=fkwdargs,
minimize=minimize,
num_threads=num_threads,
Test=True,
)
# Integrate
# Creating the arrays with null everywhere..........
if s.ndim == 2:
sig = np.full((s.shape[0], self.nRays), np.nan)
else:
sig = np.full((1, self.nRays), np.nan)
if t is None or len(t) == 1:
sig[0, indok] = s
else:
sig[:, indok] = s
else:
# Get ptsRZ along LOS // Which to choose ???
pts, reseff, indpts = self.get_sample(
res,
resMode=resMode,
DL=DL,
method=method,
ind=ind,
compact=True,
pts=True,
)
if ani:
nbrep = np.r_[
indpts[0], np.diff(indpts), pts.shape[1] - indpts[-1]
]
vect = np.repeat(self.u, nbrep, axis=1)
else:
vect = None
# Get quantity values at ptsRZ
# This is the slowest step (~3.8 s with res=0.02
# and interferometer)
val = func(pts, t=t, vect=vect)
# Integrate
sig = np.add.reduceat(val, np.r_[0, indpts],
axis=-1)*reseff[None, :]
# Apply user-provided coefs
if coefs is not None:
if hasattr(coefs, '__iter__'):
coefs = np.atleast_1d(coefs).ravel()
assert coefs.shape == (sig.shape[-1],)
if sig.ndim == 2:
coefs = coefs[None, :]
sig *= coefs
# Format output
return self._calc_signal_postformat(
sig,
Brightness=Brightness,
dataname=dataname,
t=t,
E=E,
units=units,
plot=plot,
out=returnas,
fs=fs,
dmargin=dmargin,
wintit=wintit,
invert=invert,
draw=draw,
connect=connect,
)
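# Illustrative usage of calc_signal() with an emissivity like the one
# sketched after check_ff() above (editor's hedged sketch; `cam` and
# `emiss` are hypothetical):
#
#     sig, units = cam.calc_signal(emiss, res=0.01, method='sum',
#                                  Brightness=True, plot=False,
#                                  returnas=np.ndarray)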
def calc_signal_from_Plasma2D(
self,
plasma2d,
t=None,
newcalc=True,
quant=None,
ref1d=None,
ref2d=None,
q2dR=None,
q2dPhi=None,
q2dZ=None,
Type=None,
Brightness=True,
interp_t="nearest",
interp_space=None,
fill_value=None,
res=None,
DL=None,
resMode="abs",
method="sum",
minimize="calls",
num_threads=16,
reflections=True,
coefs=None,
coefs_reflect=None,
ind=None,
returnas=object,
plot=True,
dataname=None,
fs=None,
dmargin=None,
wintit=None,
invert=True,
units=None,
draw=True,
connect=True,
):
# Format input
indok, Ds, us, DL, E = self._calc_signal_preformat(
ind=ind, out=returnas, t=t, Brightness=Brightness
)
if Ds is None:
return None
if res is None:
res = _RES
if newcalc:
# Get time vector
lc = [t is None, type(t) is str, type(t) is np.ndarray]
assert any(lc)
if lc[0]:
out = plasma2d._checkformat_qr12RPZ(
quant=quant,
ref1d=ref1d,
ref2d=ref2d,
q2dR=q2dR,
q2dPhi=q2dPhi,
q2dZ=q2dZ,
)
t = plasma2d._get_tcom(*out[:4])[0]
elif lc[1]:
t = plasma2d._ddata[t]['data']
else:
t = np.atleast_1d(t).ravel()
if fill_value is None:
fill_value = 0.0
func = plasma2d.get_finterp2d(
quant=quant,
ref1d=ref1d,
ref2d=ref2d,
q2dR=q2dR,
q2dPhi=q2dPhi,
q2dZ=q2dZ,
interp_t=interp_t,
interp_space=interp_space,
fill_value=fill_value,
Type=Type,
)
def funcbis(*args, **kwdargs):
return func(*args, **kwdargs)[0]
if DL is None:
# set to [kIn,kOut]
DL = None
ani = quant is None
if num_threads is None:
num_threads = _NUM_THREADS
if np.all(indok):
D, u = self.D, self.u
else:
D = np.ascontiguousarray(self.D[:, indok])
u = np.ascontiguousarray(self.u[:, indok])
sig = _GG.LOS_calc_signal(
funcbis,
D,
u,
res,
DL,
dmethod=resMode,
method=method,
ani=ani,
t=t,
fkwdargs={},
minimize=minimize,
Test=True,
num_threads=num_threads,
)
c0 = (
reflections
and self._dgeom["dreflect"] is not None
and self._dgeom["dreflect"].get("nb", 0) > 0
)
if c0:
if coefs_reflect is None:
coefs_reflect = 1.0
for ii in range(self._dgeom["dreflect"]["nb"]):
Dsi = np.ascontiguousarray(
self._dgeom["dreflect"]["Ds"][:, :, ii]
)
usi = np.ascontiguousarray(
self._dgeom["dreflect"]["us"][:, :, ii]
)
sig += coefs_reflect * _GG.LOS_calc_signal(
funcbis,
Dsi,
usi,
res,
DL,
dmethod=resMode,
method=method,
ani=ani,
t=t,
fkwdargs={},
minimize=minimize,
num_threads=num_threads,
Test=True,
)
else:
# Get ptsRZ along LOS // Which to choose ???
pts, reseff, indpts = self.get_sample(
res,
resMode=resMode,
DL=DL,
method=method,
ind=ind,
compact=True,
pts=True,
)
if q2dR is None:
vect = None
else:
nbrep = np.r_[
indpts[0], np.diff(indpts), pts.shape[1] - indpts[-1]
]
vect = -np.repeat(self.u, nbrep, axis=1)
if fill_value is None:
fill_value = 0.
# Get quantity values at ptsRZ
# This is the slowest step (~3.8 s with res=0.02
# and interferometer)
val, t = plasma2d.interp_pts2profile(
pts=pts,
vect=vect,
t=t,
quant=quant,
ref1d=ref1d,
ref2d=ref2d,
q2dR=q2dR,
q2dPhi=q2dPhi,
q2dZ=q2dZ,
interp_t=interp_t,
Type=Type,
interp_space=interp_space,
fill_value=fill_value,
)
# Integrate using ufunc reduceat for speed
# (cf. https://stackoverflow.com/questions/59079141)
sig = np.add.reduceat(val, np.r_[0, indpts],
axis=-1)*reseff[None, :]
# Apply user-provided coefs
if coefs is not None:
if hasattr(coefs, '__iter__'):
coefs = np.atleast_1d(coefs).ravel()
assert coefs.shape == (sig.shape[-1],)
if sig.ndim == 2:
coefs = coefs[None, :]
sig *= coefs
# Format output
# this is the second slowest step (~0.75 s)
out = self._calc_signal_postformat(
sig,
Brightness=Brightness,
dataname=dataname,
t=t,
E=E,
units=units,
plot=plot,
out=returnas,
fs=fs,
dmargin=dmargin,
wintit=wintit,
invert=invert,
draw=draw,
connect=connect,
)
return out
def plot(
self,
lax=None,
proj="all",
reflections=True,
Lplot=_def.LOSLplot,
element="L",
element_config="P",
Leg="",
dL=None,
dPtD=_def.LOSMd,
dPtI=_def.LOSMd,
dPtO=_def.LOSMd,
dPtR=_def.LOSMd,
dPtP=_def.LOSMd,
dLeg=_def.TorLegd,
multi=False,
ind=None,
fs=None,
tit=None,
wintit=None,
draw=True,
Test=True,
):
""" Plot the Rays / LOS, in the chosen projection(s)
Optionally also plot associated :class:`~tofu.geom.Ves` and Struct
The plot can also include:
- special points
- the unit directing vector
Parameters
----------
lax : list / plt.Axes
The axes for plotting (list of 2 axes if Proj='All')
If None a new figure with new axes is created
proj : str
Flag specifying the kind of projection:
- 'Cross' : cross-section
- 'Hor' : horizontal
- 'All' : both cross-section and horizontal (on 2 axes)
- '3d' : a (matplotlib) 3d plot
reflections : bool
Flag indicating whether to also plot the reflected rays
(assuming some reflected rays are present, cf. self.add_reflections())
element : str
Flag specifying which elements to plot
Each capital letter corresponds to an element:
* 'L': LOS
* 'D': Starting point of the LOS
* 'I': Input point (i.e.: where the LOS enters the Vessel)
* 'O': Output point (i.e.: where the LOS exits the Vessel)
* 'R': Point of minimal major radius R (only if Ves.Type='Tor')
* 'P': Point of used for impact parameter (i.e.: with minimal
distance to reference point Sino_RefPt)
Lplot : str
Flag specifying the length to plot:
- 'Tot': total length, from starting point (D) to output point
- 'In' : only the in-vessel fraction (from input to output)
element_config : str
Fed to self.config.plot()
Leg : str
Legend, if Leg='' the LOS name is used
dL : dict / None
Dictionary of properties for plotting the lines
Fed to plt.Axes.plot(), set to default if None
dPtD : dict
Dictionary of properties for plotting point 'D'
dPtI : dict
Dictionary of properties for plotting point 'I'
dPtO : dict
Dictionary of properties for plotting point 'O'
dPtR : dict
Dictionary of properties for plotting point 'R'
dPtP : dict
Dictionary of properties for plotting point 'P'
dLeg : dict or None
Dictionary of properties for plotting the legend
Fed to plt.legend(), the legend is not plotted if None
draw : bool
Flag indicating whether fig.canvas.draw() shall be called
a4 : bool
Flag indicating whether to plot the figure in a4 dimensions
Test : bool
Flag indicating whether the inputs should be tested for conformity
Returns
-------
La : list / plt.Axes
Handles of the axes used for plotting (list if Proj='All')
"""
return _plot.Rays_plot(
self,
Lax=lax,
Proj=proj,
reflections=reflections,
Lplot=Lplot,
element=element,
element_config=element_config,
Leg=Leg,
dL=dL,
dPtD=dPtD,
dPtI=dPtI,
dPtO=dPtO,
dPtR=dPtR,
dPtP=dPtP,
dLeg=dLeg,
multi=multi,
ind=ind,
fs=fs,
tit=tit,
wintit=wintit,
draw=draw,
Test=Test,
)
def plot_sino(
self,
ax=None,
element=_def.LOSImpElt,
Sketch=True,
Ang=_def.LOSImpAng,
AngUnit=_def.LOSImpAngUnit,
Leg=None,
dL=_def.LOSMImpd,
dVes=_def.TorPFilld,
dLeg=_def.TorLegd,
ind=None,
multi=False,
fs=None,
tit=None,
wintit=None,
draw=True,
Test=True,
):
""" Plot the LOS in projection space (sinogram)
Plot the Rays in projection space (cf. sinograms) as points.
Can also optionally plot the associated :class:`~tofu.geom.Ves`
Can plot the conventional projection-space (in 2D in a cross-section),
or a 3D extrapolation of it, where the third coordinate is provided by
the angle that the LOS makes with the cross-section plane
(useful in case of multiple LOS with a partially tangential view)
Parameters
----------
Proj : str
Flag indicating whether to plot:
- 'Cross': a classic sinogram (vessel cross-section)
- '3d': an extended 3D version ('3d'), with an additional angle
ax : None / plt.Axes
The axes on which to plot, if None a new figure is created
Elt : str
Flag indicating which elements to plot (one per capital letter):
* 'L': LOS
* 'V': Vessel
Ang : str
Flag indicating which angle to use for the impact parameter:
- 'xi': the angle of the line itself
- 'theta': its impact parameter (theta)
AngUnit : str
Flag for the angle units to be displayed:
- 'rad': for radians
- 'deg': for degrees
Sketch : bool
Flag indicating whether to plot a sketch with angle definitions
dL : dict
Dictionary of properties for plotting the Rays points
dV : dict
Dictionary of properties for plotting the vessel envelope
dLeg : None / dict
Dictionary of properties for plotting the legend
The legend is not plotted if None
draw : bool
Flag indicating whether to draw the figure
a4 : bool
Flag indicating whether the figure should be a4
Test : bool
Flag indicating whether the inputs shall be tested for conformity
Returns
-------
ax : plt.Axes
The axes used to plot
"""
if self._dsino["RefPt"] is None:
msg = "The sinogram ref. point is not set !"
msg += "\n => run self.set_dsino()"
raise Exception(msg)
return _plot.GLOS_plot_Sino(
self,
Proj="Cross",
ax=ax,
Elt=element,
Leg=Leg,
Sketch=Sketch,
Ang=Ang,
AngUnit=AngUnit,
dL=dL,
dVes=dVes,
dLeg=dLeg,
ind=ind,
fs=fs,
tit=tit,
wintit=wintit,
draw=draw,
Test=Test,
)
def get_touch_dict(self, ind=None, out=bool):
""" Get a dictionnary of Cls_Name struct with indices of Rays touching
Only includes Struct object with compute = True
(as returned by self.lStruct__computeInOut_computeInOut)
Also return the associated colors
If ind is not None, the indices for each Struct are split between:
- indok : rays touching Struct and in ind
- indout: rays touching Struct but not in ind
"""
if self.config is None:
msg = "Config must be set in order to get touch dict !"
raise Exception(msg)
dElt = {}
lS = self.config.lStruct
ind = self._check_indch(ind, out=bool)
for ii in np.r_[self.get_indStruct_computeInOut(unique_In=True)]:
kn = "{}_{}".format(lS[ii].__class__.__name__, lS[ii].Id.Name)
indtouch = self.select(touch=kn, out=bool)
if np.any(indtouch):
indok = indtouch & ind
indout = indtouch & ~ind
if np.any(indok) or np.any(indout):
if out == int:
indok = indok.nonzero()[0]
indout = indout.nonzero()[0]
dElt[kn] = {
"indok": indok,
"indout": indout,
"col": lS[ii].get_color(),
}
return dElt
def get_touch_colors(
self,
ind=None,
dElt=None,
cbck=(0.8, 0.8, 0.8),
rgba=True,
):
""" Get array of colors per LOS (color set by the touched Struct) """
if dElt is None:
dElt = self.get_touch_dict(ind=None, out=bool)
else:
assert type(dElt) is dict
assert all(
[type(k) is str and type(v) is dict for k, v in dElt.items()]
)
if rgba:
colors = np.tile(mpl.colors.to_rgba(cbck), (self.nRays, 1)).T
for k, v in dElt.items():
colors[:, v["indok"]] = np.r_[mpl.colors.to_rgba(v["col"])][
:, None
]
else:
colors = np.tile(mpl.colors.to_rgb(cbck), (self.nRays, 1)).T
for k, v in dElt.items():
colors[:, v["indok"]] = np.r_[mpl.colors.to_rgb(v["col"])][
:, None
]
return colors
def plot_touch(
self,
key=None,
quant="lengths",
Lplot=None,
invert=None,
ind=None,
Bck=True,
fs=None,
wintit=None,
tit=None,
connect=True,
draw=True,
):
""" Interactive plot of the camera and the structures it touches
The camera LOS are plotted in poloidal and horizontal projections
The associated Config is also plotted
The plot shows which structural element is touched by each LOS
In addition, an extra quantity can be mapped to alpha (transparency)
Parameters
----------
key: None / str
Only relevant if self.dchans was defined
key is then a key of self.dchans
quant: None / str
Flag indicating which extra quantity is used to map alpha:
- 'lengths' (default): the length of each LOS
- 'angles' : the angle of incidence of each LOS
(with respect to the normal of the surface touched,
useful for assessing reflection probabilities)
- 'indices': the index of each LOS
(useful for checking numbering)
- 'Etendues': the etendue associated to each LOS (user-provided)
- 'Surfaces': the surfaces associated to each LOS (user-provided)
Lplot: None / str
Flag indicating whether to plot:
- 'tot': the full length of the LOS
- 'in': only the part that is inside the vessel
invert: None / bool
Flag indicating whether to plot 2D camera images inverted (pinhole)
ind: None / np.ndarray
Array of bool indices used to select only a subset of the LOS
Bck: None / bool
Flag indicating whether to plot the background LOS
fs: None / tuple
figure size in inches
wintit: None / str
Title for the window
tit: None / str
Title for the figure
connect: None / bool
Flag indicating to connect interactive actuators
draw: None / bool
Flag indicating whether to draw the figure
"""
out = _plot.Rays_plot_touch(
self,
key=key,
Bck=Bck,
quant=quant,
ind=ind,
Lplot=Lplot,
invert=invert,
connect=connect,
fs=fs,
wintit=wintit,
tit=tit,
draw=draw,
)
return out
########################################
# CamLOS subclasses
########################################
sig = inspect.signature(Rays)
params = sig.parameters
class CamLOS1D(Rays):
def get_summary(
self,
sep=" ",
line="-",
just="l",
table_sep=None,
verb=True,
return_=False,
):
# Prepare
kout = self._dgeom["kOut"]
indout = self._dgeom["indout"]
lS = self._dconfig["Config"].lStruct
angles = np.arccos(-np.sum(self.u*self.dgeom['vperp'], axis=0))
# ar0
col0 = ["nb. los", "av. length", "min length", "max length",
"nb. touch", "av. angle", "min angle", "max angle"]
ar0 = [
self.nRays,
"{:.3f}".format(np.nanmean(kout)),
"{:.3f}".format(np.nanmin(kout)),
"{:.3f}".format(np.nanmax(kout)),
np.unique(indout[0, :]).size,
"{:.2f}".format(np.nanmean(angles)),
"{:.2f}".format(np.nanmin(angles)),
"{:.2f}".format(np.nanmax(angles)),
]
if self._dgeom['move'] is not None:
col0 += ['move', 'param']
ar0 += [self._dgeom['move'],
str(round(self._dgeom['move_param'], ndigits=4))]
# ar1
col1 = ["los index", "length", "touch", "angle (rad)"]
ar1 = [
np.arange(0, self.nRays),
np.around(kout, decimals=3).astype("U"),
["%s_%s" % (lS[ii].Id.Cls, lS[ii].Id.Name) for ii in indout[0, :]],
np.around(angles, decimals=2).astype('U')
]
for k, v in self._dchans.items():
col1.append(k)
if v.ndim == 1:
ar1.append(v)
else:
ar1.append([str(vv) for vv in v])
# call base method
return self._get_summary(
[ar0, ar1],
[col0, col1],
sep=sep,
line=line,
table_sep=table_sep,
verb=verb,
return_=return_,
)
def __add__(self, other):
if not other.__class__.__name__ == self.__class__.__name__:
msg = "Operator defined only for same-class operations !"
raise Exception(msg)
lc = [self.Id.Exp == other.Id.Exp, self.Id.Diag == other.Id.Diag]
if not all(lc):
msg = (
"Operation only valid if objects have identical (Diag, Exp) !"
)
raise Exception(msg)
if not self.config == other.config:
msg = "Operation only valid if objects have identical config !"
raise Exception(msg)
Name = "%s+%s" % (self.Id.Name, other.Id.Name)
D = np.concatenate((self.D, other.D), axis=1)
u = np.concatenate((self.u, other.u), axis=1)
return self.__class__(
dgeom=(D, u),
config=self.config,
Name=Name,
Diag=self.Id.Diag,
Exp=self.Id.Exp,
)
def __radd__(self, other):
return self.__add__(other)
def save_to_imas(
self,
ids=None,
shot=None,
run=None,
refshot=None,
refrun=None,
user=None,
database=None,
version=None,
occ=None,
dryrun=False,
deep=True,
restore_size=True,
verb=True,
config_description_2d=None,
config_occ=None,
):
import tofu.imas2tofu as _tfimas
_tfimas._save_to_imas(
self,
tfversion=__version__,
shot=shot,
run=run,
refshot=refshot,
refrun=refrun,
user=user,
database=database,
version=version,
occ=occ,
dryrun=dryrun,
verb=verb,
ids=ids,
deep=deep,
restore_size=restore_size,
config_description_2d=config_description_2d,
config_occ=config_occ,
)
lp = [p for p in params.values() if p.name != "dX12"]
CamLOS1D.__signature__ = sig.replace(parameters=lp)
class CamLOS2D(Rays):
def get_summary(
self,
sep=" ",
line="-",
just="l",
table_sep=None,
verb=True,
return_=False,
):
# Prepare
kout = self._dgeom["kOut"]
indout = self._dgeom["indout"]
# lS = self._dconfig["Config"].lStruct
angles = np.arccos(-np.sum(self.u*self.dgeom['vperp'], axis=0))
# ar0
col0 = ["nb. los", "av. length", "min length", "max length",
"nb. touch", "av. angle", "min angle", "max angle"]
ar0 = [
self.nRays,
"{:.3f}".format(np.nanmean(kout)),
"{:.3f}".format(np.nanmin(kout)),
"{:.3f}".format(np.nanmax(kout)),
np.unique(indout[0, :]).size,
"{:.2f}".format(np.nanmean(angles)),
"{:.2f}".format(np.nanmin(angles)),
"{:.2f}".format(np.nanmax(angles)),
]
if self._dgeom['move'] is not None:
col0 += ['move', 'param']
ar0 += [self._dgeom['move'],
str(round(self._dgeom['move_param'], ndigits=4))]
# call base method
return self._get_summary(
[ar0],
[col0],
sep=sep,
line=line,
table_sep=table_sep,
verb=verb,
return_=return_,
)
def _isImage(self):
return self._dgeom["isImage"]
@property
def dX12(self):
if self._dX12 is not None and self._dX12["from"] == "geom":
dX12 = self._dgeom["dX12"]
else:
dX12 = self._dX12
return dX12
def get_X12plot(self, plot="imshow"):
if plot == "imshow":
x1, x2 = self.dX12["x1"], self.dX12["x2"]
x1min, Dx1min = x1[0], 0.5 * (x1[1] - x1[0])
x1max, Dx1max = x1[-1], 0.5 * (x1[-1] - x1[-2])
x2min, Dx2min = x2[0], 0.5 * (x2[1] - x2[0])
x2max, Dx2max = x2[-1], 0.5 * (x2[-1] - x2[-2])
extent = (
x1min - Dx1min,
x1max + Dx1max,
x2min - Dx2min,
x2max + Dx2max,
)
indr = self.dX12["indr"]
return x1, x2, indr, extent
"""
def set_e12(self, e1=None, e2=None):
assert e1 is None or (hasattr(e1,'__iter__') and len(e1)==3)
assert e2 is None or (hasattr(e2,'__iter__') and len(e2)==3)
if e1 is None:
e1 = self._dgeom['e1']
else:
e1 = np.asarray(e1).astype(float).ravel()
e1 = e1 / np.linalg.norm(e1)
if e2 is None:
e2 = self._dgeom['e2']
else:
e2 = np.asarray(e1).astype(float).ravel()
e2 = e2 / np.linalg.norm(e2)
assert np.abs(np.sum(e1*self._dgeom['nIn']))<1.e-12
assert np.abs(np.sum(e2*self._dgeom['nIn']))<1.e-12
assert np.abs(np.sum(e1*e2))<1.e-12
self._dgeom['e1'] = e1
self._dgeom['e2'] = e2
def get_ind_flatimg(self, direction='flat2img'):
assert direction in ['flat2img','img2flat']
assert self._dgeom['ddetails'] is not None
assert all([ss in self._dgeom['ddetails'].keys()
for ss in ['x12','x1','x2']])
x1b = 0.5*(self._dgeom['ddetails']['x1'][1:]
+ self._dgeom['ddetails']['x1'][:-1])
x2b = 0.5*(self._dgeom['ddetails']['x2'][1:]
+ self._dgeom['ddetails']['x2'][:-1])
ind = np.array([np.digitize(self._dgeom['ddetails']['x12'][0,:], x1b),
np.digitize(self._dgeom['ddetails']['x12'][0,:], x2b)])
if direction == 'flat2img':
indr = np.zeros((self._dgeom['ddetails']['x1'].size,
self._dgeom['ddetails']['x2'].size),dtype=int)
indr[ind[0,:],ind[1,:]] = np.arange(0,self._dgeom['nRays'])
ind = indr
return ind
def get_X12(self, out='imshow'):
if out == 'imshow':
x1, x2 = self._dgeom['x1'], self._dgeom['x2']
dx1, dx2 = 0.5*(x1[1]-x1[0]), 0.5*(x2[1]-x2[0])
extent = (x1[0]-dx1, x1[-1]+dx1, x2[0]-dx2, x2[-1]+dx2)
return x1, x2, extent
# TBF
if self._X12 is None:
Ds = self.D
C = np.mean(Ds,axis=1)
X12 = Ds-C[:,np.newaxis]
X12 = np.array([np.sum(X12*self._dgeom['e1'][:,np.newaxis],axis=0),
np.sum(X12*self._dgeom['e2'][:,np.newaxis],axis=0)])
else:
X12 = self._X12
if X12 is None or out.lower()=='1d':
DX12 = None
else:
x1u, x2u, ind, DX12 = utils.get_X12fromflat(X12)
if out.lower()=='2d':
X12 = [x1u, x2u, ind]
return X12, DX12
"""
lp = [p for p in params.values()]
CamLOS2D.__signature__ = sig.replace(parameters=lp)
""" Return the indices or instances of all LOS matching criteria
The selection can be done according to 2 different mechanisms
Mechanism (1): provide the value (Val) a criterion (Crit) should match
The criteria are typically attributes of :class:`~tofu.pathfile.ID`
(i.e.: name, or user-defined attributes like the camera head...)
Mechanism (2): (used if Val=None)
Provide a str expression (or a list of such) to be fed to eval()
Used to check on quantitative criteria.
- PreExp: placed before the criterion value (e.g.: 'not ' or '<=')
- PostExp: placed after the criterion value
- you can use both
Other parameters are used to specify logical operators for the selection
(match any or all the criterion...) and the type of output.
Parameters
----------
Crit : str
Flag indicating which criterion to use for discrimination
Can be set to:
- any attribute of :class:`~tofu.pathfile.ID`
PostExp : str
A str (or list of such) expression to be fed to eval()
Placed after the criterion value
Used for selection mechanism (2)
Log : str
Flag indicating whether the criterion shall match:
- 'all': all provided values
- 'any': at least one of them
InOut : str
Flag indicating whether the returned indices are:
- 'In': the ones matching the criterion
- 'Out': the ones not matching it
Out : type / str
Flag indicating in which form to return the result:
- int: as an array of integer indices
- bool: as an array of boolean indices
- 'Name': as a list of names
- 'LOS': as a list of :class:`~tofu.geom.LOS` instances
Returns
-------
ind : list / np.ndarray
The computed output, of nature defined by parameter Out
Examples
--------
>>> import tofu.geom as tfg
>>> VPoly, VLim = [[0.,1.,1.,0.],[0.,0.,1.,1.]], [-1.,1.]
>>> V = tfg.Ves('ves', VPoly, Lim=VLim, Type='Lin', Exp='Misc', shot=0)
>>> Du1 = ([0.,-0.1,-0.1],[0.,1.,1.])
>>> Du2 = ([0.,-0.1,-0.1],[0.,0.5,1.])
>>> Du3 = ([0.,-0.1,-0.1],[0.,1.,0.5])
>>> l1 = tfg.LOS('l1', Du1, Ves=V, Exp='Misc', Diag='A', shot=0)
>>> l2 = tfg.LOS('l2', Du2, Ves=V, Exp='Misc', Diag='A', shot=1)
>>> l3 = tfg.LOS('l3', Du3, Ves=V, Exp='Misc', Diag='B', shot=1)
>>> gl = tfg.GLOS('gl', [l1,l2,l3])
>>> Arg1 = dict(Val=['l1','l3'],Log='any',Out='LOS')
>>> Arg2 = dict(Val=['l1','l3'],Log='any',InOut='Out',Out=int)
>>> Arg3 = dict(Crit='Diag', Val='A', Out='Name')
>>> Arg4 = dict(Crit='shot', PostExp='>=1')
>>> gl.select(**Arg1)
[l1,l3]
>>> gl.select(**Arg2)
array([1])
>>> gl.select(**Arg3)
['l1','l2']
>>> gl.select(**Arg4)
array([False, True, True], dtype=bool)
"""
|
py | b40eddec7c701c850c066962a1a2c9ff3a392eb5 | import numpy as np
from sklearn.neighbors import DistanceMetric
from geometry import *
def mappings(predicted_x, predicted_y, predicted_primary, reference_z,
event, max_angle=1.0e-2, max_primary=5.0):
predicted = np.ndarray(shape=(predicted_x.shape[0], 3))
predicted[:, 0] = predicted_x
predicted[:, 1] = predicted_y
predicted[:, 2] = reference_z
predicted = predicted / np.sqrt(np.sum(predicted ** 2, axis=1))[:, None]
m = DistanceMetric.get_metric(metric='pyfunc', func=spherical_angle)
d = m.pairwise(predicted, event.tracks)
mapping_matrix = (d <= max_angle) & (np.abs(event.z0 - predicted_primary) < max_primary)[:, None]
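# mapping_matrix[i, j] is True when predicted ray i matches true track j,
# i.e. their angular distance is below max_angle and the predicted primary
# vertex lies within max_primary of the event's true z0.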
test_mapping = np.sum(mapping_matrix, axis=0)
predicted_mapping = np.sum(mapping_matrix, axis=1)
return mapping_matrix, predicted_mapping, test_mapping
# Each true sample maps to closest
test_mapping = np.zeros(shape=(event.tracks.shape[0], ), dtype=int)
predicted_mapping = np.zeros(shape=(predicted.shape[0], ), dtype=int)
test_distance = -np.ones(shape=(event.tracks.shape[0]), dtype='float32')
for i in xrange(event.tracks.shape[0]):
min_d = np.min(d[:, i])
mask = (d[:, i] == min_d) & (d[:, i] < max_angle)
test_mapping[i] = 1 if np.any(mask) else 0
for i in xrange(predicted.shape[0]):
mask = np.any(d[i, :] < max_angle) and np.abs(predicted_primary[i] - event.z0) > max_primary
predicted_mapping[i] = 1 if mask else 0
return predicted_mapping, test_mapping
def bin_metrics(*args, **kwargs):
matrix, predicted_mapping, test_mapping = mappings(*args, **kwargs)
fn = float(np.sum(test_mapping == 0))
fp = float(np.sum(predicted_mapping == 0))
tp = float(np.sum(test_mapping == 1))
return tp, fp, fn, matrix, predicted_mapping, test_mapping
def binary_metrics(*args, **kwargs):
tp, fp, fn, matrix, predicted_mapping, test_mapping = bin_metrics(*args, **kwargs)
metrics = dict()
metrics['fn'] = fn
metrics['fp'] = fp
metrics['tp'] = tp
metrics['precision'] = tp / (fp + tp)
metrics['recall'] = tp / (tp + fn)
return metrics, matrix, predicted_mapping, test_mapping
def __max_score_mapping(rr, predicted, test, max_angle=1.0e-2):
angle = DistanceMetric.get_metric('pyfunc', func=spherical_angle)
d = angle.pairwise(predicted, test)
# Each true sample maps to closest
test_mapping = np.zeros(shape=(test.shape[0], ), dtype=float)
for i in xrange(test.shape[0]):
if np.any(d[:, i] < max_angle):
close_predictions = d[:, i] < max_angle
scores = [ rr(p) for p in predicted[close_predictions, :] ]
test_mapping[i] = np.max(scores)
return test_mapping
def precision_recall(predicted, event, against = 'true', max_angle = 1.0e-2):
rr, jac, hess = event.get_response()
test = __against(event, against)
scores = np.array([ rr(p) for p in predicted ])
test_score_mapping = __max_score_mapping(rr, predicted, test, max_angle)
predicted_mapping, _ = mappings(predicted, test, max_angle)
tp = np.zeros(shape=scores.shape[0])
fp = np.zeros(shape=scores.shape[0])
fn = np.zeros(shape=scores.shape[0])
for i, s in enumerate(np.sort(scores[:-1])):
tp[i] = np.sum(test_score_mapping > s)
fp[i] = np.sum(predicted_mapping[scores > s] == 0)
fn[i] = np.sum(test_score_mapping <= s)
precision = tp / (tp + fp)
recall = tp / test.shape[0]
print precision
print recall
return np.sort(scores), precision, recall |
py | b40ee056424e65842de3f18b57cc9908875bdbc7 | """
Definition of views.
"""
# Hello friends, ready to modify a project? Then let's get started
from django.shortcuts import render
from django.http import HttpRequest
from django.template import RequestContext
from datetime import datetime
def home(request):
"""Renders the home page."""
assert isinstance(request, HttpRequest)
return render(
request,
'app/index.html',
context_instance = RequestContext(request,
{
'title':'Home Page',
'year':datetime.now().year,
})
)
def contact(request):
"""Renders the contact page."""
assert isinstance(request, HttpRequest)
return render(
request,
'app/contact.html',
context_instance = RequestContext(request,
{
'title':'Contact',
'message':'Your contact page.',
'year':datetime.now().year,
})
)
def about(request):
"""Renders the about page."""
assert isinstance(request, HttpRequest)
return render(
request,
'app/about.html',
context_instance = RequestContext(request,
{
'title':'About',
'message':'Your application description page.',
'year':datetime.now().year,
})
)
|
py | b40ee05e24cb8a0a2a6921152c1dd09234328077 | import unittest
class TestBowling(unittest.TestCase):
def test_hello(self):
self.assertEqual(True, True)
if __name__ == "__main__":
unittest.main() |
py | b40ee079a577a77555888197b34380d7e63acfd3 | # Generated by Django 3.2 on 2022-01-31 14:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('notifications', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='notification',
name='emails',
field=models.JSONField(),
),
migrations.AlterField(
model_name='notification',
name='query',
field=models.JSONField(),
),
]
|
py | b40ee134bb327ad762419bf5cc6df0fea84d311c | import _point
class Point():
def __init__(self, x=None, y=None):
if x:
self.p = _point.lib.get_point(x, y)
else:
self.p = _point.lib.get_default_point()
def __repr__(self):
return '({0}, {1})'.format(self.p.x, self.p.y)
def show_point(self):
_point.lib.show_point(self.p)
def move_point(self):
_point.lib.move_point(self.p)
def move_point_by_ref(self):
ppoint = _point.ffi.new("Point*", self.p)
_point.lib.move_point_by_ref(ppoint)
self.p = ppoint
|
py | b40ee13e747659ecc17576bf4f8c3110c4f0b4dc | from random import randint
class CommonElements:
size = 0
arr1 = []
arr2 = []
def init(self):
print("Size of arrays")
# take input from console
self.size = int(input())
print(self.size)
# Initialize two arrays of the same size and assign some random values
self.arr1 = [randint(0,100)for i in range(0,self.size)]
self.arr2 = [randint(0,100)for i in range(0,self.size)]
# Basic idea: iterate over every element of the first array and check whether it exists in the second array.
def findCommonElementSolution1(self,arr1,arr2):
numberOfCommenElements = 0
for i in range(0,self.size):
for j in range(0,self.size):
if arr1[i]==arr2[j]:
numberOfCommenElements+=1
break
return numberOfCommenElements
obj = CommonElements()
obj.init()
commonElements = obj.findCommonElementSolution1(obj.arr1,obj.arr2)
print("Total comman elements are %d" %(commonElements))
|
py | b40ee1613658c506c250340dad9bc64f70cd8f5e | #!/usr/bin/env python3
"""Plot the live microphone signal(s) with matplotlib.
Matplotlib and NumPy have to be installed.
"""
import argparse
import queue
import sys
from matplotlib.animation import FuncAnimation
import matplotlib.pyplot as plt
import numpy as np
import math
import sounddevice as sd
from time import sleep
def int_or_str(text):
"""Helper function for argument parsing."""
try:
return int(text)
except ValueError:
return text
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument(
'-l', '--list-devices', action='store_true',
help='show list of audio devices and exit')
args, remaining = parser.parse_known_args()
if args.list_devices:
print(sd.query_devices())
parser.exit(0)
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
parents=[parser])
parser.add_argument(
'channels', type=int, default=[1], nargs='*', metavar='CHANNEL',
help='input channels to plot (default: the first)')
parser.add_argument(
'-d', '--device', type=int_or_str,
help='input device (numeric ID or substring)')
parser.add_argument(
'-w', '--window', type=float, default=200, metavar='DURATION',
help='visible time slot (default: %(default)s ms)')
parser.add_argument(
'-i', '--interval', type=float, default=30,
help='minimum time between plot updates (default: %(default)s ms)')
parser.add_argument(
'-b', '--blocksize', type=int, help='block size (in samples)')
parser.add_argument(
'-r', '--samplerate', type=float, help='sampling rate of audio device')
parser.add_argument(
'-n', '--downsample', type=int, default=10, metavar='N',
help='display every Nth sample (default: %(default)s)')
args = parser.parse_args(remaining)
if any(c < 1 for c in args.channels):
parser.error('argument CHANNEL: must be >= 1')
mapping = [c - 1 for c in args.channels] # Channel numbers start with 1
# YUCKY PARSING ^^^^^
#############################################################################
# COOL PROCESSING
##TODO
##Replace np.abs with normal abs
##Add MIDI Output
##Add Attack detection
##
##
##FUTURE
##Multi-peak detection for polyphony
fs = args.samplerate #yea.h
cp_framesize = int(fs/4) #yea.h
cp_buffer = [0] * cp_framesize
cp_pointer = 0
max_freq = 1200 #yea.h
min_freq = 80 #yea.h
min_time = 1. / max_freq #yea.h
max_time = 1. / min_freq #yea.h
min_index = math.floor(min_time * fs)
max_index = math.ceil(max_time* fs)
midi_note_names = ["LOW","LOW","LOW","LOW","LOW","LOW","LOW","LOW","LOW","LOW","LOW","LOW","LOW","LOW","LOW","LOW","LOW","LOW","LOW","LOW","LOW","A0","A#0/Bb0","B0","C1","C#1/Db1","D1","D#1/Eb1","E1","F1","F#1/Gb1","G1","G#1/Ab1","A1","A#1/Bb1","B1","C2","C#2/Db2","D2","D#2/Eb2","E2","F2","F#2/Gb2","G2","G#2/Ab2","A2","A#2/Bb2","B2","C3","C#3/Db3","D3","D#3/Eb3","E3","F3","F#3/Gb3","G3","G#3/Ab3","A3","A#3/Bb3","B3","C4 (middle C)","C#4/Db4","D4","D#4/Eb4","E4","F4","F#4/Gb4","G4","G#4/Ab4","A4 concert pitch","A#4/Bb4","B4","C5","C#5/Db5","D5","D#5/Eb5","E5","F5","F#5/Gb5","G5","G#5/Ab5","A5","A#5/Bb5","B5","C6","C#6/Db6","D6","D#6/Eb6","E6","F6","F#6/Gb6","G6","G#6/Ab6","A6","A#6/Bb6","B6","C7","C#7/Db7","D7","D#7/Eb7","E7","F7","F#7/Gb7","G7","G#7/Ab7","A7","A#7/Bb7","B7","C8","C#8/Db8","D8","D#8/Eb8","E8","F8","F#8/Gb8","G8","G#8/Ab8","A8","A#8/Bb8","B8","C9","C#9/Db9","D9","D#9/Eb9","E9","F9","F#9/Gb9","G9"]
major_scale = [1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1] #yea.c
last_note = 0
t = [0] * cp_framesize #yea.c unitinitialized array
midi_enabled = [1] * 128
def copy_major_into_enabled(n): #yea.c
global major_scale
global midi_enabled
for i in range(len(major_scale)):
if (n+i >= len(midi_enabled)):
return
elif (n+i < 0):
pass
else:
midi_enabled[n+i] = major_scale[i]
return
def apply_major_scale(n): #been booped
global major_scale
global midi_enabled
assert(n >= 0)
assert(n <= 11)
for i in range(-1, 1 + math.ceil((116-n)/12)):
print(i)
copy_major_into_enabled(n + 12*i)
for i in range(cp_framesize):
t[i] = i / fs #yea.c init_vars check
def argmax(l):
maximum = l[0]
maxindex = 0
for i in range(len(l)):
if l[i] > maximum:
maximum = l[i]
maxindex = i
return maxindex
# https://www.inspiredacoustics.com/en/MIDI_note_numbers_and_center_frequencies
# This function takes an input frequency in Hz (in_freq) and returns the
# number of the nearest MIDI note, computed as n = round(69 + 12*log2(in_freq/440)).
# The note name can then be looked up with note_name_from_midi_num(n).
# example: in_freq = 326.0 -> 64 (E4, 329.63 Hz)
def quantize_frequency_to_MIDI(in_freq): #yessir.c
n = 0
n = in_freq
n = n/440
n = np.log2(n)
n = n * 12
n = n + 69
n = round(n)
n = int(n)
return n
def note_name_from_midi_num(n): #yea.c
return midi_note_names [n]
def copy_into_circular_buffer(data):
global cp_framesize, cp_buffer, cp_pointer
cp_buffer[cp_pointer] = data
cp_pointer = (cp_pointer + 1) % cp_framesize
def audio_callback(indata, frames, time, status):
global last_note
"""This is called (from a separate thread) for each audio block."""
for x in indata:
copy_into_circular_buffer(x[0])
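# Cepstrum-based pitch estimate: take log(|FFT|^2) of the buffered frame,
# inverse-FFT it back (the cepstrum) and look for the strongest peak between
# min_index and max_index; that peak's lag is the pitch period in samples,
# and t[index]**-1 converts it back to a frequency in Hz.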
spectrum = np.fft.fft(cp_buffer, n=cp_framesize)
ceps = np.abs(np.fft.ifft(np.log((np.abs(spectrum))**2)))**2
index = argmax(ceps[min_index:max_index]) + min_index
new_note = quantize_frequency_to_MIDI(t[index]**-1)
if (new_note != last_note):
print(note_name_from_midi_num(new_note))
last_note = new_note
if __name__ == "__main__":
stream = sd.InputStream(
device=args.device, channels=max(args.channels),
samplerate=fs, callback=audio_callback)
with stream:
while(True):
sleep(1)
#apply_major_scale(11)
#for i in range(len(midi_note_names)):
# print(midi_note_names[i], midi_enabled[i])
# make a list from 0 to 11 for notes of the scale that have 0 or 1 corresponding to if you want them to be on or not. do for major and minor (and mode?)
|
py | b40ee30b63a688e4422f18d3a214147cea252e38 | import lldb
import optparse
import shlex
import os
def __lldb_init_module(debugger, internal_dict):
debugger.HandleCommand('command script add -f cmd_dump_module.dump_module dump_module')
print('The "dump_module" command has been installed.')
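# Typical usage from the LLDB prompt (paths and module names are examples only):
# (lldb) command script import /path/to/cmd_dump_module.py
# (lldb) dump_module -a -d /tmp/modules
# (lldb) dump_module Foundation UIKit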
def dump_module(debugger, command, result, internal_dict):
'''
Dump modules.
'''
command_args = shlex.split(command)
parser = generate_option_parser()
try:
(options, args) = parser.parse_args(command_args)
except:
result.SetError(parser.usage)
return
target = debugger.GetSelectedTarget()
if options.all:
for m in target.module_iter():
do_dump_module(target, m, options.output_dir)
elif len(args) > 0:
for e in args:
module = target.FindModule(lldb.SBFileSpec(e))
if not module.IsValid():
result.SetError("Unable to find module '{}', to see list of modules, use 'image list -b'".format(e))
return
do_dump_module(target, module, options.output_dir)
else:
result.SetError('You should either pass in module names or specify `-a` to dump all modules. (Hint: use `image list -b` to get all modules)')
def do_dump_module(target, m, output_dir):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
filename = '%s__%s' % (m.file.basename, m.uuid)
with open(os.path.join(output_dir, filename), 'w') as f:
f.write('Fullpath: %s\n' % m.file.fullpath)
f.write('Number of sections: %s\n' % m.num_sections)
f.write('Number of symbols: %s\n' % m.num_symbols)
if m.num_symbols > 0:
f.write('Symbols:\n')
for sym in m:
f.write('%s : 0x%x - 0x%x\n' % (sym.name, sym.addr.GetLoadAddress(target), sym.end_addr.GetLoadAddress(target)))
def generate_option_parser():
usage = "usage: %prog [module-name]..."
parser = optparse.OptionParser(usage=usage, prog='dump_module')
parser.add_option('-a', '--all',
action='store_true',
default=False,
dest='all',
help='Dump all modules, one module per file, with module name and uuid as filename')
parser.add_option('-d', '--output-dir',
dest='output_dir',
default='modules',
help='The directory to output files, default is `modules`')
return parser
|
py | b40ee379fdf1d640f62069b014e60821bf385b00 | from unittest import TestCase
from booker import Booker
from searcher import Searcher
from exceptions import *
class TestBooker(TestCase):
def setUp(self):
self.booker = Booker()
self.searcher = Searcher()
self.token = self.searcher.search('13/04/2018', 'BCN', 'DUB')
def test_book_valid(self):
self.assertIsInstance(self.booker.book(self.token), str)
def test_book_bad_token(self):
with self.assertRaises(InvalidBookingTokenError):
self.assertIsInstance(self.booker.book('dsadsadsa'), str)
|
py | b40ee40dd5a72ce0e1ec7aa6660d1ea66128845b | from maza.core.exploit import *
# hack to import from directory/filename starting with a number
FTPDefault = utils.import_exploit("maza.modules.creds.generic.ftp_default")
class Exploit(FTPDefault):
__info__ = {
"name": "2Wire Router Default FTP Creds",
"description": "Module performs dictionary attack against 2Wire Router FTP service. "
"If valid credentials are found, they are displayed to the user.",
"authors": (
"Marcin Bury <marcin[at]threat9.com>", # routersploit module
),
"devices": (
"2Wire Router",
),
}
target = OptIP("", "Target IPv4, IPv6 address or file with ip:port (file://)")
port = OptPort(21, "Target FTP port")
threads = OptInteger(1, "Number of threads")
defaults = OptWordlist("admin:admin", "User:Pass or file with default credentials (file://)")
|
py | b40ee522704ce1d5660e84be59d5cd9cecf023bc | from __future__ import print_function, absolute_import
from copy import deepcopy
from collections import OrderedDict
from fontMath.mathFunctions import (
add, addPt, div, divPt, mul, mulPt, _roundNumber, sub, subPt)
from fontMath.mathGuideline import (
_compressGuideline, _expandGuideline, _pairGuidelines,
_processMathOneGuidelines, _processMathTwoGuidelines, _roundGuidelines)
from fontTools.pens.pointPen import AbstractPointPen
# ------------------
# UFO 3 branch notes
# ------------------
#
# to do:
# X components
# X identifiers
# X contours
# X identifiers
# X points
# X identifiers
# X guidelines
# X height
# X image
#
# - is there any cruft that can be removed?
# X why is divPt here? move all of those to the math functions
# - FilterRedundantPointPen._flushContour is a mess
# X for the pt math functions, always send (x, y) factors instead
# of coercing within the function. the coercion can happen at
# the beginning of the _processMathTwo method.
# - try list comprehensions in the point math for speed
#
# Questionable stuff:
# X is getRef needed?
# X nothing is ever set to _structure. should it be?
# X should the compatibility be a function or pen?
# X the lib import is shallow and modifications to
# lower level objects (ie dict) could modify the
# original object. this probably isn't desirable.
# deepcopy won't work here since it will try to
# maintain the original class. may need to write
# a custom copier. or maybe something like this
# would be sufficient:
# self.lib = deepcopy(dict(glyph.lib))
# the class would be maintained for everything but
# the top level. that shouldn't matter for the
# purposes here.
# - __cmp__ is dubious but harmless i suppose.
# X is generationCount needed?
# X can box become bounds? have both?
try:
basestring, xrange
range = xrange
except NameError:
basestring = str
class MathGlyph(object):
"""
A very shallow glyph object for rapid math operations.
Notes about glyph math:
- absolute contour compatibility is required
- absolute component, anchor, guideline and image compatibility is NOT required.
in cases of incompatibility in this data, only compatible data is processed and
returned. because of this, anchors and components may not be returned in the
same order as the original.
"""
def __init__(self, glyph, scaleComponentTransform=True):
"""Initialize a new MathGlyph object.
Args:
glyph: Input defcon or defcon-like Glyph object to copy from. Set to None to
to make an empty MathGlyph.
scaleComponentTransform (bool): when performing multiplication or division, by
default all elements of a component's affine transformation matrix are
multiplied by the given scalar. If scaleComponentTransform is False, then
only the component's xOffset and yOffset attributes are scaled, whereas the
xScale, xyScale, yxScale and yScale attributes are kept unchanged.
"""
self.scaleComponentTransform = scaleComponentTransform
self.contours = []
self.components = []
if glyph is None:
self.anchors = []
self.guidelines = []
self.image = _expandImage(None)
self.lib = {}
self.name = None
self.unicodes = None
self.width = None
self.height = None
self.note = None
else:
p = MathGlyphPen(self)
glyph.drawPoints(p)
self.anchors = [dict(anchor) for anchor in glyph.anchors]
self.guidelines = [_expandGuideline(guideline) for guideline in glyph.guidelines]
self.image = _expandImage(glyph.image)
self.lib = deepcopy(dict(glyph.lib))
self.name = glyph.name
self.unicodes = list(glyph.unicodes)
self.width = glyph.width
self.height = glyph.height
self.note = glyph.note
def __eq__(self, other):
try:
return all(getattr(self, attr) == getattr(other, attr)
for attr in ("name", "unicodes", "width", "height",
"note", "lib", "contours", "components",
"anchors", "guidelines", "image"))
except AttributeError:
return NotImplemented
def __ne__(self, other):
return not self == other
# ----
# Copy
# ----
def copy(self):
"""return a new MathGlyph containing all data in self"""
return MathGlyph(self)
def copyWithoutMathSubObjects(self):
"""
return a new MathGlyph containing all data except:
contours
components
anchors
guidelines
this is used mainly for internal glyph math.
"""
n = MathGlyph(None)
n.name = self.name
if self.unicodes is not None:
n.unicodes = list(self.unicodes)
n.width = self.width
n.height = self.height
n.note = self.note
n.lib = deepcopy(dict(self.lib))
return n
# ----
# Math
# ----
# math with other glyph
def __add__(self, otherGlyph):
copiedGlyph = self.copyWithoutMathSubObjects()
self._processMathOne(copiedGlyph, otherGlyph, addPt, add)
return copiedGlyph
def __sub__(self, otherGlyph):
copiedGlyph = self.copyWithoutMathSubObjects()
self._processMathOne(copiedGlyph, otherGlyph, subPt, sub)
return copiedGlyph
def _processMathOne(self, copiedGlyph, otherGlyph, ptFunc, func):
# width
copiedGlyph.width = func(self.width, otherGlyph.width)
# height
copiedGlyph.height = func(self.height, otherGlyph.height)
# contours
copiedGlyph.contours = []
if self.contours:
copiedGlyph.contours = _processMathOneContours(self.contours, otherGlyph.contours, ptFunc)
# components
copiedGlyph.components = []
if self.components:
componentPairs = _pairComponents(self.components, otherGlyph.components)
copiedGlyph.components = _processMathOneComponents(componentPairs, ptFunc)
# anchors
copiedGlyph.anchors = []
if self.anchors:
anchorTree1 = _anchorTree(self.anchors)
anchorTree2 = _anchorTree(otherGlyph.anchors)
anchorPairs = _pairAnchors(anchorTree1, anchorTree2)
copiedGlyph.anchors = _processMathOneAnchors(anchorPairs, ptFunc)
# guidelines
copiedGlyph.guidelines = []
if self.guidelines:
guidelinePairs = _pairGuidelines(self.guidelines, otherGlyph.guidelines)
copiedGlyph.guidelines = _processMathOneGuidelines(guidelinePairs, ptFunc, func)
# image
copiedGlyph.image = _expandImage(None)
imagePair = _pairImages(self.image, otherGlyph.image)
if imagePair:
copiedGlyph.image = _processMathOneImage(imagePair, ptFunc)
# math with factor
def __mul__(self, factor):
if not isinstance(factor, tuple):
factor = (factor, factor)
copiedGlyph = self.copyWithoutMathSubObjects()
self._processMathTwo(copiedGlyph, factor, mulPt, mul)
return copiedGlyph
__rmul__ = __mul__
def __div__(self, factor):
if not isinstance(factor, tuple):
factor = (factor, factor)
copiedGlyph = self.copyWithoutMathSubObjects()
self._processMathTwo(copiedGlyph, factor, divPt, div)
return copiedGlyph
__truediv__ = __div__
__rdiv__ = __div__
__rtruediv__ = __rdiv__
def _processMathTwo(self, copiedGlyph, factor, ptFunc, func):
# width
copiedGlyph.width = func(self.width, factor[0])
# height
copiedGlyph.height = func(self.height, factor[1])
# contours
copiedGlyph.contours = []
if self.contours:
copiedGlyph.contours = _processMathTwoContours(self.contours, factor, ptFunc)
# components
copiedGlyph.components = []
if self.components:
copiedGlyph.components = _processMathTwoComponents(
self.components, factor, ptFunc, scaleComponentTransform=self.scaleComponentTransform
)
# anchors
copiedGlyph.anchors = []
if self.anchors:
copiedGlyph.anchors = _processMathTwoAnchors(self.anchors, factor, ptFunc)
# guidelines
copiedGlyph.guidelines = []
if self.guidelines:
copiedGlyph.guidelines = _processMathTwoGuidelines(self.guidelines, factor, func)
# image
if self.image:
copiedGlyph.image = _processMathTwoImage(self.image, factor, ptFunc)
# -------
# Additional math
# -------
def round(self, digits=None):
"""round the geometry."""
copiedGlyph = self.copyWithoutMathSubObjects()
# misc
copiedGlyph.width = _roundNumber(self.width, digits)
copiedGlyph.height = _roundNumber(self.height, digits)
# contours
copiedGlyph.contours = []
if self.contours:
copiedGlyph.contours = _roundContours(self.contours, digits)
# components
copiedGlyph.components = []
if self.components:
copiedGlyph.components = _roundComponents(self.components, digits)
# guidelines
copiedGlyph.guidelines = []
if self.guidelines:
copiedGlyph.guidelines = _roundGuidelines(self.guidelines, digits)
# anchors
copiedGlyph.anchors = []
if self.anchors:
copiedGlyph.anchors = _roundAnchors(self.anchors, digits)
# image
copiedGlyph.image = None
if self.image:
copiedGlyph.image = _roundImage(self.image, digits)
return copiedGlyph
# -------
# Pen API
# -------
def getPointPen(self):
"""get a point pen for drawing to this object"""
return MathGlyphPen(self)
def drawPoints(self, pointPen, filterRedundantPoints=False):
"""draw self using pointPen"""
if filterRedundantPoints:
pointPen = FilterRedundantPointPen(pointPen)
for contour in self.contours:
pointPen.beginPath(identifier=contour["identifier"])
for segmentType, pt, smooth, name, identifier in contour["points"]:
pointPen.addPoint(pt=pt, segmentType=segmentType, smooth=smooth, name=name, identifier=identifier)
pointPen.endPath()
for component in self.components:
pointPen.addComponent(component["baseGlyph"], component["transformation"], identifier=component["identifier"])
def draw(self, pen, filterRedundantPoints=False):
"""draw self using pen"""
from fontTools.pens.pointPen import PointToSegmentPen
pointPen = PointToSegmentPen(pen)
self.drawPoints(pointPen, filterRedundantPoints=filterRedundantPoints)
# ----------
# Extraction
# ----------
def extractGlyph(self, glyph, pointPen=None, onlyGeometry=False):
"""
"rehydrate" to a glyph. this requires
a glyph as an argument. if a point pen other
than the type of pen returned by glyph.getPointPen()
is required for drawing, send this the needed point pen.
"""
if pointPen is None:
pointPen = glyph.getPointPen()
glyph.clearContours()
glyph.clearComponents()
glyph.clearAnchors()
glyph.clearGuidelines()
glyph.lib.clear()
cleanerPen = FilterRedundantPointPen(pointPen)
self.drawPoints(cleanerPen)
glyph.anchors = [dict(anchor) for anchor in self.anchors]
glyph.guidelines = [_compressGuideline(guideline) for guideline in self.guidelines]
glyph.image = _compressImage(self.image)
glyph.lib = deepcopy(dict(self.lib))
glyph.width = self.width
glyph.height = self.height
glyph.note = self.note
if not onlyGeometry:
glyph.name = self.name
glyph.unicodes = list(self.unicodes)
return glyph
# ----------
# Point Pens
# ----------
class MathGlyphPen(AbstractPointPen):
"""
Point pen for building MathGlyph data structures.
"""
def __init__(self, glyph=None):
if glyph is None:
self.contours = []
self.components = []
else:
self.contours = glyph.contours
self.components = glyph.components
self._contourIdentifier = None
self._points = []
def _flushContour(self):
"""
This normalizes the contour so that:
- there are no line segments. in their place will be
curve segments with the off curves positioned on top
of the previous on curve and the new curve on curve.
- the contour starts with an on curve
"""
self.contours.append(
dict(identifier=self._contourIdentifier, points=[])
)
contourPoints = self.contours[-1]["points"]
points = self._points
# move offcurves at the beginning of the contour to the end
haveOnCurve = False
for point in points:
if point[0] is not None:
haveOnCurve = True
break
if haveOnCurve:
while 1:
if points[0][0] is None:
point = points.pop(0)
points.append(point)
else:
break
# convert lines to curves
holdingOffCurves = []
for index, point in enumerate(points):
segmentType = point[0]
if segmentType == "line":
pt, smooth, name, identifier = point[1:]
prevPt = points[index - 1][1]
if index == 0:
holdingOffCurves.append((None, prevPt, False, None, None))
holdingOffCurves.append((None, pt, False, None, None))
else:
contourPoints.append((None, prevPt, False, None, None))
contourPoints.append((None, pt, False, None, None))
contourPoints.append(("curve", pt, smooth, name, identifier))
else:
contourPoints.append(point)
contourPoints.extend(holdingOffCurves)
def beginPath(self, identifier=None):
self._contourIdentifier = identifier
self._points = []
def addPoint(self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs):
self._points.append((segmentType, pt, smooth, name, identifier))
def endPath(self):
self._flushContour()
def addComponent(self, baseGlyph, transformation, identifier=None, **kwargs):
self.components.append(dict(baseGlyph=baseGlyph, transformation=transformation, identifier=identifier))
class FilterRedundantPointPen(AbstractPointPen):
def __init__(self, anotherPointPen):
self._pen = anotherPointPen
self._points = []
def _flushContour(self):
points = self._points
prevOnCurve = None
offCurves = []
pointsToDraw = []
# deal with the first point
pt, segmentType, smooth, name, identifier = points[0]
# if it is an offcurve, add it to the offcurve list
if segmentType is None:
offCurves.append((pt, segmentType, smooth, name, identifier))
else:
# potential redundancy
if segmentType == "curve":
# gather preceding off curves
testOffCurves = []
lastPoint = None
for i in range(len(points)):
i = -i - 1
testPoint = points[i]
testSegmentType = testPoint[1]
if testSegmentType is not None:
lastPoint = testPoint[0]
break
testOffCurves.append(testPoint[0])
# if two offcurves exist we can test for redundancy
if len(testOffCurves) == 2:
if testOffCurves[1] == lastPoint and testOffCurves[0] == pt:
segmentType = "line"
# remove the last two points
points = points[:-2]
# add the point to the contour
pointsToDraw.append((pt, segmentType, smooth, name, identifier))
prevOnCurve = pt
for pt, segmentType, smooth, name, identifier in points[1:]:
# store offcurves
if segmentType is None:
offCurves.append((pt, segmentType, smooth, name, identifier))
continue
# curves are a potential redundancy
elif segmentType == "curve":
if len(offCurves) == 2:
# test for redundancy
if offCurves[0][0] == prevOnCurve and offCurves[1][0] == pt:
offCurves = []
segmentType = "line"
# add all offcurves
for offCurve in offCurves:
pointsToDraw.append(offCurve)
# add the on curve
pointsToDraw.append((pt, segmentType, smooth, name, identifier))
# reset the stored data
prevOnCurve = pt
offCurves = []
# catch any remaining offcurves
if len(offCurves) != 0:
for offCurve in offCurves:
pointsToDraw.append(offCurve)
# draw to the pen
for pt, segmentType, smooth, name, identifier in pointsToDraw:
self._pen.addPoint(pt, segmentType, smooth=smooth, name=name, identifier=identifier)
def beginPath(self, identifier=None, **kwargs):
self._points = []
self._pen.beginPath(identifier=identifier)
def addPoint(self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs):
self._points.append((pt, segmentType, smooth, name, identifier))
def endPath(self):
self._flushContour()
self._pen.endPath()
def addComponent(self, baseGlyph, transformation, identifier=None, **kwargs):
self._pen.addComponent(baseGlyph, transformation, identifier)
# -------
# Support
# -------
# contours
def _processMathOneContours(contours1, contours2, func):
result = []
for index, contour1 in enumerate(contours1):
contourIdentifier = contour1["identifier"]
points1 = contour1["points"]
points2 = contours2[index]["points"]
resultPoints = []
for index, point in enumerate(points1):
segmentType, pt1, smooth, name, identifier = point
pt2 = points2[index][1]
pt = func(pt1, pt2)
resultPoints.append((segmentType, pt, smooth, name, identifier))
result.append(dict(identifier=contourIdentifier, points=resultPoints))
return result
def _processMathTwoContours(contours, factor, func):
result = []
for contour in contours:
contourIdentifier = contour["identifier"]
points = contour["points"]
resultPoints = []
for point in points:
segmentType, pt, smooth, name, identifier = point
pt = func(pt, factor)
resultPoints.append((segmentType, pt, smooth, name, identifier))
result.append(dict(identifier=contourIdentifier, points=resultPoints))
return result
# anchors
def _anchorTree(anchors):
tree = OrderedDict()
for anchor in anchors:
x = anchor["x"]
y = anchor["y"]
name = anchor.get("name")
identifier = anchor.get("identifier")
color = anchor.get("color")
if name not in tree:
tree[name] = []
tree[name].append((identifier, x, y, color))
return tree
def _pairAnchors(anchorDict1, anchorDict2):
"""
Anchors are paired using the following rules:
Matching Identifiers
--------------------
>>> anchors1 = {
... "test" : [
... (None, 1, 2, None),
... ("identifier 1", 3, 4, None)
... ]
... }
>>> anchors2 = {
... "test" : [
... ("identifier 1", 1, 2, None),
... (None, 3, 4, None)
... ]
... }
>>> expected = [
... (
... dict(name="test", identifier=None, x=1, y=2, color=None),
... dict(name="test", identifier=None, x=3, y=4, color=None)
... ),
... (
... dict(name="test", identifier="identifier 1", x=3, y=4, color=None),
... dict(name="test", identifier="identifier 1", x=1, y=2, color=None)
... )
... ]
>>> _pairAnchors(anchors1, anchors2) == expected
True
Mismatched Identifiers
----------------------
>>> anchors1 = {
... "test" : [
... ("identifier 1", 3, 4, None)
... ]
... }
>>> anchors2 = {
... "test" : [
... ("identifier 2", 1, 2, None),
... ]
... }
>>> expected = [
... (
... dict(name="test", identifier="identifier 1", x=3, y=4, color=None),
... dict(name="test", identifier="identifier 2", x=1, y=2, color=None)
... )
... ]
>>> _pairAnchors(anchors1, anchors2) == expected
True
"""
pairs = []
for name, anchors1 in anchorDict1.items():
if name not in anchorDict2:
continue
anchors2 = anchorDict2[name]
# align with matching identifiers
removeFromAnchors1 = []
for anchor1 in anchors1:
match = None
identifier = anchor1[0]
for anchor2 in anchors2:
if anchor2[0] == identifier:
match = anchor2
break
if match is not None:
anchor2 = match
anchors2.remove(anchor2)
removeFromAnchors1.append(anchor1)
a1 = dict(name=name, identifier=identifier)
a1["x"], a1["y"], a1["color"] = anchor1[1:]
a2 = dict(name=name, identifier=identifier)
a2["x"], a2["y"], a2["color"] = anchor2[1:]
pairs.append((a1, a2))
for anchor1 in removeFromAnchors1:
anchors1.remove(anchor1)
if not anchors1 or not anchors2:
continue
# align by index
while 1:
anchor1 = anchors1.pop(0)
anchor2 = anchors2.pop(0)
a1 = dict(name=name)
a1["identifier"], a1["x"], a1["y"], a1["color"] = anchor1
a2 = dict(name=name, identifier=identifier)
a2["identifier"], a2["x"], a2["y"], a2["color"] = anchor2
pairs.append((a1, a2))
if not anchors1:
break
if not anchors2:
break
return pairs
def _processMathOneAnchors(anchorPairs, func):
result = []
for anchor1, anchor2 in anchorPairs:
anchor = dict(anchor1)
pt1 = (anchor1["x"], anchor1["y"])
pt2 = (anchor2["x"], anchor2["y"])
anchor["x"], anchor["y"] = func(pt1, pt2)
result.append(anchor)
return result
def _processMathTwoAnchors(anchors, factor, func):
result = []
for anchor in anchors:
anchor = dict(anchor)
pt = (anchor["x"], anchor["y"])
anchor["x"], anchor["y"] = func(pt, factor)
result.append(anchor)
return result
# components
def _pairComponents(components1, components2):
components1 = list(components1)
components2 = list(components2)
pairs = []
# align with matching identifiers
removeFromComponents1 = []
for component1 in components1:
baseGlyph = component1["baseGlyph"]
identifier = component1["identifier"]
match = None
for component2 in components2:
if component2["baseGlyph"] == baseGlyph and component2["identifier"] == identifier:
match = component2
break
if match is not None:
component2 = match
removeFromComponents1.append(component1)
components2.remove(component2)
pairs.append((component1, component2))
for component1 in removeFromComponents1:
components1.remove(component1)
# align with index
for component1 in components1:
baseGlyph = component1["baseGlyph"]
for component2 in components2:
if component2["baseGlyph"] == baseGlyph:
components2.remove(component2)
pairs.append((component1, component2))
break
return pairs
def _processMathOneComponents(componentPairs, func):
result = []
for component1, component2 in componentPairs:
component = dict(component1)
component["transformation"] = _processMathOneTransformation(component1["transformation"], component2["transformation"], func)
result.append(component)
return result
def _processMathTwoComponents(components, factor, func, scaleComponentTransform=True):
result = []
for component in components:
component = dict(component)
component["transformation"] = _processMathTwoTransformation(
component["transformation"], factor, func, doScale=scaleComponentTransform
)
result.append(component)
return result
# image
_imageTransformationKeys = "xScale xyScale yxScale yScale xOffset yOffset".split(" ")
_defaultImageTransformation = (1, 0, 0, 1, 0, 0)
_defaultImageTransformationDict = {}
for key, value in zip(_imageTransformationKeys, _defaultImageTransformation):
_defaultImageTransformationDict[key] = value
def _expandImage(image):
if image is None:
fileName = None
transformation = _defaultImageTransformation
color = None
else:
if hasattr(image, "naked"):
image = image.naked()
fileName = image["fileName"]
color = image.get("color")
transformation = tuple([
image.get(key, _defaultImageTransformationDict[key])
for key in _imageTransformationKeys
])
return dict(fileName=fileName, transformation=transformation, color=color)
def _compressImage(image):
fileName = image["fileName"]
transformation = image["transformation"]
color = image["color"]
if fileName is None:
return
image = dict(fileName=fileName, color=color)
for index, key in enumerate(_imageTransformationKeys):
image[key] = transformation[index]
return image
def _pairImages(image1, image2):
if image1["fileName"] != image2["fileName"]:
return ()
return (image1, image2)
def _processMathOneImage(imagePair, func):
image1, image2 = imagePair
fileName = image1["fileName"]
color = image1["color"]
transformation = _processMathOneTransformation(image1["transformation"], image2["transformation"], func)
return dict(fileName=fileName, transformation=transformation, color=color)
def _processMathTwoImage(image, factor, func):
fileName = image["fileName"]
color = image["color"]
transformation = _processMathTwoTransformation(image["transformation"], factor, func)
return dict(fileName=fileName, transformation=transformation, color=color)
# transformations
def _processMathOneTransformation(transformation1, transformation2, func):
xScale1, xyScale1, yxScale1, yScale1, xOffset1, yOffset1 = transformation1
xScale2, xyScale2, yxScale2, yScale2, xOffset2, yOffset2 = transformation2
xScale, yScale = func((xScale1, yScale1), (xScale2, yScale2))
xyScale, yxScale = func((xyScale1, yxScale1), (xyScale2, yxScale2))
xOffset, yOffset = func((xOffset1, yOffset1), (xOffset2, yOffset2))
return (xScale, xyScale, yxScale, yScale, xOffset, yOffset)
def _processMathTwoTransformation(transformation, factor, func, doScale=True):
xScale, xyScale, yxScale, yScale, xOffset, yOffset = transformation
if doScale:
xScale, yScale = func((xScale, yScale), factor)
xyScale, yxScale = func((xyScale, yxScale), factor)
xOffset, yOffset = func((xOffset, yOffset), factor)
return (xScale, xyScale, yxScale, yScale, xOffset, yOffset)
# rounding
def _roundContours(contours, digits=None):
results = []
for contour in contours:
contour = dict(contour)
roundedPoints = []
for segmentType, pt, smooth, name, identifier in contour["points"]:
roundedPt = (_roundNumber(pt[0],digits), _roundNumber(pt[1],digits))
roundedPoints.append((segmentType, roundedPt, smooth, name, identifier))
contour["points"] = roundedPoints
results.append(contour)
return results
def _roundTransformation(transformation, digits=None):
xScale, xyScale, yxScale, yScale, xOffset, yOffset = transformation
return (xScale, xyScale, yxScale, yScale, _roundNumber(xOffset, digits), _roundNumber(yOffset, digits))
def _roundImage(image, digits=None):
image = dict(image)
fileName = image["fileName"]
color = image["color"]
transformation = _roundTransformation(image["transformation"], digits)
return dict(fileName=fileName, transformation=transformation, color=color)
def _roundComponents(components, digits=None):
result = []
for component in components:
component = dict(component)
component["transformation"] = _roundTransformation(component["transformation"], digits)
result.append(component)
return result
def _roundAnchors(anchors, digits=None):
result = []
for anchor in anchors:
anchor = dict(anchor)
anchor["x"], anchor["y"] = _roundNumber(anchor["x"], digits), _roundNumber(anchor["y"], digits)
result.append(anchor)
return result
if __name__ == "__main__":
import sys
import doctest
sys.exit(doctest.testmod().failed)
|
py | b40ee544f737835fe1aa71d33f5b66c27a22fee1 | import re
import socket
import ssl
import logging
from urllib.parse import urlparse
from http import HTTPStatus
import requests
from rssant_common import _proxy_helper
from rssant_common.dns_service import (
DNSService, DNS_SERVICE,
PrivateAddressError,
NameNotResolvedError,
)
from rssant_common.requests_helper import requests_check_incomplete_response
from .response import FeedResponse, FeedResponseStatus
from .response_builder import FeedResponseBuilder
from .useragent import DEFAULT_USER_AGENT
from . import cacert
LOG = logging.getLogger(__name__)
class FeedReaderError(Exception):
"""FeedReaderError"""
status = None
class ContentTooLargeError(FeedReaderError):
"""Content too large"""
status = FeedResponseStatus.CONTENT_TOO_LARGE_ERROR.value
class ContentTypeNotSupportError(FeedReaderError):
"""ContentTypeNotSupportError"""
status = FeedResponseStatus.CONTENT_TYPE_NOT_SUPPORT_ERROR.value
class RSSProxyError(FeedReaderError):
"""RSSProxyError"""
status = FeedResponseStatus.RSS_PROXY_ERROR.value
RE_WEBPAGE_CONTENT_TYPE = re.compile(
r'(text/html|application/xml|text/xml|text/plain|application/json|'
r'application/.*xml|application/.*json|text/.*xml)', re.I)
RE_WEBPAGE_EXT = re.compile(
r'(html|xml|json|txt|opml|rss|feed|atom)', re.I)
RE_URL_EXT_SEP = re.compile(r'[./]')
def _get_url_ext(url: str):
"""
>>> _get_url_ext('http://example.com/blog/feed')
'feed'
>>> _get_url_ext('http://example.com/blog/feed.xml')
'xml'
>>> no_error = _get_url_ext('http://example.com')
"""
try:
url_path = urlparse(url).path.strip('/')
except ValueError:
return ''
parts = RE_URL_EXT_SEP.split(url_path[::-1], 1)
if len(parts) > 0:
return parts[0][::-1]
return ''
def is_webpage(content_type, url=None):
"""
>>> is_webpage(' text/HTML ')
True
>>> is_webpage('application/rss+xml; charset=utf-8')
True
>>> is_webpage('application/atom+json')
True
>>> is_webpage('image/jpeg')
False
>>> is_webpage('')
True
>>> is_webpage('application/octet-stream', 'https://www.example.com/feed.XML?q=1')
True
>>> is_webpage('application/octet-stream', 'https://www.example.com/feed')
True
"""
if content_type:
content_type = content_type.split(';', maxsplit=1)[0].strip()
if bool(RE_WEBPAGE_CONTENT_TYPE.fullmatch(content_type)):
return True
    # for maximum compatibility, treat a missing content-type as a webpage
if not content_type:
return True
    # some feeds use 'application/octet-stream'; check the url extension in that case
    # eg: https://blog.racket-lang.org/
if url:
url_ext = _get_url_ext(url)
if url_ext:
if bool(RE_WEBPAGE_EXT.fullmatch(url_ext.lstrip('.'))):
return True
return False
def is_ok_status(status):
return status and 200 <= status <= 299
class FeedReader:
def __init__(
self,
session=None,
user_agent=DEFAULT_USER_AGENT,
request_timeout=30,
max_content_length=10 * 1024 * 1024,
allow_non_webpage=False,
proxy_url=None,
rss_proxy_url=None,
rss_proxy_token=None,
dns_service: DNSService = DNS_SERVICE,
):
if session is None:
session = requests.session()
if dns_service:
session.mount('http://', dns_service.requests_http_adapter())
session.mount('https://', dns_service.requests_http_adapter())
self._close_session = True
else:
self._close_session = False
self.session = session
self.user_agent = user_agent
self.request_timeout = request_timeout
self.max_content_length = max_content_length
self.allow_non_webpage = allow_non_webpage
self.proxy_url = proxy_url
self.rss_proxy_url = rss_proxy_url
self.rss_proxy_token = rss_proxy_token
self._use_rss_proxy = self._choice_proxy()
self.dns_service = dns_service
self._cacert = cacert.where()
@property
def has_proxy(self):
return bool(self.rss_proxy_url or self.proxy_url)
def _choice_proxy(self) -> bool:
return _proxy_helper.choice_proxy(
proxy_url=self.proxy_url, rss_proxy_url=self.rss_proxy_url)
def check_content_type(self, response):
if self.allow_non_webpage:
return
if not is_ok_status(response.status_code):
return
content_type = response.headers.get('content-type')
if not is_webpage(content_type, str(response.url)):
raise ContentTypeNotSupportError(
f'content-type {content_type!r} not support')
def _read_content(self, response: requests.Response):
content_length = response.headers.get('Content-Length')
if content_length:
content_length = int(content_length)
if content_length > self.max_content_length:
msg = 'content length {} larger than limit {}'.format(
content_length, self.max_content_length)
raise ContentTooLargeError(msg)
content_length = 0
content = bytearray()
for data in response.iter_content(chunk_size=64 * 1024):
content_length += len(data)
if content_length > self.max_content_length:
msg = 'content length larger than limit {}'.format(
self.max_content_length)
raise ContentTooLargeError(msg)
content.extend(data)
requests_check_incomplete_response(response)
return content
def _decode_content(self, content: bytes):
if not content:
return ''
return content.decode('utf-8', errors='ignore')
def _prepare_headers(self, url, etag=None, last_modified=None):
headers = {}
if callable(self.user_agent):
headers['User-Agent'] = self.user_agent(url)
else:
headers['User-Agent'] = self.user_agent
headers['Accept-Encoding'] = 'gzip, deflate'
if etag:
headers["ETag"] = etag
if last_modified:
headers["If-Modified-Since"] = last_modified
return headers
def _send_request(self, request, ignore_content, proxies=None):
# http://docs.python-requests.org/en/master/user/advanced/#timeouts
response = self.session.send(
request,
verify=self._cacert,
timeout=self.request_timeout,
stream=True,
proxies=proxies,
)
try:
if not is_ok_status(response.status_code):
content = self._read_content(response)
return response, content
self.check_content_type(response)
content = None
if not ignore_content:
content = self._read_content(response)
finally:
# Fix: Requests memory leak
# https://github.com/psf/requests/issues/4601
response.close()
return response, content
def _read(self, url, etag=None, last_modified=None, ignore_content=False, proxies=None):
headers = self._prepare_headers(url, etag=etag, last_modified=last_modified)
req = requests.Request('GET', url, headers=headers)
prepared = self.session.prepare_request(req)
response, content = self._send_request(
prepared, ignore_content=ignore_content, proxies=proxies)
return response.headers, content, response.url, response.status_code
def _read_by_rss_proxy(self, url, etag=None, last_modified=None, ignore_content=False):
headers = self._prepare_headers(url, etag=etag, last_modified=last_modified)
data = dict(
url=url,
token=self.rss_proxy_token,
headers=headers,
)
req = requests.Request('POST', self.rss_proxy_url, json=data)
prepared = self.session.prepare_request(req)
response, content = self._send_request(prepared, ignore_content=ignore_content)
if not is_ok_status(response.status_code):
message = 'status={} body={!r}'.format(
response.status_code, self._decode_content(content))
raise RSSProxyError(message)
proxy_status = response.headers.get('x-rss-proxy-status', None)
if proxy_status and proxy_status.upper() == 'ERROR':
message = 'status={} body={!r}'.format(
response.status_code, self._decode_content(content))
raise RSSProxyError(message)
proxy_status = int(proxy_status) if proxy_status else HTTPStatus.OK.value
return response.headers, content, url, proxy_status
def _read_by_proxy(self, url, *args, **kwargs):
if self._use_rss_proxy:
if not self.rss_proxy_url:
raise ValueError("rss_proxy_url not provided")
return self._read_by_rss_proxy(url, *args, **kwargs)
else:
if not self.proxy_url:
raise ValueError("proxy_url not provided")
proxies = {'http': self.proxy_url, 'https': self.proxy_url}
return self._read(url, *args, **kwargs, proxies=proxies)
def read(self, url, *args, use_proxy=False, **kwargs) -> FeedResponse:
headers = content = None
try:
if use_proxy:
headers, content, url, status = self._read_by_proxy(url, *args, **kwargs)
else:
headers, content, url, status = self._read(url, *args, **kwargs)
except (socket.gaierror, NameNotResolvedError):
status = FeedResponseStatus.DNS_ERROR.value
except requests.exceptions.ReadTimeout:
status = FeedResponseStatus.READ_TIMEOUT.value
except (socket.timeout, TimeoutError, requests.exceptions.Timeout,
requests.exceptions.ConnectTimeout):
status = FeedResponseStatus.CONNECTION_TIMEOUT.value
except (ssl.SSLError, ssl.CertificateError, requests.exceptions.SSLError):
status = FeedResponseStatus.SSL_ERROR.value
except requests.exceptions.ProxyError:
status = FeedResponseStatus.PROXY_ERROR.value
except (ConnectionError, requests.exceptions.ConnectionError):
status = FeedResponseStatus.CONNECTION_RESET.value
except requests.exceptions.TooManyRedirects:
status = FeedResponseStatus.TOO_MANY_REDIRECT_ERROR.value
except requests.exceptions.ChunkedEncodingError:
status = FeedResponseStatus.CHUNKED_ENCODING_ERROR.value
except requests.exceptions.ContentDecodingError:
status = FeedResponseStatus.CONTENT_DECODING_ERROR.value
except UnicodeDecodeError:
status = FeedResponseStatus.CONTENT_DECODING_ERROR.value
except PrivateAddressError:
status = FeedResponseStatus.PRIVATE_ADDRESS_ERROR.value
except FeedReaderError as ex:
status = ex.status
LOG.warning(type(ex).__name__ + " url=%s %s", url, ex)
except (requests.HTTPError, requests.RequestException) as ex:
if ex.response is not None:
status = ex.response.status_code
else:
status = FeedResponseStatus.UNKNOWN_ERROR.value
builder = FeedResponseBuilder(use_proxy=use_proxy)
builder.url(url)
builder.status(status)
builder.content(content)
builder.headers(headers)
return builder.build()
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
def close(self):
if self._close_session:
self.session.close()
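# Minimal usage sketch (illustrative only; the feed URL below is a placeholder):
if __name__ == '__main__':
    with FeedReader() as _reader:
        _response = _reader.read('https://example.com/feed.xml')
        print(_response)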
|
py | b40ee58700b020894071bc629669dbdb9418e443 | import sys
import sqlite3
import math
from utils import *
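# Example invocation (index directory and query terms are illustrative):
#   python vs_query.py ./index 10 Y star wars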
def main():
#connect to database
if len(sys.argv)<5:
print ("vs_query.py [index location] [k] [scores(Y/N)] [term_1] [term_2] ...")
return
else:#init
index_dir = str(sys.argv[1])
scores = sys.argv[3]
k = sys.argv[2]
raw_terms= sys.argv[4:]
#connect database
try:
conn = sqlite3.connect(index_dir+'/table.db')
except sqlite3.OperationalError:
print ("database error")
return
for i in range(len(raw_terms)):
raw_terms[i]=clean_word(raw_terms[i])
#make the terms list unique
terms = delete_dup(raw_terms)
#tf-idf scheme for the term weights
#N(# of documents in the collection):
c = conn.cursor()#get all IDs
c.execute("SELECT DISTINCT movie_id FROM searchIndex;")
total = len(c.fetchall())
#calculate idf weight for each term
test_documents=[]#this contains all the documents that contains any terms
term_idf = {}
for term in terms:
#get idf
c= conn.cursor()
c.execute("SELECT DISTINCT movie_id FROM searchIndex WHERE word='"+term+"';")
df = c.fetchall()#contains all the movie_id with term
idf = 0
if len(df)!= 0:
idf = math.log10(total/float(len(df)))
if idf >= 0.05:#if the term is too common just ignore it
term_idf[term]=idf
test_documents = test_documents+(df)
#take all the duplicate out
test_documents = delete_dup(test_documents)
#calculate the td-idf weight for the query
query_tfidf={}
for term in terms:
if term in term_idf:
query_tf = 0
for raw_term in raw_terms:
if term == raw_term:
query_tf += 1
query_tf = 1+math.log10(query_tf)
query_tfidf[term] = query_tf*term_idf[term]
#create dictionary with{'id':[score]}
weight_output = {}
length_output = {}
for movie_id in test_documents:
document_score = 0.0
length = 0.0
for term in term_idf:
c=conn.cursor()
c.execute("SELECT position FROM searchIndex WHERE word='"+term+"' AND movie_id ="+str(movie_id[0])+";")
#there should be only one line(unique constraint)
position_output = c.fetchone()
if position_output is not None:#make sure there are things coming out
list_pos = len(position_output[0].split(","))#turn position into a list,length is the word frequency
tfidf_td = (1+math.log10(list_pos))*term_idf[term]#get weight t,d
length+= tfidf_td #track length
tfidf_final = tfidf_td * query_tfidf[term]#get cosine score without normalization
#calculate the weight and add it to the list
document_score += tfidf_final
weight_output[str(movie_id[0])]=document_score
length_output[str(movie_id[0])]=math.sqrt(length)
#normalization
#calculate length
for movie_id in test_documents:
weight_output[str(movie_id[0])] = weight_output[str(movie_id[0])]/length_output[str(movie_id[0])]
#get K biggest
K_biggest = sorted(weight_output,key=weight_output.get,reverse=True)[:int(k)]
#output
    for i in range(int(k)):
        try:
            if scores.upper() == 'Y':
                print(K_biggest[i] + "\t" + str(weight_output[K_biggest[i]]))
            else:
                print(K_biggest[i])
        except IndexError:
            # fewer than k documents matched the query
            print("-")
    if len(K_biggest) == 0:
        print("no output (search terms too general or K=0)")
if __name__ == "__main__":
    main()
|
py | b40ee7161b874ec33ebf182632979c02412f202a | """Provide the Removal Reason class."""
from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional, Union
from warnings import warn
from ...const import API_PATH
from ...exceptions import ClientException
from ...util.cache import cachedproperty
from .base import RedditBase
if TYPE_CHECKING: # pragma: no cover
from .... import praw
class RemovalReason(RedditBase):
"""An individual Removal Reason object.
**Typical Attributes**
This table describes attributes that typically belong to objects of this class.
Since attributes are dynamically provided (see
:ref:`determine-available-attributes-of-an-object`), there is not a guarantee that
these attributes will always be present, nor is this list necessarily complete.
=========== ==================================
Attribute Description
=========== ==================================
``id`` The id of the removal reason.
``message`` The message of the removal reason.
``title`` The title of the removal reason.
=========== ==================================
"""
STR_FIELD = "id"
@staticmethod
def _warn_reason_id(reason_id_value: Optional[str], id_value: Optional[str]):
"""Reason id param is deprecated. Warns if it's used.
:param reason_id_value: The value passed as parameter ``reason_id``.
        :param id_value: The value passed as parameter ``id``; returned if parameter
            ``reason_id`` is not used.
"""
if reason_id_value is not None:
warn(
"Parameter ``reason_id`` is deprecated. Either use positional"
' arguments (reason_id="x" -> "x") or change the parameter '
'name to ``id`` (reason_id="x" -> id="x"). The parameter will'
" be removed in PRAW 8.",
category=DeprecationWarning,
stacklevel=3,
)
return reason_id_value
return id_value
def __eq__(self, other: Union[str, "RemovalReason"]) -> bool:
"""Return whether the other instance equals the current."""
if isinstance(other, str):
return other == str(self)
return isinstance(other, self.__class__) and str(self) == str(other)
def __hash__(self) -> int:
"""Return the hash of the current instance."""
return hash(self.__class__.__name__) ^ hash(str(self))
def __init__(
self,
reddit: "praw.Reddit",
subreddit: "praw.models.Subreddit",
id: Optional[str] = None, # pylint: disable=redefined-builtin
reason_id: Optional[str] = None,
_data: Optional[Dict[str, Any]] = None,
):
"""Construct an instance of the Removal Reason object.
:param reddit: An instance of :class:`.Reddit`.
:param subreddit: An instance of :class:`.Subreddit`.
:param id: The id of the removal reason.
:param reason_id: (Deprecated) The original name of the ``id`` parameter. Used
for backwards compatibility. This parameter should not be used.
"""
id = self._warn_reason_id(reason_id, id)
if (id, _data).count(None) != 1:
raise ValueError("Either id or _data needs to be given.")
if id:
self.id = id
self.subreddit = subreddit
super().__init__(reddit, _data=_data)
def _fetch(self):
for removal_reason in self.subreddit.mod.removal_reasons:
if removal_reason.id == self.id:
self.__dict__.update(removal_reason.__dict__)
self._fetched = True
return
raise ClientException(
f"Subreddit {self.subreddit} does not have the removal reason {self.id}"
)
def delete(self):
"""Delete a removal reason from this subreddit.
To delete ``"141vv5c16py7d"`` from the subreddit ``"NAME"`` try:
.. code-block:: python
reddit.subreddit("NAME").mod.removal_reasons["141vv5c16py7d"].delete()
"""
url = API_PATH["removal_reason"].format(subreddit=self.subreddit, id=self.id)
self._reddit.delete(url)
def update(self, message: Optional[str] = None, title: Optional[str] = None):
"""Update the removal reason from this subreddit.
.. note::
Existing values will be used for any unspecified arguments.
:param message: The removal reason's new message.
:param title: The removal reason's new title.
To update ``"141vv5c16py7d"`` from the subreddit ``"NAME"`` try:
.. code-block:: python
reddit.subreddit("NAME").mod.removal_reasons["141vv5c16py7d"].update(
message="New message", title="New title"
)
"""
url = API_PATH["removal_reason"].format(subreddit=self.subreddit, id=self.id)
data = {
name: getattr(self, name) if value is None else value
for name, value in {"message": message, "title": title}.items()
}
self._reddit.put(url, data=data)
class SubredditRemovalReasons:
"""Provide a set of functions to a Subreddit's removal reasons."""
def __getitem__(self, reason_id: Union[str, int, slice]) -> RemovalReason:
"""Return the Removal Reason with the ID/number/slice ``reason_id``.
:param reason_id: The ID or index of the removal reason
.. note::
Removal reasons fetched using a specific rule name are lazily loaded, so you
might have to access an attribute to get all of the expected attributes.
This method is to be used to fetch a specific removal reason, like so:
.. code-block:: python
reason_id = "141vv5c16py7d"
reason = reddit.subreddit("NAME").mod.removal_reasons[reason_id]
print(reason)
You can also use indices to get a numbered removal reason. Since Python uses
0-indexing, the first removal reason is index 0, and so on.
.. note::
Both negative indices and slices can be used to interact with the removal
reasons.
:raises: :py:class:`IndexError` if a removal reason of a specific number does
not exist.
For example, to get the second removal reason of the subreddit ``"NAME"``:
.. code-block:: python
reason = reddit.subreddit("NAME").mod.removal_reasons[1]
To get the last three removal reasons in a subreddit:
.. code-block:: python
reasons = reddit.subreddit("NAME").mod.removal_reasons[-3:]
for reason in reasons:
print(reason)
"""
if not isinstance(reason_id, str):
return self._removal_reason_list[reason_id]
return RemovalReason(self._reddit, self.subreddit, reason_id)
def __init__(self, subreddit: "praw.models.Subreddit"):
"""Create a SubredditRemovalReasons instance.
:param subreddit: The subreddit whose removal reasons to work with.
"""
self.subreddit = subreddit
self._reddit = subreddit._reddit
def __iter__(self) -> Iterator[RemovalReason]:
"""Return a list of Removal Reasons for the subreddit.
This method is used to discover all removal reasons for a subreddit:
.. code-block:: python
for removal_reason in reddit.subreddit("NAME").mod.removal_reasons:
print(removal_reason)
"""
return iter(self._removal_reason_list)
@cachedproperty
def _removal_reason_list(self) -> List[RemovalReason]:
"""Get a list of Removal Reason objects.
:returns: A list of instances of :class:`.RemovalReason`.
"""
response = self._reddit.get(
API_PATH["removal_reasons_list"].format(subreddit=self.subreddit)
)
return [
RemovalReason(self._reddit, self.subreddit, _data=reason_data)
for id, reason_data in response["data"].items()
]
def add(self, message: str, title: str) -> RemovalReason:
"""Add a removal reason to this subreddit.
:param message: The message associated with the removal reason.
:param title: The title of the removal reason
:returns: The RemovalReason added.
The message will be prepended with `Hi u/username,` automatically.
To add ``"Test"`` to the subreddit ``"NAME"`` try:
.. code-block:: python
reddit.subreddit("NAME").mod.removal_reasons.add(message="Foobar", title="Test")
"""
data = {"message": message, "title": title}
url = API_PATH["removal_reasons_list"].format(subreddit=self.subreddit)
id = self._reddit.post(url, data=data)
return RemovalReason(self._reddit, self.subreddit, id)
|
py | b40ee792d43a339eebdbd110f90e7d6364367a80 | pkgname = "gst-libav"
pkgver = "1.20.1"
pkgrel = 0
build_style = "meson"
configure_args = ["-Ddefault_library=shared"]
hostmakedepends = ["meson", "pkgconf"]
makedepends = [
"gstreamer-devel", "gst-plugins-base-devel", "orc-devel", "ffmpeg-devel"
]
depends = ["orc", f"gst-plugins-base~{pkgver}"]
pkgdesc = "GStreamer FFmpeg plugin"
maintainer = "q66 <[email protected]>"
license = "LGPL-2.1-or-later"
url = "https://gstreamer.freedesktop.org"
source = f"{url}/src/{pkgname}/{pkgname}-{pkgver}.tar.xz"
sha256 = "91a71fb633b75e1bd52e22a457845cb0ba563a2972ba5954ec88448f443a9fc7"
|
py | b40ee7a109fa4fac7845a59a5d465ceadd979fa9 | from typing import List
class Solution:
def groupAnagrams(self, strs: List[str]) -> List[List[str]]:
        letter_dict = dict()
        for word in strs:
            # Anagrams share the same sorted-letter key.
            key = "".join(sorted(word))
            if letter_dict.get(key, None) is None:
                letter_dict[key] = []
            letter_dict[key].append(word)
return [word for word in letter_dict.values()]
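# Example (illustrative): Solution().groupAnagrams(["eat", "tea", "tan", "ate", "nat", "bat"])
# returns [["eat", "tea", "ate"], ["tan", "nat"], ["bat"]] (group order may vary).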
|
py | b40ee8d836b5a21d3161bcb56d03c7f90107868c | import time
from behave import given, when, then
from selenium.webdriver.common.by import By
from helpers import confirm_table_row_count, confirm_span_exists
@given("I navigate to the category list page")
@when("I navigate to the category list page")
def _(context):
url = context.flask_runner.make_url("categories/list")
context.browser.get(url)
assert "Categories" in context.browser.title
@when("I fill in the category details")
def _(context):
# Having clicked, we need to sleep this thread to allow the server round trip to refresh the page.
# WebDriverWait won't work in this context
time.sleep(1)
row = context.table.rows[0]
context.browser.find_element(By.NAME, "name").send_keys(row["Category"])
@then("There will be {number} categories in the category list")
@then("There will be {number} category in the category list")
def _(context, number):
confirm_table_row_count(context, number, 1)
@then("The category list will be empty")
def _(context):
confirm_span_exists(context, "There are no categories in the database", 1)
|
py | b40ee9047195767539fe6b988c0e5f6735d245a7 | # coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource_py3 import SubResource
class AzureFirewallNetworkRuleCollection(SubResource):
"""Network rule collection resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:param priority: Priority of the network rule collection resource.
:type priority: int
:param action: The action type of a rule collection.
:type action: ~azurefirewall.models.AzureFirewallRCAction
:param rules: Collection of rules used by a network rule collection.
:type rules: list[~azurefirewall.models.AzureFirewallNetworkRule]
:ivar provisioning_state: The provisioning state of the network rule
collection resource. Possible values include: 'Succeeded', 'Updating',
'Deleting', 'Failed'
:vartype provisioning_state: str or
~azurefirewall.models.ProvisioningState
:param name: The name of the resource that is unique within the Azure
firewall. This name can be used to access the resource.
:type name: str
:ivar etag: A unique read-only string that changes whenever the resource
is updated.
:vartype etag: str
"""
_validation = {
'priority': {'maximum': 65000, 'minimum': 100},
'provisioning_state': {'readonly': True},
'etag': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'priority': {'key': 'properties.priority', 'type': 'int'},
'action': {'key': 'properties.action', 'type': 'AzureFirewallRCAction'},
'rules': {'key': 'properties.rules', 'type': '[AzureFirewallNetworkRule]'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, *, id: str=None, priority: int=None, action=None, rules=None, name: str=None, **kwargs) -> None:
super(AzureFirewallNetworkRuleCollection, self).__init__(id=id, **kwargs)
self.priority = priority
self.action = action
self.rules = rules
self.provisioning_state = None
self.name = name
self.etag = None
|
py | b40ee9cabbeaf0d6cbd5f7f3de131430f025fde2 | # -*- coding: utf-8 -*-
'''
salting.py module of salt specific interfaces to raet
'''
# pylint: skip-file
# pylint: disable=W0611
# Import Python libs
# Import ioflo libs
from ioflo.base.odicting import odict
from ioflo.base.consoling import getConsole
console = getConsole()
from raet import raeting, nacling, keeping
from raet.road.keeping import RoadKeep
from salt.key import RaetKey
class SaltSafe(object):
'''
Interface between Salt Key management and RAET keep key management
'''
LocalFields = ['sighex', 'prihex']
RemoteFields = ['eid', 'name', 'acceptance', 'verhex', 'pubhex']
def __init__(self, opts, **kwa):
'''
Setup SaltSafe instance
'''
self.auto = opts['auto_accept']
self.dirpath = opts['pki_dir']
self.saltRaetKey = RaetKey(opts)
def verifyLocalData(self, data):
'''
Returns True if the fields in .LocalFields match the fields in data
'''
return (set(self.LocalFields) == set(data.keys()))
def dumpLocalData(self, data):
'''
Dump the key data from the local estate
'''
self.saltRaetKey.write_local(data['prihex'], data['sighex'])
def loadLocalData(self):
'''
Load and Return the data from the local estate
'''
data = self.saltRaetKey.read_local()
if not data:
return None
return (odict(sighex=data['sign'], prihex=data['priv']))
def clearLocalData(self):
'''
        Clear the data from the local estate
'''
pass
def verifyRemoteData(self, data):
'''
Returns True if the fields in .RemoteFields match the fields in data
'''
return (set(self.RemoteFields) == set(data.keys()))
def dumpRemoteData(self, data, uid):
'''
Dump the data from the remote estate given by uid
'''
self.saltRaetKey.status(data['name'],
data['eid'],
data['pubhex'],
data['verhex'])
def loadAllRemoteData(self):
'''
Load and Return the data from the all the remote estate files
'''
data = odict()
for status, mids in self.saltRaetKey.list_keys().items():
for mid in mids:
keydata = self.saltRaetKey.read_remote(mid, status)
if keydata:
rdata = odict()
rdata['eid'] = keydata['device_id']
rdata['name'] = keydata['minion_id']
rdata['acceptance'] = raeting.ACCEPTANCES[status]
rdata['verhex'] = keydata['verify']
rdata['pubhex'] = keydata['pub']
data[str(rdata['eid'])] = rdata
return data
def clearAllRemoteData(self):
'''
Remove all the remote estate files
'''
self.saltRaetKey.delete_all()
def dumpLocal(self, local):
'''
Dump the key data from the local estate
'''
data = odict([
('sighex', local.signer.keyhex),
('prihex', local.priver.keyhex),
])
if self.verifyLocalData(data):
self.dumpLocalData(data)
def dumpRemote(self, remote):
'''
Dump the data from the remote estate by calling status on it which
will persist the data
'''
data = odict([
('eid', remote.eid),
('name', remote.name),
('acceptance', remote.acceptance),
('verhex', remote.verfer.keyhex),
('pubhex', remote.pubber.keyhex),
])
if self.verifyRemoteData(data):
self.dumpRemoteData(data, remote.uid)
def loadRemote(self, remote):
'''
Load and Return the data from the remote estate file
Override this in sub class to change uid
'''
status='accepted'
mid = remote.name
keydata = self.saltRaetKey.read_remote(mid, status)
if not keydata:
return None
data = odict()
data['eid'] = keydata['device_id']
data['name'] = keydata['minion_id']
data['acceptance'] = raeting.ACCEPTANCES[status]
data['verhex'] = keydata['verify']
data['pubhex'] = keydata['pub']
return data
def clearRemote(self, remote):
'''
Clear the remote estate file
Override this in sub class to change uid
'''
mid = remote.eid
self.saltRaetKey.delete_key(mid)
def statusRemote(self, remote, verhex, pubhex, main=True):
'''
Evaluate acceptance status of remote estate per its keys
persist key data differentially based on status
'''
status = raeting.ACCEPTANCES[self.saltRaetKey.status(remote.name,
remote.eid,
pubhex,
verhex)]
if status != raeting.acceptances.rejected:
if (verhex and verhex != remote.verfer.keyhex):
remote.verfer = nacling.Verifier(verhex)
if (pubhex and pubhex != remote.pubber.keyhex):
remote.pubber = nacling.Publican(pubhex)
remote.acceptance = status
return status
def rejectRemote(self, remote):
'''
Set acceptance status to rejected
'''
remote.acceptance = raeting.acceptances.rejected
mid = remote.name
self.saltRaetKey.reject(match=mid, include_accepted=True)
def pendRemote(self, remote):
'''
Set acceptance status to pending
'''
pass
def acceptRemote(self, remote):
'''
Set acceptance status to accepted
'''
remote.acceptance = raeting.acceptances.accepted
mid = remote.name
self.saltRaetKey.accept(match=mid, include_rejected=True)
def clearAllKeepSafe(dirpath, opts):
'''
Convenience function to clear all road and safe keep data in dirpath
'''
road = RoadKeep(dirpath=dirpath)
road.clearLocalData()
road.clearAllRemoteData()
safe = SaltSafe(opts=opts)
safe.clearLocalData()
safe.clearAllRemoteData()
|
py | b40eeb9b245e9ad02789d903e73b861bb156107f | #!/usr/bin/env python3
#
# C19Web.py
#
# C19Web is a web application written in Python, using
# Streamlit as the presentation layer and Streamlit Share
# to make it generally available.
#
# The structure of this program has all the Streamlit code
# in the main program because of Streamlit requirements.
#
from datetime import datetime
from dateutil.relativedelta import relativedelta
from dateutil import parser
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
import pandas as pd
import streamlit as st
import urllib
test_variable = 'Can i see you?'
#-------------------------------------------------------------------------
# Contains a collection or group of countries in one plot
#-------------------------------------------------------------------------
class Countries():
    def __init__(self, groupName, countryList=None):
        # Avoid a shared mutable default argument for the country list.
        self.groupName = groupName
        self.countryList = countryList if countryList is not None else []
#-------------------------------------------------------------------------
# Country class contains all the input data required to produce a plot
#-------------------------------------------------------------------------
class Country():
def __init__(self, name):
self.name = name
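# Illustrative only: grouping a few countries for a single plot.
# north_america = Countries("North America", [Country("Canada"), Country("United States")])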
|
py | b40eee03ab85c8390c1fa5a5759cf3bfcce63b52 | import re
from enum import Enum
minute_re = re.compile("{0}|{1}|{2}|{3}|{4}".format("(?P<all>\\*)",
"(?P<specific>[0-5]?\\d)",
"(?P<range>[0-5]?\\d-[0-5]?\\d)",
"(?P<list>[0-5]?\\d(,[0-5]?\\d)+)",
"(?P<step>(\\*|[0-5]?\\d)/(([0-5]?[1-9])|([1-5]0)))"))
hour_re = re.compile("{0}|{1}|{2}|{3}|{4}".format("(?P<all>\\*)",
"(?P<specific>[01]?\\d|2[0-3])",
"(?P<range>([01]?\\d|2[0-3])-([01]?\\d|2[0-3]))",
"(?P<list>([01]?\\d|2[0-3])(,([01]?\\d|2[0-3]))+)",
"(?P<step>(\\*|[01]?\\d|2[0-3])/([01]?[1-9]|2[0-3]|10))"))
day_of_month_re = re.compile("{0}|{1}|{2}|{3}|{4}".
format("(?P<all>\\*)",
"(?P<specific>[1-2]?[1-9]|[1-3]0|31)",
"(?P<range>([1-2]?[1-9]|[1-3]0|31)-([1-2]?[1-9]|[1-3]0|31))",
"(?P<list>([1-2]?[1-9]|[1-3]0|31)(,([1-2]?[1-9]|[1-3]0|31))+)",
"(?P<step>(\\*|[1-2]?[1-9]|[1-3]0|31)/([1-2]?[1-9]|[1-3]0|31))"))
month_re = re.compile("{0}|{1}|{2}|{3}|{4}".format("(?P<all>\\*)",
"(?P<specific>[1-9]|1[0-2])",
"(?P<range>([1-9]|1[0-2])-([1-9]|1[0-2]))",
"(?P<list>([1-9]|1[0-2])(,([1-9]|1[0-2]))+)",
"(?P<step>(\\*|[1-9]|1[0-2])/([1-9]|1[0-2]))"))
day_of_week_re = re.compile("{0}|{1}|{2}|{3}|{4}".format("(?P<all>\\*)",
"(?P<specific>[0-6])",
"(?P<range>[0-6]-[0-6])",
"(?P<list>[0-6](,[0-6])+)",
"(?P<step>(\\*|[0-6])/[1-6])"))
regex_list = [
minute_re,
hour_re,
day_of_month_re,
month_re,
day_of_week_re
]
class ElementPart(Enum):
PART_MINUTE = 1
PART_HOUR = 2
PART_DAY_OF_MONTH = 3
PART_MONTH = 4
PART_DAY_OF_WEEK = 5
class ElementKind(Enum):
GROUP_TYPE_ALL = 1
GROUP_TYPE_SPECIFIC = 2
GROUP_TYPE_RANGE = 3
GROUP_TYPE_LIST = 4
GROUP_TYPE_STEP = 5
class Element(object):
kind = None
max_value_map = {
ElementPart.PART_MINUTE: 59,
ElementPart.PART_HOUR: 23,
ElementPart.PART_DAY_OF_MONTH: 31,
        ElementPart.PART_MONTH: 12,
ElementPart.PART_DAY_OF_WEEK: 6
}
def __init__(self, part):
"""
:param ElementPart part:
"""
self.part = part
def _get_value(self, dt):
"""
:param datetime.datetime dt:
:return:
"""
maps = {
ElementPart.PART_MINUTE: 'minute',
ElementPart.PART_HOUR: 'hour',
ElementPart.PART_DAY_OF_MONTH: 'day',
ElementPart.PART_MONTH: 'month',
}
attribute = maps.get(self.part)
if attribute:
return dt.__getattribute__(attribute)
return self._convert_weekday(dt.weekday())
def match(self, dt):
"""
:param datetime.datetime dt:
:return:
"""
raise NotImplementedError()
@staticmethod
def _convert_weekday(weekday):
""" converts the weekday from starting from a week starting from Monday to a week starting from Sunday
For the official crontab documentation (https://man7.org/linux/man-pages/man5/crontab.5.html (2020-09-20)) it
can be seen that their week starts on Sunday, which means SUN = 0, MON = 1, ..., SAT = 6. However, for the
package dateutil, which performs the actual scheduling, the week starts on a Monday, which means MON = 1,
TUE = 2, ..., SUN = 6. Since this package shall imitate the real cron syntax to avoid further confusion, the
weekday is converted to a week where SUN = 0. Nb. the official cron documentation states that 7 shall also be a
valid input and be corresponding to SUN leading to a week where MON = 1, TUE = 2, ..., SUN = 7. This method
respects that, however the regex only allows the maximal input of 6.
:param weekday: integer representing weekday, assuming MON = 1, TUE = 2, ..., SUN = 6
:return: integer representing passed weekday, however the week starts on Sunday meaning SUN = 0, MON = 1, ...
"""
if weekday <= 5:
weekday_week_starting_sunday = weekday + 1
else:
weekday_week_starting_sunday = 0
return weekday_week_starting_sunday
class MatchAllElement(Element):
kind = ElementKind.GROUP_TYPE_ALL
def __init__(self, part, body):
super().__init__(part)
if body != '*':
raise ValueError('MatchAllElement only allow *')
def match(self, dt):
return True
class MatchSpecificElement(Element):
kind = ElementKind.GROUP_TYPE_SPECIFIC
def __init__(self, part, body):
super().__init__(part)
self.value = int(body)
def match(self, dt):
if self._get_value(dt) == self.value:
return True
return False
class MatchListElement(Element):
kind = ElementKind.GROUP_TYPE_LIST
def __init__(self, part, body):
super().__init__(part)
possible_values = body.split(',')
self.values = set()
for value in possible_values:
self.values.add(int(value))
def match(self, dt):
if self._get_value(dt) in self.values:
return True
return False
class MatchRangeElement(Element):
kind = ElementKind.GROUP_TYPE_RANGE
def __init__(self, part, body):
super().__init__(part)
ranges = body.split('-')
from_value = int(ranges[0])
to_value = int(ranges[1])
self.values = set()
if from_value <= to_value:
for i in range(from_value, to_value + 1):
self.values.add(i)
else:
for i in range(from_value, self.max_value_map[self.part] + 1):
self.values.add(i)
for i in range(0, to_value + 1):
self.values.add(i)
def match(self, dt):
if self._get_value(dt) in self.values:
return True
return False
class MatchStepElement(Element):
kind = ElementKind.GROUP_TYPE_STEP
def __init__(self, part, body):
super().__init__(part)
step_parts = body.split('/')
if step_parts[0] == '*':
from_value = 0
else:
from_value = int(step_parts[0])
step_value = int(step_parts[1])
self.values = set()
for i in range(from_value, self.max_value_map[self.part] + 1, step_value):
self.values.add(i)
def match(self, dt):
if self._get_value(dt) in self.values:
return True
return False
element_kind_map = {
'all': MatchAllElement,
'specific': MatchSpecificElement,
'range': MatchRangeElement,
'list': MatchListElement,
'step': MatchStepElement
}
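# Illustrative helper (not part of the original API): build an Element for a single
# cron field by matching it against that field's regex and dispatching on whichever
# named group matched, e.g. _parse_element(ElementPart.PART_MINUTE, '*/15', minute_re).
def _parse_element(part, body, regex):
    match = regex.fullmatch(body)
    if match is None:
        raise ValueError('invalid cron field: {!r}'.format(body))
    for kind, element_cls in element_kind_map.items():
        if match.group(kind) is not None:
            return element_cls(part, body)
    raise ValueError('unsupported cron field: {!r}'.format(body))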
|
py | b40eee7cab30515034bf27a72b86cd6aad2aae0c | """The tests for the Template automation."""
from datetime import timedelta
from unittest import mock
from unittest.mock import patch
import pytest
import homeassistant.components.automation as automation
from homeassistant.components.template import trigger as template_trigger
from homeassistant.const import ATTR_ENTITY_ID, ENTITY_MATCH_ALL, SERVICE_TURN_OFF
from homeassistant.core import Context, callback
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import (
assert_setup_component,
async_fire_time_changed,
async_mock_service,
mock_component,
)
from tests.components.blueprint.conftest import stub_blueprint_populate # noqa: F401
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
@pytest.fixture(autouse=True)
def setup_comp(hass):
"""Initialize components."""
mock_component(hass, "group")
hass.states.async_set("test.entity", "hello")
async def test_if_fires_on_change_bool(hass, calls):
"""Test for firing on boolean change."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "template",
"value_template": '{{ states.test.entity.state == "world" and true }}',
},
"action": {"service": "test.automation"},
}
},
)
assert len(calls) == 0
hass.states.async_set("test.entity", "world")
await hass.async_block_till_done()
assert len(calls) == 1
await hass.services.async_call(
automation.DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: ENTITY_MATCH_ALL},
blocking=True,
)
hass.states.async_set("test.entity", "planet")
await hass.async_block_till_done()
assert len(calls) == 1
async def test_if_fires_on_change_str(hass, calls):
"""Test for firing on change."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "template",
"value_template": '{{ states.test.entity.state == "world" and "true" }}',
},
"action": {"service": "test.automation"},
}
},
)
assert len(calls) == 0
hass.states.async_set("test.entity", "world")
await hass.async_block_till_done()
assert len(calls) == 1
async def test_if_fires_on_change_str_crazy(hass, calls):
"""Test for firing on change."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "template",
"value_template": '{{ states.test.entity.state == "world" and "TrUE" }}',
},
"action": {"service": "test.automation"},
}
},
)
hass.states.async_set("test.entity", "world")
await hass.async_block_till_done()
assert len(calls) == 1
async def test_if_not_fires_when_true_at_setup(hass, calls):
"""Test for not firing during startup."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "template",
"value_template": '{{ states.test.entity.state == "hello" }}',
},
"action": {"service": "test.automation"},
}
},
)
assert len(calls) == 0
hass.states.async_set("test.entity", "hello", force_update=True)
await hass.async_block_till_done()
assert len(calls) == 0
async def test_if_not_fires_when_true_at_setup_variables(hass, calls):
"""Test for not firing during startup + trigger_variables."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger_variables": {"entity": "test.entity"},
"trigger": {
"platform": "template",
"value_template": '{{ is_state(entity|default("test.entity2"), "hello") }}',
},
"action": {"service": "test.automation"},
}
},
)
assert len(calls) == 0
# Assert that the trigger doesn't fire immediately when it's setup
# If trigger_variable 'entity' is not passed to initial check at setup, the
# trigger will immediately fire
hass.states.async_set("test.entity", "hello", force_update=True)
await hass.async_block_till_done()
assert len(calls) == 0
hass.states.async_set("test.entity", "goodbye", force_update=True)
await hass.async_block_till_done()
assert len(calls) == 0
# Assert that the trigger fires after state change
# If trigger_variable 'entity' is not passed to the template trigger, the
# trigger will never fire because it falls back to 'test.entity2'
hass.states.async_set("test.entity", "hello", force_update=True)
await hass.async_block_till_done()
assert len(calls) == 1
async def test_if_not_fires_because_fail(hass, calls):
"""Test for not firing after TemplateError."""
hass.states.async_set("test.number", "1")
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "template",
"value_template": "{{ 84 / states.test.number.state|int == 42 }}",
},
"action": {"service": "test.automation"},
}
},
)
assert len(calls) == 0
hass.states.async_set("test.number", "2")
await hass.async_block_till_done()
assert len(calls) == 1
hass.states.async_set("test.number", "0")
await hass.async_block_till_done()
assert len(calls) == 1
hass.states.async_set("test.number", "2")
await hass.async_block_till_done()
assert len(calls) == 1
async def test_if_not_fires_on_change_bool(hass, calls):
"""Test for not firing on boolean change."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "template",
"value_template": '{{ states.test.entity.state == "world" and false }}',
},
"action": {"service": "test.automation"},
}
},
)
hass.states.async_set("test.entity", "world")
await hass.async_block_till_done()
assert len(calls) == 0
async def test_if_not_fires_on_change_str(hass, calls):
"""Test for not firing on string change."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "template", "value_template": "true"},
"action": {"service": "test.automation"},
}
},
)
hass.states.async_set("test.entity", "world")
await hass.async_block_till_done()
assert len(calls) == 0
async def test_if_not_fires_on_change_str_crazy(hass, calls):
"""Test for not firing on string change."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "template",
"value_template": '{{ "Anything other than true is false." }}',
},
"action": {"service": "test.automation"},
}
},
)
hass.states.async_set("test.entity", "world")
await hass.async_block_till_done()
assert len(calls) == 0
async def test_if_fires_on_no_change(hass, calls):
"""Test for firing on no change."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "template", "value_template": "{{ true }}"},
"action": {"service": "test.automation"},
}
},
)
await hass.async_block_till_done()
cur_len = len(calls)
hass.states.async_set("test.entity", "hello")
await hass.async_block_till_done()
assert cur_len == len(calls)
async def test_if_fires_on_two_change(hass, calls):
"""Test for firing on two changes."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "template",
"value_template": "{{ states.test.entity.state == 'world' }}",
},
"action": {"service": "test.automation"},
}
},
)
# Trigger once
hass.states.async_set("test.entity", "world")
await hass.async_block_till_done()
assert len(calls) == 1
# Trigger again
hass.states.async_set("test.entity", "world")
await hass.async_block_till_done()
assert len(calls) == 1
async def test_if_fires_on_change_with_template(hass, calls):
"""Test for firing on change with template."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "template",
"value_template": '{{ is_state("test.entity", "world") }}',
},
"action": {"service": "test.automation"},
}
},
)
hass.states.async_set("test.entity", "world")
await hass.async_block_till_done()
assert len(calls) == 1
async def test_if_not_fires_on_change_with_template(hass, calls):
"""Test for not firing on change with template."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "template",
"value_template": '{{ is_state("test.entity", "hello") }}',
},
"action": {"service": "test.automation"},
}
},
)
await hass.async_block_till_done()
hass.states.async_set("test.entity", "world")
await hass.async_block_till_done()
assert len(calls) == 0
async def test_if_fires_on_change_with_template_advanced(hass, calls):
"""Test for firing on change with template advanced."""
context = Context()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "template",
"value_template": '{{ is_state("test.entity", "world") }}',
},
"action": {
"service": "test.automation",
"data_template": {
"some": "{{ trigger.%s }}"
% "}} - {{ trigger.".join(
(
"platform",
"entity_id",
"from_state.state",
"to_state.state",
"for",
)
)
},
},
}
},
)
await hass.async_block_till_done()
hass.states.async_set("test.entity", "world", context=context)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].context.parent_id == context.id
assert "template - test.entity - hello - world - None" == calls[0].data["some"]
async def test_if_fires_on_no_change_with_template_advanced(hass, calls):
"""Test for firing on no change with template advanced."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "template",
"value_template": """{%- if is_state("test.entity", "world") -%}
true
{%- else -%}
false
{%- endif -%}""",
},
"action": {"service": "test.automation"},
}
},
)
# Different state
hass.states.async_set("test.entity", "worldz")
await hass.async_block_till_done()
assert len(calls) == 0
# Different state
hass.states.async_set("test.entity", "hello")
await hass.async_block_till_done()
assert len(calls) == 0
async def test_if_fires_on_change_with_template_2(hass, calls):
"""Test for firing on change with template."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "template",
"value_template": '{{ not is_state("test.entity", "world") }}',
},
"action": {"service": "test.automation"},
}
},
)
await hass.async_block_till_done()
hass.states.async_set("test.entity", "world")
await hass.async_block_till_done()
assert len(calls) == 0
hass.states.async_set("test.entity", "home")
await hass.async_block_till_done()
assert len(calls) == 1
hass.states.async_set("test.entity", "work")
await hass.async_block_till_done()
assert len(calls) == 1
hass.states.async_set("test.entity", "not_home")
await hass.async_block_till_done()
assert len(calls) == 1
hass.states.async_set("test.entity", "world")
await hass.async_block_till_done()
assert len(calls) == 1
hass.states.async_set("test.entity", "home")
await hass.async_block_till_done()
assert len(calls) == 2
async def test_if_action(hass, calls):
"""Test for firing if action."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "event", "event_type": "test_event"},
"condition": [
{
"condition": "template",
"value_template": '{{ is_state("test.entity", "world") }}',
}
],
"action": {"service": "test.automation"},
}
},
)
# Condition is not true yet
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 0
# Change condition to true, but it shouldn't be triggered yet
hass.states.async_set("test.entity", "world")
await hass.async_block_till_done()
assert len(calls) == 0
# Condition is true and event is triggered
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
async def test_if_fires_on_change_with_bad_template(hass, calls):
"""Test for firing on change with bad template."""
with assert_setup_component(0, automation.DOMAIN):
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "template", "value_template": "{{ "},
"action": {"service": "test.automation"},
}
},
)
async def test_if_fires_on_change_with_bad_template_2(hass, calls):
"""Test for firing on change with bad template."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "template",
"value_template": "{{ xyz | round(0) }}",
},
"action": {"service": "test.automation"},
}
},
)
hass.states.async_set("test.entity", "world")
await hass.async_block_till_done()
assert len(calls) == 0
async def test_wait_template_with_trigger(hass, calls):
"""Test using wait template with 'trigger.entity_id'."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "template",
"value_template": "{{ states.test.entity.state == 'world' }}",
},
"action": [
{"event": "test_event"},
{"wait_template": "{{ is_state(trigger.entity_id, 'hello') }}"},
{
"service": "test.automation",
"data_template": {
"some": "{{ trigger.%s }}"
% "}} - {{ trigger.".join(
(
"platform",
"entity_id",
"from_state.state",
"to_state.state",
"for",
)
)
},
},
],
}
},
)
await hass.async_block_till_done()
@callback
def event_handler(event):
hass.states.async_set("test.entity", "hello")
hass.bus.async_listen_once("test_event", event_handler)
hass.states.async_set("test.entity", "world")
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "template - test.entity - hello - world - None"
async def test_if_fires_on_change_with_for(hass, calls):
"""Test for firing on change with for."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "template",
"value_template": "{{ is_state('test.entity', 'world') }}",
"for": {"seconds": 5},
},
"action": {"service": "test.automation"},
}
},
)
hass.states.async_set("test.entity", "world")
await hass.async_block_till_done()
assert len(calls) == 0
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert len(calls) == 1
async def test_if_fires_on_change_with_for_advanced(hass, calls):
"""Test for firing on change with for advanced."""
context = Context()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "template",
"value_template": '{{ is_state("test.entity", "world") }}',
"for": {"seconds": 5},
},
"action": {
"service": "test.automation",
"data_template": {
"some": "{{ trigger.%s }}"
% "}} - {{ trigger.".join(
(
"platform",
"entity_id",
"from_state.state",
"to_state.state",
"for",
)
)
},
},
}
},
)
await hass.async_block_till_done()
hass.states.async_set("test.entity", "world", context=context)
await hass.async_block_till_done()
assert len(calls) == 0
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].context.parent_id == context.id
assert "template - test.entity - hello - world - 0:00:05" == calls[0].data["some"]
async def test_if_fires_on_change_with_for_0(hass, calls):
"""Test for firing on change with for: 0."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "template",
"value_template": "{{ is_state('test.entity', 'world') }}",
"for": {"seconds": 0},
},
"action": {"service": "test.automation"},
}
},
)
hass.states.async_set("test.entity", "world")
await hass.async_block_till_done()
assert len(calls) == 1
async def test_if_fires_on_change_with_for_0_advanced(hass, calls):
"""Test for firing on change with for: 0 advanced."""
context = Context()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "template",
"value_template": '{{ is_state("test.entity", "world") }}',
"for": {"seconds": 0},
},
"action": {
"service": "test.automation",
"data_template": {
"some": "{{ trigger.%s }}"
% "}} - {{ trigger.".join(
(
"platform",
"entity_id",
"from_state.state",
"to_state.state",
"for",
)
)
},
},
}
},
)
await hass.async_block_till_done()
hass.states.async_set("test.entity", "world", context=context)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].context.parent_id == context.id
assert calls[0].data["some"] == "template - test.entity - hello - world - 0:00:00"
async def test_if_fires_on_change_with_for_2(hass, calls):
"""Test for firing on change with for."""
context = Context()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "template",
"value_template": "{{ is_state('test.entity', 'world') }}",
"for": 5,
},
"action": {
"service": "test.automation",
"data_template": {
"some": "{{ trigger.%s }}"
% "}} - {{ trigger.".join(
(
"platform",
"entity_id",
"from_state.state",
"to_state.state",
"for",
)
)
},
},
}
},
)
hass.states.async_set("test.entity", "world", context=context)
await hass.async_block_till_done()
assert len(calls) == 0
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].context.parent_id == context.id
assert calls[0].data["some"] == "template - test.entity - hello - world - 0:00:05"
async def test_if_not_fires_on_change_with_for(hass, calls):
"""Test for firing on change with for."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "template",
"value_template": "{{ is_state('test.entity', 'world') }}",
"for": {"seconds": 5},
},
"action": {"service": "test.automation"},
}
},
)
hass.states.async_set("test.entity", "world")
await hass.async_block_till_done()
assert len(calls) == 0
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=4))
await hass.async_block_till_done()
assert len(calls) == 0
hass.states.async_set("test.entity", "hello")
await hass.async_block_till_done()
assert len(calls) == 0
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=6))
await hass.async_block_till_done()
assert len(calls) == 0
async def test_if_not_fires_when_turned_off_with_for(hass, calls):
"""Test for firing on change with for."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "template",
"value_template": "{{ is_state('test.entity', 'world') }}",
"for": {"seconds": 5},
},
"action": {"service": "test.automation"},
}
},
)
hass.states.async_set("test.entity", "world")
await hass.async_block_till_done()
assert len(calls) == 0
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=4))
await hass.async_block_till_done()
assert len(calls) == 0
await hass.services.async_call(
automation.DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: ENTITY_MATCH_ALL},
blocking=True,
)
assert len(calls) == 0
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=6))
await hass.async_block_till_done()
assert len(calls) == 0
async def test_if_fires_on_change_with_for_template_1(hass, calls):
"""Test for firing on change with for template."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "template",
"value_template": "{{ is_state('test.entity', 'world') }}",
"for": {"seconds": "{{ 5 }}"},
},
"action": {"service": "test.automation"},
}
},
)
hass.states.async_set("test.entity", "world")
await hass.async_block_till_done()
assert len(calls) == 0
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert len(calls) == 1
async def test_if_fires_on_change_with_for_template_2(hass, calls):
"""Test for firing on change with for template."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "template",
"value_template": "{{ is_state('test.entity', 'world') }}",
"for": "{{ 5 }}",
},
"action": {"service": "test.automation"},
}
},
)
hass.states.async_set("test.entity", "world")
await hass.async_block_till_done()
assert len(calls) == 0
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert len(calls) == 1
async def test_if_fires_on_change_with_for_template_3(hass, calls):
"""Test for firing on change with for template."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "template",
"value_template": "{{ is_state('test.entity', 'world') }}",
"for": "00:00:{{ 5 }}",
},
"action": {"service": "test.automation"},
}
},
)
hass.states.async_set("test.entity", "world")
await hass.async_block_till_done()
assert len(calls) == 0
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert len(calls) == 1
async def test_invalid_for_template_1(hass, calls):
"""Test for invalid for template."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "template",
"value_template": "{{ is_state('test.entity', 'world') }}",
"for": {"seconds": "{{ five }}"},
},
"action": {"service": "test.automation"},
}
},
)
with mock.patch.object(template_trigger, "_LOGGER") as mock_logger:
hass.states.async_set("test.entity", "world")
await hass.async_block_till_done()
assert mock_logger.error.called
async def test_if_fires_on_time_change(hass, calls):
"""Test for firing on time changes."""
start_time = dt_util.utcnow() + timedelta(hours=24)
time_that_will_not_match_right_away = start_time.replace(minute=1, second=0)
with patch(
"homeassistant.util.dt.utcnow", return_value=time_that_will_not_match_right_away
):
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "template",
"value_template": "{{ utcnow().minute % 2 == 0 }}",
},
"action": {"service": "test.automation"},
}
},
)
await hass.async_block_till_done()
assert len(calls) == 0
# Trigger once (match template)
first_time = start_time.replace(minute=2, second=0)
with patch("homeassistant.util.dt.utcnow", return_value=first_time):
async_fire_time_changed(hass, first_time)
await hass.async_block_till_done()
assert len(calls) == 1
# Trigger again (match template)
second_time = start_time.replace(minute=4, second=0)
with patch("homeassistant.util.dt.utcnow", return_value=second_time):
async_fire_time_changed(hass, second_time)
await hass.async_block_till_done()
await hass.async_block_till_done()
assert len(calls) == 1
# Trigger again (do not match template)
third_time = start_time.replace(minute=5, second=0)
with patch("homeassistant.util.dt.utcnow", return_value=third_time):
async_fire_time_changed(hass, third_time)
await hass.async_block_till_done()
await hass.async_block_till_done()
assert len(calls) == 1
# Trigger again (match template)
forth_time = start_time.replace(minute=8, second=0)
with patch("homeassistant.util.dt.utcnow", return_value=forth_time):
async_fire_time_changed(hass, forth_time)
await hass.async_block_till_done()
await hass.async_block_till_done()
assert len(calls) == 2
|
py | b40eef6988c680f5a817670036098ca7556a3872 | import logging
from pathlib import Path
import pandas as pd
from pandas import DataFrame
log = logging.getLogger(__name__)
class PandasJsonDataSet:
def __init__(self, filepath: Path) -> None:
self.filepath = filepath
@staticmethod
def _check_file_exist(filepath: Path) -> None:
if not filepath.is_file():
raise ValueError(f"{filepath} is not a file")
def load(self) -> DataFrame: # type: ignore
self._check_file_exist(filepath=self.filepath)
return self._load(filepath=self.filepath)
@staticmethod
def _load(filepath: Path) -> DataFrame: # type: ignore
with open(filepath, "r") as f:
df = pd.read_json(f)
log.info(f"Loaded a {type(df)} object from {filepath}")
return df
def save(self, df: DataFrame) -> None: # type: ignore
self._save(filepath=self.filepath, df=df)
@staticmethod
def _save(filepath: Path, df: DataFrame) -> None: # type: ignore
df.to_json(filepath)
log.info(f"Saved a {type(df)} object to {filepath}")
|
py | b40eef81b6cf54b9dc6c33f063e8dd4c3d24bb22 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class RankingRankingItem(Model):
"""Defines a search result item to display.
Variables are only populated by the server, and will be ignored when
sending a request.
:param answer_type: The answer that contains the item to display. Use the
type to find the answer in the SearchResponse object. The type is the name
of a SearchResponse field. Possible values include: 'WebPages', 'Images',
'SpellSuggestions', 'News', 'RelatedSearches', 'Videos', 'Computation',
'TimeZone'. Default value: "WebPages" .
:type answer_type: str or
~azure.cognitiveservices.search.websearch.models.AnswerType
:ivar result_index: A zero-based index of the item in the answer.If the
item does not include this field, display all items in the answer. For
example, display all news articles in the News answer.
:vartype result_index: int
:ivar value: The ID that identifies either an answer to display or an item
of an answer to display. If the ID identifies an answer, display all items
of the answer.
:vartype value:
~azure.cognitiveservices.search.websearch.models.Identifiable
:ivar html_index:
:vartype html_index: int
:ivar textual_index:
:vartype textual_index: int
:ivar screenshot_index:
:vartype screenshot_index: int
"""
_validation = {
'answer_type': {'required': True},
'result_index': {'readonly': True},
'value': {'readonly': True},
'html_index': {'readonly': True},
'textual_index': {'readonly': True},
'screenshot_index': {'readonly': True},
}
_attribute_map = {
'answer_type': {'key': 'answerType', 'type': 'str'},
'result_index': {'key': 'resultIndex', 'type': 'int'},
'value': {'key': 'value', 'type': 'Identifiable'},
'html_index': {'key': 'htmlIndex', 'type': 'int'},
'textual_index': {'key': 'textualIndex', 'type': 'int'},
'screenshot_index': {'key': 'screenshotIndex', 'type': 'int'},
}
def __init__(self, answer_type="WebPages"):
super(RankingRankingItem, self).__init__()
self.answer_type = answer_type
self.result_index = None
self.value = None
self.html_index = None
self.textual_index = None
self.screenshot_index = None
|
py | b40ef010c1495a798eb80d0aabdda8d10ad584f0 | import protocol
import math
import numpy as np
'''
!q3EFEFA44FF68F64602B6F5763B6CFE97FF99FE19 026.1049
0.9842529 -0.08959961 -0.009277344 -0.15197754
!q3EFEFA44FF68F64602B4F55C3BA6FE97FF99FE19 026.1050
0.9842529 -0.08959961 -0.009277344 -0.15197754
!q3EFEFA44FF68F64602BEF58A3B52FE97FF99FE19 026.1052
0.9842529 -0.08959961 -0.009277344 -0.15197754
'''
accel_fsr_g = 2
LITTLE_G = 9.81 # m/s^2
yaw_offset = 0
def handle_quaternion(update):
# Calculate Quaternions
q = [0, 0, 0, 0]
q[0] = update.q1 / 16384.0
q[1] = update.q2 / 16384.0
q[2] = update.q3 / 16384.0
q[3] = update.q4 / 16384.0
for i in xrange(0, 4):
if q[i] >= 2:
q[i] -= 4
print "Quaternion:", q
# Calculate the gravity vector
g = [0, 0, 0]
g[0] = 2 * (q[1]*q[3] - q[0]*q[2])
g[1] = 2 * (q[0]*q[1] + q[2]*q[3])
g[2] = q[0]*q[0] - q[1]*q[1] - q[2]*q[2] + q[3]*q[3]
print "Gravity:", g
# YPR in radians
ypr = [0, 0, 0]
ypr[0] = math.atan2(2*q[1]*q[2] - 2*q[0]*q[3], 2*q[0]*q[0] + 2*q[1]*q[1] - 1)
ypr[1] = math.atan(g[0] / math.sqrt(g[1]**2 + g[2]**2))
ypr[2] = math.atan(g[1] / math.sqrt(g[0]**2 + g[2]**2))
print "YPR:", ypr
# Calculate acceleration
a = [update.accel_x, update.accel_y, update.accel_z]
a = map(lambda x: float(x) / (32768.0 / accel_fsr_g), a)
print "Raw Acceleration:", a
# Calculate Linear Acceleration with gravoty removed
linear_accel = map(lambda x: x*LITTLE_G, [a[0] - g[0], a[1] - g[1], a[2] - g[2]])
print "Linear Acceleration:", linear_accel
return q, g, ypr, linear_accel
def calculate_angular_velocity(now, last, dt):
assert len(now) == 3
assert len(last) == 3
assert dt != 0.0  # dt must be non-zero: it is used as a divisor below
return np.divide(np.subtract(now, last), dt) # in Rad/Sec
if __name__ == "__main__":
last_update = protocol.QuaternionUpdate.from_data("!q3EFEFA44FF68F64602B4F55C3BA6FE97FF99FE19 026.1050\r\n")
last = handle_quaternion(last_update)
now_update = protocol.QuaternionUpdate.from_data("!q3EFEFA44FF68F64602BEF58A3B52FE97FF99FE19 026.1052\r\n")
now = handle_quaternion(now_update)
print calculate_angular_velocity(now[2], last[2], 1.0/100.0)
|
py | b40ef0ad556b61903d8c5558f36a967d507041c9 | import os
import boto3
import pyfaaster.aws.tools as tools
logger = tools.setup_logging('motolicious')
def table(namespace):
name = f'motolicious-{namespace}-test-table'
dynamodb_endpointurl = os.environ.get('DYNAMODB_ENDPOINTURL')
if dynamodb_endpointurl:
logger.debug(f'Connecting to {name} table at {dynamodb_endpointurl}')
return boto3.resource('dynamodb', endpoint_url=dynamodb_endpointurl).Table(name)
else:
logger.debug(f'Connecting to {name} table using default endpoint')
return boto3.resource('dynamodb').Table(name)
def set_value(table, key, value):
logger.debug(f'Saving {value} for {key} in {table.name}')
try:
policy = table.update_item(
Key={'key': key},
# '#name' is an expression alias: plain attribute names such as 'value'
# can collide with DynamoDB reserved words, so they are substituted below.
UpdateExpression='SET #name = :type_value',
ExpressionAttributeNames={
'#name': 'value'
},
ExpressionAttributeValues={
':type_value': value
},
ReturnValues='ALL_NEW'
)
logger.debug(f'Saved record to {table.name}')
return policy
except Exception as err:
logger.exception(err)
raise Exception('General boto error')
|
py | b40ef1773d35d680762b3adc920c20f6e4c09b06 | #!/usr/bin/env python
from __future__ import with_statement
from setuptools import setup, find_packages
from fabric.version import get_version
long_description = """
Fabric39 is a fork of `Fabric 3 <https://github.com/mathiasertl/fabric>`_ to provide compatibility with Python 3.9+. Fabric3 is a deprecated fork of `Fabric <http://fabfile.org>`_ to provide compatibility
with Python 2.7 and 3.4+. Here is the original description of Fabric 3:
The goal is to stay 100% compatible with the original Fabric. Any new releases
of Fabric will also be released here. Please file issues for any differences
you find. Known differences are `documented on github
<https://github.com/mathiasertl/fabric/>`.
To find out what's new in this version of Fabric, please see `the changelog
<http://fabfile.org/changelog.html>`_ of the original Fabric.
For more information, please see the Fabric website or execute ``fab --help``.
"""
install_requires=['paramiko>=2.0,<3.0', 'six>=1.10.0', 'pathos==0.2.8']
setup(
name='Fabric39',
version=get_version('short'),
description='Fabric is a simple, Pythonic tool for remote execution and deployment (py2.7/py3.4+ compatible fork).',
long_description=long_description,
author='Jeff Forcier',
author_email='[email protected]',
maintainer='Brian Abelson',
maintainer_email='[email protected]',
url='https://github.com/Parsely/fabric/',
packages=find_packages(),
test_suite='nose.collector',
tests_require=['nose<2.0', 'fudge<1.0', 'jinja2<3.0'],
install_requires=install_requires,
entry_points={
'console_scripts': [
'fab = fabric.main:main',
]
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: BSD License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Unix',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.9',
'Topic :: Software Development',
'Topic :: Software Development :: Build Tools',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Clustering',
'Topic :: System :: Software Distribution',
'Topic :: System :: Systems Administration',
],
)
|
py | b40ef17bcc6b45a0826fefcafe5cab6517b500e5 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from skl2onnx.proto import onnx_proto
from skl2onnx.common import utils as convert_utils
def set_model_domain(model, domain):
"""
Sets the domain on the ONNX model.
:param model: instance of an ONNX model
:param domain: string containing the domain name of the model
Example:
::
from test_utils import set_model_domain
onnx_model = load_model("SqueezeNet.onnx")
set_model_domain(onnx_model, "com.acme")
"""
if model is None or not isinstance(model, onnx_proto.ModelProto):
raise ValueError("Parameter model is not an onnx model.")
if not convert_utils.is_string_type(domain):
raise ValueError("Parameter domain must be a string type.")
model.domain = domain
def set_model_version(model, version):
"""
Sets the version of the ONNX model.
:param model: instance of an ONNX model
:param version: integer containing the version of the model
Example:
::
from test_utils import set_model_version
onnx_model = load_model("SqueezeNet.onnx")
set_model_version(onnx_model, 1)
"""
if model is None or not isinstance(model, onnx_proto.ModelProto):
raise ValueError("Parameter model is not an onnx model.")
if not convert_utils.is_numeric_type(version):
raise ValueError("Parameter version must be a numeric type.")
model.model_version = version
def set_model_doc_string(model, doc, override=False):
"""
Sets the doc string of the ONNX model.
:param model: instance of an ONNX model
:param doc: string containing the doc string that describes the model.
:param override: bool if true will always override the doc
string with the new value
Example:
::
from test_utils import set_model_doc_string
onnx_model = load_model("SqueezeNet.onnx")
set_model_doc_string(onnx_model, "Sample doc string")
"""
if model is None or not isinstance(model, onnx_proto.ModelProto):
raise ValueError("Parameter model is not an onnx model.")
if not convert_utils.is_string_type(doc):
raise ValueError("Parameter doc must be a string type.")
if model.doc_string and not doc and override is False:
raise ValueError(
"Failed to overwrite the doc string with a blank string,"
" set override to True if intentional."
)
model.doc_string = doc
|
py | b40ef18936de0594b3ed91f387a10d6eaac288d8 | # Generated by Django 2.2.17 on 2021-01-11 14:01
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('battles', '0003_battle_created_at'),
]
operations = [
migrations.AlterField(
model_name='battle',
name='creator',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='battle_as_creator', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='battle',
name='opponent',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='battle_as_opponent', to=settings.AUTH_USER_MODEL),
),
]
|
py | b40ef1a93c795a02d82521b146cbeedc5257af0c | """padlock URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.conf import settings
from django.urls import path, include
from django.contrib.auth.views import login, logout_then_login
from userextensions.urls import *
from _common import views as common
from hostlock import views as hostlock
urlpatterns = [
path('admin/', admin.site.urls),
path('login/', login, {'template_name': 'registration/login.html'}, name="login"),
path('logout/', logout_then_login, name="logout"),
path('detail_user/', common.ShowUserProfile.as_view(), name='detail_user'),
# app urls
path('userextensions/', include('userextensions.urls'), ),
path('hostlock/', include('hostlock.urls'), ),
path('common/', include('_common.urls'), ),
# home/default/index urls
path('', common.PadLockIndex.as_view(), name='index'),
path(r'default', common.PadLockIndex.as_view(), name='default'),
path(r'home', common.PadLockIndex.as_view(), name='home'),
path(r'index', common.PadLockIndex.as_view(), name='index'),
# swagger API docs
path('swagger', common.schema_view, name="swagger"),
]
if settings.DEBUG:
import debug_toolbar
urlpatterns = [path('__debug__/', include(debug_toolbar.urls)), ] + urlpatterns
|
py | b40ef32c6c9286181d452cff2f4dcb362067f46e |
def square_number(n):
return n ** 2
def triangle_number(n):
return n * (n + 1) // 2
squares = []
both = []
n = 1
while len(both) < 3:
nth_square = square_number(n)
nth_triangle = triangle_number(n)
squares.append(nth_square)
# Triangle numbers grow no faster than squares (n*(n+1)/2 <= n**2 for n >= 1),
# so any triangular number that is also a perfect square is already in `squares`.
if nth_triangle in squares:
both.append(nth_triangle)
n += 1
print(both)
|
py | b40ef34975d77ae78bc28c397bdcec354a275fad | #!/usr/bin/python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Play as a human against an agent by setting up a LAN game.
This needs to be called twice, once for the human, and once for the agent.
The human plays on the host. There you run it as:
$ python -m pysc2.bin.play_vs_agent --human --map <map> --remote <agent ip>
And on the machine the agent plays on:
$ python -m pysc2.bin.play_vs_agent --agent <import path>
The `--remote` arg is used to create an SSH tunnel to the remote agent's
machine, so can be dropped if it's running on the same machine.
SC2 is limited to only allow LAN games on localhost, so we need to forward the
ports between machines. SSH is used to do this with the `--remote` arg. If the
agent is on the same machine as the host, this arg can be dropped. SSH doesn't
forward UDP, so this also sets up a UDP proxy. As part of that it sets up a TCP
server that is also used as a settings server. Note that you won't have an
opportunity to give ssh a password, so you must use ssh keys for authentication.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
from absl import logging
import platform
import sys
import time
from absl import app
from absl import flags
import portpicker
from pysc2 import maps
from pysc2 import run_configs
from pysc2.env import lan_sc2_env
from pysc2.env import run_loop
from pysc2.env import sc2_env
from pysc2.lib import point_flag
from pysc2.lib import renderer_human
from s2clientprotocol import sc2api_pb2 as sc_pb
FLAGS = flags.FLAGS
flags.DEFINE_bool("render", platform.system() == "Linux",
"Whether to render with pygame.")
flags.DEFINE_bool("realtime", False, "Whether to run in realtime mode.")
flags.DEFINE_string("agent", "pysc2.agents.random_agent.RandomAgent",
"Which agent to run, as a python path to an Agent class.")
flags.DEFINE_enum("agent_race", "random", sc2_env.Race._member_names_, # pylint: disable=protected-access
"Agent's race.")
flags.DEFINE_float("fps", 22.4, "Frames per second to run the game.")
flags.DEFINE_integer("step_mul", 8, "Game steps per agent step.")
point_flag.DEFINE_point("feature_screen_size", "84",
"Resolution for screen feature layers.")
point_flag.DEFINE_point("feature_minimap_size", "64",
"Resolution for minimap feature layers.")
point_flag.DEFINE_point("rgb_screen_size", "256",
"Resolution for rendered screen.")
point_flag.DEFINE_point("rgb_minimap_size", "128",
"Resolution for rendered minimap.")
flags.DEFINE_enum("action_space", "FEATURES",
sc2_env.ActionSpace._member_names_, # pylint: disable=protected-access
"Which action space to use. Needed if you take both feature "
"and rgb observations.")
flags.DEFINE_bool("use_feature_units", False,
"Whether to include feature units.")
flags.DEFINE_enum("user_race", "random", sc2_env.Race._member_names_, # pylint: disable=protected-access
"User's race.")
flags.DEFINE_string("host", "127.0.0.1", "Game Host. Can be 127.0.0.1 or ::1")
flags.DEFINE_integer(
"config_port", 14380,
"Where to set/find the config port. The host starts a tcp server to share "
"the config with the client, and to proxy udp traffic if played over an "
"ssh tunnel. This sets that port, and is also the start of the range of "
"ports used for LAN play.")
flags.DEFINE_string("remote", None,
"Where to set up the ssh tunnels to the client.")
flags.DEFINE_string("map", None, "Name of a map to use to play.")
flags.DEFINE_bool("human", False, "Whether to host a game as a human.")
def main(unused_argv):
if FLAGS.human:
human()
else:
agent()
def agent():
"""Run the agent, connecting to a (remote) host started independently."""
agent_module, agent_name = FLAGS.agent.rsplit(".", 1)
agent_cls = getattr(importlib.import_module(agent_module), agent_name)
logging.info("Starting agent:")
with lan_sc2_env.LanSC2Env(
host=FLAGS.host,
config_port=FLAGS.config_port,
race=sc2_env.Race[FLAGS.agent_race],
step_mul=FLAGS.step_mul,
agent_interface_format=sc2_env.parse_agent_interface_format(
feature_screen=FLAGS.feature_screen_size,
feature_minimap=FLAGS.feature_minimap_size,
rgb_screen=FLAGS.rgb_screen_size,
rgb_minimap=FLAGS.rgb_minimap_size,
action_space=FLAGS.action_space,
use_feature_units=FLAGS.use_feature_units),
visualize=FLAGS.render) as env:
agents = [agent_cls()]
logging.info("Connected, starting run_loop.")
try:
run_loop.run_loop(agents, env)
except lan_sc2_env.RestartException:
pass
logging.info("Done.")
def human():
"""Run a host which expects one player to connect remotely."""
run_config = run_configs.get()
map_inst = maps.get(FLAGS.map)
if not FLAGS.rgb_screen_size or not FLAGS.rgb_minimap_size:
logging.info("Use --rgb_screen_size and --rgb_minimap_size if you want rgb "
"observations.")
ports = [FLAGS.config_port + p for p in range(5)] # tcp + 2 * num_players
if not all(portpicker.is_port_free(p) for p in ports):
sys.exit("Need 5 free ports after the config port.")
proc = None
ssh_proc = None
tcp_conn = None
udp_sock = None
try:
proc = run_config.start(extra_ports=ports[1:], timeout_seconds=300,
host=FLAGS.host, window_loc=(50, 50))
tcp_port = ports[0]
settings = {
"remote": FLAGS.remote,
"game_version": proc.version.game_version,
"realtime": FLAGS.realtime,
"map_name": map_inst.name,
"map_path": map_inst.path,
"map_data": map_inst.data(run_config),
"ports": {
"server": {"game": ports[1], "base": ports[2]},
"client": {"game": ports[3], "base": ports[4]},
}
}
create = sc_pb.RequestCreateGame(
realtime=settings["realtime"],
local_map=sc_pb.LocalMap(map_path=settings["map_path"]))
create.player_setup.add(type=sc_pb.Participant)
create.player_setup.add(type=sc_pb.Participant)
controller = proc.controller
controller.save_map(settings["map_path"], settings["map_data"])
controller.create_game(create)
if FLAGS.remote:
ssh_proc = lan_sc2_env.forward_ports(
FLAGS.remote, proc.host, [settings["ports"]["client"]["base"]],
[tcp_port, settings["ports"]["server"]["base"]])
print("-" * 80)
print("Join: play_vs_agent --host %s --config_port %s" % (proc.host,
tcp_port))
print("-" * 80)
tcp_conn = lan_sc2_env.tcp_server(
lan_sc2_env.Addr(proc.host, tcp_port), settings)
if FLAGS.remote:
udp_sock = lan_sc2_env.udp_server(
lan_sc2_env.Addr(proc.host, settings["ports"]["client"]["game"]))
lan_sc2_env.daemon_thread(
lan_sc2_env.tcp_to_udp,
(tcp_conn, udp_sock,
lan_sc2_env.Addr(proc.host, settings["ports"]["server"]["game"])))
lan_sc2_env.daemon_thread(lan_sc2_env.udp_to_tcp, (udp_sock, tcp_conn))
join = sc_pb.RequestJoinGame()
join.shared_port = 0 # unused
join.server_ports.game_port = settings["ports"]["server"]["game"]
join.server_ports.base_port = settings["ports"]["server"]["base"]
join.client_ports.add(game_port=settings["ports"]["client"]["game"],
base_port=settings["ports"]["client"]["base"])
join.race = sc2_env.Race[FLAGS.user_race]
if FLAGS.render:
join.options.raw = True
join.options.score = True
if FLAGS.feature_screen_size and FLAGS.feature_minimap_size:
fl = join.options.feature_layer
fl.width = 24
FLAGS.feature_screen_size.assign_to(fl.resolution)
FLAGS.feature_minimap_size.assign_to(fl.minimap_resolution)
if FLAGS.rgb_screen_size and FLAGS.rgb_minimap_size:
FLAGS.rgb_screen_size.assign_to(join.options.render.resolution)
FLAGS.rgb_minimap_size.assign_to(join.options.render.minimap_resolution)
controller.join_game(join)
if FLAGS.render:
renderer = renderer_human.RendererHuman(
fps=FLAGS.fps, render_feature_grid=False)
renderer.run(run_configs.get(), controller, max_episodes=1)
else: # Still step forward so the Mac/Windows renderer works.
try:
while True:
frame_start_time = time.time()
if not FLAGS.realtime:
controller.step()
obs = controller.observe()
if obs.player_result:
break
time.sleep(max(0, frame_start_time - time.time() + 1 / FLAGS.fps))
except KeyboardInterrupt:
pass
finally:
if tcp_conn:
tcp_conn.close()
if proc:
proc.close()
if udp_sock:
udp_sock.close()
if ssh_proc:
ssh_proc.terminate()
for _ in range(5):
if ssh_proc.poll() is not None:
break
time.sleep(1)
if ssh_proc.poll() is None:
ssh_proc.kill()
ssh_proc.wait()
def entry_point(): # Needed so setup.py scripts work.
app.run(main)
if __name__ == "__main__":
app.run(main)
|
py | b40ef36a34d4c60f1b474679e50a056f1674b2e9 | import datetime
import tempfile
import pytest
from iceqube.exceptions import JobNotFound
from iceqube.queue import Queue
from iceqube.scheduler import Scheduler
from sqlalchemy import create_engine
from sqlalchemy.pool import NullPool
@pytest.fixture
def queue():
with tempfile.NamedTemporaryFile() as f:
connection = create_engine(
"sqlite:///{path}".format(path=f.name),
connect_args={"check_same_thread": False},
poolclass=NullPool,
)
q = Queue("pytest", connection)
yield q
@pytest.fixture
def scheduler(queue):
with tempfile.NamedTemporaryFile() as f:
s = Scheduler(queue=queue)
yield s
class TestScheduler(object):
def test_enqueue_at_a_function(self, scheduler):
job_id = scheduler.enqueue_at(datetime.datetime.utcnow(), id)
# is the job recorded in the chosen backend?
assert scheduler.get_job(job_id).job_id == job_id
def test_enqueue_at_a_function_sets_time(self, scheduler):
now = datetime.datetime.utcnow()
job_id = scheduler.enqueue_at(now, id)
with scheduler.session_scope() as session:
scheduled_job = (
scheduler._ns_query(session).filter_by(id=job_id).one_or_none()
)
scheduled_time = scheduled_job.scheduled_time
assert scheduled_time == now
def test_enqueue_at_preserves_extra_metadata(self, scheduler):
metadata = {"saved": True}
job_id = scheduler.enqueue_at(
datetime.datetime.utcnow(), id, extra_metadata=metadata
)
# Do we get back the metadata we save?
assert scheduler.get_job(job_id).extra_metadata == metadata
def test_enqueue_in_a_function(self, scheduler):
job_id = scheduler.enqueue_in(datetime.timedelta(seconds=1000), id)
# is the job recorded in the chosen backend?
assert scheduler.get_job(job_id).job_id == job_id
def test_enqueue_in_a_function_sets_time(self, scheduler):
diff = datetime.timedelta(seconds=1000)
now = datetime.datetime.utcnow()
scheduler._now = lambda: now
job_id = scheduler.enqueue_in(diff, id)
with scheduler.session_scope() as session:
scheduled_job = (
scheduler._ns_query(session).filter_by(id=job_id).one_or_none()
)
scheduled_time = scheduled_job.scheduled_time
assert scheduled_time == now + diff
def test_cancel_removes_job(self, scheduler):
job_id = scheduler.enqueue_at(datetime.datetime.utcnow(), id)
scheduler.cancel(job_id)
with pytest.raises(JobNotFound):
scheduler.get_job(job_id)
def test_schedule_a_function_sets_time(self, scheduler):
now = datetime.datetime.utcnow()
job_id = scheduler.schedule(now, id)
with scheduler.session_scope() as session:
scheduled_job = (
scheduler._ns_query(session).filter_by(id=job_id).one_or_none()
)
scheduled_time = scheduled_job.scheduled_time
assert scheduled_time == now
def test_schedule_a_function_gives_value_error_without_datetime(self, scheduler):
now = "test"
with pytest.raises(ValueError):
scheduler.schedule(now, id)
def test_schedule_a_function_gives_value_error_repeat_zero_interval(
self, scheduler
):
now = datetime.datetime.utcnow()
with pytest.raises(ValueError):
scheduler.schedule(now, id, interval=0, repeat=None)
def test_scheduled_repeating_function_updates_old_job(self, scheduler):
now = datetime.datetime.utcnow()
old_id = scheduler.schedule(now, id, interval=1000, repeat=None)
scheduler.check_schedule()
new_id = scheduler.get_jobs()[0].job_id
assert old_id == new_id
def test_scheduled_repeating_function_sets_endless_repeat_new_job(self, scheduler):
now = datetime.datetime.utcnow()
scheduler.schedule(now, id, interval=1000, repeat=None)
scheduler.check_schedule()
with scheduler.session_scope() as session:
scheduled_job = scheduler._ns_query(session).one_or_none()
repeat = scheduled_job.repeat
assert repeat is None
def test_scheduled_repeating_function_enqueues_job(self, scheduler):
now = datetime.datetime.utcnow()
job_id = scheduler.schedule(now, id, interval=1000, repeat=None)
scheduler.check_schedule()
assert scheduler.queue.fetch_job(job_id).job_id == job_id
def test_scheduled_repeating_function_sets_new_job_with_one_fewer_repeats(
self, scheduler
):
now = datetime.datetime.utcnow()
scheduler.schedule(now, id, interval=1000, repeat=1)
scheduler.check_schedule()
with scheduler.session_scope() as session:
scheduled_job = scheduler._ns_query(session).one_or_none()
repeat = scheduled_job.repeat
assert repeat == 0
def test_scheduled_repeating_function_sets_new_job_at_interval(self, scheduler):
now = datetime.datetime.utcnow()
scheduler.schedule(now, id, interval=1000, repeat=1)
scheduler._now = lambda: now
scheduler.check_schedule()
with scheduler.session_scope() as session:
scheduled_job = scheduler._ns_query(session).one_or_none()
scheduled_time = scheduled_job.scheduled_time
assert scheduled_time == now + datetime.timedelta(seconds=1000)
|
py | b40ef36ac523fa46acc2e9176559007c953d14d4 | # Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Logging middleware for the Swift proxy.
This serves as both the default logging implementation and an example of how
to plug in your own logging format/method.
The logging format implemented below is as follows:
client_ip remote_addr datetime request_method request_path protocol
status_int referer user_agent auth_token bytes_recvd bytes_sent
client_etag transaction_id headers request_time source log_info
These values are space-separated, and each is url-encoded, so that they can
be separated with a simple .split()
* remote_addr is the contents of the REMOTE_ADDR environment variable, while
client_ip is swift's best guess at the end-user IP, extracted variously
from the X-Forwarded-For header, X-Cluster-Ip header, or the REMOTE_ADDR
environment variable.
* source (swift.source in the WSGI environment) indicates the code
that generated the request, such as most middleware. (See below for
more detail.)
* log_info (swift.log_info in the WSGI environment) is for additional
information that could prove quite useful, such as any x-delete-at
value or other "behind the scenes" activity that might not
otherwise be detectable from the plain log information. Code that
wishes to add additional log information should use code like
``env.setdefault('swift.log_info', []).append(your_info)`` so as to
not disturb others' log information.
* Values that are missing (e.g. due to a header not being present) or zero
are generally represented by a single hyphen ('-').
The proxy-logging can be used twice in the proxy server's pipeline when there
is middleware installed that can return custom responses that don't follow the
standard pipeline to the proxy server.
For example, with staticweb, the middleware might intercept a request to
/v1/AUTH_acc/cont/, make a subrequest to the proxy to retrieve
/v1/AUTH_acc/cont/index.html and, in effect, respond to the client's original
request using the 2nd request's body. In this instance the subrequest will be
logged by the rightmost middleware (with a swift.source set) and the outgoing
request (with body overridden) will be logged by leftmost middleware.
Requests that follow the normal pipeline (use the same wsgi environment
throughout) will not be double logged because an environment variable
(swift.proxy_access_log_made) is checked/set when a log is made.
All middleware making subrequests should take care to set swift.source when
needed. With the doubled proxy logs, any consumer/processor of swift's proxy
logs should look at the swift.source field, the rightmost log value, to decide
if this is a middleware subrequest or not. A log processor calculating
bandwidth usage will want to only sum up logs with no swift.source.
"""
import time
from urllib import quote, unquote
from swift.common.swob import Request
from swift.common.utils import (get_logger, get_remote_client,
get_valid_utf8_str, config_true_value,
InputProxy)
from swift.common.constraints import MAX_HEADER_SIZE
QUOTE_SAFE = '/:'
class ProxyLoggingMiddleware(object):
"""
Middleware that logs Swift proxy requests in the swift log format.
"""
def __init__(self, app, conf):
self.app = app
self.log_hdrs = config_true_value(conf.get(
'access_log_headers',
conf.get('log_headers', 'no')))
# The leading access_* check is in case someone assumes that
# log_statsd_valid_http_methods behaves like the other log_statsd_*
# settings.
self.valid_methods = conf.get(
'access_log_statsd_valid_http_methods',
conf.get('log_statsd_valid_http_methods',
'GET,HEAD,POST,PUT,DELETE,COPY,OPTIONS'))
self.valid_methods = [m.strip().upper() for m in
self.valid_methods.split(',') if m.strip()]
access_log_conf = {}
for key in ('log_facility', 'log_name', 'log_level', 'log_udp_host',
'log_udp_port', 'log_statsd_host', 'log_statsd_port',
'log_statsd_default_sample_rate',
'log_statsd_sample_rate_factor',
'log_statsd_metric_prefix'):
value = conf.get('access_' + key, conf.get(key, None))
if value:
access_log_conf[key] = value
self.access_logger = get_logger(access_log_conf,
log_route='proxy-access')
self.access_logger.set_statsd_prefix('proxy-server')
self.reveal_sensitive_prefix = int(conf.get('reveal_sensitive_prefix',
MAX_HEADER_SIZE))
def method_from_req(self, req):
return req.environ.get('swift.orig_req_method', req.method)
def req_already_logged(self, req):
return req.environ.get('swift.proxy_access_log_made')
def mark_req_logged(self, req):
req.environ['swift.proxy_access_log_made'] = True
def obscure_sensitive(self, value):
if not value:
return '-'
if len(value) > self.reveal_sensitive_prefix:
return value[:self.reveal_sensitive_prefix] + '...'
return value
def log_request(self, req, status_int, bytes_received, bytes_sent,
request_time):
"""
Log a request.
:param req: swob.Request object for the request
:param status_int: integer code for the response status
:param bytes_received: bytes successfully read from the request body
:param bytes_sent: bytes yielded to the WSGI server
:param request_time: time taken to satisfy the request, in seconds
"""
if self.req_already_logged(req):
return
req_path = get_valid_utf8_str(req.path)
the_request = quote(unquote(req_path), QUOTE_SAFE)
if req.query_string:
the_request = the_request + '?' + req.query_string
logged_headers = None
if self.log_hdrs:
logged_headers = '\n'.join('%s: %s' % (k, v)
for k, v in req.headers.items())
method = self.method_from_req(req)
self.access_logger.info(' '.join(
quote(str(x) if x else '-', QUOTE_SAFE)
for x in (
get_remote_client(req),
req.remote_addr,
time.strftime('%d/%b/%Y/%H/%M/%S', time.gmtime()),
method,
the_request,
req.environ.get('SERVER_PROTOCOL'),
status_int,
req.referer,
req.user_agent,
self.obscure_sensitive(req.headers.get('x-auth-token')),
bytes_received,
bytes_sent,
req.headers.get('etag', None),
req.environ.get('swift.trans_id'),
logged_headers,
'%.4f' % request_time,
req.environ.get('swift.source'),
','.join(req.environ.get('swift.log_info') or '-'),
)))
self.mark_req_logged(req)
# Log timing and bytes-transfered data to StatsD
metric_name = self.statsd_metric_name(req, status_int, method)
# Only log data for valid controllers (or SOS) to keep the metric count
# down (egregious errors will get logged by the proxy server itself).
if metric_name:
self.access_logger.timing(metric_name + '.timing',
request_time * 1000)
self.access_logger.update_stats(metric_name + '.xfer',
bytes_received + bytes_sent)
def statsd_metric_name(self, req, status_int, method):
if req.path.startswith('/v1/'):
try:
stat_type = [None, 'account', 'container',
'object'][req.path.strip('/').count('/')]
except IndexError:
stat_type = 'object'
else:
stat_type = req.environ.get('swift.source')
if stat_type is None:
return None
stat_method = method if method in self.valid_methods \
else 'BAD_METHOD'
return '.'.join((stat_type, stat_method, str(status_int)))
def __call__(self, env, start_response):
start_response_args = [None]
input_proxy = InputProxy(env['wsgi.input'])
env['wsgi.input'] = input_proxy
start_time = time.time()
def my_start_response(status, headers, exc_info=None):
start_response_args[0] = (status, list(headers), exc_info)
def status_int_for_logging(client_disconnect=False, start_status=None):
# log disconnected clients as '499' status code
if client_disconnect or input_proxy.client_disconnect:
return 499
elif start_status is None:
return int(start_response_args[0][0].split(' ', 1)[0])
return start_status
def iter_response(iterable):
iterator = iter(iterable)
try:
chunk = iterator.next()
while not chunk:
chunk = iterator.next()
except StopIteration:
chunk = ''
for h, v in start_response_args[0][1]:
if h.lower() in ('content-length', 'transfer-encoding'):
break
else:
if not chunk:
start_response_args[0][1].append(('content-length', '0'))
elif isinstance(iterable, list):
start_response_args[0][1].append(
('content-length', str(sum(len(i) for i in iterable))))
start_response(*start_response_args[0])
req = Request(env)
# Log timing information for time-to-first-byte (GET requests only)
method = self.method_from_req(req)
if method == 'GET' and not self.req_already_logged(req):
status_int = status_int_for_logging()
metric_name = self.statsd_metric_name(req, status_int, method)
if metric_name:
self.access_logger.timing_since(
metric_name + '.first-byte.timing', start_time)
bytes_sent = 0
client_disconnect = False
try:
while chunk:
bytes_sent += len(chunk)
yield chunk
chunk = iterator.next()
except GeneratorExit: # generator was closed before we finished
client_disconnect = True
raise
finally:
status_int = status_int_for_logging(client_disconnect)
self.log_request(
req, status_int, input_proxy.bytes_received, bytes_sent,
time.time() - start_time)
try:
iterable = self.app(env, my_start_response)
except Exception:
req = Request(env)
status_int = status_int_for_logging(start_status=500)
self.log_request(
req, status_int, input_proxy.bytes_received, 0,
time.time() - start_time)
raise
else:
return iter_response(iterable)
def filter_factory(global_conf, **local_conf):
conf = global_conf.copy()
conf.update(local_conf)
def proxy_logger(app):
return ProxyLoggingMiddleware(app, conf)
return proxy_logger
|
py | b40ef3835af54d1a4c149e7a4983391ad246cf23 | from __future__ import unicode_literals
import calendar
from datetime import datetime, timedelta
import re
import time
try:
from bzrlib import bzrdir, revisionspec
from bzrlib.errors import BzrError, NotBranchError
from bzrlib.transport import register_lazy_transport
from bzrlib.transport.remote import RemoteSSHTransport
from bzrlib.transport.ssh import (SubprocessVendor, register_ssh_vendor,
register_default_ssh_vendor)
has_bzrlib = True
except ImportError:
has_bzrlib = False
from django.utils import six
from reviewboard.scmtools.core import SCMTool, HEAD, PRE_CREATION
from reviewboard.scmtools.errors import RepositoryNotFoundError, SCMError
from reviewboard.ssh import utils as sshutils
try:
import urlparse
uses_netloc = urlparse.uses_netloc
except ImportError:
import urllib.parse
uses_netloc = urllib.parse.uses_netloc
# Register these URI schemes so we can handle them properly.
sshutils.ssh_uri_schemes.append('bzr+ssh')
uses_netloc.extend(['bzr', 'bzr+ssh'])
if has_bzrlib:
class RBSSHVendor(SubprocessVendor):
"""SSH vendor class that uses rbssh"""
executable_path = 'rbssh'
def __init__(self, local_site_name=None, *args, **kwargs):
super(RBSSHVendor, self).__init__(*args, **kwargs)
self.local_site_name = local_site_name
def _get_vendor_specific_argv(self, username, host, port,
subsystem=None, command=None):
args = [self.executable_path]
if port is not None:
args.extend(['-p', six.text_type(port)])
if username is not None:
args.extend(['-l', username])
if self.local_site_name:
args.extend(['--rb-local-site', self.local_site_name])
if subsystem is not None:
args.extend(['-s', host, subsystem])
else:
args.extend([host] + command)
return args
class RBRemoteSSHTransport(RemoteSSHTransport):
LOCAL_SITE_PARAM_RE = \
re.compile(r'\?rb-local-site-name=([A-Za-z0-9\-_.]+)')
def __init__(self, base, *args, **kwargs):
m = self.LOCAL_SITE_PARAM_RE.search(base)
if m:
self.local_site_name = m.group(1)
base = base.replace(m.group(0), '')
else:
self.local_site_name = None
super(RBRemoteSSHTransport, self).__init__(
base.encode('ascii'), *args, **kwargs)
def _build_medium(self):
client_medium, auth = \
super(RBRemoteSSHTransport, self)._build_medium()
client_medium._vendor = RBSSHVendor(self.local_site_name)
return client_medium, auth
vendor = RBSSHVendor()
register_ssh_vendor("rbssh", vendor)
register_default_ssh_vendor(vendor)
sshutils.register_rbssh('BZR_SSH')
register_lazy_transport('bzr+ssh://', 'reviewboard.scmtools.bzr',
'RBRemoteSSHTransport')
class BZRTool(SCMTool):
"""An interface to the Bazaar SCM (http://bazaar-vcs.org/)"""
name = "Bazaar"
dependencies = {
'modules': ['bzrlib'],
}
# Timestamp format in bzr diffs.
# This isn't totally accurate: there should be a %z at the end.
# Unfortunately, strptime() doesn't support %z.
DIFF_TIMESTAMP_FORMAT = '%Y-%m-%d %H:%M:%S'
# "bzr diff" indicates that a file is new by setting the old
# timestamp to the epoch time.
PRE_CREATION_TIMESTAMP = '1970-01-01 00:00:00 +0000'
def __init__(self, repository):
SCMTool.__init__(self, repository)
def get_file(self, path, revision):
if revision == BZRTool.PRE_CREATION_TIMESTAMP:
return ''
revspec = self._revspec_from_revision(revision)
filepath = self._get_full_path(path)
branch = None
try:
try:
branch, relpath = bzrdir.BzrDir.open_containing_tree_or_branch(
filepath.encode('ascii'))[1:]
branch.lock_read()
revtree = revisionspec.RevisionSpec.from_string(
revspec.encode('ascii')).as_tree(branch)
fileid = revtree.path2id(relpath)
if fileid:
# XXX: get_file_text returns str, which isn't Python 3
# safe. According to the internet they have no immediate
# plans to port to 3, so we may find it hard to support
# that combination.
contents = bytes(revtree.get_file_text(fileid))
else:
contents = b''
except BzrError as e:
raise SCMError(e)
finally:
if branch:
branch.unlock()
return contents
def parse_diff_revision(self, file_str, revision_str, *args, **kwargs):
if revision_str == BZRTool.PRE_CREATION_TIMESTAMP:
return (file_str, PRE_CREATION)
return file_str, revision_str
def get_fields(self):
return ['basedir', 'diff_path', 'parent_diff_path']
def get_diffs_use_absolute_paths(self):
return False
def _get_full_path(self, path, basedir=None):
"""Returns the full path to a file."""
parts = [self.repository.path.rstrip("/")]
if basedir:
parts.append(basedir.strip("/"))
parts.append(path.strip("/"))
final_path = "/".join(parts)
if final_path.startswith("/"):
final_path = "file://%s" % final_path
if self.repository.local_site and sshutils.is_ssh_uri(final_path):
final_path += '?rb-local-site-name=%s' % \
self.repository.local_site.name
return final_path
def _revspec_from_revision(self, revision):
"""Returns a revspec based on the revision found in the diff.
In addition to the standard date format from "bzr diff", this function
supports the revid: syntax provided by the bzr diff-revid plugin.
"""
if revision == HEAD:
revspec = 'last:1'
elif revision.startswith('revid:'):
revspec = revision
else:
revspec = 'date:' + six.text_type(
self._revision_timestamp_to_local(revision))
return revspec
def _revision_timestamp_to_local(self, timestamp_str):
"""Convert a timestamp to local time.
When using a date to ask bzr for a file revision, it expects the date
to be in local time. So, this function converts a timestamp from a bzr
diff file to local time.
"""
timestamp = datetime(*time.strptime(
timestamp_str[0:19], BZRTool.DIFF_TIMESTAMP_FORMAT)[0:6])
# Now, parse the difference to GMT time (such as +0200). If only
# strptime() supported %z, we wouldn't have to do this manually.
delta = timedelta(hours=int(timestamp_str[21:23]),
minutes=int(timestamp_str[23:25]))
if timestamp_str[20] == '+':
timestamp -= delta
else:
timestamp += delta
# convert to local time
return datetime.utcfromtimestamp(
calendar.timegm(timestamp.timetuple()))
@classmethod
def check_repository(cls, path, username=None, password=None,
local_site_name=None):
"""
Performs checks on a repository to test its validity.
This should check if a repository exists and can be connected to.
This will also check if the repository requires an HTTPS certificate.
The result is returned as an exception. The exception may contain
extra information, such as a human-readable description of the problem.
If the repository is valid and can be connected to, no exception
will be thrown.
"""
super(BZRTool, cls).check_repository(path, username, password,
local_site_name)
if local_site_name and sshutils.is_ssh_uri(path):
path += '?rb-local-site-name=%s' % local_site_name
try:
tree, branch, repository, relpath = \
bzrdir.BzrDir.open_containing_tree_branch_or_repository(
path.encode('ascii'))
except AttributeError:
raise RepositoryNotFoundError()
except NotBranchError:
raise RepositoryNotFoundError()
except Exception as e:
raise SCMError(e)
|
py | b40ef5d6daf09c5f1bcd7ce4b3673854247912b7 | # Generated by Django 2.0.10 on 2019-04-05 22:11
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('companies', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Job',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('price', models.CharField(max_length=200)),
('responsibility', models.CharField(max_length=200)),
('requirements', models.CharField(max_length=200)),
('conditions', models.CharField(max_length=200)),
('skills', models.CharField(max_length=201)),
('company', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='companies.Company')),
],
options={
'verbose_name': 'Job',
'verbose_name_plural': 'Jobs',
},
),
]
|
py | b40ef6a7257804bbe3a51fe777802c9111540d99 | import os
import json
import random
import requests
from base64 import b64encode
from dotenv import load_dotenv, find_dotenv
from flask import Flask, Response, jsonify, render_template
load_dotenv(find_dotenv())
# Spotify scopes:
# user-read-currently-playing
# user-read-recently-played
PLACEHOLDER_IMAGE = "iVBORw0KGgoAAAANSUhEUgAAA4QAAAOEBAMAAAALYOIIAAAAFVBMVEXm5ub///8AAAAxMTG+vr6RkZFfX1/R+IhpAAAfE0lEQVR42uzdS3fayBaGYVUHZ1w6AY+zSoYxAZsxxDhjQ8DjGBv//59wBPgeB5fu2rvevfqc1V93LzvSg6Sq2pKI4kPZ6FBEcZHdASERQiKEELI7ICRCSIQQQnYHhEQIiRBCyO6AkNgg4WOZx39OlBrZHRASISRCCCG7A0IihEQIIWR3QEiEkAghhOwOCIlNRHpvtHyJEBIhhJDdASERQiKEELI7ICRCSIQQQnYHhEQIibkJ6b3R8iVCSISQyPZDSISQCCGR7YeQCCERQiK7A0IihMTckd4bLV8ihEQIIWR3QEiEkAghhOwOCIkQEiGEkN0BIRFCYm5Cem+0fIkQEiEksv0QEiEkQkhk+yEkQkiEkMjugJAIITF3pPdGy5cIIRFCCNkdEBIhJEIIIbsDQiKERAghZHdASISQmJuQ3hstXyKERAiJbD+ERAiJEBLZfgiJEBIhJLI7ICRCSMwd6b3R8iVCSIQQQnYHhEQIiRBCyO6AkAghEUII2R0QEiEk5iak90bLlwghEUIi2w8hEUIihES2H0IihEQIiewOCIkQEnNHem+0fIkQEiGEkN0BIRFCIoQQsjsgJEJIhBBCdgeERAiJuQnpvdHyJUJYTbSPKTY2/cs+RwgFxL1bFE2jzvxQV7v/m8YGQimxM79aP9yN3bsaT9br+XT/X0HY4thN9UbuSCUP29U0/S8hbF9MQ+fmON8z42S7grBl0cS28zB2/pWMt1MI2xNNHN1k8Xusyb2BsC3jlzuXr5JJelmEsOlo53kB9zVYRbuVKggbi/Zq5ApWsprGFsJmYgq4cSVUsjYxhA3EFPDOlVSTFYT1x/ikNMA94kIuodCGmf3tSq6LaUzLt8Z4MnKlV7IyFsJ6YmzvXCV1MYWwjmjsr5GrqJIVhNXHuPPbVVjbGYQVx7icqeCR5ZprCKuMxt64ymsLYXUx7t65GmpiIKwq9kaulkquYwgriSeurkruDYQVxF+uxjrfn0whbPeK2ifT/JmFsNRYz0DmzaAmNYSwvNjduNprsLAQlhVNE4L7gakIQgktsbomE38bWlq+pcSmBJ8niBAWjCeNCaZ1HUNYODYqeDgOISwUe40KHq6HEBaJTQvuDSEsEJsXfDW3gDB7NN0WCKaGMwhzRtPZuFbUwECYKza0JvNR9Q2EeaJdutbUBYQ5Yt3dpU/6hxBmjvV2eD+vPxBmjOara1ktIMwUzcmobYTJgiebsjQI7ca1rgYzWr7+sU2D0VdTCwi9Y3zpWln3EHrG9g1lXg1pIPSIrVjbPrZaCuFnsY1DmTeXQwg/iW29EL6s0kB4PLb3Qvh8OYTwaDSdUbsJ08shhEdjmy+ET5dDCI/Fti1uf7jgDeGx2Bu1n/DpHm8IP4wbJ6AGBsJ/xi9ORJ1bCP8Re05IHU6lPNn0980yGymEA1q+H0Zz6cTUEMKPvqjgxAmqBYR/Rzmn0af1bgjf3e906UTVHwjfx3Y8PpFprRTCt7esLZ2w6kP4Nn514mphIXwdN/IIn9bZIJS0svbhOhuEu5I2lnka0UD4HJdOZPUthI+x54TWfrkbQpljmVcjGgjj+NSJrd1NGDzZFMkcyzyOaAwtXytucfSvrhOEkg/CVtxW2vy9vz+d6DqHsCtbMD0MQyeUOqt/P78PmLDnxNcscMKlfMK+DZpQwUHo3CJowqUGwr4NmFDFQfh0GIZJuNRB2LfBEio5CB8PwyAJl1oI94dhiIRqDsLDYRjgk012qYewBffnN/GLFR2Ezs1CJJTeonhbZyESdjUJtuARiwYeolB1EO76hsERym7Wf3QXTXCEl05ZDW1ohCNthIkJjPDUqas/NizCjT7Cx4fVQiHsOYW1CIpwqZGw2Qedav7FXaeyZgERftFJeG7DIRzpJEyaeVatiS7XV6e0bkNp+ZqNVsJ+KIQ9p7YWYRBq61G8axuGQKitR/F2QBMCofnqFNdtEEfhRjNhPwTCnlNdM/2E8U/dhMMAjsKRbsIkVk946pTXH/WES+2EtT/0Wzehdeprppzwm37CodVNuNFPOKibsN5+Yc8FUDPNLV9zGQLhUHXXfhMC4UAz4akLohZ6CTV3Cl/Xd72E2hfXXhbZ1BIGch5t5tsNa/lNoZxH9/dfKD0KR6EQJloJgzmP7s+kGgnDOY828gKMOn6THYVDmOgkPHEupDOpQsKQzqO72b3Go3AUEuFA45NNPefCOpOqa/ma/8IiPFPYtd+ERTjQRxjYefSpd6+I0HwJjfCHOsJNaIR9bYRdF1wZZYRfwyO81UVoluERnik7CkfhESa6CHsuwFpoIgxrifuphqoIlyES9jURdl2QZRQRnoZJWM8r2erpF/4Mk/DM6mn5jsIkHOjp2lc+pZg8vK72fGAWaggreTp7NFmv5vPdkxq7/5nIPr3YZv+i+nlaV+ubcbOcQzWEJU8pxtvVfJr+4GO/1z6JRp35+ve4qWmF1UJY2qGQTLbT/Ug9PdKM9/2rsbXpUdnEEZkYJYTlXArHk/tp/j/G/qCMrh7uRjVfDHUQFm/Yjx+20+eXuBX7U3VqZRxaHYQFL4WT7Wp39izrjuT0n8zXdY1a+zoICz1LMVjPzcuNjCU9L777gfP1po6LoY4Taf5LYXIYvFS0/bazrv5YXKggzDsrHKyq/wh3bipWHKogzLVAmlykB2ANZ6F0lHpXecNJPGGOz/l4Oz30GWvY/vSEelPd1D/RcBRm7xVOVs93n9ay/Sa2V5UNURcKnmz6lhnQ1P9NUrE9qeiqOFTQ8v2ZHbCR77BJz6dVTDOqf0lw9Xsny4e7P4/iep+3ehV3KzflIybyj8IMl8Lkercg3RjhvvdxVfrpdCae0Pu2meTe1G72UTwpGfFWPKHvpfDCNGX2V/xVKuKZlU7oucZ936TZ+2jLHJ0OpBN6rnFfxG0iTGf75c0Tk5lwQr817r5pGWE62S9t3W0hnPCb3wfVtoww/buorMHpUDih12jm3EatI0z/vvO7xPGMXEKfufLARG0k3C3YlDHVH8g+Cr2+omn3DTltJNx1MX6VNbkXS9jzHbK1lDCd6d+VM54RS/g/32tFawlLmCR+l/xkk9ejoYtaOqL5u4m26OL3meiWr8cHeBC3m3A3vyg2NE0kE3Z9P6OtJkzjVSFDI5jQp01xK4Ew7hY5mS4EE/rcij8TQRjbAifTH3IJfUYzg1gGYRz/KjSekUrocfbpiyHM3wweyCX0Gc0M5RDGJzkviLubSYUS9rxHMzIITd6F74VYwi/KCNNpfj7DW6mEXk9TzCQRRlG+de8zsYRLz5ULQYT5DPtiT6QjhYRRnoFpIpWw6zQSRibHwHQmlPBUJ2GUY7VtIfTJJp9bn5JG78DPGU3mGzKGQlu+/2kljDIbnlmZhBu1hJkN+0IJfUZuSSSSMDXMNC5NZJ5Ive5ec0YmYVbDqUhCv3vxF0IJI5NpXLoQSe
j3kP2tVMJshkOJhJ5fGzoUS5jOD/3Ppd9FEvo9WXhuS/y9+79qFPVfa+uLJBx5Drdz/6LY7F8MbP7xb63dvXm24nu9M92IKI3Q+g63s384HmNnPr96eBin9f7lUePxw8N2vntztzHVHpTeX+onkdD3BYjZnqDcTZHTk+V8dTdOfD4g44vV/NG8mu299F7olkfo+6qLPxnOnLHtzNc3d1m7PclkPZ9Pzf7nlb69nn38W4GEX7zXnnx/sr0q8jbf5GG7nr58FkrbXs+pxQ95hN7vsEyM10/urh+KPyaWMq5K316/K8ZZdYSV9bGWvvv19vOf3Ml+8vxnjSfb6GUYW8b2el0OH99dIqrl67108dmzW/ZX6S99nWxnJW6v1wxfIGGGl6qbYz+qU9FLe8crU9r2XnpfL0QRZnh93nn8rze9RuW+S+v9sTh/vsu62PZ6betCHGGG9+Ins4+ex0gHoL9dxZWs5ubQ/iu2vUuVhFm+OPT8gy8E6Vb72vOXQ3G9ew14we312dg/4gj/y7If/7rBy97UA7gfaGxfpos5t9fnwn9Yz5dE+DPTCW32dvnzpD7Aw3RxUXB7Pc6kZ+IIl9mOhNmrMXfNgIcluFmh7fVob/fFEWa8Sy+53n/75+6k9Ns1UnvEvNvrMSYdSLsW2szHwflu5cjYSqcRx+timn+O4fGnNsIIu7kGh+s712htpybn6c7jpDMLgLAFlRy+aSj79noMwBfCCE+d0Bpsc33dl0dr7VYY4TcntibXOaaJHh/ZobAnm/5zgusiynyXhsd64ndZLd98X1rYnkviythsm+9x7T8TRrhxsiudYJRN2BdGOBJOeDgQSyVMhN074+TXxTQuldBB2MCBWC6hEUXYcypqWyrhTBThqQ7Cw9p3WYQLUYTflBDuOiilEf4QRfjFqan7sqb2sgg9Hw8VMjI1JRF+F0X4UxGh608/X2/7qo5wo4nQDa4/+55vr9NOH8JGZ4ifbL7PaWcginDklBnex8XXhBNRTzZpIzx8X/SRzff6HEhq+VrnNBoWuoMtLUmEXYWE7rzYfaRufwOUGMLe/9k7m77EeSCAJy7uOfktePZpSs9ClTNl0TNlKWdQ4ft/hIeCL6gobZJJO5PksptL7fDvJDOZl0QkGXKTbO5onwCFBuEVSYQ7J99s41gHhC1hqC1vQNiW/VCvvjDa57ChQfgnosxQr8o32l8djgbhBVmE0fMJDBOCCJ2dcqdp+vR0V+zHYjqdPjw9PaUp6MHCTHzpeFH5vBwRwjk0OpWm28X0qLPX0SjfYTpdboFQqtmnXO/q0gaEr/S2m+mYye/b/4jdz3qokLsstvY5Jh971vDKzRADwhLfaLNgR11NqrxVR6Pv3hmG/OgP8RpXOCWIEILEmpLNYqzxVmVvmRKjxTcZ8ffPqE6D7oT5jFC9LJ56b8XLG0AttrE5JLaxuteoASEEiRda3n/U89j8rSRnHWu9bNSC754oxbKWpApPyNduoGK0sFbpwUXnPrW1LS8e6u6xfiJU+/J3ey/Jy8Z8TQWkFfcOoRo9s/3TLb8kQHvMaiPzDKHazqB6okt5uR0GhD9MrUR8y85oAuwlpbhfuoeY+4Tw0NpOAL6kFB3nEPFo4aWpqKMxTNXjp6nsLIMWnp4aRnzVgksnCHf2qVgGhNYRqmfu9OqsrkPrdI0G4W+zNZQ5vs/OXfNMPAgNqgvvMsGY8ysJHx4Dwg9Tro+wfyDo/FbJ7jIgtKOFyWs1pmuEOzfRhSIO0GihdupM7ozZVzeRPcAjvEZT2aSbwNYHjIhWmHbBFTEWWEK+ugjzZhHuzJphQGiEMGFNI4T2L8gjBM2UrZhoA3tagwehpjmTNa+FjIn7YUCoi7C84a95hEx2/gWEmgiv24GQcQa2mPaJI1yzdiAEXEypa2HWGoRMdocBoUaQibUHIZfiKSDU2CfahFDKZdgL63+h7UIoH4IW1jVIWcsQ1quXCAjbiFB2hgFhreO19iG0zhBmL2xNvHDtOEBYaSrsxp9iNFF7OggtOxdoEOpp4aCVCO1eLUxcC/9rJ0KrDIlr4XVLEdpkSFwLW4tQinnQwmoGd2sR2mNIXAuT+n9oN6TkxzU0nHOIkhpbvgVxLax/N1ynM70vijRN1ftI0839dDp+8XutCWgpP5G4Fla9WKyEw3bwlulPRydqVPbVezvBMBbQDkPqWjioknMtRee+2FZsPKKSbfFaq2goILdy5k0d4fn7cMoC+dptDtS2GFsQ0ErcgjrCs5epXC512/6k25mQ0kxAfmmO8Jo4wvKU9Psnd5dmbZtU2URKGFV1PwSEFVbSk0/e+Q4PNtpuqdF7Jz4dAcXfdiJsUUK++qYsxmJubroYc6ktoLGLHwPGOO0+WrdB/qmgtphaziXbTLnUFdDUtbgR1BFG+cdHcZji29Hi3V2sJyA3zC9dodFC7ULt/odHyW4BAHAPsdAUkP/2BKF+35nn90dxsUwjsJEs9AQ02w7xIDTowTZ7XbLkJSDAvSbOtAQ06vK49gGhWknbVuj3mjjTqXuaBIRntWPKBHPUo/BuzEVdAU3UMPcDoduxGdd2E+c+IMR0D2yyqOkmGvS2Cgjh7JpaAnYDwtYNteG1PH39fTpDgxDdneijvIaAXHszVAEhoCI+10CovRkqhgfhEB3D6I5XFvCqZQhBkjAf8SEsO4JXbCit7TMlIMmuIAg5RoQ7H5GLSgJ2A8L2WjXjagLq7hN9RAjnOBHuT03PCygDwvZbpmcQPnqAcBKhHXeACOOA0Jl3AYTwBg9Ck4sqWrAhZlAIBwGhO4YwCNeIEF6hRhipmQSxSANClyc1EsIvxISwF2Fn+CwBTmfygNDlWEn7Z6QZIoRd/AijUg9PC6ibrK44IoQGge1WMTwt4IUuQpi2HCDxQn2brVVj9k3IV9enUAJPyFffc2onw0/pT7rfZ4IK4TyiwfBUorC2x9RHhXBCA6Gaia95pNqyxagQXkSUGH4UUHufv2aYEP4hgjBKsk+qY1BheBMQNsbwWECDdmwDVAivyCCM7vixgCZlvjkqhN2IEsMjAU0aJmSoEApCCN+qx8sLKk0qV8eoENI4nnk/8n7NmzZqAMUCwlNjtN0URTEtx5h19v8WxXK7tR16OpxKGl3FpZAhBD+eSe+K13ajQvLj1+BCCsE642nxz9qHVCYJC8MGDsBXw1l/9ASU3mLKGOc/vsYht150Hiw1zlDGz4mRIQQ6nlHpZlw+n9donMam27QNO2oMtZDCxAshcthUul2UlW9C461Y8dS4gTWA+J3hQr7st21+h96+Bm/VWTZMMUeGsGeX3yIzfqvSHlk2GcfMkCG0eDwzKsaW3orLzv2/gLDq1JYCbqa8YuVmlenuUZ2GICqODaGVfWc0ZVKyVt9nVz3kIZEhtFBimD7vzU+IlxT/3Fs2fWwINW+MeV929r0mBdhLdp2bpzE6LZyY7YBjKIFfow7y8sktwht0WmjiGL6WvAMiFDvLBqLv9/djjQ6hgWOYjIUDhLv/dpYOEeYeIVRj4eiSSS4cehgZOoT6TbxWwt09oVz+d
mTWKIkOoXYTr4QzxDfXn3ELUSHUdgxXgjm+rffBhSL24RACxQu1g74KKCL6w1ReOjBNYwEsEcCjNbOBY/cIGXdg1dwwfAivTGxvtwh3s3voxXSNEKGeV3E4z3eNsAxDATPMESKUJoabe4RSwG6IDCPCof5W2ARCKSE3xERgRKjlVQwaRCgfwA1SZAi1vIp1kwgBj2quUS6kv/QN0qYQwhmmA5QIe/i0UMrLIahBigxhF6EWMg50UsNRItTKgGpYC8uTGgg9VBIlQq2D7qa1EIhhHynCCUYthGF4jROhVmmMFsI3Y+RjNFf/1Ns6wwEkQsBQnI5JGtdJHH0VQZTFvYviUO5rI3u/+2jdIIX7nSER6pik/ToIO0Wx2W7T9IPSqDTdbouXEmDdFEXbvkWGFKFOP8SkwpPL/3aK7VP64++cpqPNgn1eXiuKwO0yVAwrwrnW9/rjk3eLJ7tfPlUt21XpdqyXZXpp1SAVWBHqmKSrn9Y3zqbL2kXXajTm9UXgNotcY7QIr7TtmZNPZve6hRBqVG6Nol4xosW4xQrtQqpjkqrs9JNNizvVZspkrQxV8dfmCSlShFptvL4mIZb8bPQCKsuF66T626tD5GgRarXoVh9TgblkxdaWeTja1PIau3M7fzYReBFqRX1vjwTmsrO02jRGbWrUNvCunW+njxihXi7p7N1+u7efLa82HPaA6XQOKVaEermkagxbxVl2xasqwl9L1gxahJptSdWidOQAy3DV7Pik50eLzMZSmiFGqN3TUqUpbGru3biim2ihE5limBHOo7aO0q6pIoKFHkgxbK0WZLxw99823wi776lxXgTzr/A/J1FrqEe3ulN+aZueF8H8K8xRI2x5p/xRJs+KYO5XcNQItcu1XSniMz8rgqldlUjUCM06CLkYd0ycEcH0K4xxI0Rww30ylrBf4QA5QgQXM5c3oEN+hTlyhJYOisEt0x9EMLxCzFXdMtijjXcSNxsih0Poqm4ZDuEkwsQQAOENdoTWW+VDeYgcCuEaPcIeDoRRkgEhzNAj5ENMDO0jTCR+hPMIEUP7TkWMHyEC5/6NYX5ShAsLjr1EWtlkUHLfGMMTIpgtIxl4XNZB28ghHoYqOyGC0fsrQQChnEeI9JB/EcHsWuI+BYTiV4SO4bEIZlHrAQmEPUwIo/7nLFOz46WcBEI2RMXw9lMepdlWyEkg5HNUCKNnC9nMbzotaWjhL1wIo5VpZc/xGTcNhD1kCNXRBmZ4Sp8TQYhsMyzdw9d0fcOIteJEEGLbDPeFOXsJTJs9x5IKQmybYVk3U5bEdAwJlgekRBbSXoRvjIrCvLIqI4OQDyMvRyLpIJz4iTB2gxA8XsiN/WO0YwX+w7oJ+XLj0360IyOEEFXAyd6JuaCE8JePCG9JIez5iDBnlBCaZS/gHIrRQjjxdCskhNBDt2JFDKGHbkVGbCH1z614ubGQEMI/viG8IYew5+c6SgmhYRIKvnWU0UM48XIdJYXQs5U0d6eFLuKFh+mjX+uosx/WIUKvVtJYUETY82sdpYjQp6NuxWginPi0jtJE6NFKuiaqhf6spIpTRTjxaB0litCblTRnVBH6spIqRhfhhT/rKFWEPX/WUaoI/TgnTRhlhF6kBO9TgMki7HmzjjpE6Da4LDxYSV0143Yf8j1MPFhJB7QRtv0SJxuDE0eIr/tF3dGXxBFi6ZhvEKQgj5D6IZvi5BFy4odst5I+QuKuYe6BFtIukEmYDwhJlxquhA8IMbXMr23MZF5oIeX8i1j4gZCwQZMzPxDSNWjKDgl+ICRr0KxYEwjdxgsPU6oGzf5kxukv2UDI9zDlf2kivJHeIGREDZrMI4Q0Q06x9AghTTXMvULIHgkej0qvEHKCkd+1XwgJ+hVKeoaQXirbjW8Iyanh6zXAHiGUxNz7W+Efwi4tNcyYfwhphQ1dtrhoD0JS7n3OfERISQ37okGETcQLX6aXtA64G/wlG/vDdKL3sfQUIZ3D7txbhFRiTn3pLUIqaph7jJCGGsbSY4Q01DDzGiEFNYyl1wgpqGHuOUL8atiXviPEHrBQjdWEtgYh9rhhLAJC3GrYYEFhexDibq13KwJCibrqt+yj3jjCBuOFL1PMOaXrZn+6xkO+b9NHrAQTGRAeatXQxn7zgBB5oVMsA8K3QieUFs1L9m9AyNCW/d7IgPCoSgahRZPwgPB4ijBiMZMB4YcpOoumLwJC3CUWKmMB4acpsjOaZxEQfpnOcdkyLCD8MkW1lM5EQHhiiijqtL+WKSD8MsXjHJbLaHsQNh8vfM/PR3Pc3Yr07RaFfI+Su5Gcs93IgPC7KY6lNJEB4ffTDgKr9P927m4njSAMwPBOguc7ib0CohdQIseYgscYheNGhfu/hApVW9NWYdifme3T9KBvTJvMPKHs7LcwvkX4URZwwH+KCD/M7A/4FyEgLPqx0nweHM2WMPfvMlnWCD/NrE8Wu9syCD/LnE8W+9syCD/NfN8Ox7c1woPyS8ZvhAgPy5tM3whjhfDQzPJ0eBUqhAfn+UOmlzI5EmY0L8z7aajxss5zr3IljNnNDr/XFcLjMrNLmqdYITw2v2V3KYPw2Edp7nIaT1QIE7LO5mhxcV0hTMpc7pZeXtcIEzOPo8X4RRBhSp59zUgQYUqG/g3HtxHhKdm74e55NYQnZc+G42VEeGr2argXRHhq9mj48tQvwlOzN8PX57YzJ8x0BvY++zF8O03kvTllEMZRD/dpLq9jhbCxDKPH7mcTEWGTGc7vuhdE2GiGet39x5cQNpv1osMHRiPCFjKErj5AmumHQMsn3P1n2slFzXQWEbaXHTwUtQl1QNhinrV8Qry8jwXtRpGEsd3TxdUsVghbz0VrVzXj+1BXCDvI0V1rx/kCd6NIwlC38ULczQYrhJ3db2v+hbgp7Y7aWxYwL/x7Nnvje7qMZS2/rJHvv/KmMcTpy2UMwo4zVqvHhgBjwbtRMmFdxdHq4fSzfCh1+UMgfH5LHy0eT30FRoT9Pt8WqvSr06t56csfAuE+68XkeL/J/eztSw0R5nDGWB+lONnMwu4fQJhVztePBzFOppvZ699GmFU+/x4t1tsPGSfb1f4Cph7O8odE+POPVZwvVuvtH9ep0+12tZoPbL0DJPyV9Wj++69q9PyDsH+pIizm3s3777yLg13vcAn/m7Qd5RMWPOSUJY98JUKEEqFEKBEilAglQokQoUQoEUqEw0izNyNfiVAiRGg7EEqEEiFC24FQIpQIEdoOhBKhTCY0ezPylQglQmn9CCVCiVBaP0KJUCKUtgOhRCiT0+zNyFcilAgR2g6EEqFEiNB2IJQIJUKEtgOhRCiTCc3ejHwlQolQWj9CiVAilNaPUCKUCKXtQCgRyuQ0ezPylQglQoS2A6FEKBEitB0IJUKJEKHtQCgRymRCszcjX4lQIpTWj1AilAil9SOUCCVCaTsQSoQyOc3ejHwlQokQoe1AKBFKhAhtB0KJUCJEaDsQSoQymdDszchXIpQIpfUjlAglQmn9CCVCiVDaDoQSoUxOszcjX4lQIkRoOxBKhBIhQtuB
UCKUCBHaDoQSoUwmNHsz8pUIJUJp/QglQolQWj9CiVAilLYDoUQoU/MHrbl8N90396UAAAAASUVORK5CYII="
SPOTIFY_CLIENT_ID = os.getenv("SPOTIFY_CLIENT_ID")
SPOTIFY_SECRET_ID = os.getenv("SPOTIFY_SECRET_ID")
SPOTIFY_REFRESH_TOKEN = os.getenv("SPOTIFY_REFRESH_TOKEN")
REFRESH_TOKEN_URL = "https://accounts.spotify.com/api/token"
NOW_PLAYING_URL = "https://api.spotify.com/v1/me/player/currently-playing"
RECENTLY_PLAYING_URL = (
"https://api.spotify.com/v1/me/player/recently-played?limit=10"
)
app = Flask(__name__)
def getAuth():
return b64encode(f"{SPOTIFY_CLIENT_ID}:{SPOTIFY_SECRET_ID}".encode()).decode(
"ascii"
)
def refreshToken():
data = {
"grant_type": "refresh_token",
"refresh_token": SPOTIFY_REFRESH_TOKEN,
}
headers = {"Authorization": "Basic {}".format(getAuth())}
response = requests.post(REFRESH_TOKEN_URL, data=data, headers=headers)
try:
return response.json()["access_token"]
except KeyError:
print(json.dumps(response.json()))
print("\n---\n")
raise KeyError(str(response.json()))
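# Illustrative helper (not part of the original app): exchange a one-time
# authorization code for the long-lived refresh token expected in
# SPOTIFY_REFRESH_TOKEN. The redirect URI below is a placeholder and must match
# the one registered for the Spotify app.
def exchangeCodeForRefreshToken(code, redirect_uri="http://localhost:5000/callback"):
    data = {
        "grant_type": "authorization_code",
        "code": code,
        "redirect_uri": redirect_uri,
    }
    headers = {"Authorization": "Basic {}".format(getAuth())}
    response = requests.post(REFRESH_TOKEN_URL, data=data, headers=headers)
    return response.json().get("refresh_token")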
def recentlyPlayed():
token = refreshToken()
headers = {"Authorization": f"Bearer {token}"}
response = requests.get(RECENTLY_PLAYING_URL, headers=headers)
if response.status_code == 204:
return {}
return response.json()
def nowPlaying():
token = refreshToken()
headers = {"Authorization": f"Bearer {token}"}
response = requests.get(NOW_PLAYING_URL, headers=headers)
if response.status_code == 204:
return {}
return response.json()
def barGen(barCount):
barCSS = ""
left = 1
for i in range(1, barCount + 1):
anim = random.randint(1000, 1350)
barCSS += (
".bar:nth-child({}) {{ left: {}px; animation-duration: {}ms; }}".format(
i, left, anim
)
)
left += 4
return barCSS
def loadImageB64(url):
    response = requests.get(url)
    return b64encode(response.content).decode("ascii")
def makeSVG(data):
barCount = 84
contentBar = "".join(["<div class='bar'></div>" for i in range(barCount)])
barCSS = barGen(barCount)
if data == {} or data["item"] == "None" or data["item"] is None:
contentBar = "" #Shows/Hides the EQ bar if no song is currently playing
currentStatus = "Щас играет:"
recentPlays = recentlyPlayed()
recentPlaysLength = len(recentPlays["items"])
itemIndex = random.randint(0, recentPlaysLength - 1)
item = recentPlays["items"][itemIndex]["track"]
else:
item = data["item"]
currentStatus = "Vibing to:"
if item["album"]["images"] == []:
image = PLACEHOLDER_IMAGE
    else:
image = loadImageB64(item["album"]["images"][1]["url"])
    artistName = item["artists"][0]["name"].replace("&", "&amp;")
    songName = item["name"].replace("&", "&amp;")
dataDict = {
"contentBar": contentBar,
"barCSS": barCSS,
"artistName": artistName,
"songName": songName,
"image": image,
"status": currentStatus,
}
return render_template("spotify.html.j2", **dataDict)
@app.route("/", defaults={"path": ""})
@app.route("/<path:path>")
def catch_all(path):
data = nowPlaying()
svg = makeSVG(data)
resp = Response(svg, mimetype="image/svg+xml")
resp.headers["Cache-Control"] = "s-maxage=1"
return resp
if __name__ == "__main__":
app.run(debug=True)
|
py | b40ef6e0a6b1ab1c8945a9bb1f7ce1c2530dc6a7 | class UnknownTld(Exception):
pass
class FailedParsingWhoisOutput(Exception):
pass
class UnknownDateFormat(Exception):
pass
class WhoisCommandFailed(Exception):
pass
|
py | b40ef6fa3d3909a2d1db8cd33b47b7f26371d990 | from PySide2 import QtWidgets, QtCore
from modules.GUI_layouts import asap_layout
from modules.applications import widgets, times_tab, clients_tab, invoices_tab
from modules.worker_thread import background_execution
from modules.utilities import toolbox
import pandas as pd
import logging
class Signals(QtCore.QObject):
logging_message_signal = QtCore.Signal(str)
# times tab
times_request_client_name_list_signal = QtCore.Signal()
times_deliver_client_name_list_signal = QtCore.Signal(list)
times_update_client_name_combobox_signal = QtCore.Signal(list)
times_request_client_code_signal = QtCore.Signal(str)
times_deliver_client_code_signal = QtCore.Signal(str)
pushbutton_add_working_day_clicked_signal = QtCore.Signal(dict, dict)
radiobutton_times_query_clicked_signal = QtCore.Signal(str, dict)
pushbutton_times_run_query_clicked_signal = QtCore.Signal(dict)
radiobutton_times_sort_clicked_signal = QtCore.Signal(str)
display_times_query_df_in_tableview_signal = QtCore.Signal(pd.DataFrame)
pushbutton_times_export_query_results_clicked_signal = QtCore.Signal()
pushbutton_times_overwrite_clicked_signal = QtCore.Signal(dict, str, dict)
pushbutton_times_delete_clicked_signal = QtCore.Signal(str, dict)
# clients tab
pushbutton_add_client_clicked_signal = QtCore.Signal(dict, dict)
request_next_index_within_year_signal = QtCore.Signal(int)
deliver_next_index_within_year_signal = QtCore.Signal(int)
radiobutton_clients_query_clicked_signal = QtCore.Signal(str, dict)
pushbutton_clients_run_query_clicked_signal = QtCore.Signal(dict)
radiobutton_clients_sort_clicked_signal = QtCore.Signal(str)
display_clients_query_df_in_tableview_signal = QtCore.Signal(pd.DataFrame)
pushbutton_clients_export_query_results_clicked_signal = QtCore.Signal()
pushbutton_clients_overwrite_clicked_signal = QtCore.Signal(dict, str, dict)
pushbutton_clients_delete_clicked_signal = QtCore.Signal(str, dict)
# invoices tab
invoices_request_client_name_list_signal = QtCore.Signal()
invoices_deliver_client_name_list_signal = QtCore.Signal(list)
invoices_update_client_name_combobox_signal = QtCore.Signal(list)
invoices_request_client_code_next_invoice_index_at_client_rates_and_compensations_signal = QtCore.Signal(str)
invoices_deliver_client_code_next_invoice_index_at_client_rates_and_compensations_signal = QtCore.Signal(dict)
invoices_deliver_client_code_next_invoice_index_at_client_rates_signal = QtCore.Signal(dict)
pushbutton_generate_invoice_clicked_signal = QtCore.Signal(dict, dict)
radiobutton_invoices_query_clicked_signal = QtCore.Signal(str, dict)
pushbutton_invoices_run_query_clicked_signal = QtCore.Signal(dict)
radiobutton_invoices_sort_clicked_signal = QtCore.Signal(str)
display_invoices_query_df_in_tableview_signal = QtCore.Signal(pd.DataFrame)
pushbutton_invoices_export_query_results_clicked_signal = QtCore.Signal()
pushbutton_invoices_overwrite_clicked_signal = QtCore.Signal(dict, str, dict)
pushbutton_invoices_delete_clicked_signal = QtCore.Signal(str, dict)
def __init__(self):
super().__init__()
class ASAPApplication(QtWidgets.QMainWindow):
def __init__(self, params):
super().__init__()
self.params = params
self.DATE_FORMAT = params.DATE_FORMAT
self._initialize_user_interface()
self.signals = Signals()
self._initialize_logger()
self._initialize_background_execution_thread()
self._initialize_tabs()
self._display_welcome_message()
def _initialize_user_interface(self):
self.ui = asap_layout.Ui_MainWindow()
self.ui.setupUi(self)
self.setWindowTitle('A Simple Accounting Program')
self._set_tableview_selection_modes()
self.widgets = widgets.Widgets(self.ui, self.DATE_FORMAT)
def _set_tableview_selection_modes(self):
self.ui.tableView_times_query.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
self.ui.tableView_clients_query.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
self.ui.tableView_invoices_query.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
def _initialize_background_execution_thread(self):
self.background_execution_thread = QtCore.QThread()
self.background_execution = background_execution.BackgroundExecution(self.signals, self.params)
self.background_execution.moveToThread(self.background_execution_thread)
self.background_execution_thread.start()
def _initialize_tabs(self):
self.times_tab = times_tab.TimesTab(self.ui, self.widgets, self.signals, self.background_execution, self.params)
self.clients_tab = clients_tab.ClientsTab(self.ui, self.widgets, self.signals, self.background_execution, self.params)
self.invoices_tab = invoices_tab.InvoicesTab(self.ui, self.widgets, self.signals, self.background_execution, self.params)
self.times_tab.set_initial_values()
self.times_tab.connect_buttons_to_slots()
self.invoices_tab.set_initial_values()
self.invoices_tab.connect_buttons_to_slots()
def _initialize_logger(self):
logger = logging.getLogger('main')
logger.setLevel(logging.DEBUG)
handler = toolbox.QLogHandler(self.signals.logging_message_signal, self.ui.textBrowser_console)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter(fmt='{asctime} | {message}', datefmt='%H:%M:%S', style='{')
handler.setFormatter(formatter)
logger.addHandler(handler)
def _display_welcome_message(self):
self.logger = logging.getLogger('main.' + __name__)
self.logger.info('Welcome to A Simple Accounting Program!')
def closeEvent(self, event):
self.background_execution_thread.quit()
self.deleteLater()
|
py | b40ef74fd84599ef03c741c3a00dac1236c9b2b2 | from django.db import models
import django.contrib.gis.db.models as geo_models
class ObjectLocationType(models.Model):
name = models.CharField(max_length=100, default="")
def __str__(self):
return self.name
class ObjectSubject(models.Model):
name = models.CharField(max_length=100, default="")
def __str__(self):
return self.name
class ObjectCreator(models.Model):
name = models.CharField(max_length=100, default="")
def __str__(self):
return self.name
class ObjectDateType(models.Model):
name = models.CharField(max_length=100, default="")
def __str__(self):
return self.name
class ObjectPublisher(models.Model):
name = models.CharField(max_length=100, default="")
def __str__(self):
return self.name
class LanguageCode(models.Model):
code = models.CharField(max_length=6, default="")
def __str__(self):
return self.code
class Museum(models.Model):
name = models.CharField(max_length=200, default="")
def __str__(self):
return self.name
class CollectionDomain(models.Model):
name = models.CharField(max_length=200, default="")
def __str__(self):
return self.name
class Collection(models.Model):
museum = models.ForeignKey(Museum, on_delete=models.CASCADE)
name = models.CharField(max_length=200, default="")
domain = models.ForeignKey(CollectionDomain, blank=True, on_delete=models.CASCADE)
class MuseumObject(models.Model):
collection = models.ManyToManyField(Collection)
internal_identifier = models.CharField(max_length=100, default="")
subject = models.ManyToManyField(ObjectSubject, blank=True)
description = models.CharField(max_length=5000, default="", blank=True)
creator = models.ManyToManyField(ObjectCreator, blank=True)
publisher = models.ManyToManyField(ObjectPublisher)
language = models.ManyToManyField(LanguageCode)
def __str__(self):
for title in self.objecttitle_set.all():
return f"{title} ({self.pk})"
return f"{self.internal_identifier}"
class ImageColor(models.Model):
value = models.CharField(max_length=80)
def __str__(self):
return self.value
class ObjectImage(models.Model):
museum_object = models.ForeignKey(MuseumObject, on_delete=models.CASCADE)
image = models.ImageField(upload_to="images/")
color = models.ManyToManyField(ImageColor)
class ObjectLocation(models.Model):
location_type = models.ForeignKey(ObjectLocationType, on_delete=models.CASCADE)
term = models.CharField(max_length=200, default="")
location = geo_models.PointField()
museum_object = models.ForeignKey(MuseumObject, on_delete=models.CASCADE)
def __str__(self):
return f"[{self.location_type}] {self.term} {self.location}"
class ObjectDate(models.Model):
type = models.ForeignKey(ObjectDateType, on_delete=models.CASCADE)
value = models.CharField(max_length=80, default="")
museum_object = models.ForeignKey(MuseumObject, on_delete=models.CASCADE)
def __str__(self):
return f"[{self.type}] {self.value} - {self.museum_object}"
class ObjectTitle(models.Model):
title = models.CharField(max_length=1000, default="")
museum_object = models.ForeignKey(MuseumObject, on_delete=models.CASCADE)
def __str__(self):
return self.title
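# Illustrative usage sketch (not part of the original module); it assumes a
# configured Django project with migrations applied for these models.
def create_example_catalogue_entry():
    museum = Museum.objects.create(name="Example Museum")
    domain = CollectionDomain.objects.create(name="Photography")
    collection = Collection.objects.create(museum=museum, name="Early prints", domain=domain)
    museum_object = MuseumObject.objects.create(
        internal_identifier="EX-0001",
        description="A demonstration record",
    )
    museum_object.collection.add(collection)
    ObjectTitle.objects.create(title="Untitled (demo)", museum_object=museum_object)
    # MuseumObject.__str__ falls back to the first related title,
    # so this returns something like "Untitled (demo) (1)".
    return str(museum_object)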
|
py | b40ef7fd6e590c27df4079531fb652a0e58fecc5 | from multiprocessing.pool import Pool
from functools import partial
from itertools import chain
from pathlib import Path
from tqdm import tqdm
import numpy as np
from encoder import inference as encoder
from synthesizer.preprocess_speaker import preprocess_speaker_aidatatang_200zh
data_info = {
"aidatatang_200zh": {
"subfolders": ["corpus/train"],
"speak_func": preprocess_speaker_aidatatang_200zh
}
# TODO add more
}
def preprocess_dataset(datasets_root: Path, out_dir: Path, n_processes: int,
skip_existing: bool, hparams, no_alignments: bool,
dataset: str):
# Gather the input directories
dataset_root = datasets_root.joinpath(dataset)
input_dirs = [dataset_root.joinpath(subfolder.strip()) for subfolder in data_info[dataset]["subfolders"]]
print("\n ".join(map(str, ["Using data from:"] + input_dirs)))
assert all(input_dir.exists() for input_dir in input_dirs)
# Create the output directories for each output file type
out_dir.joinpath("mels").mkdir(exist_ok=True)
out_dir.joinpath("audio").mkdir(exist_ok=True)
# Create a metadata file
metadata_fpath = out_dir.joinpath("train.txt")
metadata_file = metadata_fpath.open("a" if skip_existing else "w", encoding="utf-8")
# Preprocess the dataset
speaker_dirs = list(chain.from_iterable(input_dir.glob("*") for input_dir in input_dirs))
func = partial(data_info[dataset]["speak_func"], out_dir=out_dir, skip_existing=skip_existing,
hparams=hparams, directory=dataset_root, no_alignments=no_alignments)
job = Pool(n_processes).imap(func, speaker_dirs)
for speaker_metadata in tqdm(job, dataset, len(speaker_dirs), unit="speakers"):
for metadatum in speaker_metadata:
metadata_file.write("|".join(str(x) for x in metadatum) + "\n")
metadata_file.close()
# Verify the contents of the metadata file
with metadata_fpath.open("r", encoding="utf-8") as metadata_file:
metadata = [line.split("|") for line in metadata_file]
mel_frames = sum([int(m[4]) for m in metadata])
timesteps = sum([int(m[3]) for m in metadata])
sample_rate = hparams.sample_rate
hours = (timesteps / sample_rate) / 3600
print("The dataset consists of %d utterances, %d mel frames, %d audio timesteps (%.2f hours)." %
(len(metadata), mel_frames, timesteps, hours))
print("Max input length (text chars): %d" % max(len(m[5]) for m in metadata))
print("Max mel frames length: %d" % max(int(m[4]) for m in metadata))
print("Max audio timesteps length: %d" % max(int(m[3]) for m in metadata))
def embed_utterance(fpaths, encoder_model_fpath):
if not encoder.is_loaded():
encoder.load_model(encoder_model_fpath)
# Compute the speaker embedding of the utterance
wav_fpath, embed_fpath = fpaths
wav = np.load(wav_fpath)
wav = encoder.preprocess_wav(wav)
embed = encoder.embed_utterance(wav)
np.save(embed_fpath, embed, allow_pickle=False)
def create_embeddings(synthesizer_root: Path, encoder_model_fpath: Path, n_processes: int):
wav_dir = synthesizer_root.joinpath("audio")
metadata_fpath = synthesizer_root.joinpath("train.txt")
assert wav_dir.exists() and metadata_fpath.exists()
embed_dir = synthesizer_root.joinpath("embeds")
embed_dir.mkdir(exist_ok=True)
# Gather the input wave filepath and the target output embed filepath
with metadata_fpath.open("r", encoding="utf-8") as metadata_file:
metadata = [line.split("|") for line in metadata_file]
fpaths = [(wav_dir.joinpath(m[0]), embed_dir.joinpath(m[2])) for m in metadata]
# TODO: improve on the multiprocessing, it's terrible. Disk I/O is the bottleneck here.
# Embed the utterances in separate threads
func = partial(embed_utterance, encoder_model_fpath=encoder_model_fpath)
job = Pool(n_processes).imap(func, fpaths)
list(tqdm(job, "Embedding", len(fpaths), unit="utterances"))
|
py | b40ef8857b8e23ca9d443ad46843b72dd9a2ffbf | import numpy as np
import scipy.sparse as sp
import torch
import torch.nn as nn
import graphlearning as gl
from .utils import MixedDropout, sparse_matrix_to_torch
def calc_A_hat(adj_matrix: sp.spmatrix) -> sp.spmatrix:
nnodes = adj_matrix.shape[0]
A = adj_matrix + sp.eye(nnodes)
D_vec = np.sum(A, axis=1).A1
D_vec_invsqrt_corr = 1 / np.sqrt(D_vec)
D_invsqrt_corr = sp.diags(D_vec_invsqrt_corr)
# yes @ means multiplication
return D_invsqrt_corr @ A @ D_invsqrt_corr
# this might be the key function to modify
def calc_ppr_exact(adj_matrix: sp.spmatrix, alpha: float) -> np.ndarray:
nnodes = adj_matrix.shape[0]
M = calc_A_hat(adj_matrix)
A_inner = sp.eye(nnodes) - (1 - alpha) * M
return alpha * np.linalg.inv(A_inner.toarray())
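# Illustrative sanity check (not part of the original module): on a tiny
# 3-node path graph, the closed form alpha * (I - (1 - alpha) * A_hat)^-1
# should match the power iteration preds <- (1 - alpha) * A_hat @ preds + alpha * I
# used by PPRPowerIteration below.
def _ppr_sanity_check(alpha: float = 0.1, niter: int = 200) -> bool:
    adj = sp.csr_matrix(np.array([[0., 1., 0.],
                                  [1., 0., 1.],
                                  [0., 1., 0.]]))
    exact = calc_ppr_exact(adj, alpha)
    a_hat = calc_A_hat(adj).toarray()
    approx = np.eye(3)
    for _ in range(niter):
        approx = (1 - alpha) * a_hat @ approx + alpha * np.eye(3)
    return bool(np.allclose(exact, approx, atol=1e-6))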
class PPRExact(nn.Module):
def __init__(self, adj_matrix: sp.spmatrix, alpha: float, drop_prob: float = None):
super().__init__()
ppr_mat = calc_ppr_exact(adj_matrix, alpha)
self.register_buffer('mat', torch.FloatTensor(ppr_mat))
if drop_prob is None or drop_prob == 0:
self.dropout = lambda x: x
else:
self.dropout = MixedDropout(drop_prob)
def forward(self, predictions: torch.FloatTensor, idx: torch.LongTensor):
return self.dropout(self.mat[idx]) @ predictions
class PPRPowerIteration(nn.Module):
def __init__(self, adj_matrix: sp.spmatrix, alpha: float, niter: int, drop_prob: float = None):
super().__init__()
self.alpha = alpha
self.niter = niter
M = calc_A_hat(adj_matrix)
self.register_buffer('A_hat', sparse_matrix_to_torch((1 - alpha) * M))
if drop_prob is None or drop_prob == 0:
self.dropout = lambda x: x
else:
self.dropout = MixedDropout(drop_prob)
def forward(self, local_preds: torch.FloatTensor, idx: torch.LongTensor):
preds = local_preds
for _ in range(self.niter):
A_drop = self.dropout(self.A_hat)
preds = A_drop @ preds + self.alpha * local_preds
return preds[idx]
class PL(nn.Module):
def __init__(self, adj_matrix: sp.spmatrix, alpha: float, niter: int, drop_prob: float = None):
super().__init__()
w=gl.diag_multiply(adj_matrix, 0)
n = adj_matrix.shape[0]
D = gl.degree_matrix(w + 1e-10 * gl.sparse.identity(n), p=-1)
P = D @ w.transpose()
u=np.zeros((n,15))
u=sp.csr_matrix(u)
self.register_buffer('u', sparse_matrix_to_torch(u))
self.register_buffer('P', sparse_matrix_to_torch(P))
self.register_buffer('Diag', sparse_matrix_to_torch(D))
def forward(self, local_preds: torch.FloatTensor, idx: torch.LongTensor):
ut = self.u
Db=self.Diag@local_preds
T=0
while (T<10):
temp=torch.sparse.mm(self.P,ut)
ut=Db+temp
T=T+1
return ut[idx]
|
py | b40ef9f177e162fdf1bf06f3ab5448b8d1b5970f | from __future__ import annotations
from abc import abstractmethod
from enum import Enum
from typing import List
# SQL operations
class Operation(Enum):
EQ = '='
NE = '!='
GE = '>='
LE = '<='
GT = '>'
LT = '<'
LIKE = 'like'
AND = 'and'
OR = 'or'
IS_NULL = 'is null'
IS_NOT_NULL = 'is not null'
# SQL sort order
class SortOrder(Enum):
    # Sort in ascending order.
    ASC = 'asc'
    # Sort in descending order.
    DESC = 'desc'
class MatchMode(Enum):
ANYWHERE = ('%', '%')
END = ('%', '')
EXACT = ('', '')
START = ('', '%')
    def __init__(self, prePattern, postPattern):
        self.__prePattern = prePattern
        self.__postPattern = postPattern
    # Converts the pattern by adding "%" to the start/end of the pattern
    # according to the match mode. Leading and trailing whitespace is stripped.
    # For None and empty strings, None is returned.
    # @param pattern the search pattern
    # @return the pattern for the current mode; if the pattern is None, returns None
    def toMatchString(self, pattern: str) -> str:
        if pattern is None or not pattern.strip():
            return None
        return self.__prePattern + pattern.strip() + self.__postPattern
# SQL expression (part of an SQL query)
class AbstractExpression(object):
@abstractmethod
def hasValue(self) -> bool:
pass
@abstractmethod
def getSqlString(self) -> str:
pass
@abstractmethod
def getArguments(self) -> List[object]:
pass
# SQL query builder
class AbstractQueryBuilder(object):
@abstractmethod
def getQuery(self) -> str:
pass
@abstractmethod
def getArguments(self) -> List[object]:
pass
@abstractmethod
def addPaging(self, startIndex: int, count: int) -> AbstractQueryBuilder:
pass
@abstractmethod
def addSorting(self, sortOrder: SortOrder) -> AbstractQueryBuilder:
pass
    # Add a search condition
@abstractmethod
def addExpression(self, expression: AbstractExpression) -> AbstractQueryBuilder:
pass
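# Illustrative self-check (not part of the original module): the LIKE-pattern
# helper wraps a search term according to the chosen MatchMode.
if __name__ == '__main__':
    assert MatchMode.ANYWHERE.toMatchString('smith') == '%smith%'
    assert MatchMode.START.toMatchString('smith') == 'smith%'
    assert MatchMode.END.toMatchString('smith') == '%smith'
    assert MatchMode.EXACT.toMatchString(' smith ') == 'smith'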
|
py | b40efacb3b27135af03347f8592d11ab3c7da10b | import tornado.httpserver
import tornado.ioloop
from tornado.platform.asyncio import AsyncIOMainLoop
from mitmproxy import addons
from mitmproxy import log
from mitmproxy import master
from mitmproxy import optmanager
from mitmproxy.addons import eventstore
from mitmproxy.addons import intercept
from mitmproxy.addons import readfile
from mitmproxy.addons import termlog
from mitmproxy.addons import view
from mitmproxy.addons import termstatus
from mitmproxy.tools.web import app, webaddons, static_viewer
import click
class WebMaster(master.Master):
def __init__(self, options, with_termlog=True):
super().__init__(options)
self.view = view.View()
self.view.sig_view_add.connect(self._sig_view_add)
self.view.sig_view_remove.connect(self._sig_view_remove)
self.view.sig_view_update.connect(self._sig_view_update)
self.view.sig_view_refresh.connect(self._sig_view_refresh)
self.events = eventstore.EventStore()
self.events.sig_add.connect(self._sig_events_add)
self.events.sig_refresh.connect(self._sig_events_refresh)
self.options.changed.connect(self._sig_options_update)
self.options.changed.connect(self._sig_settings_update)
self.addons.add(*addons.default_addons())
self.addons.add(
webaddons.WebAddon(),
intercept.Intercept(),
readfile.ReadFile(),
static_viewer.StaticViewer(),
self.view,
self.events,
)
if with_termlog:
self.addons.add(termlog.TermLog(), termstatus.TermStatus())
self.app = app.Application(
self, self.options.web_debug
)
def _sig_view_add(self, view, flow):
app.ClientConnection.broadcast(
resource="flows",
cmd="add",
data=app.flow_to_json(flow)
)
def _sig_view_update(self, view, flow):
app.ClientConnection.broadcast(
resource="flows",
cmd="update",
data=app.flow_to_json(flow)
)
def _sig_view_remove(self, view, flow, index):
app.ClientConnection.broadcast(
resource="flows",
cmd="remove",
data=flow.id
)
def _sig_view_refresh(self, view):
app.ClientConnection.broadcast(
resource="flows",
cmd="reset"
)
def _sig_events_add(self, event_store, entry: log.LogEntry):
app.ClientConnection.broadcast(
resource="events",
cmd="add",
data=app.logentry_to_json(entry)
)
def _sig_events_refresh(self, event_store):
app.ClientConnection.broadcast(
resource="events",
cmd="reset"
)
def _sig_options_update(self, options, updated):
options_dict = optmanager.dump_dicts(options, updated)
app.ClientConnection.broadcast(
resource="options",
cmd="update",
data=options_dict
)
def _sig_settings_update(self, options, updated):
app.ClientConnection.broadcast(
resource="settings",
cmd="update",
data={k: getattr(options, k) for k in updated}
)
def run(self): # pragma: no cover
AsyncIOMainLoop().install()
iol = tornado.ioloop.IOLoop.instance()
http_server = tornado.httpserver.HTTPServer(self.app)
http_server.listen(self.options.web_port, self.options.web_host)
web_url = f"http://{self.options.web_host}:{self.options.web_port}/"
click.secho(f"接口mock地址: {web_url}", fg='green')
self.run_loop(iol.start)
|
py | b40efcd43291d808b8f35bad33a06dd2b83da97a | import torch
from .shape import Shape
class Compose(Shape):
def __init__(self,list_of_oparations):
super().__init__()
self.list_of_oparations = list_of_oparations
def signed_distance_function(self, position_tensor):
signed_distance_tensor = None
for operation in self.list_of_oparations:
signed_distance_tensor = operation.modify_signed_distance(position_tensor,signed_distance_tensor)
        return signed_distance_tensor
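# Illustrative sketch (not part of the original module): any object that
# exposes modify_signed_distance(position_tensor, signed_distance_tensor) can
# be passed to Compose. The sphere-union operation below is hypothetical and
# only demonstrates the expected interface.
class UnionSphere:
    def __init__(self, center, radius):
        self.center = torch.as_tensor(center, dtype=torch.float32)
        self.radius = radius
    def modify_signed_distance(self, position_tensor, signed_distance_tensor):
        # Signed distance from each query point to the sphere surface.
        distance = torch.linalg.norm(position_tensor - self.center, dim=-1) - self.radius
        if signed_distance_tensor is None:
            return distance
        # The union of two shapes is the pointwise minimum of their SDFs.
        return torch.minimum(signed_distance_tensor, distance)
|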
py | b40efd05637e6ff4900a2353f923655edd3e76f0 | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
from typing import List, Any, Union
class Dhcpv6RateCpf(Base):
"""
The Dhcpv6RateCpf class encapsulates a list of dhcpv6RateCpf resources that are managed by the user.
A list of resources can be retrieved from the server using the Dhcpv6RateCpf.find() method.
The list can be managed by using the Dhcpv6RateCpf.add() and Dhcpv6RateCpf.remove() methods.
"""
__slots__ = ()
_SDM_NAME = 'dhcpv6RateCpf'
_SDM_ATT_MAP = {
'ForceApplyQTConfig': 'forceApplyQTConfig',
'InputParameters': 'inputParameters',
'Mode': 'mode',
'Name': 'name',
}
_SDM_ENUM_MAP = {
'mode': ['existingMode', 'newMode'],
}
def __init__(self, parent, list_op=False):
super(Dhcpv6RateCpf, self).__init__(parent, list_op)
@property
def Results(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.results_ea5b17fb9e92e99dd19e87ec238a3bb6.Results): An instance of the Results class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.results_ea5b17fb9e92e99dd19e87ec238a3bb6 import Results
if self._properties.get('Results', None) is not None:
return self._properties.get('Results')
else:
return Results(self)._select()
@property
def TestConfig(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.testconfig_082d5de4bf8808f63ffee5f08adb2723.TestConfig): An instance of the TestConfig class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.testconfig_082d5de4bf8808f63ffee5f08adb2723 import TestConfig
if self._properties.get('TestConfig', None) is not None:
return self._properties.get('TestConfig')
else:
return TestConfig(self)._select()
@property
def ForceApplyQTConfig(self):
# type: () -> bool
"""
Returns
-------
- bool: Apply QT config
"""
return self._get_attribute(self._SDM_ATT_MAP['ForceApplyQTConfig'])
@ForceApplyQTConfig.setter
def ForceApplyQTConfig(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['ForceApplyQTConfig'], value)
@property
def InputParameters(self):
# type: () -> str
"""
Returns
-------
- str: Input Parameters
"""
return self._get_attribute(self._SDM_ATT_MAP['InputParameters'])
@InputParameters.setter
def InputParameters(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['InputParameters'], value)
@property
def Mode(self):
# type: () -> str
"""
Returns
-------
- str(existingMode | newMode): Test mode
"""
return self._get_attribute(self._SDM_ATT_MAP['Mode'])
@Mode.setter
def Mode(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Mode'], value)
@property
def Name(self):
# type: () -> str
"""
Returns
-------
- str: Test name
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
def update(self, ForceApplyQTConfig=None, InputParameters=None, Mode=None, Name=None):
# type: (bool, str, str, str) -> Dhcpv6RateCpf
"""Updates dhcpv6RateCpf resource on the server.
Args
----
- ForceApplyQTConfig (bool): Apply QT config
- InputParameters (str): Input Parameters
- Mode (str(existingMode | newMode)): Test mode
- Name (str): Test name
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def add(self, ForceApplyQTConfig=None, InputParameters=None, Mode=None, Name=None):
# type: (bool, str, str, str) -> Dhcpv6RateCpf
"""Adds a new dhcpv6RateCpf resource on the server and adds it to the container.
Args
----
- ForceApplyQTConfig (bool): Apply QT config
- InputParameters (str): Input Parameters
- Mode (str(existingMode | newMode)): Test mode
- Name (str): Test name
Returns
-------
- self: This instance with all currently retrieved dhcpv6RateCpf resources using find and the newly added dhcpv6RateCpf resources available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
def remove(self):
"""Deletes all the contained dhcpv6RateCpf resources in this instance from the server.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self, ForceApplyQTConfig=None, InputParameters=None, Mode=None, Name=None):
# type: (bool, str, str, str) -> Dhcpv6RateCpf
"""Finds and retrieves dhcpv6RateCpf resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve dhcpv6RateCpf resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all dhcpv6RateCpf resources from the server.
Args
----
- ForceApplyQTConfig (bool): Apply QT config
- InputParameters (str): Input Parameters
- Mode (str(existingMode | newMode)): Test mode
- Name (str): Test name
Returns
-------
- self: This instance with matching dhcpv6RateCpf resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of dhcpv6RateCpf data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the dhcpv6RateCpf resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def Apply(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the apply operation on the server.
Applies the specified Quick Test.
apply(async_operation=bool)
---------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('apply', payload=payload, response_object=None)
def ApplyAsync(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the applyAsync operation on the server.
applyAsync(async_operation=bool)
--------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('applyAsync', payload=payload, response_object=None)
def ApplyAsyncResult(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[bool, None]
"""Executes the applyAsyncResult operation on the server.
applyAsyncResult(async_operation=bool)bool
------------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns bool:
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('applyAsyncResult', payload=payload, response_object=None)
def ApplyITWizardConfiguration(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the applyITWizardConfiguration operation on the server.
Applies the specified Quick Test.
applyITWizardConfiguration(async_operation=bool)
------------------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('applyITWizardConfiguration', payload=payload, response_object=None)
def GenerateReport(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[str, None]
"""Executes the generateReport operation on the server.
Generate a PDF report for the last succesfull test run.
generateReport(async_operation=bool)string
------------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns str: This method is asynchronous and has no return value.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('generateReport', payload=payload, response_object=None)
def Run(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[List[str], None]
"""Executes the run operation on the server.
Starts the specified Quick Test and waits for its execution to finish.
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
run(async_operation=bool)list
-----------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): This method is synchronous and returns the result of the test.
run(InputParameters=string, async_operation=bool)list
-----------------------------------------------------
- InputParameters (str): The input arguments of the test.
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): This method is synchronous and returns the result of the test.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('run', payload=payload, response_object=None)
def Start(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the start operation on the server.
Starts the specified Quick Test.
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
start(async_operation=bool)
---------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
start(InputParameters=string, async_operation=bool)
---------------------------------------------------
- InputParameters (str): The input arguments of the test.
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('start', payload=payload, response_object=None)
def Stop(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the stop operation on the server.
Stops the currently running Quick Test.
stop(async_operation=bool)
--------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('stop', payload=payload, response_object=None)
def WaitForTest(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[List[str], None]
"""Executes the waitForTest operation on the server.
Waits for the execution of the specified Quick Test to be completed.
waitForTest(async_operation=bool)list
-------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): This method is synchronous and returns the result of the test.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('waitForTest', payload=payload, response_object=None)
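# Illustrative usage sketch (not part of the generated module). It assumes an
# already-connected ixnetwork_restpy session whose root node exposes this
# resource as ixnetwork.QuickTest.Dhcpv6RateCpf; that container path is an
# assumption and may differ between IxNetwork versions.
def run_dhcpv6_rate_quicktest(ixnetwork):
    quicktest = ixnetwork.QuickTest.Dhcpv6RateCpf.add(Name='dhcpv6 rate', Mode='newMode')
    # Run() is synchronous: it starts the Quick Test and returns the result list.
    return quicktest.Run()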
|
py | b40efd08d02ed3606b9d83825776f4f3a97c2f31 | import PySimpleGUI as sg
import pandas as pd
from more_itertools import grouper
import itertools
sg.SetOptions(element_padding=(0, 0))
"""
Restrict the characters allowed in an input element to digits and . or -
Accomplished by removing last character input if not a valid character
"""
layout = [ [sg.Text('Input value for you would like to convert, please input only numbers')],
[sg.Input(key='-IN-', enable_events=True, size=(10,1))],
[sg.Text('Convert From',pad=(30,10)), sg.Text('Convert to')],
[sg.InputOptionMenu(["Binary","Base 10", "hexadecimal","Octal"],pad=(10,5), key='-from-',size=(15,5)), sg.InputOptionMenu(["Binary","Base 10", "hexadecimal","Octal"],key='-to-',size=(15,30))],
[sg.Button('Submit'),sg.Button('Exit')]
]
window = sg.Window('Bucky', layout)
def bintobase10():
user_input = values["-IN-"]
user_input_string = str(user_input)
for contents in user_input_string:
if contents != '0' and contents != '1':
return sg.popup('Number not binary! Binary numbers are made up of only 1s and 0s ',title='error')
base = 2
running_total = 0
user_input_str = str(user_input)
num_of_digits = len(user_input_str) - 1
for i in user_input_str:
x = int(i)
y = x * (base ** num_of_digits)
running_total += y
num_of_digits-=1
user_message = str(running_total) + ' is the equivalent of ' + user_input_string + ' in base 10 decimal'
return sg.popup(user_message,title='Info')
def base10tobin():
""" this function accepts any number and converts to binary integer"""
user_input = int(values["-IN-"])
base = 2
binary_list = []
while True:
if user_input == 0:
break
else:
divide_result = user_input // base
remainder = user_input % base
binary_list.append(remainder)
user_input = divide_result
reversed_binary_list = binary_list[::-1]
reversed_binary_list_str = list(map(str,reversed_binary_list))
return_string = ''.join(reversed_binary_list_str)
user_message = return_string + ' is the equivalent of ' + values['-IN-'] + ' in base 2.'
return sg.popup(user_message,title='info')
def binarytohex():
user_input = values["-IN-"]
user_input_string = str(user_input)
for contents in user_input_string:
if contents != '0' and contents != '1':
return sg.popup('Number not binary! Binary numbers are made up of only 1s and 0s ',title='error')
df = pd.read_csv('hextable.csv')
t_string = str(user_input)
t_list = list(t_string)
t_list.reverse()
# https://more-itertools.readthedocs.io/en/latest/api.html#more_itertools.grouper
y = list(grouper(t_list, 4, '0'))
x = [tup[::-1] for tup in y]
random_list = []
for i in x:
i_str = "".join(i)
i_num = int(i_str)
f = df.loc[df['Binary'] == i_num, 'Hex']
d = f.values
r = d.tolist()
random_list.append(r)
merged = list(itertools.chain(*random_list))
merged.reverse()
merged_str = ''.join(merged)
merged_str_message = merged_str + ' is the equivalent of ' + values['-IN-'] + ' in base 16.'
return sg.popup(merged_str_message)
def hextobinary():
"""accepts hexadecimal digits and converts to binary. It will first check if the number is hexadecimal"""
base = 16
running_total = 0
user_input_str = str(values["-IN-"])
num_of_digits = len(user_input_str) - 1
for i in user_input_str:
        x = int(i, 16)  # parse each hexadecimal digit (0-9, a-f)
y = x * (base ** num_of_digits)
running_total += y
num_of_digits-=1
user_input = running_total
base = 2
binary_list = []
while True:
if user_input == 0:
break
else:
divide_result = user_input // base
remainder = user_input % base
binary_list.append(remainder)
user_input = divide_result
reversed_binary_list = binary_list[::-1]
reversed_binary_list_str = list(map(str,reversed_binary_list))
return_string = ''.join(reversed_binary_list_str)
user_message = return_string + ' is the equivalent of ' + values['-IN-'] + ' in base 2.'
return sg.popup(user_message,title='info')
def hextobase10():
"""converts hexadecimal digits to base 10. will check for hexadecimal"""
base = 16
running_total = 0
user_input_str = str(values["-IN-"])
num_of_digits = len(user_input_str) - 1
for i in user_input_str:
        x = int(i, 16)  # parse each hexadecimal digit (0-9, a-f)
y = x * (base ** num_of_digits)
running_total += y
num_of_digits-=1
user_message = str(running_total) + ' is the equivalent of ' + user_input_str + ' in base 10 decimal'
return sg.popup(user_message,title='info')
def base10tohex():
""" converts base 10 digits to hexadecimal values"""
user_input = int(values['-IN-'])
base = 16
running_list = []
while True:
if user_input == 0:
break
else:
quotient = user_input // base
remainder = user_input % base
running_list.append(remainder)
user_input = quotient
reversed_list = running_list[::-1]
# dict is necessary inorder to display output in letter format.
some_dict = {10:'A',11:'B',12:'C',13:'D',14:'E',15:'F'}
for contents in reversed_list:
if contents >= 10:
for contents2 in some_dict.keys():
converted_contents = int(contents2)
# converted_contents comes from the dictionary keys
if converted_contents == contents:
where = reversed_list.index(contents)
reversed_list[where] = some_dict[contents]
a = list(map(str,reversed_list))
a_string = ''.join(a)
user_output = a_string + ' is the equivalent of ' + values['-IN-'] + ' in base 16.'
return sg.popup(user_output,title='info')
def Octaltobase10():
""" converts base 8 digits to base 10"""
base = 8
running_total = 0
user_input_str = str(values["-IN-"])
num_of_digits = len(user_input_str) - 1
for i in user_input_str:
x = int(i)
y = x * (base ** num_of_digits)
running_total += y
num_of_digits-=1
user_message = str(running_total) + ' is the equivalent of ' + user_input_str + ' in base 10 decimal'
return sg.popup(user_message,title='info')
def base10toOctal():
"""converts base 10 digits to base 8 digits"""
user_input = int(values["-IN-"])
base = 8
binary_list = []
while True:
if user_input == 0:
break
else:
divide_result = user_input // base
remainder = user_input % base
binary_list.append(remainder)
user_input = divide_result
reversed_binary_list = binary_list[::-1]
reversed_binary_list_str = list(map(str,reversed_binary_list))
return_string = ''.join(reversed_binary_list_str)
user_message = return_string + ' is the equivalent of ' + values['-IN-'] + ' in base 8.'
return sg.popup(user_message,title='info')
def Octaltobinary():
"""converts base 8 digits to base 2 digits"""
base = 8
running_total = 0
user_input_str = str(values["-IN-"])
num_of_digits = len(user_input_str) - 1
for i in user_input_str:
x = int(i)
y = x * (base ** num_of_digits)
running_total += y
num_of_digits-=1
user_input = running_total
base = 2
binary_list = []
while True:
if user_input == 0:
break
else:
divide_result = user_input // base
remainder = user_input % base
binary_list.append(remainder)
user_input = divide_result
reversed_binary_list = binary_list[::-1]
reversed_binary_list_str = list(map(str,reversed_binary_list))
return_string = ''.join(reversed_binary_list_str)
user_message = return_string + ' is the equivalent of ' + values['-IN-'] + ' in base 2.'
return sg.popup(user_message,title='info')
def binarytoOctal():
"""converts base 2 digits to base 8 digits"""
user_input = values["-IN-"]
user_input_string = str(user_input)
for contents in user_input_string:
if contents != '0' and contents != '1':
return sg.popup('Number not binary! Binary numbers are made up of only 1s and 0s ',title='error')
base = 2
running_total = 0
user_input_str = str(user_input)
num_of_digits = len(user_input_str) - 1
for i in user_input_str:
x = int(i)
y = x * (base ** num_of_digits)
running_total += y
num_of_digits-=1
user_input = running_total
base = 8
binary_list = []
while True:
if user_input == 0:
break
else:
divide_result = user_input // base
remainder = user_input % base
binary_list.append(remainder)
user_input = divide_result
reversed_binary_list = binary_list[::-1]
reversed_binary_list_str = list(map(str,reversed_binary_list))
return_string = ''.join(reversed_binary_list_str)
user_message = return_string + ' is the equivalent of ' + values['-IN-'] + ' in base 8.'
return sg.popup(user_message,title='info')
def Octaltohex():
"""converts base 8 digits to base 16 digits"""
base = 8
running_total = 0
user_input_str = str(values["-IN-"])
num_of_digits = len(user_input_str) - 1
for i in user_input_str:
x = int(i)
y = x * (base ** num_of_digits)
running_total += y
num_of_digits-=1
user_input = running_total
base = 16
running_list = []
while True:
if user_input == 0:
break
else:
quotient = user_input // base
remainder = user_input % base
running_list.append(remainder)
user_input = quotient
reversed_list = running_list[::-1]
# dict is necessary inorder to display output in letter format.
some_dict = {10:'A',11:'B',12:'C',13:'D',14:'E',15:'F'}
for contents in reversed_list:
if contents >= 10:
for contents2 in some_dict.keys():
converted_contents = int(contents2)
# converted_contents comes from the dictionary keys
if converted_contents == contents:
where = reversed_list.index(contents)
reversed_list[where] = some_dict[contents]
a = list(map(str,reversed_list))
a_string = ''.join(a)
user_output = a_string + ' is the equivalent of ' + values['-IN-'] + ' in base 16.'
return sg.popup(user_output,title='info')
def hextoOctal():
"""converts base 16 digits to base 8 digits"""
base = 16
running_total = 0
user_input_str = str(values["-IN-"])
num_of_digits = len(user_input_str) - 1
for i in user_input_str:
        x = int(i, 16)  # parse each hexadecimal digit (0-9, a-f)
y = x * (base ** num_of_digits)
running_total += y
num_of_digits-=1
user_input = running_total
base = 8
binary_list = []
while True:
if user_input == 0:
break
else:
divide_result = user_input // base
remainder = user_input % base
binary_list.append(remainder)
user_input = divide_result
reversed_binary_list = binary_list[::-1]
reversed_binary_list_str = list(map(str,reversed_binary_list))
return_string = ''.join(reversed_binary_list_str)
user_message = return_string + ' is the equivalent of ' + values['-IN-'] + ' in base 8.'
return sg.popup(user_message,title='info')
def same():
""" this function just returns, no need for conversion error message if the to and from values are the same"""
return sg.popup('No need for conversion!', title='error')
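def conversion_sanity_check(sample='101101'):
    """Illustrative helper (not part of the original script): the hand-rolled
    positional conversion used by the functions above should agree with
    Python's built-in int(); the sample value is arbitrary."""
    manual = sum(int(digit) * 2 ** power for power, digit in enumerate(reversed(sample)))
    return manual == int(sample, 2)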
while True:
event, values = window.read()
if event in (None, 'Exit'):
break
# if last character in input element is invalid, remove it
if event == '-IN-' and values['-IN-'] and values['-IN-'][-1] not in ('0123456789.-'):
window['-IN-'].update(values['-IN-'][:-1])
    if event == 'Submit':  # ('Submit') is a plain string, so "in" would do a substring check
if not values['-IN-']:
sg.popup("No value supplied",title='error')
elif values['-from-'] == values['-to-'] :
same()
elif values['-from-'] == 'Binary' and values['-to-'] == 'Base 10':
bintobase10()
elif values['-from-'] == 'Base 10' and values['-to-'] == 'Binary':
base10tobin()
elif values['-from-'] == 'Binary' and values['-to-'] == 'hexadecimal':
binarytohex()
elif values['-from-'] == "Base 10" and values["-to-"] == 'hexadecimal':
base10tohex()
elif values['-from-'] == "hexadecimal" and values["-to-"] == 'Base 10':
hextobase10()
elif values['-from-'] == "Octal" and values["-to-"] == 'Base 10':
Octaltobase10()
elif values['-from-'] == "Base 10" and values["-to-"] == 'Octal':
base10toOctal()
elif values['-from-'] == "Octal" and values["-to-"] == 'Binary':
Octaltobinary()
elif values['-from-'] == "Binary" and values["-to-"] == 'Octal':
binarytoOctal()
elif values['-from-'] == "Octal" and values["-to-"] == 'hexadecimal':
Octaltohex()
elif values['-from-'] == "hexadecimal" and values["-to-"] == 'Base 10':
hextobase10()
elif values['-from-'] == "hexadecimal" and values["-to-"] == 'Binary':
hextobinary()
elif values['-from-'] == "hexadecimal" and values["-to-"] == 'Octal':
hextoOctal()
else:
print(values)
window.close()
|
py | b40efd6e885098578a861c7b1bb909eb044d70ba | # coding: utf-8
"""
Python SDK for Opsgenie REST API
Python SDK for Opsgenie REST API # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class AccountInfo(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'name': 'str',
'user_count': 'int',
'plan': 'AccountPlan'
}
attribute_map = {
'name': 'name',
'user_count': 'userCount',
'plan': 'plan'
}
def __init__(self, name=None, user_count=None, plan=None): # noqa: E501
"""AccountInfo - a model defined in OpenAPI""" # noqa: E501
self._name = None
self._user_count = None
self._plan = None
self.discriminator = None
if name is not None:
self.name = name
if user_count is not None:
self.user_count = user_count
if plan is not None:
self.plan = plan
@property
def name(self):
"""Gets the name of this AccountInfo. # noqa: E501
:return: The name of this AccountInfo. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this AccountInfo.
:param name: The name of this AccountInfo. # noqa: E501
:type: str
"""
self._name = name
@property
def user_count(self):
"""Gets the user_count of this AccountInfo. # noqa: E501
:return: The user_count of this AccountInfo. # noqa: E501
:rtype: int
"""
return self._user_count
@user_count.setter
def user_count(self, user_count):
"""Sets the user_count of this AccountInfo.
:param user_count: The user_count of this AccountInfo. # noqa: E501
:type: int
"""
self._user_count = user_count
@property
def plan(self):
"""Gets the plan of this AccountInfo. # noqa: E501
:return: The plan of this AccountInfo. # noqa: E501
:rtype: AccountPlan
"""
return self._plan
@plan.setter
def plan(self, plan):
"""Sets the plan of this AccountInfo.
:param plan: The plan of this AccountInfo. # noqa: E501
:type: AccountPlan
"""
self._plan = plan
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AccountInfo):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
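# Illustrative usage sketch (not part of the generated module).
if __name__ == "__main__":
    info = AccountInfo(name="example-account", user_count=42)
    # to_dict() serialises the model using the openapi_types declared above;
    # the unset plan stays None.
    print(info.to_dict())  # -> {'name': 'example-account', 'user_count': 42, 'plan': None}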
|