ext | sha | content
---|---|---|
py | 1a47738a33ede570a7842015276b1d1db4558c02 | import unittest
from distutils.dist import Distribution
from distutils.errors import DistutilsOptionError
from os import path
from mkdocs.commands import babel
BASE_DIR = path.normpath(path.join(path.abspath(path.dirname(__file__)), '../../'))
class ThemeMixinTests(unittest.TestCase):
def test_dict_entry_point(self):
inst = babel.ThemeMixin()
inst.distribution = Distribution()
inst.distribution.entry_points = {
'mkdocs.themes': [
'mkdocs = mkdocs.themes.mkdocs'
]
}
inst.theme = 'mkdocs'
self.assertEqual(inst.get_theme_dir(), path.join(BASE_DIR, 'mkdocs', 'themes', 'mkdocs'))
def test_ini_entry_point(self):
inst = babel.ThemeMixin()
inst.distribution = Distribution()
inst.distribution.entry_points = '''
[mkdocs.themes]
mkdocs = mkdocs.themes.mkdocs
'''
inst.theme = 'mkdocs'
self.assertEqual(inst.get_theme_dir(), path.join(BASE_DIR, 'mkdocs', 'themes', 'mkdocs'))
def test_one_entry_point_as_default(self):
inst = babel.ThemeMixin()
inst.distribution = Distribution()
inst.distribution.entry_points = {
'mkdocs.themes': [
'mkdocs = mkdocs.themes.mkdocs'
]
}
inst.theme = None
self.assertEqual(inst.get_theme_dir(), path.join(BASE_DIR, 'mkdocs', 'themes', 'mkdocs'))
def test_multiple_entry_points(self):
inst = babel.ThemeMixin()
inst.distribution = Distribution()
inst.distribution.entry_points = {
'mkdocs.themes': [
'mkdocs = mkdocs.themes.mkdocs',
'readthedocs = mkdocs.themes.readthedocs',
]
}
inst.theme = 'readthedocs'
self.assertEqual(inst.get_theme_dir(), path.join(BASE_DIR, 'mkdocs', 'themes', 'readthedocs'))
def test_multiple_entry_points_no_default(self):
inst = babel.ThemeMixin()
inst.distribution = Distribution()
inst.distribution.entry_points = {
'mkdocs.themes': [
'mkdocs = mkdocs.themes.mkdocs',
'readthedocs = mkdocs.themes.readthedocs',
]
}
inst.theme = None
self.assertRaises(DistutilsOptionError, inst.get_theme_dir)
def test_no_entry_points(self):
inst = babel.ThemeMixin()
inst.distribution = Distribution()
inst.distribution.entry_points = {}
inst.theme = 'mkdocs'
self.assertRaises(DistutilsOptionError, inst.get_theme_dir)
def test_undefined_entry_point(self):
inst = babel.ThemeMixin()
inst.distribution = Distribution()
inst.distribution.entry_points = {
'mkdocs.themes': [
'mkdocs = mkdocs.themes.mkdocs'
]
}
inst.theme = 'undefined'
self.assertRaises(DistutilsOptionError, inst.get_theme_dir)
class CommandTests(unittest.TestCase):
def test_compile_catalog(self):
dist = Distribution()
dist.entry_points = '''
[mkdocs.themes]
mkdocs = mkdocs.themes.mkdocs
'''
cmd = babel.compile_catalog(dist)
cmd.initialize_options()
cmd.theme = 'mkdocs'
cmd.finalize_options()
self.assertEqual(cmd.directory, path.join(BASE_DIR, 'mkdocs', 'themes', 'mkdocs/locales'))
def test_compile_catalog_default_theme(self):
dist = Distribution()
dist.entry_points = '''
[mkdocs.themes]
mkdocs = mkdocs.themes.mkdocs
'''
cmd = babel.compile_catalog(dist)
cmd.initialize_options()
self.assertIsNone(cmd.theme)
cmd.finalize_options()
self.assertEqual(cmd.theme, 'mkdocs')
self.assertEqual(cmd.directory, path.join(BASE_DIR, 'mkdocs', 'themes', 'mkdocs/locales'))
def test_compile_catalog_ignore_theme(self):
dist = Distribution()
dist.entry_points = '''
[mkdocs.themes]
mkdocs = mkdocs.themes.mkdocs
'''
cmd = babel.compile_catalog(dist)
cmd.initialize_options()
cmd.theme = 'mkdocs'
cmd.directory = 'foo/bar'
cmd.finalize_options()
self.assertEqual(cmd.directory, 'foo/bar')
def test_extract_messages(self):
dist = Distribution(dict(name='foo', version='1.2'))
dist.entry_points = '''
[mkdocs.themes]
mkdocs = mkdocs.themes.mkdocs
'''
cmd = babel.extract_messages(dist)
cmd.initialize_options()
cmd.theme = 'mkdocs'
cmd.finalize_options()
self.assertEqual(cmd.input_paths, [path.join(BASE_DIR, 'mkdocs', 'themes', 'mkdocs')])
self.assertEqual(cmd.output_file, path.join(BASE_DIR, 'mkdocs', 'themes', 'mkdocs/messages.pot'))
self.assertEqual(cmd.mapping_file, babel.DEFAULT_MAPPING_FILE)
self.assertEqual(cmd.project, 'foo')
self.assertEqual(cmd.version, '1.2')
def test_extract_messages_default_theme(self):
dist = Distribution(dict(name='foo', version='1.2'))
dist.entry_points = '''
[mkdocs.themes]
mkdocs = mkdocs.themes.mkdocs
'''
cmd = babel.extract_messages(dist)
cmd.initialize_options()
self.assertIsNone(cmd.theme)
cmd.finalize_options()
self.assertEqual(cmd.theme, 'mkdocs')
self.assertEqual(cmd.input_paths, [path.join(BASE_DIR, 'mkdocs', 'themes', 'mkdocs')])
self.assertEqual(cmd.output_file, path.join(BASE_DIR, 'mkdocs', 'themes', 'mkdocs/messages.pot'))
    def test_extract_messages_ignore_theme(self):
dist = Distribution(dict(name='foo', version='1.2'))
dist.entry_points = '''
[mkdocs.themes]
mkdocs = mkdocs.themes.mkdocs
'''
cmd = babel.extract_messages(dist)
cmd.initialize_options()
cmd.theme = 'mkdocs'
cmd.input_paths = 'mkdocs/tests'
cmd.output_file = 'foo/bar/messages.pot'
cmd.finalize_options()
self.assertEqual(cmd.input_paths, ['mkdocs/tests'])
self.assertEqual(cmd.output_file, 'foo/bar/messages.pot')
    def test_extract_messages_ignore_theme_for_input(self):
dist = Distribution(dict(name='foo', version='1.2'))
dist.entry_points = '''
[mkdocs.themes]
mkdocs = mkdocs.themes.mkdocs
'''
cmd = babel.extract_messages(dist)
cmd.initialize_options()
cmd.theme = 'mkdocs'
cmd.input_paths = 'mkdocs/tests'
cmd.finalize_options()
self.assertEqual(cmd.input_paths, ['mkdocs/tests'])
self.assertEqual(cmd.output_file, path.join(BASE_DIR, 'mkdocs', 'themes', 'mkdocs/messages.pot'))
    def test_extract_messages_ignore_theme_for_output(self):
dist = Distribution(dict(name='foo', version='1.2'))
dist.entry_points = '''
[mkdocs.themes]
mkdocs = mkdocs.themes.mkdocs
'''
cmd = babel.extract_messages(dist)
cmd.initialize_options()
cmd.theme = 'mkdocs'
cmd.output_file = 'foo/bar/messages.pot'
cmd.finalize_options()
self.assertEqual(cmd.input_paths, [path.join(BASE_DIR, 'mkdocs', 'themes', 'mkdocs')])
self.assertEqual(cmd.output_file, 'foo/bar/messages.pot')
def test_init_catalog(self):
dist = Distribution()
dist.entry_points = '''
[mkdocs.themes]
mkdocs = mkdocs.themes.mkdocs
'''
cmd = babel.init_catalog(dist)
cmd.initialize_options()
cmd.theme = 'mkdocs'
cmd.locale = 'en'
cmd.finalize_options()
self.assertEqual(cmd.input_file, path.join(BASE_DIR, 'mkdocs', 'themes', 'mkdocs/messages.pot'))
self.assertEqual(cmd.output_dir, path.join(BASE_DIR, 'mkdocs', 'themes', 'mkdocs/locales'))
def test_init_catalog_default_theme(self):
dist = Distribution()
dist.entry_points = '''
[mkdocs.themes]
mkdocs = mkdocs.themes.mkdocs
'''
cmd = babel.init_catalog(dist)
cmd.initialize_options()
cmd.locale = 'en'
self.assertIsNone(cmd.theme)
cmd.finalize_options()
self.assertEqual(cmd.theme, 'mkdocs')
self.assertEqual(cmd.input_file, path.join(BASE_DIR, 'mkdocs', 'themes', 'mkdocs/messages.pot'))
self.assertEqual(cmd.output_dir, path.join(BASE_DIR, 'mkdocs', 'themes', 'mkdocs/locales'))
def test_init_catalog_ignore_theme(self):
dist = Distribution()
dist.entry_points = '''
[mkdocs.themes]
mkdocs = mkdocs.themes.mkdocs
'''
cmd = babel.init_catalog(dist)
cmd.initialize_options()
cmd.theme = 'mkdocs'
cmd.locale = 'en'
cmd.input_file = 'mkdocs/themes/mkdocs/messages.pot'
cmd.output_dir = 'foo/bar'
cmd.finalize_options()
self.assertEqual(cmd.input_file, 'mkdocs/themes/mkdocs/messages.pot')
self.assertEqual(cmd.output_dir, 'foo/bar')
def test_init_catalog_ignore_theme_for_input(self):
dist = Distribution()
dist.entry_points = '''
[mkdocs.themes]
mkdocs = mkdocs.themes.mkdocs
'''
cmd = babel.init_catalog(dist)
cmd.initialize_options()
cmd.theme = 'mkdocs'
cmd.locale = 'en'
cmd.input_file = 'mkdocs/themes/mkdocs/messages.pot'
cmd.finalize_options()
self.assertEqual(cmd.input_file, 'mkdocs/themes/mkdocs/messages.pot')
self.assertEqual(cmd.output_dir, path.join(BASE_DIR, 'mkdocs', 'themes', 'mkdocs/locales'))
def test_init_catalog_ignore_theme_for_output(self):
dist = Distribution()
dist.entry_points = '''
[mkdocs.themes]
mkdocs = mkdocs.themes.mkdocs
'''
cmd = babel.init_catalog(dist)
cmd.initialize_options()
cmd.theme = 'mkdocs'
cmd.locale = 'en'
cmd.output_dir = 'foo/bar'
cmd.finalize_options()
self.assertEqual(cmd.input_file, path.join(BASE_DIR, 'mkdocs', 'themes', 'mkdocs/messages.pot'))
self.assertEqual(cmd.output_dir, 'foo/bar')
def test_update_catalog(self):
dist = Distribution()
dist.entry_points = '''
[mkdocs.themes]
mkdocs = mkdocs.themes.mkdocs
'''
cmd = babel.update_catalog(dist)
cmd.initialize_options()
cmd.theme = 'mkdocs'
cmd.finalize_options()
self.assertEqual(cmd.input_file, path.join(BASE_DIR, 'mkdocs', 'themes', 'mkdocs/messages.pot'))
self.assertEqual(cmd.output_dir, path.join(BASE_DIR, 'mkdocs', 'themes', 'mkdocs/locales'))
def test_update_catalog_default_theme(self):
dist = Distribution()
dist.entry_points = '''
[mkdocs.themes]
mkdocs = mkdocs.themes.mkdocs
'''
cmd = babel.update_catalog(dist)
cmd.initialize_options()
cmd.locale = 'en'
self.assertIsNone(cmd.theme)
cmd.finalize_options()
self.assertEqual(cmd.theme, 'mkdocs')
self.assertEqual(cmd.input_file, path.join(BASE_DIR, 'mkdocs', 'themes', 'mkdocs/messages.pot'))
self.assertEqual(cmd.output_dir, path.join(BASE_DIR, 'mkdocs', 'themes', 'mkdocs/locales'))
def test_update_catalog_ignore_theme(self):
dist = Distribution()
dist.entry_points = '''
[mkdocs.themes]
mkdocs = mkdocs.themes.mkdocs
'''
cmd = babel.update_catalog(dist)
cmd.initialize_options()
cmd.theme = 'mkdocs'
cmd.locale = 'en'
cmd.input_file = 'mkdocs/themes/readthedocs/messages.pot'
cmd.output_dir = 'foo/bar'
cmd.finalize_options()
self.assertEqual(cmd.input_file, 'mkdocs/themes/readthedocs/messages.pot')
self.assertEqual(cmd.output_dir, 'foo/bar')
def test_update_catalog_ignore_theme_for_input(self):
dist = Distribution()
dist.entry_points = '''
[mkdocs.themes]
mkdocs = mkdocs.themes.mkdocs
'''
cmd = babel.update_catalog(dist)
cmd.initialize_options()
cmd.theme = 'mkdocs'
cmd.locale = 'en'
cmd.input_file = 'mkdocs/themes/mkdocs/messages.pot'
cmd.finalize_options()
self.assertEqual(cmd.input_file, 'mkdocs/themes/mkdocs/messages.pot')
self.assertEqual(cmd.output_dir, path.join(BASE_DIR, 'mkdocs', 'themes', 'mkdocs/locales'))
def test_update_catalog_ignore_theme_for_output(self):
dist = Distribution()
dist.entry_points = '''
[mkdocs.themes]
mkdocs = mkdocs.themes.mkdocs
'''
cmd = babel.update_catalog(dist)
cmd.initialize_options()
cmd.theme = 'mkdocs'
cmd.locale = 'en'
cmd.output_dir = 'foo/bar'
cmd.finalize_options()
self.assertEqual(cmd.input_file, path.join(BASE_DIR, 'mkdocs', 'themes', 'mkdocs/messages.pot'))
self.assertEqual(cmd.output_dir, 'foo/bar')
|
py | 1a4773d403b5d4bc0c3bda3609484d3cb029ff89 | # Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for kfp.v2.google.client.client_utils."""
import json
import unittest
from unittest import mock
from google.cloud import storage
from kfp.v2.google.client import client_utils
class ClientUtilsTest(unittest.TestCase):
@mock.patch.object(storage, 'Client', autospec=True)
@mock.patch.object(storage.Blob, 'download_as_bytes', autospec=True)
def test_load_json_from_gs_uri(self, mock_download_as_bytes,
unused_storage_client):
mock_download_as_bytes.return_value = b'{"key":"value"}'
self.assertEqual({'key': 'value'},
client_utils.load_json('gs://bucket/path/to/blob'))
@mock.patch('builtins.open', mock.mock_open(read_data='{"key":"value"}'))
def test_load_json_from_local_file(self):
self.assertEqual({'key': 'value'},
client_utils.load_json('/path/to/file'))
@mock.patch.object(storage, 'Client', autospec=True)
def test_load_json_from_gs_uri_with_non_gs_uri_should_fail(
self, unused_storage_client):
with self.assertRaisesRegex(ValueError, 'URI scheme must be gs'):
client_utils._load_json_from_gs_uri(
'https://storage.google.com/bucket/blob')
@mock.patch.object(storage, 'Client', autospec=True)
@mock.patch.object(storage.Blob, 'download_as_bytes', autospec=True)
def test_load_json_from_gs_uri_with_invalid_json_should_fail(
self, mock_download_as_bytes, unused_storage_client):
mock_download_as_bytes.return_value = b'invalid-json'
with self.assertRaises(json.decoder.JSONDecodeError):
client_utils._load_json_from_gs_uri('gs://bucket/path/to/blob')
@mock.patch('builtins.open', mock.mock_open(read_data='invalid-json'))
def test_load_json_from_local_file_with_invalid_json_should_fail(self):
with self.assertRaises(json.decoder.JSONDecodeError):
client_utils._load_json_from_local_file('/path/to/file')
if __name__ == '__main__':
unittest.main()
|
py | 1a4773f7be50baf680a92fd70d3801a5e9a39cda | import psycopg2
import sys
def connect_DB():
#Define our connection string
conn_string = "host='localhost' dbname='my_database' user='postgres' password='secret'"
# print the connection string we will use to connect
    print("Connecting to database\n ->%s" % (conn_string))
# get a connection, if a connect cannot be made an exception will be raised here
conn = psycopg2.connect(conn_string)
# conn.cursor will return a cursor object, you can use this cursor to perform queries
cursor = conn.cursor()
    print("Connected!\n")
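# Illustrative alternative (an addition, not part of the original script and
# not wired into __main__): the same connection string used with context
# managers, which commit the transaction and close the cursor automatically.
# The SELECT below is just a placeholder query.
def connect_and_query():
    conn_string = "host='localhost' dbname='my_database' user='postgres' password='secret'"
    with psycopg2.connect(conn_string) as conn:
        with conn.cursor() as cursor:
            cursor.execute("SELECT version();")
            print(cursor.fetchone())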
if __name__ == "__main__":
connect_DB()
|
py | 1a4774213212d657ead4384b46d0a7c13a5a3ad9 | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import platform
import subprocess
from spack import *
class PyNumpy(PythonPackage):
"""NumPy is the fundamental package for scientific computing with Python.
It contains among other things: a powerful N-dimensional array object,
sophisticated (broadcasting) functions, tools for integrating C/C++ and
Fortran code, and useful linear algebra, Fourier transform, and random
number capabilities"""
homepage = "https://numpy.org/"
pypi = "numpy/numpy-1.19.4.zip"
git = "https://github.com/numpy/numpy.git"
maintainers = ['adamjstewart']
version('main', branch='main')
version('1.22.1', sha256='e348ccf5bc5235fc405ab19d53bec215bb373300e5523c7b476cc0da8a5e9973')
version('1.22.0', sha256='a955e4128ac36797aaffd49ab44ec74a71c11d6938df83b1285492d277db5397')
version('1.21.5', sha256='6a5928bc6241264dce5ed509e66f33676fc97f464e7a919edc672fb5532221ee')
version('1.21.4', sha256='e6c76a87633aa3fa16614b61ccedfae45b91df2767cf097aa9c933932a7ed1e0')
version('1.21.3', sha256='63571bb7897a584ca3249c86dd01c10bcb5fe4296e3568b2e9c1a55356b6410e')
version('1.21.2', sha256='423216d8afc5923b15df86037c6053bf030d15cc9e3224206ef868c2d63dd6dc')
version('1.21.1', sha256='dff4af63638afcc57a3dfb9e4b26d434a7a602d225b42d746ea7fe2edf1342fd')
version('1.21.0', sha256='e80fe25cba41c124d04c662f33f6364909b985f2eb5998aaa5ae4b9587242cce')
version('1.20.3', sha256='e55185e51b18d788e49fe8305fd73ef4470596b33fc2c1ceb304566b99c71a69')
version('1.20.2', sha256='878922bf5ad7550aa044aa9301d417e2d3ae50f0f577de92051d739ac6096cee')
version('1.20.1', sha256='3bc63486a870294683980d76ec1e3efc786295ae00128f9ea38e2c6e74d5a60a')
version('1.20.0', sha256='3d8233c03f116d068d5365fed4477f2947c7229582dad81e5953088989294cec')
version('1.19.5', sha256='a76f502430dd98d7546e1ea2250a7360c065a5fdea52b2dffe8ae7180909b6f4')
version('1.19.4', sha256='141ec3a3300ab89c7f2b0775289954d193cc8edb621ea05f99db9cb181530512')
version('1.19.3', sha256='35bf5316af8dc7c7db1ad45bec603e5fb28671beb98ebd1d65e8059efcfd3b72')
version('1.19.2', sha256='0d310730e1e793527065ad7dde736197b705d0e4c9999775f212b03c44a8484c')
version('1.19.1', sha256='b8456987b637232602ceb4d663cb34106f7eb780e247d51a260b84760fd8f491')
version('1.19.0', sha256='76766cc80d6128750075378d3bb7812cf146415bd29b588616f72c943c00d598')
version('1.18.5', sha256='34e96e9dae65c4839bd80012023aadd6ee2ccb73ce7fdf3074c62f301e63120b')
version('1.18.4', sha256='bbcc85aaf4cd84ba057decaead058f43191cc0e30d6bc5d44fe336dc3d3f4509')
version('1.18.3', sha256='e46e2384209c91996d5ec16744234d1c906ab79a701ce1a26155c9ec890b8dc8')
version('1.18.2', sha256='e7894793e6e8540dbeac77c87b489e331947813511108ae097f1715c018b8f3d')
version('1.18.1', sha256='b6ff59cee96b454516e47e7721098e6ceebef435e3e21ac2d6c3b8b02628eb77')
version('1.18.0', sha256='a9d72d9abaf65628f0f31bbb573b7d9304e43b1e6bbae43149c17737a42764c4')
version('1.17.5', sha256='16507ba6617f62ae3c6ab1725ae6f550331025d4d9a369b83f6d5a470446c342')
version('1.17.4', sha256='f58913e9227400f1395c7b800503ebfdb0772f1c33ff8cb4d6451c06cabdf316')
version('1.17.3', sha256='a0678793096205a4d784bd99f32803ba8100f639cf3b932dc63b21621390ea7e')
version('1.17.2', sha256='73615d3edc84dd7c4aeb212fa3748fb83217e00d201875a47327f55363cef2df')
version('1.17.1', sha256='f11331530f0eff69a758d62c2461cd98cdc2eae0147279d8fc86e0464eb7e8ca')
version('1.17.0', sha256='951fefe2fb73f84c620bec4e001e80a80ddaa1b84dce244ded7f1e0cbe0ed34a')
version('1.16.6', sha256='e5cf3fdf13401885e8eea8170624ec96225e2174eb0c611c6f26dd33b489e3ff')
version('1.16.5', sha256='8bb452d94e964b312205b0de1238dd7209da452343653ab214b5d681780e7a0c')
version('1.16.4', sha256='7242be12a58fec245ee9734e625964b97cf7e3f2f7d016603f9e56660ce479c7')
version('1.16.3', sha256='78a6f89da87eeb48014ec652a65c4ffde370c036d780a995edaeb121d3625621')
version('1.16.2', sha256='6c692e3879dde0b67a9dc78f9bfb6f61c666b4562fd8619632d7043fb5b691b0')
version('1.16.1', sha256='31d3fe5b673e99d33d70cfee2ea8fe8dccd60f265c3ed990873a88647e3dd288')
version('1.16.0', sha256='cb189bd98b2e7ac02df389b6212846ab20661f4bafe16b5a70a6f1728c1cc7cb')
version('1.15.4', sha256='3d734559db35aa3697dadcea492a423118c5c55d176da2f3be9c98d4803fc2a7')
version('1.15.3', sha256='1c0c80e74759fa4942298044274f2c11b08c86230b25b8b819e55e644f5ff2b6')
version('1.15.2', sha256='27a0d018f608a3fe34ac5e2b876f4c23c47e38295c47dd0775cc294cd2614bc1')
version('1.15.1', sha256='7b9e37f194f8bcdca8e9e6af92e2cbad79e360542effc2dd6b98d63955d8d8a3')
version('1.15.0', sha256='f28e73cf18d37a413f7d5de35d024e6b98f14566a10d82100f9dc491a7d449f9')
version('1.14.6', sha256='1250edf6f6c43e1d7823f0967416bc18258bb271dc536298eb0ea00a9e45b80a')
version('1.14.5', sha256='a4a433b3a264dbc9aa9c7c241e87c0358a503ea6394f8737df1683c7c9a102ac')
version('1.14.4', sha256='2185a0f31ecaa0792264fa968c8e0ba6d96acf144b26e2e1d1cd5b77fc11a691')
version('1.14.3', sha256='9016692c7d390f9d378fc88b7a799dc9caa7eb938163dda5276d3f3d6f75debf')
version('1.14.2', sha256='facc6f925c3099ac01a1f03758100772560a0b020fb9d70f210404be08006bcb')
version('1.14.1', sha256='fa0944650d5d3fb95869eaacd8eedbd2d83610c85e271bd9d3495ffa9bc4dc9c')
version('1.14.0', sha256='3de643935b212307b420248018323a44ec51987a336d1d747c1322afc3c099fb')
version('1.13.3', sha256='36ee86d5adbabc4fa2643a073f93d5504bdfed37a149a3a49f4dde259f35a750')
version('1.13.1', sha256='c9b0283776085cb2804efff73e9955ca279ba4edafd58d3ead70b61d209c4fbb')
version('1.13.0', sha256='dcff367b725586830ff0e20b805c7654c876c2d4585c0834a6049502b9d6cf7e')
version('1.12.1', sha256='a65266a4ad6ec8936a1bc85ce51f8600634a31a258b722c9274a80ff189d9542')
version('1.12.0', sha256='ff320ecfe41c6581c8981dce892fe6d7e69806459a899e294e4bf8229737b154')
version('1.11.3', sha256='2e0fc5248246a64628656fe14fcab0a959741a2820e003bd15538226501b82f7')
version('1.11.2', sha256='c1ed4d1d2a795409c7df1eb4bfee65c0e3326cfc7c57875fa39e5c7414116d9a')
version('1.11.1', sha256='4e9c289b9d764d10353a224a5286dda3e0425b13b112719bdc3e9864ae648d79')
version('1.11.0', sha256='9109f260850627e4b83a3c4bcef4f2f99357eb4a5eaae75dec51c32f3c197aa3')
version('1.10.4', sha256='8ce443dc79656a9fc97a7837f1444d324aef2c9b53f31f83441f57ad1f1f3659')
version('1.9.3', sha256='baa074bb1c7f9c822122fb81459b7caa5fc49267ca94cca69465c8dcfd63ac79')
version('1.9.2', sha256='e37805754f4ebb575c434d134f6bebb8b857d9843c393f6943c7be71ef57311c')
version('1.9.1', sha256='2a545c0d096d86035b12160fcba5e4c0a08dcabbf902b4f867eb64deb31a2b7a')
variant('blas', default=True, description='Build with BLAS support')
variant('lapack', default=True, description='Build with LAPACK support')
depends_on('[email protected]:2.8,3.4:', type=('build', 'link', 'run'), when='@:1.15')
depends_on('[email protected]:2.8,3.5:', type=('build', 'link', 'run'), when='@1.16')
depends_on('[email protected]:', type=('build', 'link', 'run'), when='@1.17:1.18')
depends_on('[email protected]:', type=('build', 'link', 'run'), when='@1.19')
depends_on('[email protected]:', type=('build', 'link', 'run'), when='@1.20:1.21.1')
depends_on('[email protected]:3.10', type=('build', 'link', 'run'), when='@1.21.2:1.21')
depends_on('[email protected]:', type=('build', 'link', 'run'), when='@1.22:')
depends_on('py-setuptools', type=('build', 'run'))
# Check pyproject.toml for updates to the required cython version
depends_on('[email protected]:2', when='@1.18.0:', type='build')
depends_on('[email protected]:2', when='@1.18.1:', type='build')
depends_on('[email protected]:2', when='@1.19.1:', type='build')
depends_on('[email protected]:2', when='@1.21.2:', type='build')
depends_on('blas', when='+blas')
depends_on('lapack', when='+lapack')
depends_on('[email protected]:', when='@:1.14', type='test')
depends_on('py-pytest', when='@1.15:', type='test')
depends_on('py-hypothesis', when='@1.19:', type='test')
# Allows you to specify order of BLAS/LAPACK preference
# https://github.com/numpy/numpy/pull/13132
patch('blas-lapack-order.patch', when='@1.15:1.16')
# Add Fujitsu Fortran compiler
patch('add_fj_compiler.patch', when='@1.19.3:1.19.5%fj')
patch('add_fj_compiler2.patch', when='@1.19.0:1.19.2%fj')
patch('add_fj_compiler3.patch', when='@1.14.0:1.18.5%fj')
patch('add_fj_compiler4.patch', when='@:1.13.3%fj')
patch('check_executables.patch', when='@1.20.0:')
patch('check_executables2.patch', when='@1.19.0:1.19.5')
patch('check_executables3.patch', when='@1.16.0:1.18.5')
patch('check_executables4.patch', when='@1.14.0:1.15.4')
patch('check_executables5.patch', when='@:1.13.3')
    # version 1.21.0 runs into an infinite loop during printing
    # (e.g. print(numpy.ones(1000))) when compiled with gcc 11
conflicts('%gcc@11:', when='@1.21.0')
# GCC 4.8 is the minimum version that works
conflicts('%gcc@:4.7', msg='GCC 4.8+ required')
# NVHPC support added in https://github.com/numpy/numpy/pull/17344
conflicts('%nvhpc', when='@:1.19')
def flag_handler(self, name, flags):
# -std=c99 at least required, old versions of GCC default to -std=c90
if self.spec.satisfies('%gcc@:5.1') and name == 'cflags':
flags.append(self.compiler.c99_flag)
# Check gcc version in use by intel compiler
# This will essentially check the system gcc compiler unless a gcc
# module is already loaded.
if self.spec.satisfies('%intel') and name == 'cflags':
p1 = subprocess.Popen(
[self.compiler.cc, '-v'],
stderr=subprocess.PIPE
)
p2 = subprocess.Popen(
['grep', 'compatibility'],
stdin=p1.stderr,
stdout=subprocess.PIPE
)
p1.stderr.close()
out, err = p2.communicate()
gcc_version = Version(out.split()[5].decode('utf-8'))
if gcc_version < Version('4.8'):
raise InstallError('The GCC version that the Intel compiler '
'uses must be >= 4.8. The GCC in use is '
'{0}'.format(gcc_version))
if gcc_version <= Version('5.1'):
flags.append(self.compiler.c99_flag)
return (flags, None, None)
@run_before('install')
def set_blas_lapack(self):
# https://numpy.org/devdocs/user/building.html
# https://github.com/numpy/numpy/blob/master/site.cfg.example
# Skip if no BLAS/LAPACK requested
spec = self.spec
if '+blas' not in spec and '+lapack' not in spec:
return
def write_library_dirs(f, dirs):
f.write('library_dirs = {0}\n'.format(dirs))
if not ((platform.system() == 'Darwin') and
(Version(platform.mac_ver()[0]).up_to(2) == Version(
'10.12'))):
f.write('rpath = {0}\n'.format(dirs))
blas_libs = LibraryList([])
blas_headers = HeaderList([])
if '+blas' in spec:
blas_libs = spec['blas'].libs
blas_headers = spec['blas'].headers
lapack_libs = LibraryList([])
lapack_headers = HeaderList([])
if '+lapack' in spec:
lapack_libs = spec['lapack'].libs
lapack_headers = spec['lapack'].headers
lapackblas_libs = lapack_libs + blas_libs
lapackblas_headers = lapack_headers + blas_headers
blas_lib_names = ','.join(blas_libs.names)
blas_lib_dirs = ':'.join(blas_libs.directories)
blas_header_dirs = ':'.join(blas_headers.directories)
lapack_lib_names = ','.join(lapack_libs.names)
lapack_lib_dirs = ':'.join(lapack_libs.directories)
lapack_header_dirs = ':'.join(lapack_headers.directories)
lapackblas_lib_names = ','.join(lapackblas_libs.names)
lapackblas_lib_dirs = ':'.join(lapackblas_libs.directories)
lapackblas_header_dirs = ':'.join(lapackblas_headers.directories)
# Tell numpy where to find BLAS/LAPACK libraries
with open('site.cfg', 'w') as f:
if '^intel-mkl' in spec or \
               '^intel-parallel-studio+mkl' in spec or \
'^intel-oneapi-mkl' in spec:
f.write('[mkl]\n')
# FIXME: as of @1.11.2, numpy does not work with separately
# specified threading and interface layers. A workaround is a
# terribly bad idea to use mkl_rt. In this case Spack will no
# longer be able to guarantee that one and the same variant of
# Blas/Lapack (32/64bit, threaded/serial) is used within the
# DAG. This may lead to a lot of hard-to-debug segmentation
# faults on user's side. Users may also break working
# installation by (unconsciously) setting environment variable
# to switch between different interface and threading layers
# dynamically. From this perspective it is no different from
# throwing away RPATH's and using LD_LIBRARY_PATH throughout
# Spack.
f.write('libraries = {0}\n'.format('mkl_rt'))
write_library_dirs(f, lapackblas_lib_dirs)
f.write('include_dirs = {0}\n'.format(lapackblas_header_dirs))
if '^blis' in spec:
f.write('[blis]\n')
f.write('libraries = {0}\n'.format(blas_lib_names))
write_library_dirs(f, blas_lib_dirs)
f.write('include_dirs = {0}\n'.format(blas_header_dirs))
if '^openblas' in spec:
f.write('[openblas]\n')
f.write('libraries = {0}\n'.format(lapackblas_lib_names))
write_library_dirs(f, lapackblas_lib_dirs)
f.write('include_dirs = {0}\n'.format(lapackblas_header_dirs))
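            # For illustration only (assuming a typical '^openblas' build), the
            # section written above ends up looking roughly like:
            #   [openblas]
            #   libraries = openblas
            #   library_dirs = <openblas prefix>/lib
            #   rpath = <openblas prefix>/lib
            #   include_dirs = <openblas prefix>/include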
if '^libflame' in spec:
f.write('[flame]\n')
f.write('libraries = {0}\n'.format(lapack_lib_names))
write_library_dirs(f, lapack_lib_dirs)
f.write('include_dirs = {0}\n'.format(lapack_header_dirs))
if '^atlas' in spec:
f.write('[atlas]\n')
f.write('libraries = {0}\n'.format(lapackblas_lib_names))
write_library_dirs(f, lapackblas_lib_dirs)
f.write('include_dirs = {0}\n'.format(lapackblas_header_dirs))
if '^veclibfort' in spec:
f.write('[accelerate]\n')
f.write('libraries = {0}\n'.format(lapackblas_lib_names))
write_library_dirs(f, lapackblas_lib_dirs)
if '^netlib-lapack' in spec:
# netlib requires blas and lapack listed
# separately so that scipy can find them
if spec.satisfies('+blas'):
f.write('[blas]\n')
f.write('libraries = {0}\n'.format(blas_lib_names))
write_library_dirs(f, blas_lib_dirs)
f.write('include_dirs = {0}\n'.format(blas_header_dirs))
if spec.satisfies('+lapack'):
f.write('[lapack]\n')
f.write('libraries = {0}\n'.format(lapack_lib_names))
write_library_dirs(f, lapack_lib_dirs)
f.write('include_dirs = {0}\n'.format(lapack_header_dirs))
if '^fujitsu-ssl2' in spec:
if spec.satisfies('+blas'):
f.write('[blas]\n')
f.write('libraries = {0}\n'.format(spec['blas'].libs.names[0]))
write_library_dirs(f, blas_lib_dirs)
f.write('include_dirs = {0}\n'.format(blas_header_dirs))
f.write(
"extra_link_args = {0}\n".format(
self.spec["blas"].libs.ld_flags
)
)
if spec.satisfies('+lapack'):
f.write('[lapack]\n')
f.write('libraries = {0}\n'.format(spec['lapack'].libs.names[0]))
write_library_dirs(f, lapack_lib_dirs)
f.write('include_dirs = {0}\n'.format(lapack_header_dirs))
f.write(
"extra_link_args = {0}\n".format(
self.spec["lapack"].libs.ld_flags
)
)
def setup_build_environment(self, env):
# Tell numpy which BLAS/LAPACK libraries we want to use.
# https://github.com/numpy/numpy/pull/13132
# https://numpy.org/devdocs/user/building.html#accelerated-blas-lapack-libraries
spec = self.spec
# https://numpy.org/devdocs/user/building.html#blas
if 'blas' not in spec:
blas = ''
elif spec['blas'].name == 'intel-mkl' or \
spec['blas'].name == 'intel-parallel-studio' or \
spec['blas'].name == 'intel-oneapi-mkl':
blas = 'mkl'
elif spec['blas'].name == 'blis':
blas = 'blis'
elif spec['blas'].name == 'openblas':
blas = 'openblas'
elif spec['blas'].name == 'atlas':
blas = 'atlas'
elif spec['blas'].name == 'veclibfort':
blas = 'accelerate'
else:
blas = 'blas'
env.set('NPY_BLAS_ORDER', blas)
# https://numpy.org/devdocs/user/building.html#lapack
if 'lapack' not in spec:
lapack = ''
elif spec['lapack'].name == 'intel-mkl' or \
spec['lapack'].name == 'intel-parallel-studio' or \
spec['lapack'].name == 'intel-oneapi-mkl':
lapack = 'mkl'
elif spec['lapack'].name == 'openblas':
lapack = 'openblas'
elif spec['lapack'].name == 'libflame':
lapack = 'flame'
elif spec['lapack'].name == 'atlas':
lapack = 'atlas'
elif spec['lapack'].name == 'veclibfort':
lapack = 'accelerate'
else:
lapack = 'lapack'
env.set('NPY_LAPACK_ORDER', lapack)
def install_options(self, spec, prefix):
args = []
# From NumPy 1.10.0 on it's possible to do a parallel build.
# https://numpy.org/devdocs/user/building.html#parallel-builds
if self.version >= Version('1.10.0'):
# But Parallel build in Python 3.5+ is broken. See:
# https://github.com/spack/spack/issues/7927
# https://github.com/scipy/scipy/issues/7112
if spec['python'].version < Version('3.5'):
args = ['-j', str(make_jobs)]
return args
@run_after('install')
@on_package_attributes(run_tests=True)
def install_test(self):
with working_dir('spack-test', create=True):
python('-c', 'import numpy; numpy.test("full", verbose=2)')
|
py | 1a47753b7eacd1d93104c200cfb4c0879c5b0c06 | import numpy as np
def kalman_xy(x, P, measurement, R,
motion = np.matrix('0. 0. 0. 0.').T,
Q = np.matrix(np.eye(4))):
"""
Parameters:
x: initial state 4-tuple of location and velocity: (x0, x1, x0_dot, x1_dot)
    P: initial uncertainty covariance matrix
measurement: observed position
R: measurement noise
motion: external motion added to state vector x
Q: motion noise (same shape as P)
"""
return kalman(x, P, measurement, R, motion, Q,
F = np.matrix('''
1. 0. 1. 0.;
0. 1. 0. 1.;
0. 0. 1. 0.;
0. 0. 0. 1.
'''),
H = np.matrix('''
1. 0. 0. 0.;
0. 1. 0. 0.'''))
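# Illustrative usage sketch (an addition, not part of the original snippet;
# it only runs if called explicitly): feed noisy observations of a
# straight-line trajectory through kalman_xy and print the filtered position
# estimates. The noise levels and path are arbitrary demonstration choices.
def demo_kalman_xy():
    x = np.matrix('0. 0. 0. 0.').T   # initial state: position (0, 0), zero velocity
    P = np.matrix(np.eye(4)) * 1000  # large initial uncertainty
    R = 0.01 ** 2                    # measurement noise
    rng = np.random.RandomState(0)
    for t in range(10):
        true_pos = (t, 0.5 * t)
        measurement = (true_pos[0] + 0.1 * rng.randn(),
                       true_pos[1] + 0.1 * rng.randn())
        x, P = kalman_xy(x, P, measurement, R)
        print('estimate: (%.2f, %.2f)' % (x[0, 0], x[1, 0]))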
def kalman(x, P, measurement, R, motion, Q, F, H):
'''
Parameters:
x: initial state
    P: initial uncertainty covariance matrix
measurement: observed position (same shape as H*x)
R: measurement noise (same shape as H)
motion: external motion added to state vector x
Q: motion noise (same shape as P)
F: next state function: x_prime = F*x
H: measurement function: position = H*x
Return: the updated and predicted new values for (x, P)
See also http://en.wikipedia.org/wiki/Kalman_filter
This version of kalman can be applied to many different situations by
appropriately defining F and H
'''
# UPDATE x, P based on measurement m
# distance between measured and current position-belief
y = np.matrix(measurement).T - H * x
    S = H * P * H.T + R  # residual covariance
K = P * H.T * S.I # Kalman gain
x = x + K*y
I = np.matrix(np.eye(F.shape[0])) # identity matrix
P = (I - K*H)*P
# PREDICT x, P based on motion
x = F*x + motion
P = F*P*F.T + Q
    return x, P
|
py | 1a47771d36debe8d48df3991b973adee1cfc4444 | from __future__ import division
import dolfin as df
import numpy as np
import logging
import os
import scipy.sparse.linalg
from time import time
from finmag.util import helpers
from finmag.util.meshes import embed3d
from itertools import izip
from math import pi
from finmag.field import Field
logger = logging.getLogger('finmag')
# Matrix-vector or Matrix-matrix product
def _mult_one(a, b):
# a and b are ?x?xn arrays where ? = 1..3
assert len(a.shape) == 3
assert len(b.shape) == 3
assert a.shape[2] == b.shape[2]
assert a.shape[1] == b.shape[0]
assert a.shape[0] <= 3 and a.shape[1] <= 3
assert b.shape[0] <= 3 and b.shape[1] <= 3
# One of the arrays might be complex, so we determine the type
# of the resulting array by adding two elements of the argument arrays
res = np.zeros(
(a.shape[0], b.shape[1], a.shape[2]), dtype=type(a[0, 0, 0] + b[0, 0, 0]))
for i in xrange(res.shape[0]):
for j in xrange(res.shape[1]):
for k in xrange(a.shape[1]):
res[i, j, :] += a[i, k, :] * b[k, j, :]
return res
# Returns the componentwise matrix product of the supplied matrix fields
def mf_mult(*args):
if len(args) < 2:
raise Exception("mult requires at least 2 arguments")
res = args[0]
for i in xrange(1, len(args)):
res = _mult_one(res, args[i])
return res
# Transposes the mxk matrix to a kxm matrix
def mf_transpose(a):
return np.transpose(a, [1, 0, 2])
# Computes the componentwise cross product of a vector field a
# and a vector or vector field b
def mf_cross(a, b):
assert a.shape == (3, 1, a.shape[2])
res = np.empty(a.shape, dtype=a.dtype)
res[0] = a[1] * b[2] - a[2] * b[1]
res[1] = a[2] * b[0] - a[0] * b[2]
res[2] = a[0] * b[1] - a[1] * b[0]
return res
# Normalises the 3d vector m
def mf_normalise(m):
assert m.shape == (3, 1, m.shape[2])
return m / np.sqrt(m[0] * m[0] + m[1] * m[1] + m[2] * m[2])
# Set up the basis for the tangential space and the corresponding
# projection operator
def compute_tangential_space_basis(m0):
assert m0.ndim == 3
n = m0.shape[2]
assert m0.shape == (3, 1, n)
# Set up a field of vectors m_perp that are perpendicular to m0
# Start with e_z and compute e_z x m
m_perp = mf_cross(m0, [0., 0., -1.])
# In case m || e_z, add a tiny component in e_y
m_perp[1] += 1e-100
# Normalise and compute the cross product with m0 again
m_perp = mf_cross(mf_normalise(m_perp), m0)
m_perp = mf_normalise(m_perp)
# The basis in the 3d space is ((m_perp x m0) x m0, m_perp x m0, m0)
R = np.zeros((3, 3, n))
R[:, 2, :] = m0[:, 0, :]
R[:, 1, :] = m_perp[:, 0, :]
R[:, 0, :] = mf_cross(m_perp, m0)[:, 0, :]
# Matrix for the injection from 2n to 3n (3x2)
S = np.zeros((3, 2, n))
S[0, 0, :] = 1.
S[1, 1, :] = 1.
# Matrix for the projection from 3n to 2n is transpose(S)
# Matrix for the cross product m0 x in the 2n space
Mcross = np.zeros((2, 2, n))
Mcross[0, 1, :] = -1
Mcross[1, 0, :] = 1
# The relationship between the 3d tangential vector v
# and the 2d vector w is
# v = (R S) w
# w = (R S)^t v
Q = mf_mult(R, S)
return Q, R, S, Mcross
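# Small self-check sketch (an addition for illustration, not part of the
# original module): for a random unit magnetisation field, the columns of Q
# should be orthonormal and perpendicular to m0 at every node.
def _check_tangential_space_basis(n=5):
    rng = np.random.RandomState(1)
    m0 = mf_normalise(rng.randn(3, 1, n))
    Q, R, S, Mcross = compute_tangential_space_basis(m0)
    for k in range(n):
        Qk = Q[:, :, k]  # 3x2 basis of the tangent plane at node k
        assert np.allclose(Qk.T.dot(Qk), np.eye(2), atol=1e-10)
        assert np.allclose(m0[:, 0, k].dot(Qk), 0.0, atol=1e-10)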
def differentiate_fd4(f, x, dx):
"""
Compute and return a fourth-order approximation to the directional
derivative of `f` at the point `x` in the direction of `dx`.
"""
x_sq = np.dot(x, x)
dx_sq = np.dot(dx, dx)
h = 0.001 * np.sqrt(x_sq + dx_sq) / np.sqrt(dx_sq + 1e-50)
# weights: 1. / 12., -2. / 3., 2. / 3., -1. / 12.
# coefficients: -2., -1., 1., 2.
res = (1. / 12. / h) * f(x - 2 * h * dx)
res += (-2. / 3. / h) * f(x - h * dx)
res += (2. / 3. / h) * f(x + h * dx)
res += (-1. / 12. / h) * f(x + 2 * h * dx)
return res
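# Quick sanity sketch (an addition for illustration, not part of the original
# module): for a linear map f(x) = A.x the fourth-order stencil recovers the
# exact directional derivative A.dx up to rounding error.
def _check_differentiate_fd4():
    A = np.array([[2., 1.], [0., 3.]])
    f = lambda v: A.dot(v)
    x = np.array([1., 2.])
    dx = np.array([0.5, -1.])
    assert np.allclose(differentiate_fd4(f, x, dx), A.dot(dx), atol=1e-8)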
def compute_eigenproblem_matrix(sim, frequency_unit=1e9, filename=None, differentiate_H_numerically=True, dtype=complex):
"""
Compute and return the square matrix `D` defining the eigenproblem which
has the normal mode frequencies and oscillation patterns as its solution.
Note that `sim` needs to be in a relaxed state, otherwise the results will
be wrong.
"""
# Create the helper simulation which we use to compute
# the effective field for various values of m.
#Ms = sim.Ms
#A = sim.get_interaction('Exchange').A
#unit_length = sim.unit_length
# try:
# sim.get_interaction('Demag')
# demag_solver = 'FK'
# except ValueError:
# demag_solver = None
#sim_aux = sim_with(sim.mesh, Ms=Ms, m_init=[1, 0, 0], A=A, unit_length=unit_length, demag_solver=demag_solver)
# In order to compute the derivative of the effective field, the magnetisation needs to be set
# to many different values. Thus we store a backup so that we can restore
# it later.
m_orig = sim.m
def effective_field_for_m(m, normalise=True):
if np.iscomplexobj(m):
raise NotImplementedError(
"XXX TODO: Implement the version for complex arrays!")
sim.set_m(m, normalise=normalise, debug=False)
return sim.effective_field()
# N is the number of degrees of freedom of the magnetisation vector.
# It may be smaller than the number of mesh nodes if we are using
# periodic boundary conditions.
N = sim.llg.S3.dim()
n = N // 3
assert (N == 3 * n)
m0_array = sim.m.copy()
# this corresponds to the vector 'm0_flat' in Simlib
m0_3xn = m0_array.reshape(3, n)
m0_column_vector = m0_array.reshape(3, 1, n)
H0_array = effective_field_for_m(m0_array)
H0_3xn = H0_array.reshape(3, n)
h0 = H0_3xn[0] * m0_3xn[0] + H0_3xn[1] * m0_3xn[1] + H0_3xn[2] * m0_3xn[2]
logger.debug(
"Computing basis of the tangent space and transition matrices.")
Q, R, S, Mcross = compute_tangential_space_basis(m0_column_vector)
Qt = mf_transpose(Q).copy()
# Returns the product of the linearised llg times vector
def linearised_llg_times_vector(v):
assert v.shape == (3, 1, n)
# The linearised equation is
# dv/dt = - gamma m0 x (H' v - h_0 v)
v_array = v.view()
v_array.shape = (-1,)
# Compute H'(m_0)*v, i.e. the "directional derivative" of H at
# m_0 in the direction of v. Since H is linear in m (at least
# theoretically, although this is not quite true in the case
# of our demag computation), this is the same as H(v)!
if differentiate_H_numerically:
res = differentiate_fd4(effective_field_for_m, m0_array, v_array)
else:
res = effective_field_for_m(v_array, normalise=False)
res.shape = (3, -1)
# Subtract h0 v
res[0] -= h0 * v[0, 0]
res[1] -= h0 * v[1, 0]
res[2] -= h0 * v[2, 0]
# Multiply by -gamma m0x
res *= sim.gamma
res.shape = (3, 1, -1)
# Put res on the left in case v is complex
res = mf_cross(res, m0_column_vector)
return res
# The linearised equation in the tangential basis
def linearised_llg_times_tangential_vector(w):
w = w.view()
w.shape = (2, 1, n)
# Go to the 3d space
v = mf_mult(Q, w)
# Compute the linearised llg
L = linearised_llg_times_vector(v)
# Go back to 2d space
res = np.empty(w.shape, dtype=dtype)
res[:] = mf_mult(Qt, L)
if dtype == complex:
# Multiply by -i/(2*pi*U) so that we get frequencies as the real
# part of eigenvalues
res *= -1j / (2 * pi * frequency_unit)
else:
# This will yield imaginary eigenvalues, but we divide by 1j in the
# calling routine.
res *= 1. / (2 * pi * frequency_unit)
res.shape = (-1,)
return res
df.tic()
logger.info("Assembling eigenproblem matrix.")
D = np.zeros((2 * n, 2 * n), dtype=dtype)
logger.debug("Eigenproblem matrix D will occupy {:.2f} MB of memory.".format(
D.nbytes / 1024. ** 2))
for i, w in enumerate(np.eye(2 * n)):
if i % 50 == 0:
t_cur = df.toc()
completion_info = '' if (i == 0) else ', estimated remaining time: {}'.format(
helpers.format_time(t_cur * (2 * n / i - 1)))
logger.debug("Processing row {}/{} (time elapsed: {}{})".format(i,
2 * n, helpers.format_time(t_cur), completion_info))
D[:, i] = linearised_llg_times_tangential_vector(w)
logger.debug("Eigenproblem matrix D occupies {:.2f} MB of memory.".format(
D.nbytes / 1024. ** 2))
logger.info("Finished assembling eigenproblem matrix.")
if filename != None:
logger.info("Saving eigenproblem matrix to file '{}'".format(filename))
np.save(filename, D)
# Restore the original magnetisation.
# XXX TODO: Is this method safe, or does it leave any trace of the
# temporary changes we did above?
sim.set_m(m_orig)
return D
# We use the following class (which behaves like a function due to its
# __call__ method) instead of a simple lambda expression because it is
# pickleable, which is needed if we want to cache computation results.
#
# XXX TODO: lambda expressions can be pickled with the 'dill' module,
# so we should probably get rid of this.
class M_times_w(object):
def __init__(self, Mcross, n, alpha=0.):
self.Mcross = Mcross
self.n = n
self.alpha = alpha
def __call__(self, w):
w = w.view()
w.shape = (2, 1, self.n)
res = -1j * mf_mult(self.Mcross, w)
if self.alpha != 0.:
res += -1j * self.alpha * w
res.shape = (-1,)
return res
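# Illustration (an addition, not from the original module): unlike a lambda,
# an M_times_w instance survives a standard pickle round-trip, which is what
# makes caching of computation results possible, e.g.
#   op = M_times_w(Mcross, n)
#   op2 = pickle.loads(pickle.dumps(op))   # a lambda here would raise instead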
class NotImplementedOp(object):
def __call__(self, w):
raise NotImplementedError("rmatvec is not implemented")
def is_hermitian(A, atol=1e-8, rtol=1e-12):
"""
Returns True if the matrix `A` is Hermitian (up to the given
tolerance) and False otherwise.
The arguments `atol` and `rtol` have the same meaning as in
`numpy.allclose`.
"""
if isinstance(A, np.ndarray):
# Note: just using an absolute tolerance and checking for
# the maximum difference is about twice as efficient, so
# maybe we should avoid the relative tolerance in the future.
return np.allclose(A, np.conj(A.T), atol=atol, rtol=rtol)
elif isinstance(A, scipy.sparse.linalg.LinearOperator):
raise NotImplementedError
else:
raise NotImplementedError
def check_is_hermitian(A, matrix_name, atol=1e-8, rtol=1e-12):
"""
Check if `A` is hermitian and print a warning if this is not the case.
The argument `matrix_name` is only used for printing the warning.
"""
if not is_hermitian(A):
mat_diff = np.absolute(A - np.conj(A.T))
logger.critical("Matrix {} is not Hermitian. Maximum difference "
"between A and conj(A^tr): {}, median difference: {}, "
"mean difference: {} (maximum entry of A: {}, "
"median entry: {}, mean entry: {})".format(
matrix_name, mat_diff.max(), np.median(
mat_diff), np.mean(mat_diff),
np.max(np.absolute(A)), np.median(np.absolute(A)), np.mean(np.absolute(A))))
def compute_generalised_eigenproblem_matrices(sim, alpha=0.0, frequency_unit=1e9,
filename_mat_A=None, filename_mat_M=None,
check_hermitian=False, differentiate_H_numerically=True):
"""
XXX TODO: write me
"""
m_orig = sim.m
def effective_field_for_m(m, normalise=True):
if np.iscomplexobj(m):
raise NotImplementedError(
"XXX TODO: Implement the version for complex arrays!")
sim.set_m(m, normalise=normalise)
return sim.effective_field()
n = sim.mesh.num_vertices()
N = 3 * n # number of degrees of freedom
m0_array = sim.m.copy()
# this corresponds to the vector 'm0_flat' in Simlib
m0_3xn = m0_array.reshape(3, n)
m0_column_vector = m0_array.reshape(3, 1, n)
H0_array = effective_field_for_m(m0_array)
H0_3xn = H0_array.reshape(3, n)
h0 = H0_3xn[0] * m0_3xn[0] + H0_3xn[1] * m0_3xn[1] + H0_3xn[2] * m0_3xn[2]
logger.debug(
"Computing basis of the tangent space and transition matrices.")
Q, R, S, Mcross = compute_tangential_space_basis(m0_column_vector)
Qt = mf_transpose(Q).copy()
logger.debug("Q.shape: {} ({} MB)".format(Q.shape, Q.nbytes / 1024. ** 2))
def A_times_vector(v):
# A = H' v - h_0 v
assert v.shape == (3, 1, n)
v_array = v.view()
v_array.shape = (-1,)
# Compute H'(m_0)*v, i.e. the "directional derivative" of H at
# m_0 in the direction of v. Since H is linear in m (at least
# theoretically, although this is not quite true in the case
# of our demag computation), this is the same as H(v)!
if differentiate_H_numerically:
res = differentiate_fd4(effective_field_for_m, m0_array, v_array)
else:
res = effective_field_for_m(v_array, normalise=False)
res.shape = (3, n)
# Subtract h0 v
res[0] -= h0 * v[0, 0]
res[1] -= h0 * v[1, 0]
res[2] -= h0 * v[2, 0]
res.shape = (3, 1, n)
return res
df.tic()
logger.info("Assembling eigenproblem matrix.")
A = np.zeros((2 * n, 2 * n), dtype=complex)
logger.debug("Eigenproblem matrix A occupies {:.2f} MB of memory.".format(
A.nbytes / 1024. ** 2))
# Compute A
w = np.zeros(2 * n)
for i in xrange(2 * n):
if i % 50 == 0:
logger.debug(
"Processing row {}/{} (time taken so far: {:.2f} seconds)".format(i, 2 * n, df.toc()))
# Ensure that w is the i-th standard basis vector
w.shape = (2 * n,)
w[i - 1] = 0.0 # this will do no harm if i==0
w[i] = 1.0
w.shape = (2, 1, n)
Av = A_times_vector(mf_mult(Q, w))
A[:, i] = mf_mult(Qt, Av).reshape(-1)
# Multiply by (-gamma)/(2 pi U)
A[:, i] *= -sim.gamma / (2 * pi * frequency_unit)
# Compute B, which is -i Mcross 2 pi U / gamma
# B = np.zeros((2, n, 2, n), dtype=complex)
# for i in xrange(n):
# B[:, i, :, i] = Mcross[:, :, i]
# B[:, i, :, i] *= -1j
# B.shape = (2*n, 2*n)
M = scipy.sparse.linalg.LinearOperator(
(2 * n, 2 * n), M_times_w(Mcross, n, alpha), NotImplementedOp(), NotImplementedOp(), dtype=complex)
if check_hermitian:
# Sanity check: A and M should be Hermitian matrices
check_is_hermitian(A, "A")
#check_is_hermitian(M, "M")
if filename_mat_A != None:
dirname_mat_A = os.path.dirname(os.path.abspath(filename_mat_A))
if not os.path.exists(dirname_mat_A):
logger.debug(
"Creating directory '{}' as it does not exist.".format(dirname_mat_A))
os.makedirs(dirname_mat_A)
logger.info(
"Saving generalised eigenproblem matrix 'A' to file '{}'".format(filename_mat_A))
np.save(filename_mat_A, A)
if filename_mat_M != None:
dirname_mat_M = os.path.dirname(os.path.abspath(filename_mat_M))
if not os.path.exists(dirname_mat_M):
logger.debug(
"Creating directory '{}' as it does not exist.".format(dirname_mat_M))
os.makedirs(dirname_mat_M)
logger.info(
"Saving generalised eigenproblem matrix 'M' to file '{}'".format(filename_mat_M))
np.save(filename_mat_M, M)
# Restore the original magnetisation.
# XXX TODO: Is this method safe, or does it leave any trace of the
# temporary changes we did above?
sim.set_m(m_orig)
return A, M, Q, Qt
def compute_normal_modes(D, n_values=10, sigma=0., tol=1e-8, which='LM'):
logger.debug("Solving eigenproblem. This may take a while...")
df.tic()
omega, w = scipy.sparse.linalg.eigs(
        D, n_values, which=which, sigma=sigma, tol=tol, return_eigenvectors=True)
logger.debug(
"Computing the eigenvalues and eigenvectors took {:.2f} seconds".format(df.toc()))
return omega, w
def compute_normal_modes_generalised(A, M, n_values=10, tol=1e-8, discard_negative_frequencies=False, sigma=None, which='LM',
v0=None, ncv=None, maxiter=None, Minv=None, OPinv=None, mode='normal'):
logger.debug("Solving eigenproblem. This may take a while...")
df.tic()
if discard_negative_frequencies:
n_values *= 2
# XXX TODO: The following call seems to increase memory consumption quite a bit. Why?!?
#
# We have to swap M and A when passing them to eigsh since the M matrix
# has to be positive definite for eigsh!
omega_inv, w = scipy.sparse.linalg.eigsh(M, k=n_values, M=A, which=which, tol=tol, return_eigenvectors=True, sigma=sigma,
v0=v0, ncv=ncv, maxiter=maxiter, Minv=Minv, OPinv=OPinv, mode=mode)
logger.debug(
"Computing the eigenvalues and eigenvectors took {:.2f} seconds".format(df.toc()))
# The true eigenfrequencies are given by 1/omega_inv because we swapped M
# and A above and thus computed the inverse eigenvalues.
omega = 1. / omega_inv
# Sanity check: the eigenfrequencies should occur in +/- pairs.
TOL = 1e-3
positive_freqs = filter(lambda x: x > 0, omega)
negative_freqs = filter(lambda x: x < 0, omega)
freq_pairs = izip(positive_freqs, negative_freqs)
if (n_values % 2 == 0 and len(positive_freqs) != len(negative_freqs)) or \
(n_values % 2 == 0 and len(positive_freqs) - len(negative_freqs) not in [0, 1]) or \
any([abs(x + y) > TOL for (x, y) in freq_pairs]):
logger.warning("The eigenfrequencies should occur in +/- pairs, but this "
"does not seem to be the case (with TOL={})! Please "
"double-check that the results make sense!".format(TOL))
# Find the indices that sort the frequency by absolute value,
    # with the positive frequencies occurring before the negative ones
    # (where the absolute values coincide).
sorted_indices = sorted(np.arange(len(omega)),
key=lambda i: (np.round(abs(omega[i]), decimals=4), -np.sign(omega[i]), abs(omega[i])))
if discard_negative_frequencies:
# Discard indices corresponding to negative frequencies
sorted_indices = filter(lambda i: omega[i] >= 0.0, sorted_indices)
omega = omega[sorted_indices]
# XXX TODO: can we somehow avoid copying the columns to save memory?!?
w = w[:, sorted_indices]
return omega, w
def export_normal_mode_animation(mesh, m0, freq, w, filename, num_cycles=1, num_snapshots_per_cycle=20, scaling=0.2, dm_only=False, save_h5=False):
"""
Save a number of vtk files of different snapshots of a given normal mode.
These can be imported and animated in Paraview.
*Arguments*
mesh : dolfin.Mesh
The mesh on which the magnetisation is defined.
m0 : numpy.array
The ground state of the magnetisation for which the normal mode was computed.
The size must be so that the array can be reshaped to size 3xN.
freq : float
The frequency of the normal mode.
w : numpy.array
The eigenvector representing the normal mode (as returned by `compute_eigenv`
or `compute_eigenv_generalised`).
filename : string
The filename of the exported animation files. Each individual frame will
have the same basename but will be given a suffix indicating the frame
number, too.
num_cycles : int
The number of cycles to be animated.
num_snapshots_per_cycle : int
The number of snapshot per cycle to be exported. Thus the total number of
exported frames is num_cycles * num_snapshots_per_cycle.
scaling : float
If `dm_only` is False, this determines the maximum size of the
oscillation (relative to the magnetisation vector) in the
visualisation. If `dm_only` is True, this has no effect.
dm_only : bool (optional)
If False (the default), plots `m0 + scaling*dm(t)`, where m0 is the
average magnetisation and dm(t) the (spatially varying)
oscillation corresponding to the frequency of the normal mode.
If True, only `dm(t)` is plotted.
"""
if freq.imag != 0 and abs(freq.imag) > 5e-3:
logger.warning("Frequency expected to be a real number. "
"Got: {}. This may lead to unexpected behaviour".format(freq))
freq = freq.real
#basename = os.path.basename(re.sub('\.vtk$', '', filename))
#dirname = os.path.dirname(filename)
# if not os.path.exists(dirname):
# print "Creating directory '{}' as it doesn't exist.".format(dirname)
# os.makedirs(dirname)
#mesh = comp.mesh
#mesh_shape = mesh.mesh_size
m0_array = m0.copy() # we assume that sim is relaxed!!
Q, R, S, Mcross = compute_tangential_space_basis(
m0_array.reshape(3, 1, -1))
Qt = mf_transpose(Q).copy()
n = mesh.num_vertices()
V = df.VectorFunctionSpace(mesh, 'CG', 1, dim=3)
func = df.Function(V)
func.rename('m', 'magnetisation')
w_3d = mf_mult(Q, w.reshape((2, 1, n)))
w_flat = w_3d.reshape(-1)
phi = np.angle(w_flat) # relative phases of the oscillations
a = np.absolute(w_flat)
a = a / a.max() # normalised amplitudes of the oscillations
t_end = num_cycles * 2 * pi / freq
timesteps = np.linspace(
0, t_end, num_cycles * num_snapshots_per_cycle, endpoint=False)
m_osc = np.zeros(3 * n)
t0 = time()
f = df.File(filename, 'compressed')
field = Field(V, name='m')
for (i, t) in enumerate(timesteps):
logger.debug("Saving animation snapshot for timestep {} ({}/{})".format(t,
i, num_cycles * num_snapshots_per_cycle))
if dm_only is False:
m_osc = (
m0_array + scaling * a * np.cos(t * freq + phi)).reshape(-1)
else:
m_osc = (scaling * a * np.cos(t * freq + phi)).reshape(-1)
#save_vector_field(m_osc, os.path.join(dirname, basename + '_{:04d}.vtk'.format(i)))
func.vector().set_local(m_osc)
f << func
if save_h5:
field.set(func)
field.save_hdf5(filename[0:-4], i)
field.close_hdf5()
t1 = time()
logger.debug(
"Saving the data to file '{}' took {} seconds".format(filename, t1 - t0))
def get_colormap_from_name(cmap_name):
from matplotlib import cm
import custom_colormaps
colormaps = {'coolwarm': cm.coolwarm,
'cool': cm.cool,
'hot': cm.hot,
'afmhot': cm.afmhot,
'rainbow': cm.jet,
'hsv': cm.hsv,
'circular1': custom_colormaps.circular1,
'circular2': custom_colormaps.circular2,
'circular3': custom_colormaps.circular3,
'circular4': custom_colormaps.circular4,
'husl_99_75': custom_colormaps.husl_99_75,
'husl_99_70': custom_colormaps.husl_99_70,
'husl_99_65': custom_colormaps.husl_99_65,
}
try:
if cmap_name == 'rainbow':
logger.warning('The rainbow colormap is strongly discouraged for scientific visualizations, it is '
'highly recommended to choose a different colormap. See for example '
'http://medvis.org/2012/08/21/rainbow-colormaps-what-are-they-good-for-absolutely-nothing/ '
'for more information.')
return colormaps[cmap_name]
except KeyError:
raise ValueError("Unknown colormap name: '{}'. Allowed values: {}".format(
cmap_name, colormaps.keys()))
def extract_mesh_slice(mesh, slice_z):
coords = mesh.coordinates()
xmin = min(coords[:, 0])
xmax = max(coords[:, 0])
ymin = min(coords[:, 1])
ymax = max(coords[:, 1])
nx = int(1 * (xmax - xmin))
ny = int(1 * (ymax - ymin))
slice_mesh = embed3d(
df.RectangleMesh(df.Point(xmin, ymin), df.Point(xmax, ymax), nx, ny), z_embed=slice_z)
V = df.FunctionSpace(mesh, 'CG', 1)
f = df.Function(V)
V_slice = df.FunctionSpace(slice_mesh, 'CG', 1)
f_slice = df.Function(V_slice)
lg = df.LagrangeInterpolator()
def restrict_to_slice_mesh(a):
f.vector().set_local(a)
lg.interpolate(f_slice, f)
return f_slice.vector().array()
return slice_mesh, restrict_to_slice_mesh
def get_phaseplot_ticks_and_labels(num_ticks):
"""
Helper function to define nice ticks for phase plots which are
multiples of pi/2. Currently `num_ticks` must be either 3 or 5.
"""
if num_ticks == 3:
ticks = [-pi, 0, pi]
ticklabels = [u'-\u03C0', u'0', u'\u03C0']
elif num_ticks == 5:
ticks = [-pi, -pi / 2, 0, pi / 2, pi]
ticklabels = [u'-\u03C0', u'-\u03C0/2', u'0', u'\u03C0/2', u'\u03C0']
else:
raise ValueError(
"Number of phase plot ticks must be either 3 or 5. Got: {}".format(num_ticks))
return ticks, ticklabels
def plot_spatially_resolved_normal_mode(
mesh, m0, w, slice_z='z_max', components='xyz', label_components=True,
figure_title=None, yshift_title=0.0, plot_powers=True, plot_phases=True,
label_power='Power', label_phase='Phase', xticks=None, yticks=None,
num_power_colorbar_ticks=5, num_phase_colorbar_ticks=5,
colorbar_fmt='%.2e', cmap_powers='coolwarm', cmap_phases='circular4',
vmin_powers=0.0, show_axis_labels=True, show_axis_frames=True,
show_colorbars=True, figsize=None, outfilename=None, dpi=None):
"""
Plot the normal mode profile across a slice of the sample.
Remark: Due to a bug in matplotlib (see [1]), when saving the
`matplotlib.Figure` object returned from this function the title
and left annotations will likely be cut off. Therefore it is
recommended to save the plot by specifying the argument
`outfilename`.
[1] http://stackoverflow.com/questions/10101700/moving-matplotlib-legend-outside-of-the-axis-makes-it-cutoff-by-the-figure-box
*Arguments*
mesh:
The mesh of the simulation object for which the eigenmode was computed.
m0 : numpy.array
The ground state of the magnetisation for which the normal mode was computed.
The size must be so that the array can be reshaped to size 3xN.
w:
The eigenvector representing the normal mode (for example,
one of the columns of the second return value of
`compute_normal_modes_generalised`).
slice_z:
The z-value of the mesh slice which will be plotted. This can be either
'z_min' or 'z_max' (which correspond to the bottom/top layer of the mesh)
or a numerical value. Note that the mesh must have a layer of nodes with
this z-coordinate, otherwise the plotting routine will fail.
num_power_colorbar_ticks:
The number of tick labels for the power colorbars. Currently
this must be either 3 or 5 (default: 5).
num_phase_colorbar_ticks:
The number of tick labels for the phase colorbars. Currently
this must be either 3 or 5 (default: 5).
outfilename:
If given, the plot will be saved to a file with this name. Any
missing directory components will be created first. Default: None.
dpi:
The resolution of the saved plot (ignored if `outfilename` is None).
*Returns*
The `matplotlib.Figure` containing the plot.
"""
import matplotlib.pyplot as plt
import matplotlib.tri as tri
from matplotlib.ticker import FormatStrFormatter
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
coords = mesh.coordinates()
if slice_z == 'z_min':
slice_z = min(coords[:, 2])
elif slice_z == 'z_max':
slice_z = max(coords[:, 2])
slice_mesh, restrict_to_submesh = extract_mesh_slice(mesh, slice_z)
m0_array = m0.copy()
Q, R, S, Mcross = compute_tangential_space_basis(
m0_array.reshape(3, 1, -1))
Qt = mf_transpose(Q).copy()
n = mesh.num_vertices()
w_3d = mf_mult(Q, w.reshape((2, 1, n)))
w_x = w_3d[0, 0, :]
w_y = w_3d[1, 0, :]
w_z = w_3d[2, 0, :]
######################################################################
slice_coords = slice_mesh.coordinates()
xvals = slice_coords[:, 0]
yvals = slice_coords[:, 1]
# We use the mesh triangulation provided by dolfin in case the
# mesh has multiple disconnected regions (in which case matplotlib
# would connect them).
mesh_triang = tri.Triangulation(xvals, yvals, slice_mesh.cells())
# Determine the number of rows (<=2) and columns (<=3) in the plot
num_rows = 0
if plot_powers:
num_rows += 1
if plot_phases:
num_rows += 1
if num_rows == 0:
raise ValueError(
"At least one of the arguments `plot_powers`, `plot_phases` must be True.")
num_columns = len(components)
def plot_mode_profile(ax, a, title=None, vmin=None, vmax=None, cmap=None, cticks=None, cticklabels=None):
ax.set_aspect('equal')
vals = restrict_to_submesh(a)
trimesh = ax.tripcolor(mesh_triang, vals, shading='gouraud', cmap=cmap)
ax.set_title(title)
if show_colorbars:
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", "5%", pad="3%")
if vmin is None:
vmin = min(vals)
if vmax is None:
vmax = max(vals)
trimesh.set_clim(vmin=vmin, vmax=vmax)
cbar = plt.colorbar(trimesh, cax=cax, format=FormatStrFormatter(colorbar_fmt),
ticks=cticks)
            if cticklabels is not None:
cbar.ax.set_yticklabels(cticklabels)
if not show_axis_labels:
ax.set_xticks([])
ax.set_yticks([])
if not show_axis_frames:
ax.axis('off')
fig = plt.figure(figsize=figsize)
if isinstance(cmap_powers, str):
cmap_powers = get_colormap_from_name(cmap_powers)
if isinstance(cmap_phases, str):
cmap_phases = get_colormap_from_name(cmap_phases)
powers = {'x': np.absolute(w_x) ** 2,
'y': np.absolute(w_y) ** 2,
'z': np.absolute(w_z) ** 2}
phases = {'x': np.angle(w_x),
'y': np.angle(w_y),
'z': np.angle(w_z)}
def set_xyticks(ax):
        if xticks is not None:
            ax.set_xticks(xticks)
        if yticks is not None:
            ax.set_yticks(yticks)
cnt = 1
if plot_powers:
cticklabels = None
for comp in components:
ax = fig.add_subplot(num_rows, num_columns, cnt)
            if num_power_colorbar_ticks is not None:
                if vmin_powers is not None:
minval = vmin_powers
else:
minval = powers[comp].min()
cticks = np.linspace(
minval, powers[comp].max(), num_power_colorbar_ticks)
else:
cticks = None
comp_title = 'm_{}'.format(comp) if label_components else ''
plot_mode_profile(ax, powers[comp], title=comp_title,
cticks=cticks, cticklabels=cticklabels,
vmin=vmin_powers, cmap=cmap_powers)
set_xyticks(ax)
cnt += 1
if plot_phases:
cticks, cticklabels = get_phaseplot_ticks_and_labels(
num_phase_colorbar_ticks)
for comp in components:
ax = fig.add_subplot(num_rows, num_columns, cnt)
if label_components and not plot_powers:
comp_title = 'm_{}'.format(comp)
else:
comp_title = ''
plot_mode_profile(ax, phases[comp], title=comp_title,
cticks=cticks, cticklabels=cticklabels,
vmin=-pi, vmax=+pi,
cmap=cmap_phases)
set_xyticks(ax)
cnt += 1
bbox_extra_artists = []
    if figure_title is not None:
txt = plt.text(0.5, 1.0 + yshift_title, figure_title,
horizontalalignment='center',
fontsize=20,
transform=fig.transFigure)
bbox_extra_artists.append(txt)
num_axes = len(fig.axes)
ax_annotate_powers = fig.axes[0]
ax_annotate_phases = fig.axes[(num_axes // 2) if plot_powers else 0]
if plot_powers:
txt_power = plt.text(-0.2, 0.5, label_power,
fontsize=16,
horizontalalignment='right',
verticalalignment='center',
rotation='vertical',
# transform=fig.transFigure)
transform=ax_annotate_powers.transAxes)
bbox_extra_artists.append(txt_power)
#
#ax_topleft.text(0, 0, label_power, ha='left', va='center', rotation=90)
#
#from matplotlib.offsetbox import AnchoredOffsetbox, TextArea
#box = TextArea(label_power, textprops=dict(color="k", fontsize=20))
# anchored_box = AnchoredOffsetbox(loc=3,
# child=box, pad=0.,
# frameon=False,
# bbox_to_anchor=(-0.1, 0.5),
# bbox_transform=ax.transAxes,
# borderpad=0.,
# )
# ax_topleft.add_artist(anchored_box)
# bbox_extra_artists.append(anchored_box)
if plot_phases:
txt_phase = plt.text(-0.2, 0.5, label_phase,
fontsize=16,
horizontalalignment='right',
verticalalignment='center',
rotation='vertical',
# transform=fig.transFigure)
transform=ax_annotate_phases.transAxes)
bbox_extra_artists.append(txt_phase)
    if outfilename is not None:
helpers.create_missing_directory_components(outfilename)
fig.savefig(
outfilename, bbox_extra_artists=bbox_extra_artists, bbox_inches='tight', dpi=dpi)
return fig
|
py | 1a4777a01c79b3bed341f99fd45ff34887d0608c | import unittest
import pandas as pd
from ta.trend import (MACD, ADXIndicator, CCIIndicator, PSARIndicator,
VortexIndicator, adx, adx_neg, adx_pos, cci, macd,
macd_diff, macd_signal, psar_down, psar_down_indicator,
psar_up, psar_up_indicator, vortex_indicator_neg,
vortex_indicator_pos)
class TestADXIndicator(unittest.TestCase):
"""
https://school.stockcharts.com/doku.php?id=technical_indicators:average_directional_index_adx
"""
_filename = 'ta/tests/data/cs-adx.csv'
@classmethod
def setUpClass(cls):
cls._df = pd.read_csv(cls._filename, sep=',')
cls._params = dict(high=cls._df['High'], low=cls._df['Low'], close=cls._df['Close'], n=14, fillna=False)
cls._indicator = ADXIndicator(**cls._params)
@classmethod
def tearDownClass(cls):
        del cls._df
def test_adx(self):
target = 'ADX'
result = adx(**self._params)
pd.testing.assert_series_equal(self._df[target].tail(), result.tail(), check_names=False)
def test_adx2(self):
target = 'ADX'
result = self._indicator.adx()
pd.testing.assert_series_equal(self._df[target].tail(), result.tail(), check_names=False)
def test_adx_pos(self):
target = '+DI14'
result = adx_pos(**self._params)
pd.testing.assert_series_equal(self._df[target].tail(), result.tail(), check_names=False)
def test_adx_pos2(self):
target = '+DI14'
result = self._indicator.adx_pos()
pd.testing.assert_series_equal(self._df[target].tail(), result.tail(), check_names=False)
def test_adx_neg(self):
target = '-DI14'
result = adx_neg(**self._params)
pd.testing.assert_series_equal(self._df[target].tail(), result.tail(), check_names=False)
def test_adx_neg2(self):
target = '-DI14'
result = self._indicator.adx_neg()
pd.testing.assert_series_equal(self._df[target].tail(), result.tail(), check_names=False)
class TestMACDIndicator(unittest.TestCase):
"""
https://school.stockcharts.com/doku.php?id=technical_indicators:moving_average_convergence_divergence_macd
"""
_filename = 'ta/tests/data/cs-macd.csv'
@classmethod
def setUpClass(cls):
cls._df = pd.read_csv(cls._filename, sep=',')
cls._params = dict(close=cls._df['Close'], n_slow=26, n_fast=12, n_sign=9, fillna=False)
cls._indicator = MACD(**cls._params)
@classmethod
def tearDownClass(cls):
        del cls._df
def test_macd(self):
target = 'MACD_line'
result = macd(close=self._df['Close'], n_slow=26, n_fast=12, fillna=False)
pd.testing.assert_series_equal(self._df[target].tail(), result.tail(), check_names=False)
def test_macd2(self):
target = 'MACD_line'
result = self._indicator.macd()
pd.testing.assert_series_equal(self._df[target].tail(), result.tail(), check_names=False)
def test_macd_signal(self):
target = 'MACD_signal'
result = macd_signal(**self._params)
pd.testing.assert_series_equal(self._df[target].tail(), result.tail(), check_names=False)
def test_macd_signal2(self):
target = 'MACD_signal'
result = MACD(**self._params).macd_signal()
pd.testing.assert_series_equal(self._df[target].tail(), result.tail(), check_names=False)
def test_macd_diff(self):
target = 'MACD_diff'
result = macd_diff(**self._params)
pd.testing.assert_series_equal(self._df[target].tail(), result.tail(), check_names=False)
def test_macd_diff2(self):
target = 'MACD_diff'
result = MACD(**self._params).macd_diff()
pd.testing.assert_series_equal(self._df[target].tail(), result.tail(), check_names=False)
class TestCCIIndicator(unittest.TestCase):
"""
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:commodity_channel_index_cci
"""
_filename = 'ta/tests/data/cs-cci.csv'
@classmethod
def setUpClass(cls):
cls._df = pd.read_csv(cls._filename, sep=',')
cls._params = dict(
high=cls._df['High'], low=cls._df['Low'], close=cls._df['Close'], n=20, c=0.015, fillna=False)
cls._indicator = CCIIndicator(**cls._params)
@classmethod
def tearDownClass(cls):
        del cls._df
def test_cci(self):
target = 'CCI'
result = cci(**self._params)
pd.testing.assert_series_equal(self._df[target].tail(), result.tail(), check_names=False)
def test_cci2(self):
target = 'CCI'
result = self._indicator.cci()
pd.testing.assert_series_equal(self._df[target].tail(), result.tail(), check_names=False)
class TestVortexIndicator(unittest.TestCase):
"""
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:commodity_channel_index_cci
"""
_filename = 'ta/tests/data/cs-vortex.csv'
@classmethod
def setUpClass(cls):
cls._df = pd.read_csv(cls._filename, sep=',')
cls._params = dict(high=cls._df['High'], low=cls._df['Low'], close=cls._df['Close'], n=14, fillna=False)
cls._indicator = VortexIndicator(**cls._params)
@classmethod
def tearDownClass(cls):
        del cls._df
def test_vortex_indicator_pos(self):
target = '+VI14'
result = vortex_indicator_pos(**self._params)
pd.testing.assert_series_equal(self._df[target].tail(), result.tail(), check_names=False)
def test_vortex_indicator_pos2(self):
target = '+VI14'
result = self._indicator.vortex_indicator_pos()
pd.testing.assert_series_equal(self._df[target].tail(), result.tail(), check_names=False)
def test_vortex_indicator_neg(self):
target = '-VI14'
result = vortex_indicator_neg(**self._params)
pd.testing.assert_series_equal(self._df[target].tail(), result.tail(), check_names=False)
def test_vortex_indicator_neg2(self):
target = '-VI14'
result = self._indicator.vortex_indicator_neg()
pd.testing.assert_series_equal(self._df[target].tail(), result.tail(), check_names=False)
class TestPSARIndicator(unittest.TestCase):
"""
https://school.stockcharts.com/doku.php?id=technical_indicators:parabolic_sar
"""
_filename = 'ta/tests/data/cs-psar.csv'
@classmethod
def setUpClass(cls):
cls._df = pd.read_csv(cls._filename, sep=',')
cls._params = dict(high=cls._df['High'], low=cls._df['Low'], close=cls._df['Close'], fillna=False)
cls._indicator = PSARIndicator(**cls._params)
@classmethod
def tearDownClass(cls):
        del cls._df
def test_psar_up(self):
target = 'psar_up'
result = self._indicator.psar_up()
pd.testing.assert_series_equal(self._df[target].tail(), result.tail(), check_names=False)
def test_psar_down(self):
target = 'psar_down'
result = self._indicator.psar_down()
pd.testing.assert_series_equal(self._df[target].tail(), result.tail(), check_names=False)
def test_psar_up_indicator(self):
target = 'psar_up_ind'
result = self._indicator.psar_up_indicator()
pd.testing.assert_series_equal(self._df[target].tail(), result.tail(), check_names=False)
def test_psar_down_indicator(self):
target = 'psar_down_ind'
result = self._indicator.psar_down_indicator()
pd.testing.assert_series_equal(self._df[target].tail(), result.tail(), check_names=False)
def test_psar_up2(self):
target = 'psar_up'
result = psar_up(**self._params)
pd.testing.assert_series_equal(self._df[target].tail(), result.tail(), check_names=False)
def test_psar_down2(self):
target = 'psar_down'
result = psar_down(**self._params)
pd.testing.assert_series_equal(self._df[target].tail(), result.tail(), check_names=False)
def test_psar_up_indicator2(self):
target = 'psar_up_ind'
result = psar_up_indicator(**self._params)
pd.testing.assert_series_equal(self._df[target].tail(), result.tail(), check_names=False)
def test_psar_down_indicator2(self):
target = 'psar_down_ind'
result = psar_down_indicator(**self._params)
pd.testing.assert_series_equal(self._df[target].tail(), result.tail(), check_names=False)
|
py | 1a4777a938c36b906da3f9c6cd6897c549f2b49f | import torch
import torchani
import unittest
import os
import pickle
path = os.path.dirname(os.path.realpath(__file__))
class TestGrad(unittest.TestCase):
# torch.autograd.gradcheck and torch.autograd.gradgradcheck verify that
# the numerical and analytical gradient and hessian of a function
    # match to within a given tolerance.
#
# The forward call of the function is wrapped with a lambda so that
# gradcheck gets a function with only one tensor input and tensor output.
# nondet_tol is necessarily greater than zero since some operations are
    # nondeterministic, which makes two equal inputs produce different outputs
def setUp(self):
self.device = torch.device(
'cuda' if torch.cuda.is_available() else 'cpu')
self.model = torchani.models.ANI1x(model_index=0).to(device=self.device,
dtype=torch.double)
datafile = os.path.join(path, 'test_data/NIST/all')
# Some small molecules are selected to make the tests faster
self.data = pickle.load(open(datafile, 'rb'))[1243:1250]
def testGradCheck(self):
for coordinates, species, _, _, _, _ in self.data:
coordinates = torch.from_numpy(coordinates).to(device=self.device,
dtype=torch.float64)
coordinates.requires_grad_(True)
species = torch.from_numpy(species).to(self.device)
torch.autograd.gradcheck(lambda x: self.model((species, x)).energies,
coordinates,
nondet_tol=1e-13)
def testGradGradCheck(self):
for coordinates, species, _, _, _, _ in self.data:
coordinates = torch.from_numpy(coordinates).to(device=self.device,
dtype=torch.float64)
coordinates.requires_grad_(True)
species = torch.from_numpy(species).to(self.device)
torch.autograd.gradgradcheck(lambda x: self.model((species, x)).energies,
coordinates,
nondet_tol=1e-13)
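# The comment at the top of TestGrad describes the wrapping pattern used above:
# gradcheck expects a callable taking a single double-precision tensor and
# returning a tensor, so any extra arguments are closed over with a lambda.
# The helper below is a minimal, self-contained sketch of that pattern on a
# toy function; it is not part of the original test suite and is never called.
def _gradcheck_pattern_sketch():
    x = torch.randn(4, dtype=torch.double, requires_grad=True)
    w = torch.randn(4, dtype=torch.double)
    # Close over the extra tensor `w` so gradcheck only sees `x`.
    return torch.autograd.gradcheck(lambda inp: (inp * w).sin().sum(),
                                    x, nondet_tol=1e-13)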
if __name__ == '__main__':
unittest.main()
|
py | 1a47796fbfd4d4423d5b840af63e1d2e0f3ae7ca | #! /usr/bin/env python
import sys
import yt ; yt.funcs.mylog.setLevel(0)
import numpy as np
from scipy import signal
# Build Jx without filter (from other simulation)
my_F_nofilter = np.zeros([16,16])
my_F_nofilter[8,8] = -1.601068065642412e-11
my_F_nofilter[8,7] = -1.601068065642412e-11
# Build 2D filter
filter0 = np.array([.25,.5,.25])
my_order = [1,5]
my_filterx = filter0
my_filtery = filter0
while my_order[0]>1:
my_filterx = np.convolve(my_filterx,filter0)
my_order[0] -= 1
while my_order[1]>1:
my_filtery = np.convolve(my_filtery,filter0)
my_order[1] -= 1
my_filter = my_filterx[:,None]*my_filtery
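# Hedged sketch (not part of the original script): the same separable binomial
# filter can be built with a small helper that keeps the orders explicit and
# does not mutate `my_order` in place; binomial_filter_2d(1, 5) reproduces the
# `my_filter` array constructed above.
def binomial_filter_2d(order_x, order_y):
    base = np.array([.25, .5, .25])
    fx, fy = base, base
    for _ in range(order_x - 1):
        fx = np.convolve(fx, base)
    for _ in range(order_y - 1):
        fy = np.convolve(fy, base)
    return fx[:, None] * fy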
# Apply filter. my_F_filtered is the theoretical value for the filtered field
my_F_filtered = signal.convolve2d(my_F_nofilter, my_filter, boundary='symm', mode='same')
# Get simulation result for F_filtered
filename = sys.argv[1]
ds = yt.load( filename )
sl = yt.SlicePlot(ds, 2, 'jx', aspect=1)
all_data_level_0 = ds.covering_grid(level=0,left_edge=ds.domain_left_edge, dims=ds.domain_dimensions)
F_filtered = all_data_level_0['boxlib', 'jx'].v.squeeze()
# Compare theory and PIC for filtered value
error = np.sum( np.abs(F_filtered - my_F_filtered) ) / np.sum( np.abs(my_F_filtered) )
assert error < 1.e-14
|
py | 1a47797c97dab34675b6fff6e28b931005133ebf | from __future__ import absolute_import
from docker_registry_client.Image import Image
from docker_registry_client._BaseClient import BaseClientV1
from drc_test_utils.mock_registry import mock_v1_registry
class TestImage(object):
def test_init(self):
url = mock_v1_registry()
image_id = 'test_image_id'
image = Image(image_id, BaseClientV1(url))
assert image.image_id == image_id
|
py | 1a477a04ee155defbce8fc91ce39c25d2107fe7e | # -*- coding: utf-8 -*-
"""Main tasks.py file for current application module."""
import time
import os
import json
import shutil
from datetime import datetime as dtime
from celery import group
from celery import shared_task
from flask import current_app
from libs import helpers
from exts.sqlalchemy import db
from mods.api.models import Eval
from mods.api.models import File
@shared_task(bind=True, ignore_result=True)
def client_eval(self, files, client_id=None):
""" Client evaluation task."""
with current_app.app_context():
idle_time = 0.1
new_eval = Eval.query.filter(Eval.uuid_f == self.request.id).first()
if len(files) > 0:
self.update_state(state='PROGRESS')
file_tasks = []
for file in files:
for k, v in file.items():
file_tasks.append(eval_file.s(v, k, client_id))
group(*file_tasks)()
if helpers.eval_running(new_eval) is True:
while helpers.eval_running(new_eval) is True:
self.update_state(state='PROGRESS')
time.sleep(idle_time)
db.session.refresh(new_eval)
new_eval.status_f, new_eval.score = helpers.eval_status(new_eval)
#new_eval.date_b = str(dtime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3])
#new_eval.date_b = dtime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
new_eval.date_b = dtime.now()
db.session.commit()
fpath = "/tmp/uploads/files/{}".format(new_eval.uuid_f)
shutil.rmtree(str(fpath), ignore_errors=True, onerror=None)
return self.update_state(state='SUCCESS')
@shared_task(bind=True, ignore_result=True)
def eval_file(self, fullpath, file_hash, client_id=None):
""" Single file submission to backend task."""
import requests
requests.packages.urllib3.disable_warnings()
with current_app.app_context():
self.update_state(state='PROGRESS')
fc = helpers.file_config(fullpath, file_hash, client_id)
fd = open(fc["fullpath"], "rb")
file = fd.read()
fd.close()
os.remove(fc["fullpath"])
ma_files = {
fc["filename"]: (fc["filename"], file, 'application/octet-stream')
}
r = requests.post(
fc["scan_url"], files=ma_files, verify=False, headers=fc["headers"])
if not r.ok:
return self.update_state(state='FAILURE')
return self.update_state(state='SUCCESS')
@shared_task(bind=True, ignore_result=True)
def eval_result(self, jdata):
""" Single file result received from wsclient service processing task."""
with current_app.app_context():
out_msg = helpers.file_result(jdata)
jdata['status_f'] = "Complete"
if jdata['status'] == 2 or jdata['status'] == 3:
jdata['status_f'] = "Error"
db.session.query(File).filter(File.sha1 == jdata["sha1"]).update({
File.status_f: jdata['status_f'],
File.score: jdata['score'],
File.exec_time: jdata['exec_time'],
#File.date_b: jdata['server_time'],
#File.date_b: dtime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3].datetime(),
File.date_b: dtime.now(),
File.message: out_msg,
File.results: json.dumps(jdata)
#File.results: jdata
#File.results: {}
})
db.session.commit()
return self.update_state(state='SUCCESS')
|
py | 1a477b8ef61340a3881c6710fa37026f99c6f1b6 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-11-01 14:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("oidc_provider", "0019_auto_20161005_1552"),
]
operations = [
migrations.AddField(
model_name="client",
name="_post_logout_redirect_uris",
field=models.TextField(
blank=True,
default="",
help_text="Enter each URI on a new line.",
verbose_name="Post Logout Redirect URIs",
),
),
]
|
py | 1a477c406c89a238edb5e4137ac62dce5162c883 | # Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""train DCGAN on ModelArts, get checkpoint files and air/onnx models."""
import argparse
import os
import datetime
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import mindspore.common.dtype as mstype
from mindspore import context
from mindspore import nn, Tensor, export
from mindspore.train.callback import CheckpointConfig, _InternalCallbackParam, ModelCheckpoint, RunContext
from mindspore.context import ParallelMode
from mindspore.communication.management import init, get_rank
import moxing as mox
from src.dataset import create_dataset_imagenet
from src.config import dcgan_imagenet_cfg as cfg
from src.generator import Generator
from src.discriminator import Discriminator
from src.cell import WithLossCellD, WithLossCellG
from src.dcgan import DCGAN
NORMALIZE_MEAN = 127.5
NORMALIZE_STD = 127.5
def save_imgs(gen_imgs, idx):
"""
Save images in 4 * 4 format when training on the modelarts
Inputs:
- **gen_imgs** (array) - Images generated by the generator.
- **idx** (int) - Training epoch.
"""
matplotlib.use('Agg')
for index in range(gen_imgs.shape[0]):
plt.subplot(4, 4, index + 1)
gen_imgs[index] = gen_imgs[index] * NORMALIZE_STD + NORMALIZE_MEAN
perm = (1, 2, 0)
show_imgs = np.transpose(gen_imgs[index], perm)
sdf = show_imgs.astype(int)
plt.imshow(sdf)
plt.axis("off")
plt.savefig("/cache/images/{}.png".format(idx))
def save_losses(G_losses_list, D_losses_list, idx):
"""
Save Loss visualization images when training on the modelarts
Inputs:
- **G_losses_list** (list) - Generator loss list.
- **D_losses_list** (list) - Discriminator loss list.
- **idx** (int) - Training epoch.
"""
plt.figure(figsize=(10, 5))
plt.title("Generator and Discriminator Loss During Training")
plt.plot(G_losses_list, label="G")
plt.plot(D_losses_list, label="D")
plt.xlabel("iterations")
plt.ylabel("Loss")
plt.legend()
plt.savefig("/cache/losses/{}.png".format(idx))
parser = argparse.ArgumentParser(description='MindSpore dcgan training')
parser.add_argument('--data_url', default=None,
help='Directory contains ImageNet-1k dataset.')
parser.add_argument('--train_url', default=None,
help='Directory of training output.')
parser.add_argument('--images_url', default=None,
help='Location of images outputs.')
parser.add_argument('--losses_url', default=None,
help='Location of losses outputs.')
parser.add_argument("--file_format", type=str,
default="AIR", help="Format of export file.")
parser.add_argument("--file_name", type=str,
default="dcgan", help="Output file name.")
parser.add_argument('--epoch_size', type=int,
default=cfg.epoch_size, help='Epoch size of training.')
args = parser.parse_args()
device_id = int(os.getenv('DEVICE_ID'))
device_num = int(os.getenv('RANK_SIZE'))
local_input_url = '/cache/data' + str(device_id)
local_output_url = '/cache/ckpt' + str(device_id)
local_images_url = '/cache/images'
local_losses_url = '/cache/losses'
context.set_context(mode=context.GRAPH_MODE,
device_target="Ascend", save_graphs=False)
context.set_context(device_id=device_id)
if device_num > 1:
init()
context.set_auto_parallel_context(device_num=device_num,
global_rank=device_id,
parallel_mode=ParallelMode.DATA_PARALLEL,
gradients_mean=True)
rank = get_rank()
else:
rank = 0
mox.file.copy_parallel(src_url=args.data_url, dst_url=local_input_url)
mox.file.copy_parallel(src_url=args.images_url, dst_url=local_images_url)
mox.file.copy_parallel(src_url=args.losses_url, dst_url=local_losses_url)
if __name__ == '__main__':
# Load Dataset
ds = create_dataset_imagenet(os.path.join(
local_input_url), num_parallel_workers=2)
steps_per_epoch = ds.get_dataset_size()
# Define Network
netD = Discriminator()
netG = Generator()
criterion = nn.BCELoss(reduction='mean')
netD_with_criterion = WithLossCellD(netD, netG, criterion)
netG_with_criterion = WithLossCellG(netD, netG, criterion)
optimizerD = nn.Adam(netD.trainable_params(),
learning_rate=cfg.learning_rate, beta1=cfg.beta1)
optimizerG = nn.Adam(netG.trainable_params(),
learning_rate=cfg.learning_rate, beta1=cfg.beta1)
myTrainOneStepCellForD = nn.TrainOneStepCell(
netD_with_criterion, optimizerD)
myTrainOneStepCellForG = nn.TrainOneStepCell(
netG_with_criterion, optimizerG)
dcgan = DCGAN(myTrainOneStepCellForD, myTrainOneStepCellForG)
dcgan.set_train()
# checkpoint save
ckpt_config = CheckpointConfig(save_checkpoint_steps=steps_per_epoch,
keep_checkpoint_max=args.epoch_size)
ckpt_cb = ModelCheckpoint(
config=ckpt_config, directory=local_output_url, prefix='dcgan')
cb_params = _InternalCallbackParam()
cb_params.train_network = netG
cb_params.batch_num = steps_per_epoch
cb_params.epoch_num = args.epoch_size
# For each epoch
cb_params.cur_epoch_num = 0
cb_params.cur_step_num = 0
run_context = RunContext(cb_params)
ckpt_cb.begin(run_context)
np.random.seed(1)
fixed_noise = Tensor(np.random.normal(
size=(16, cfg.latent_size, 1, 1)).astype("float32"))
data_loader = ds.create_dict_iterator(
output_numpy=True, num_epochs=args.epoch_size)
G_losses = []
D_losses = []
# Start Training Loop
print("Starting Training Loop...")
for epoch in range(args.epoch_size):
# For each batch in the dataloader
for i, data in enumerate(data_loader):
real_data = Tensor(data['image'])
latent_code = Tensor(data["latent_code"])
netD_loss, netG_loss = dcgan(real_data, latent_code)
if i % 50 == 0:
print("Date time: ", datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), "\tepoch: ", epoch, "/",
args.epoch_size, "\tstep: ", i, "/", steps_per_epoch, "\tDloss: ", netD_loss, "\tGloss: ",
netG_loss)
D_losses.append(netD_loss.asnumpy())
G_losses.append(netG_loss.asnumpy())
cb_params.cur_step_num = cb_params.cur_step_num + 1
cb_params.cur_epoch_num = cb_params.cur_epoch_num + 1
print("================saving model===================")
if device_id == 0:
ckpt_cb.step_end(run_context)
fake = netG(fixed_noise)
print("================saving images===================")
save_imgs(fake.asnumpy(), epoch + 1)
print("================saving losses===================")
save_losses(G_losses, D_losses, epoch + 1)
mox.file.copy_parallel(
src_url=local_images_url, dst_url=args.images_url)
mox.file.copy_parallel(
src_url=local_losses_url, dst_url=args.losses_url)
mox.file.copy_parallel(
src_url=local_output_url, dst_url=args.train_url)
print("================success================")
# export checkpoint file into air, onnx, mindir models
inputs = Tensor(np.random.rand(16, 100, 1, 1), mstype.float32)
export(netG, inputs, file_name=args.file_name,
file_format=args.file_format)
file_name = args.file_name + "." + args.file_format.lower()
mox.file.copy_parallel(
src_url=file_name, dst_url=os.path.join(args.train_url, file_name))
|
py | 1a477d9e921d8454c23317fc66a0eee92acb23e8 | __version__ = '0.3.0.dev'
|
py | 1a477eadeeb0b7b38e6c23b8d8df48152c509c47 | import pygame as py
import variables as v
class Button(py.sprite.Sprite):
def __init__(self, text, pos, size, normalcolour, hovercolour, font, ID, centred = False, bsize=(0,0)):
"""
Create a simple button.
Arguments:
text <str> -- the button's text
pos (x, y) -- the position of the button
size <int> -- the font size of the text
normalcolour (r, g, b) -- the colour of the button
hovercolour (r, g, b) -- the colour of the button when it is hovered
font <str> -- the font file to use (use None for default font)
ID <str|int> -- a unique identifier for this button
centred <bool> -- whether the origin of the button is its topleft corner or centre (default=False)
bsize (w, h) -- a set size for the button (default=(0, 0) - automatic)
"""
super().__init__()
self.ID = ID
self.hovered = False
self.text = text
self.pos = pos
self.hcolour = hovercolour
self.ncolour = normalcolour
self.font = font
self.font = py.font.Font(font, int(size)) #Creates a new font object using font file and font size
self.centred = centred
self.size = bsize
self.rend = self.font.render(self.text, True, (0,0,0)) #Creates a new surface with the text on
self.set_rect()
def update(self):
if self.hovered: #Changes the button colour if it is being hovered
colour = self.hcolour
else:
colour = self.ncolour
py.draw.rect(v.screen, colour, self.rect) #Draws a rectangle
v.screen.blit(self.rend, self.rect) #Blits the text onto the screen
if self.rect.collidepoint(py.mouse.get_pos()): #Detects if the mouse is over the button
self.hovered = True
else:
self.hovered = False
def set_rect(self): #Calculates the size and position of the button
self.rect = self.rend.get_rect()
if not self.centred:
self.rect.topleft = self.pos
if self.centred:
self.rect.center = self.pos
if not self.size[0] == 0:
self.rect.width = self.size[0]
if not self.size[1] == 0:
self.rect.height = self.size[1]
def pressed(self): #Detects if the button is pressed
for event in v.events:
if self.hovered:
if event.type == py.MOUSEBUTTONDOWN:
return True
return False
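# Minimal usage sketch for the Button class above (not part of the original
# module, and never called automatically). As the methods require, it assumes
# the `variables` module exposes `screen` and `events`; both are assigned here
# before use. Window size, colours and the button ID are illustrative only;
# the textLabel used for the caption is defined further down in this module.
def _button_demo():
    py.init()
    v.screen = py.display.set_mode((400, 300))
    clock = py.time.Clock()
    play = Button("Play", (200, 150), 32, (200, 200, 200), (255, 255, 0),
                  None, "play_button", centred=True)
    caption = textLabel("Demo menu", (200, 60), (255, 255, 255), None, 40, centred=True)
    running = True
    while running:
        v.events = py.event.get()
        for event in v.events:
            if event.type == py.QUIT:
                running = False
        v.screen.fill((30, 30, 30))
        caption.update()
        play.update()
        if play.pressed():
            print("Play button clicked")
        py.display.flip()
        clock.tick(60)
    py.quit()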
def fill_gradient(surface, color, gradient, rect=None, vertical=True, forward=True):
"""fill a surface with a gradient pattern
Parameters:
color (r, g, b) -- starting color
gradient (r, g, b) -- final color
rect <pygame.Rect> -- area to fill (default=Surface's rect)
vertical <bool> -- True=vertical, False=horizontal (default=True)
forward <bool> -> True=forward, False=reverse (default=True)
Pygame recipe: http://www.pygame.org/wiki/GradientCode
"""
if rect is None: rect = surface.get_rect()
x1,x2 = rect.left, rect.right
y1,y2 = rect.top, rect.bottom
if vertical: h = y2-y1
else: h = x2-x1
if forward: a, b = color, gradient
else: b, a = color, gradient
rate = (
float(b[0]-a[0])/h,
float(b[1]-a[1])/h,
float(b[2]-a[2])/h
)
fn_line = py.draw.line
if vertical:
for line in range(y1,y2):
color = (
min(max(a[0]+(rate[0]*(line-y1)),0),255),
min(max(a[1]+(rate[1]*(line-y1)),0),255),
min(max(a[2]+(rate[2]*(line-y1)),0),255)
)
fn_line(surface, color, (x1,line), (x2,line))
else:
for col in range(x1,x2):
color = (
min(max(a[0]+(rate[0]*(col-x1)),0),255),
min(max(a[1]+(rate[1]*(col-x1)),0),255),
min(max(a[2]+(rate[2]*(col-x1)),0),255)
)
fn_line(surface, color, (col,y1), (col,y2))
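# Small usage sketch for fill_gradient (not part of the original module): the
# gradient can be drawn onto any pygame Surface, for example as a background
# that the Button widget above is blitted on top of. Surface size and colours
# are illustrative only.
def _gradient_background(size=(400, 300)):
    surface = py.Surface(size)
    # Vertical fade from dark blue at the top to black at the bottom.
    fill_gradient(surface, (0, 0, 120), (0, 0, 0), vertical=True, forward=True)
    return surface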
class textLabel(py.sprite.Sprite):
def __init__(self, text, pos, colour, font, size, centred=False):
"""
Create a simple text label.
Arguments:
text <str> -- the label's text
pos (x, y) -- the position of the text
size <int> -- the font size of the text
colour (r, g, b) -- the colour of the text
font <str> -- the font file to use (use None for default font)
centred <bool> -- whether the origin of the text is its topleft corner or centre (default=False)
"""
super().__init__()
self.text = text
self.pos = pos
self.colour = colour
self.font = font
self.size = size
self.centred = centred
def update(self):
pos = self.pos
font = py.font.Font(self.font, self.size) #Creates a new font with given file and size
label = font.render(self.text, 1, self.colour) #Renders given text with font
if self.centred:
#Centres text
pos = list(self.pos)
pos[0] -= font.size(self.text)[0] / 2
pos[1] -= font.size(self.text)[1] / 2
pos = tuple(pos)
        v.screen.blit(label, pos) #Blits label to screen
|
py | 1a477f96302b2331710b125429e98d1339f88fcb | from passes import *
fileName = input("Input file address") # prompt the user for the input file path
try:
    file = open(fileName, 'r')
except FileNotFoundError:  # open() raises FileNotFoundError, not NameError
    print("No File Found, Kindly Retry")
    exit()
text = file.read() # read from the file
text = text.split('\n') # split them
if passOne(text) == 0:
    ErrorFlag = True # pass one failed (Stop command missing), so flag an error
ErrorList.append("Stop Command not found") # error successfully added to the error list
else:
variableAddress_counter = 0
for i in symbol_Table:
if i['isFound'] == False: # if isFound is false then there is an error
ErrorFlag = True # make error flag True
ErrorList.append('error- Symbol Address not Defined: '+ i['name']) # successfully added to error list
elif i['isUsed'] == False: # if isUsed is false then there is an error
ErrorFlag = True
ErrorList.append('error- Symbol Defined But Not Used: '+ i['name']) # successfully added to error list
        elif i['variableAddress'] == -1: # count symbols with a missing variableAddress; more than one is an error
if variableAddress_counter == 0:
variableAddress_counter += 1
elif variableAddress_counter >= 1:
ErrorFlag = True
ErrorList.append('error - more than one symbol with variableAddress missing') # successfully added to error list
if i['variableAddress']>=256:
ErrorFlag = True
ErrorList.append("Address more than 256 bits") # successfully added to error list
f_symboltable = open('Symboltable.txt', 'w')
print(symbol_Table) # print symbol table and write them in file
for i in symbol_Table:
f_symboltable.write(i['name'] + " " + str(i['variableAddress']) + '\n') # append the symbol in table
f_symboltable.close()
f_output = open("Output.txt", 'w') # open the file in write mode
f_error = open('Errorfile.txt', 'w') # open the file in write mode
if ErrorFlag:
for err in ErrorList: # here we are printing all the error and write and close the file
print(err)
f_error.write(err + '\n') # write in the error file
else:
passTwo() # call pass2
if len(ErrorListPass2)>0:
for err in ErrorListPass2:
print(err)
f_error.write(err +'\n') # write in the error file
else:
for i in finalOutput: # else print the final output
if i != "":
print(i)
f_output.write(i+'\n') # write in the output file
f_error.close() # close the file
f_output.close() # close the file
|
py | 1a4780ac36ed40884d22340873727a5cd8624382 | import datetime
from io import BytesIO
import os
import shutil
import numpy as np
import pytest
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.testing import _has_tex_package, _check_for_pgf
from matplotlib.testing.compare import compare_images, ImageComparisonFailure
from matplotlib.backends.backend_pgf import PdfPages, common_texification
from matplotlib.testing.decorators import (_image_directories,
check_figures_equal,
image_comparison)
baseline_dir, result_dir = _image_directories(lambda: 'dummy func')
needs_xelatex = pytest.mark.skipif(not _check_for_pgf('xelatex'),
reason='xelatex + pgf is required')
needs_pdflatex = pytest.mark.skipif(not _check_for_pgf('pdflatex'),
reason='pdflatex + pgf is required')
needs_lualatex = pytest.mark.skipif(not _check_for_pgf('lualatex'),
reason='lualatex + pgf is required')
needs_ghostscript = pytest.mark.skipif(
"eps" not in mpl.testing.compare.converter,
reason="This test needs a ghostscript installation")
def compare_figure(fname, savefig_kwargs={}, tol=0):
actual = os.path.join(result_dir, fname)
plt.savefig(actual, **savefig_kwargs)
expected = os.path.join(result_dir, "expected_%s" % fname)
shutil.copyfile(os.path.join(baseline_dir, fname), expected)
err = compare_images(expected, actual, tol=tol)
if err:
raise ImageComparisonFailure(err)
def create_figure():
plt.figure()
x = np.linspace(0, 1, 15)
# line plot
plt.plot(x, x ** 2, "b-")
# marker
plt.plot(x, 1 - x**2, "g>")
# filled paths and patterns
plt.fill_between([0., .4], [.4, 0.], hatch='//', facecolor="lightgray",
edgecolor="red")
plt.fill([3, 3, .8, .8, 3], [2, -2, -2, 0, 2], "b")
# text and typesetting
plt.plot([0.9], [0.5], "ro", markersize=3)
plt.text(0.9, 0.5, 'unicode (ü, °, µ) and math ($\\mu_i = x_i^2$)',
ha='right', fontsize=20)
plt.ylabel('sans-serif, blue, $\\frac{\\sqrt{x}}{y^2}$..',
family='sans-serif', color='blue')
plt.xlim(0, 1)
plt.ylim(0, 1)
@pytest.mark.parametrize('plain_text, escaped_text', [
(r'quad_sum: $\sum x_i^2$', r'quad\_sum: \(\displaystyle \sum x_i^2\)'),
(r'no \$splits \$ here', r'no \$splits \$ here'),
('with_underscores', r'with\_underscores'),
('% not a comment', r'\% not a comment'),
('^not', r'\^not'),
])
def test_common_texification(plain_text, escaped_text):
assert common_texification(plain_text) == escaped_text
# test compiling a figure to pdf with xelatex
@needs_xelatex
@pytest.mark.backend('pgf')
@image_comparison(['pgf_xelatex.pdf'], style='default')
def test_xelatex():
rc_xelatex = {'font.family': 'serif',
'pgf.rcfonts': False}
mpl.rcParams.update(rc_xelatex)
create_figure()
# test compiling a figure to pdf with pdflatex
@needs_pdflatex
@pytest.mark.skipif(not _has_tex_package('ucs'), reason='needs ucs.sty')
@pytest.mark.backend('pgf')
@image_comparison(['pgf_pdflatex.pdf'], style='default')
def test_pdflatex():
if os.environ.get('APPVEYOR'):
pytest.xfail("pdflatex test does not work on appveyor due to missing "
"LaTeX fonts")
rc_pdflatex = {'font.family': 'serif',
'pgf.rcfonts': False,
'pgf.texsystem': 'pdflatex',
'pgf.preamble': ('\\usepackage[utf8x]{inputenc}'
'\\usepackage[T1]{fontenc}')}
mpl.rcParams.update(rc_pdflatex)
create_figure()
# test updating the rc parameters for each figure
@needs_xelatex
@needs_pdflatex
@pytest.mark.style('default')
@pytest.mark.backend('pgf')
def test_rcupdate():
rc_sets = [{'font.family': 'sans-serif',
'font.size': 30,
'figure.subplot.left': .2,
'lines.markersize': 10,
'pgf.rcfonts': False,
'pgf.texsystem': 'xelatex'},
{'font.family': 'monospace',
'font.size': 10,
'figure.subplot.left': .1,
'lines.markersize': 20,
'pgf.rcfonts': False,
'pgf.texsystem': 'pdflatex',
'pgf.preamble': ('\\usepackage[utf8x]{inputenc}'
'\\usepackage[T1]{fontenc}'
'\\usepackage{sfmath}')}]
tol = [6, 0]
for i, rc_set in enumerate(rc_sets):
with mpl.rc_context(rc_set):
for substring, pkg in [('sfmath', 'sfmath'), ('utf8x', 'ucs')]:
if (substring in mpl.rcParams['pgf.preamble']
and not _has_tex_package(pkg)):
pytest.skip(f'needs {pkg}.sty')
create_figure()
compare_figure('pgf_rcupdate%d.pdf' % (i + 1), tol=tol[i])
# test backend-side clipping, since large numbers are not supported by TeX
@needs_xelatex
@pytest.mark.style('default')
@pytest.mark.backend('pgf')
def test_pathclip():
mpl.rcParams.update({'font.family': 'serif', 'pgf.rcfonts': False})
plt.plot([0., 1e100], [0., 1e100])
plt.xlim(0, 1)
plt.ylim(0, 1)
plt.savefig(BytesIO(), format="pdf") # No image comparison.
# test mixed mode rendering
@needs_xelatex
@pytest.mark.backend('pgf')
@image_comparison(['pgf_mixedmode.pdf'], style='default')
def test_mixedmode():
mpl.rcParams.update({'font.family': 'serif', 'pgf.rcfonts': False})
Y, X = np.ogrid[-1:1:40j, -1:1:40j]
plt.pcolor(X**2 + Y**2).set_rasterized(True)
# test bbox_inches clipping
@needs_xelatex
@pytest.mark.style('default')
@pytest.mark.backend('pgf')
def test_bbox_inches():
mpl.rcParams.update({'font.family': 'serif', 'pgf.rcfonts': False})
fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.plot(range(5))
ax2.plot(range(5))
plt.tight_layout()
bbox = ax1.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
compare_figure('pgf_bbox_inches.pdf', savefig_kwargs={'bbox_inches': bbox},
tol=0)
@pytest.mark.style('default')
@pytest.mark.backend('pgf')
@pytest.mark.parametrize('system', [
pytest.param('lualatex', marks=[needs_lualatex]),
pytest.param('pdflatex', marks=[needs_pdflatex]),
pytest.param('xelatex', marks=[needs_xelatex]),
])
def test_pdf_pages(system):
rc_pdflatex = {
'font.family': 'serif',
'pgf.rcfonts': False,
'pgf.texsystem': system,
}
mpl.rcParams.update(rc_pdflatex)
fig1, ax1 = plt.subplots()
ax1.plot(range(5))
fig1.tight_layout()
fig2, ax2 = plt.subplots(figsize=(3, 2))
ax2.plot(range(5))
fig2.tight_layout()
path = os.path.join(result_dir, f'pdfpages_{system}.pdf')
md = {
'Author': 'me',
'Title': 'Multipage PDF with pgf',
'Subject': 'Test page',
'Keywords': 'test,pdf,multipage',
'ModDate': datetime.datetime(
1968, 8, 1, tzinfo=datetime.timezone(datetime.timedelta(0))),
'Trapped': 'Unknown'
}
with PdfPages(path, metadata=md) as pdf:
pdf.savefig(fig1)
pdf.savefig(fig2)
pdf.savefig(fig1)
assert pdf.get_pagecount() == 3
@pytest.mark.style('default')
@pytest.mark.backend('pgf')
@pytest.mark.parametrize('system', [
pytest.param('lualatex', marks=[needs_lualatex]),
pytest.param('pdflatex', marks=[needs_pdflatex]),
pytest.param('xelatex', marks=[needs_xelatex]),
])
def test_pdf_pages_metadata_check(monkeypatch, system):
# Basically the same as test_pdf_pages, but we keep it separate to leave
# pikepdf as an optional dependency.
pikepdf = pytest.importorskip('pikepdf')
monkeypatch.setenv('SOURCE_DATE_EPOCH', '0')
mpl.rcParams.update({'pgf.texsystem': system})
fig, ax = plt.subplots()
ax.plot(range(5))
md = {
'Author': 'me',
'Title': 'Multipage PDF with pgf',
'Subject': 'Test page',
'Keywords': 'test,pdf,multipage',
'ModDate': datetime.datetime(
1968, 8, 1, tzinfo=datetime.timezone(datetime.timedelta(0))),
'Trapped': 'True'
}
path = os.path.join(result_dir, f'pdfpages_meta_check_{system}.pdf')
with PdfPages(path, metadata=md) as pdf:
pdf.savefig(fig)
with pikepdf.Pdf.open(path) as pdf:
info = {k: str(v) for k, v in pdf.docinfo.items()}
# Not set by us, so don't bother checking.
if '/PTEX.FullBanner' in info:
del info['/PTEX.FullBanner']
if '/PTEX.Fullbanner' in info:
del info['/PTEX.Fullbanner']
assert info == {
'/Author': 'me',
'/CreationDate': 'D:19700101000000Z',
'/Creator': f'Matplotlib v{mpl.__version__}, https://matplotlib.org',
'/Keywords': 'test,pdf,multipage',
'/ModDate': 'D:19680801000000Z',
'/Producer': f'Matplotlib pgf backend v{mpl.__version__}',
'/Subject': 'Test page',
'/Title': 'Multipage PDF with pgf',
'/Trapped': '/True',
}
@needs_xelatex
def test_tex_restart_after_error():
fig = plt.figure()
fig.suptitle(r"\oops")
with pytest.raises(ValueError):
fig.savefig(BytesIO(), format="pgf")
fig = plt.figure() # start from scratch
fig.suptitle(r"this is ok")
fig.savefig(BytesIO(), format="pgf")
@needs_xelatex
def test_bbox_inches_tight():
fig, ax = plt.subplots()
ax.imshow([[0, 1], [2, 3]])
fig.savefig(BytesIO(), format="pdf", backend="pgf", bbox_inches="tight")
@needs_xelatex
@needs_ghostscript
def test_png():
# Just a smoketest.
fig, ax = plt.subplots()
fig.savefig(BytesIO(), format="png", backend="pgf")
@needs_xelatex
def test_unknown_font(caplog):
with caplog.at_level("WARNING"):
mpl.rcParams["font.family"] = "this-font-does-not-exist"
plt.figtext(.5, .5, "hello, world")
plt.savefig(BytesIO(), format="pgf")
assert "Ignoring unknown font: this-font-does-not-exist" in [
r.getMessage() for r in caplog.records]
@check_figures_equal(extensions=["pdf"])
@pytest.mark.parametrize("texsystem", ("pdflatex", "xelatex", "lualatex"))
@pytest.mark.backend("pgf")
def test_minus_signs_with_tex(fig_test, fig_ref, texsystem):
if not _check_for_pgf(texsystem):
pytest.skip(texsystem + ' + pgf is required')
mpl.rcParams["pgf.texsystem"] = texsystem
fig_test.text(.5, .5, "$-1$")
fig_ref.text(.5, .5, "$\N{MINUS SIGN}1$")
|
py | 1a47823a42cfc9d0acf882dc12eb144a80106f7e | #!/usr/bin/env python
"""Test the grr aff4 objects."""
import hashlib
import io
import time
from builtins import range # pylint: disable=redefined-builtin
import mock
from grr_response_core.lib import flags
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs
from grr_response_core.lib.rdfvalues import client_network as rdf_client_network
from grr_response_core.lib.rdfvalues import cloud as rdf_cloud
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_server import aff4
from grr_response_server import data_store
from grr_response_server import events
from grr_response_server.aff4_objects import aff4_grr
from grr_response_server.flows.general import transfer
from grr.test_lib import action_mocks
from grr.test_lib import aff4_test_lib
from grr.test_lib import flow_test_lib
from grr.test_lib import test_lib
class MockChangeEvent(events.EventListener):
EVENTS = ["MockChangeEvent"]
CHANGED_URNS = []
def ProcessMessages(self, msgs=None, token=None):
MockChangeEvent.CHANGED_URNS.extend(msgs)
class AFF4GRRTest(aff4_test_lib.AFF4ObjectTest):
"""Test the client aff4 implementation."""
def setUp(self):
super(AFF4GRRTest, self).setUp()
MockChangeEvent.CHANGED_URNS = []
def testAFF4Path(self):
"""Test the pathspec to URN conversion function."""
pathspec = rdf_paths.PathSpec(
path="\\\\.\\Volume{1234}\\",
pathtype=rdf_paths.PathSpec.PathType.OS,
mount_point="/c:/").Append(
path="/windows", pathtype=rdf_paths.PathSpec.PathType.TSK)
urn = pathspec.AFF4Path(rdf_client.ClientURN("C.1234567812345678"))
self.assertEqual(
urn,
rdfvalue.RDFURN(
r"aff4:/C.1234567812345678/fs/tsk/\\.\Volume{1234}\/windows"))
# Test an ADS
pathspec = rdf_paths.PathSpec(
path="\\\\.\\Volume{1234}\\",
pathtype=rdf_paths.PathSpec.PathType.OS,
mount_point="/c:/").Append(
pathtype=rdf_paths.PathSpec.PathType.TSK,
path="/Test Directory/notes.txt:ads",
inode=66,
ntfs_type=128,
ntfs_id=2)
urn = pathspec.AFF4Path(rdf_client.ClientURN("C.1234567812345678"))
self.assertEqual(
urn,
rdfvalue.RDFURN(r"aff4:/C.1234567812345678/fs/tsk/\\.\Volume{1234}\/"
"Test Directory/notes.txt:ads"))
def testClientSubfieldGet(self):
"""Test we can get subfields of the client."""
fd = aff4.FACTORY.Create(
"C.0000000000000000", aff4_grr.VFSGRRClient, token=self.token)
kb = fd.Schema.KNOWLEDGE_BASE()
for i in range(5):
kb.users.Append(rdf_client.User(username="user%s" % i))
fd.Set(kb)
fd.Close()
fd = aff4.FACTORY.Open(
"C.0000000000000000", aff4_grr.VFSGRRClient, token=self.token)
for i, user in enumerate(fd.Get(fd.Schema.KNOWLEDGE_BASE).users):
self.assertEqual(user.username, "user%s" % i)
def testVFSFileContentLastNotUpdated(self):
"""Make sure CONTENT_LAST does not update when only STAT is written.."""
path = "/C.12345/contentlastchecker"
timestamp = 1
with utils.Stubber(time, "time", lambda: timestamp):
fd = aff4.FACTORY.Create(
path, aff4_grr.VFSFile, mode="w", token=self.token)
timestamp += 1
fd.SetChunksize(10)
# Make lots of small writes - The length of this string and the chunk size
# are relative primes for worst case.
for i in range(100):
fd.Write("%s%08X\n" % ("Test", i))
# Flush after every write.
fd.Flush()
# And advance the time.
timestamp += 1
fd.Set(fd.Schema.STAT, rdf_client_fs.StatEntry())
fd.Close()
fd = aff4.FACTORY.Open(path, mode="rw", token=self.token)
# Make sure the attribute was written when the write occured.
self.assertEqual(int(fd.GetContentAge()), 101000000)
# Write the stat (to be the same as before, but this still counts
# as a write).
fd.Set(fd.Schema.STAT, fd.Get(fd.Schema.STAT))
fd.Flush()
fd = aff4.FACTORY.Open(path, token=self.token)
# The age of the content should still be the same.
self.assertEqual(int(fd.GetContentAge()), 101000000)
def testVFSFileStartsOnlyOneMultiGetFileFlowOnUpdate(self):
"""File updates should only start one MultiGetFile at any point in time."""
client_id = self.SetupClient(0)
# We need to create a file path having a pathspec.
path = "fs/os/c/bin/bash"
with aff4.FACTORY.Create(
client_id.Add(path),
aff4_type=aff4_grr.VFSFile,
mode="rw",
token=self.token) as file_fd:
file_fd.Set(
file_fd.Schema.STAT,
rdf_client_fs.StatEntry(
pathspec=rdf_paths.PathSpec(path="/bin/bash", pathtype="OS")))
# Starts a MultiGetFile flow.
file_fd.Update()
# Check that there is exactly one flow on the client.
flows_fd = aff4.FACTORY.Open(client_id.Add("flows"), token=self.token)
flows = list(flows_fd.ListChildren())
self.assertEqual(len(flows), 1)
# The flow is the MultiGetFile flow holding the lock on the file.
flow_obj = aff4.FACTORY.Open(flows[0], token=self.token)
self.assertEqual(
flow_obj.Get(flow_obj.Schema.TYPE), transfer.MultiGetFile.__name__)
self.assertEqual(flow_obj.urn, file_fd.Get(file_fd.Schema.CONTENT_LOCK))
# Since there is already a running flow having the lock on the file,
# this call shouldn't do anything.
file_fd.Update()
# There should still be only one flow on the client.
flows_fd = aff4.FACTORY.Open(client_id.Add("flows"), token=self.token)
flows = list(flows_fd.ListChildren())
self.assertEqual(len(flows), 1)
def testVFSFileStartsNewMultiGetFileWhenLockingFlowHasFinished(self):
"""A new MultiFileGet can be started when the locking flow has finished."""
client_id = self.SetupClient(0)
path = "fs/os/c/bin/bash"
with aff4.FACTORY.Create(
client_id.Add(path),
aff4_type=aff4_grr.VFSFile,
mode="rw",
token=self.token) as file_fd:
file_fd.Set(
file_fd.Schema.STAT,
rdf_client_fs.StatEntry(
pathspec=rdf_paths.PathSpec(path="/bin/bash", pathtype="OS")))
# Starts a MultiGetFile flow.
first_update_flow_urn = file_fd.Update()
# Check that there is exactly one flow on the client.
flows_fd = aff4.FACTORY.Open(client_id.Add("flows"), token=self.token)
flows = list(flows_fd.ListChildren())
self.assertEqual(len(flows), 1)
# Finish the flow holding the lock.
client_mock = action_mocks.ActionMock()
flow_test_lib.TestFlowHelper(
flows[0], client_mock, client_id=client_id, token=self.token)
# The flow holding the lock has finished, so Update() should start a new
# flow.
second_update_flow_urn = file_fd.Update()
# There should be two flows now.
flows_fd = aff4.FACTORY.Open(client_id.Add("flows"), token=self.token)
flows = list(flows_fd.ListChildren())
self.assertEqual(len(flows), 2)
# Make sure that each Update() started a new flow and that the second flow
# is holding the lock.
self.assertNotEqual(first_update_flow_urn, second_update_flow_urn)
self.assertEqual(second_update_flow_urn,
file_fd.Get(file_fd.Schema.CONTENT_LOCK))
def testGetClientSummary(self):
hostname = "test"
system = "Linux"
os_release = "12.02"
kernel = "3.15-rc2"
fqdn = "test.test.com"
arch = "amd64"
install_time = rdfvalue.RDFDatetime.Now()
user = "testuser"
userobj = rdf_client.User(username=user)
interface = rdf_client_network.Interface(ifname="eth0")
google_cloud_instance = rdf_cloud.GoogleCloudInstance(
instance_id="1771384456894610289",
zone="projects/123456789733/zones/us-central1-a",
project_id="myproject",
unique_id="us-central1-a/myproject/1771384456894610289")
cloud_instance = rdf_cloud.CloudInstance(
cloud_type="GOOGLE", google=google_cloud_instance)
serial_number = "DSD33679FZ"
system_manufacturer = "Foobar Inc."
system_uuid = "C31292AD-6Z4F-55D8-28AC-EC1100E42222"
hwinfo = rdf_client.HardwareInfo(
serial_number=serial_number,
system_manufacturer=system_manufacturer,
system_uuid=system_uuid)
timestamp = 1
with utils.Stubber(time, "time", lambda: timestamp):
with aff4.FACTORY.Create(
"C.0000000000000000",
aff4_grr.VFSGRRClient,
mode="rw",
token=self.token) as fd:
kb = rdf_client.KnowledgeBase()
kb.users.Append(userobj)
empty_summary = fd.GetSummary()
self.assertEqual(empty_summary.client_id, "C.0000000000000000")
self.assertFalse(empty_summary.system_info.version)
self.assertEqual(empty_summary.timestamp.AsSecondsSinceEpoch(), 1)
# This will cause TYPE to be written with current time = 101 when the
# object is closed
timestamp += 100
fd.Set(fd.Schema.HOSTNAME(hostname))
fd.Set(fd.Schema.SYSTEM(system))
fd.Set(fd.Schema.OS_RELEASE(os_release))
fd.Set(fd.Schema.KERNEL(kernel))
fd.Set(fd.Schema.FQDN(fqdn))
fd.Set(fd.Schema.ARCH(arch))
fd.Set(fd.Schema.INSTALL_DATE(install_time))
fd.Set(fd.Schema.KNOWLEDGE_BASE(kb))
fd.Set(fd.Schema.USERNAMES(user))
fd.Set(fd.Schema.HARDWARE_INFO(hwinfo))
fd.Set(fd.Schema.INTERFACES([interface]))
fd.Set(fd.Schema.CLOUD_INSTANCE(cloud_instance))
with aff4.FACTORY.Open(
"C.0000000000000000",
aff4_grr.VFSGRRClient,
mode="rw",
token=self.token) as fd:
summary = fd.GetSummary()
self.assertEqual(summary.system_info.system, system)
self.assertEqual(summary.system_info.release, os_release)
self.assertEqual(summary.system_info.kernel, kernel)
self.assertEqual(summary.system_info.fqdn, fqdn)
self.assertEqual(summary.system_info.machine, arch)
self.assertEqual(summary.system_info.install_date, install_time)
self.assertItemsEqual(summary.users, [userobj])
self.assertItemsEqual(summary.interfaces, [interface])
self.assertFalse(summary.client_info)
self.assertEqual(summary.timestamp.AsSecondsSinceEpoch(), 101)
self.assertEqual(summary.cloud_type, "GOOGLE")
self.assertEqual(summary.cloud_instance_id,
"us-central1-a/myproject/1771384456894610289")
self.assertEqual(summary.serial_number, serial_number)
self.assertEqual(summary.system_manufacturer, system_manufacturer)
self.assertEqual(summary.system_uuid, system_uuid)
def StoreBlobStub(blob, token=None):
del token # Unused.
return hashlib.sha256(blob).hexdigest()
class BlobImageTest(aff4_test_lib.AFF4ObjectTest):
"""Tests for cron functionality."""
def testAppendContentError(self):
src_content = b"ABCD" * 10
src_fd = io.BytesIO(src_content)
dest_fd = aff4.FACTORY.Create(
aff4.ROOT_URN.Add("temp"),
aff4_grr.VFSBlobImage,
token=self.token,
mode="rw")
dest_fd.SetChunksize(7)
dest_fd.AppendContent(src_fd)
dest_fd.Seek(0)
self.assertEqual(dest_fd.Read(5000), src_content)
src_fd.seek(0)
self.assertRaises(IOError, dest_fd.AppendContent, src_fd)
def testAppendContent(self):
"""Test writing content where content length % chunksize == 0."""
src_content = b"ABCDEFG" * 10 # 10 chunksize blobs
src_fd = io.BytesIO(src_content)
dest_fd = aff4.FACTORY.Create(
aff4.ROOT_URN.Add("temp"),
aff4_grr.VFSBlobImage,
token=self.token,
mode="rw")
self.assertEqual(dest_fd.Get(dest_fd.Schema.HASHES), None)
dest_fd.SetChunksize(7)
dest_fd.AppendContent(src_fd)
self.assertEqual(int(dest_fd.Get(dest_fd.Schema.SIZE)), len(src_content))
self.assertTrue(dest_fd.Get(dest_fd.Schema.HASHES))
dest_fd.Seek(0)
self.assertEqual(dest_fd.Read(5000), src_content)
src_fd.seek(0)
dest_fd.AppendContent(src_fd)
self.assertEqual(dest_fd.size, 2 * len(src_content))
self.assertEqual(
int(dest_fd.Get(dest_fd.Schema.SIZE)), 2 * len(src_content))
dest_fd.Seek(0)
self.assertEqual(dest_fd.Read(5000), src_content + src_content)
def testMultiStreamStreamsSingleFileWithSingleChunk(self):
with aff4.FACTORY.Create(
"aff4:/foo", aff4_type=aff4_grr.VFSBlobImage, token=self.token) as fd:
fd.SetChunksize(10)
fd.AppendContent(io.BytesIO(b"123456789"))
fd = aff4.FACTORY.Open("aff4:/foo", token=self.token)
chunks_fds = list(aff4.AFF4Stream.MultiStream([fd]))
self.assertEqual(len(chunks_fds), 1)
self.assertEqual(chunks_fds[0][1], b"123456789")
self.assertIs(chunks_fds[0][0], fd)
  def testMultiStreamStreamsSingleFileWithTwoChunks(self):
with aff4.FACTORY.Create(
"aff4:/foo", aff4_type=aff4_grr.VFSBlobImage, token=self.token) as fd:
fd.SetChunksize(10)
fd.AppendContent(io.BytesIO(b"123456789"))
with aff4.FACTORY.Create(
"aff4:/bar", aff4_type=aff4_grr.VFSBlobImage, token=self.token) as fd:
fd.SetChunksize(10)
fd.AppendContent(io.BytesIO(b"abcd"))
fd1 = aff4.FACTORY.Open("aff4:/foo", token=self.token)
fd2 = aff4.FACTORY.Open("aff4:/bar", token=self.token)
chunks_fds = list(aff4.AFF4Stream.MultiStream([fd1, fd2]))
self.assertEqual(len(chunks_fds), 2)
self.assertEqual(chunks_fds[0][1], b"123456789")
self.assertIs(chunks_fds[0][0], fd1)
self.assertEqual(chunks_fds[1][1], b"abcd")
self.assertIs(chunks_fds[1][0], fd2)
def testMultiStreamStreamsTwoFilesWithTwoChunksInEach(self):
with aff4.FACTORY.Create(
"aff4:/foo", aff4_type=aff4_grr.VFSBlobImage, token=self.token) as fd:
fd.SetChunksize(10)
fd.AppendContent(io.BytesIO(b"*" * 10 + b"123456789"))
with aff4.FACTORY.Create(
"aff4:/bar", aff4_type=aff4_grr.VFSBlobImage, token=self.token) as fd:
fd.SetChunksize(10)
fd.AppendContent(io.BytesIO(b"*" * 10 + b"abcd"))
fd1 = aff4.FACTORY.Open("aff4:/foo", token=self.token)
fd2 = aff4.FACTORY.Open("aff4:/bar", token=self.token)
chunks_fds = list(aff4.AFF4Stream.MultiStream([fd1, fd2]))
self.assertEqual(len(chunks_fds), 4)
self.assertEqual(chunks_fds[0][1], b"*" * 10)
self.assertIs(chunks_fds[0][0], fd1)
self.assertEqual(chunks_fds[1][1], b"123456789")
self.assertIs(chunks_fds[1][0], fd1)
self.assertEqual(chunks_fds[2][1], b"*" * 10)
self.assertIs(chunks_fds[2][0], fd2)
self.assertEqual(chunks_fds[3][1], b"abcd")
self.assertIs(chunks_fds[3][0], fd2)
def testMultiStreamReturnsExceptionIfChunkIsMissing(self):
with aff4.FACTORY.Create(
"aff4:/foo", aff4_type=aff4_grr.VFSBlobImage, token=self.token) as fd:
fd.SetChunksize(10)
# Patching StoreBlob prevents the blobs from actually being written.
with mock.patch.object(
data_store.DB, "StoreBlob", side_effect=StoreBlobStub):
fd.AppendContent(io.BytesIO(b"123456789"))
fd.index.seek(0)
blob_id = fd.index.read(fd._HASH_SIZE).encode("hex")
fd = aff4.FACTORY.Open("aff4:/foo", token=self.token)
returned_fd, _, e = list(aff4.AFF4Stream.MultiStream([fd]))[0]
self.assertNotEqual(e, None)
self.assertEqual(returned_fd, fd)
self.assertEqual(e.missing_chunks, [blob_id])
def testMultiStreamIgnoresTheFileIfAnyChunkIsMissingInReadAheadChunks(self):
with aff4.FACTORY.Create(
"aff4:/foo", aff4_type=aff4_grr.VFSBlobImage, token=self.token) as fd:
fd.SetChunksize(10)
fd.AppendContent(io.BytesIO(b"*" * 10))
# Patching StoreBlob prevents the blobs from actually being written.
with mock.patch.object(
data_store.DB, "StoreBlob", side_effect=StoreBlobStub):
fd.AppendContent(io.BytesIO(b"123456789"))
fd = aff4.FACTORY.Open("aff4:/foo", token=self.token)
count = 0
for _, _, e in aff4.AFF4Stream.MultiStream([fd]):
if not e:
count += 1
self.assertEqual(count, 0)
@mock.patch.object(aff4_grr.VFSBlobImage, "MULTI_STREAM_CHUNKS_READ_AHEAD", 1)
def testMultiStreamTruncatesBigFileIfLastChunkIsMissing(self):
# If the file is split between 2 batches of chunks, and the missing
# chunk is in the second batch, the first batch will be succesfully
# yielded.
with aff4.FACTORY.Create(
"aff4:/foo", aff4_type=aff4_grr.VFSBlobImage, token=self.token) as fd:
fd.SetChunksize(10)
fd.AppendContent(io.BytesIO(b"*" * 10))
# Patching StoreBlob prevents the blobs from actually being written.
with mock.patch.object(
data_store.DB, "StoreBlob", side_effect=StoreBlobStub):
fd.AppendContent(io.BytesIO(b"123456789"))
fd = aff4.FACTORY.Open("aff4:/foo", token=self.token)
content = []
error_detected = False
for fd, chunk, e in aff4.AFF4Stream.MultiStream([fd]):
if not e:
content.append(chunk)
else:
error_detected = True
self.assertEqual(content, [b"*" * 10])
self.assertTrue(error_detected)
@mock.patch.object(aff4_grr.VFSBlobImage, "MULTI_STREAM_CHUNKS_READ_AHEAD", 1)
def testMultiStreamSkipsBigFileIfFirstChunkIsMissing(self):
# If the file is split between 2 batches of chunks, and the missing
# chunk is in the first batch, the file will be skipped entirely.
with aff4.FACTORY.Create(
"aff4:/foo", aff4_type=aff4_grr.VFSBlobImage, token=self.token) as fd:
fd.SetChunksize(10)
# Patching StoreBlob prevents the blobs from actually being written.
with mock.patch.object(
data_store.DB, "StoreBlob", side_effect=StoreBlobStub):
fd.AppendContent(io.BytesIO(b"*" * 10))
fd.AppendContent(io.BytesIO(b"123456789"))
fd = aff4.FACTORY.Open("aff4:/foo", token=self.token)
count = 0
for _, _, e in aff4.AFF4Stream.MultiStream([fd]):
if not e:
count += 1
self.assertEqual(count, 0)
def main(argv):
# Run the full test suite
test_lib.main(argv)
if __name__ == "__main__":
flags.StartMain(main)
|
py | 1a4782527a427aa5ae4ab8de0c9609c248dfc1b9 | from scramp.core import (
ScramClient, ScramException, ScramMechanism, make_channel_binding)
__all__ = ["ScramClient", "ScramMechanism", "ScramException", "make_channel_binding"]
|
py | 1a47829cd021f190d63b50c9ad5c932ec28e3e86 | import setuptools
from src.ptth import __version__ as version
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="post-tonal-theory-helper-mbmasuda",
version=version,
author="Mari Masuda",
author_email="[email protected]",
description="Post-tonal music theory analysis functions",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/mbmasuda/post-tonal-theory-helper",
packages=setuptools.find_packages('src'),
package_dir={'':'src'},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
|
py | 1a4784625b00055534d173b1c2b4d713e5baeb47 | """ AIPS STar table
Due to the funky nature of the AIPS STar table it cannot be made in the usual
Obit fashion. This class allows doing this from python.
Symbol type codes
1: Plus sign (default) 12: Five pointed star
2: Cross (X) 13: Star of David
3: Circle 14: Seven-pointed star
4: Box 15: Eight-pointed star
5: Triangle 16: Nine-pointed star
6: Diamond 17: Ten-pointed star
7: Pentagon 18: 11-pointed star
8: Hexagon 19: 12-pointed star
9: Septagon 20: 13-pointed star
10: Octagon 21: 14-pointed star
11: Nine-gon 22: Plus with gap
23: Vertical line
24: Cross (X) with gap
"""
# $Id$
#-----------------------------------------------------------------------
# Copyright (C) 2007,2019
# Associated Universities, Inc. Washington DC, USA.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the Free
# Software Foundation, Inc., 675 Massachusetts Ave, Cambridge,
# MA 02139, USA.
#
# Correspondence concerning this software should be addressed as follows:
# Internet email: [email protected].
# Postal address: William Cotton
# National Radio Astronomy Observatory
# 520 Edgemont Road
# Charlottesville, VA 22903-2475 USA
#-----------------------------------------------------------------------
from __future__ import absolute_import
import Obit, Table, TableDesc, OErr, Image, ImageDesc
class TableSTar(Table.Table):
pass
# end class TableSTar
# Data type codes
OBIT_double = 10
OBIT_float = 9
OBIT_string = 13
OBIT_int = 2
# Non class functions
def PCreate(im, err, ver=0):
"""
New AIPS STars table
Create a ST table on input image im
im = Obit Image on which to attach ST Table
err = Python Obit Error/message stack
ver = version, 0=> new
"""
################################################################
# Check
if not im.ImageIsA():
raise TypeError('im MUST be a Python Obit Image')
if not OErr.OErrIsA(err):
raise TypeError("err MUST be an OErr")
if err.isErr: # existing error?
return None
# Get image descriptor
id = im.Desc.Dict
# Set descriptor dict
dd = {"FieldName":[id["ctype"][0].strip(), id["ctype"][1].strip(), "MAJOR AX", "MINOR AX", \
'POSANG', 'STARTYPE', 'LABEL', \
"_status"], \
"FieldUnit":["DEGREES", "DEGREES", "DEGREES", "DEGREES", \
"DEGREES", "INDEX ", " ", " "], \
"repeat":[1,1,1,1,1,1,24,1], \
"dim0":[1,1,1,1,1,1,24,1], \
"dim1":[1,1,1,1,1,1,1,1], \
"dim2":[1,1,1,1,1,1,1,1], \
"type":[OBIT_double,OBIT_double,OBIT_float,OBIT_float,OBIT_float,OBIT_float,\
OBIT_string,OBIT_int], \
"sortOrder1":0, "sortOrder2":0, "Table name":"AIPS ST", "version":1 \
}
# Table descriptor
tabDesc = TableDesc.PDef(dd)
# Table
st = im.NewTable(Table.WRITEONLY,"AIPS ST",ver,err)
Obit.TableSetDesc(st.me, tabDesc.me)
# Instantiate
Table.PFullInstantiate(st, Table.WRITEONLY, err)
return st
# end PCreate
def newRow (im):
""" Create new row structure for writing ST Table
im = Obit Image on which to attach ST Table
returns row:
    Position columns have the labels of the first two axes of the image
    (e.g. 'RA---SIN', 'DEC--SIN')
    'MAJOR AX' major axis of symbol (deg)
    'MINOR AX' Minor axis of symbol (deg)
'POSANG' Position angle in deg
'STARTYPE' symbol code
1: Plus sign (default) 12: Five pointed star
2: Cross (X) 13: Star of David
3: Circle 14: Seven-pointed star
4: Box 15: Eight-pointed star
5: Triangle 16: Nine-pointed star
6: Diamond 17: Ten-pointed star
7: Pentagon 18: 11-pointed star
8: Hexagon 19: 12-pointed star
9: Septagon 20: 13-pointed star
10: Octagon 21: 14-pointed star
11: Nine-gon 22: Plus with gap
23: Vertical line
24: Cross (X) with gap
'LABEL' Label string for symbol, up to 24 char.
"""
# Get image descriptor
id = im.Desc.Dict
out = {id["ctype"][0].strip():[0.0], id["ctype"][1].strip():[0.0], \
'MINOR AX': [0.0], 'MAJOR AX': [0.0], 'POSANG': [0.0], 'STARTYPE':[3.0], \
'LABEL': [' '], \
'NumFields': 8, 'Table name': 'AIPS ST', '_status': [0]}
return out
# end newRow
def PWriteCirc (sttab, im, center, radius, err):
""" Write an entry for drawing a circle
sttab = Python Table object, must be open with write enabled
im = Obit Image on which to attach ST Table
center = [x,y] pixels
radius = radius in pixels
err = Python Obit Error/message stack
"""
################################################################
# Check
if not OErr.OErrIsA(err):
raise TypeError("err MUST be an OErr")
if err.isErr: # existing error?
return None
# Get image descriptor
id = im.Desc.Dict
# Get row
row = newRow(im)
# Convert pixels to positions
pos = ImageDesc.PGetPos(im.Desc, center, err)
if err.isErr:
printErrMsg(err, "Error converting pixel location to position")
row[id["ctype"][0].strip()] = [pos[0]]
row[id["ctype"][1].strip()] = [pos[1]]
row['MAJOR AX'] = [radius * abs(id["cdelt"][0])]
row['MINOR AX'] = row['MAJOR AX']
row['POSANG'] = [0.0]
row['STARTYPE'] = [3.0]
row['LABEL'] = [" "]
# Write
sttab.WriteRow(-1,row, err)
if err.isErr:
printErrMsg(err, "Error Writing ST table")
# end PWriteCirc
def PWriteEllipse (sttab, im, center, major, minor, PA, err):
""" Write an entry for drawing a circle
sttab = Python Table object, must be open with write enabled
im = Obit Image on which to attach ST Table
center = [x,y] pixels
major = major axis size in pixels
minor = minor axis size in pixels
PA = position angle (from N thru E in deg)
err = Python Obit Error/message stack
"""
################################################################
# Check
if not OErr.OErrIsA(err):
raise TypeError("err MUST be an OErr")
if err.isErr: # existing error?
return None
# Get image descriptor
id = im.Desc.Dict
# Get row
row = newRow(im)
# Convert pixels to positions
pos = ImageDesc.PGetPos(im.Desc, center, err)
if err.isErr:
printErrMsg(err, "Error converting pixel location to position")
row[id["ctype"][0].strip()] = [pos[0]]
row[id["ctype"][1].strip()] = [pos[1]]
row['MAJOR AX'] = [major * abs(id["cdelt"][0])]
row['MINOR AX'] = [minor * abs(id["cdelt"][0])]
row['POSANG'] = [PA]
row['STARTYPE'] = [3.0]
row['LABEL'] = [" "]
# Write
sttab.WriteRow(-1,row, err)
if err.isErr:
printErrMsg(err, "Error Writing ST table")
# end PWriteEllipse
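#-----------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It assumes
# an Obit Image "im" and an OErr error stack "err" already exist; the
# pixel coordinates and sizes below are placeholders. The open/close
# calls follow the usual Obit Table conventions and may need adjusting
# for a given installation:
#
#   st = PCreate(im, err)                              # new AIPS ST table on im
#   st.Open(Table.READWRITE, err)
#   PWriteCirc(st, im, [256.0, 256.0], 20.0, err)      # circle, radius 20 pixels
#   PWriteEllipse(st, im, [256.0, 256.0], 30.0, 15.0, 45.0, err)
#   st.Close(err)
#-----------------------------------------------------------------------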
|
py | 1a478677a323fdcab1fd8455c94caa25d9927251 | from django.conf.urls.defaults import *
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Example:
# (r'^cms/', include('cms.foo.urls')),
# Uncomment the admin/doc line below and add 'django.contrib.admindocs'
# to INSTALLED_APPS to enable admin documentation:
# (r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Admin URLs
(r'^admin/filebrowser/', include('filebrowser.urls')),
(r'^grappelli/', include('grappelli.urls')),
#(r'^tinymce/', include('tinymce.urls')),
(r'^admin/(.*)', admin.site.root),
# cms URLs
(r'^/?$', 'django.views.generic.simple.redirect_to', { 'url': 'weblog/' } ),
(r'^search/$', 'cms.search.views.search'),
# snakelog URLs
(r'^weblog/categories/', include('snakelog.urls.categories')),
(r'^weblog/links/', include('snakelog.urls.links')),
(r'^weblog/tags/', include('snakelog.urls.tags')),
(r'^weblog/', include('snakelog.urls.entries')),
# Comment URLS
(r'^comments/', include('django.contrib.comments.urls')),
# Last catch all for flatpages
(r'', include('django.contrib.flatpages.urls')),
)
|
py | 1a478714eb3332f7af62dacd8e2615f00a34ae9c | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import ast
import textwrap
from builtins import map
from pants_test.contrib.python.checks.tasks.checkstyle.plugin_test_base import \
CheckstylePluginTestBase
from pants.contrib.python.checks.tasks.checkstyle.common import Nit
from pants.contrib.python.checks.tasks.checkstyle.import_order import ImportOrder, ImportType
IMPORT_CHUNKS = {
ImportType.STDLIB: """
import ast
from collections import namedtuple
import io
""",
ImportType.TWITTER: """
from twitter.common import app
from twitter.common.dirutil import (
safe_mkdtemp,
safe_open,
safe_rmtree)
""",
ImportType.GEN: """
from gen.twitter.aurora.ttypes import TwitterTaskInfo
""",
ImportType.PACKAGE: """
from .import_order import (
ImportOrder,
ImportType
)
""",
ImportType.THIRD_PARTY: """
from kazoo.client import KazooClient
import zookeeper
""",
}
def strip_newline(stmt):
return textwrap.dedent('\n'.join(_f for _f in stmt.splitlines() if _f))
def stitch_chunks(newlines, *chunks):
return ('\n' * newlines).join([strip_newline(IMPORT_CHUNKS.get(c)) for c in chunks])
class ImportOrderTest(CheckstylePluginTestBase):
plugin_type = ImportOrder
def get_import_chunk_types(self, import_type):
chunks = list(self.get_plugin(IMPORT_CHUNKS[import_type]).iter_import_chunks())
self.assertEqual(1, len(chunks))
return tuple(map(type, chunks[0]))
def test_classify_import_chunks(self):
self.assertEqual((ast.Import, ast.ImportFrom, ast.Import),
self.get_import_chunk_types(ImportType.STDLIB))
self.assertEqual((ast.ImportFrom, ast.ImportFrom),
self.get_import_chunk_types(ImportType.TWITTER))
self.assertEqual((ast.ImportFrom,),
self.get_import_chunk_types(ImportType.GEN))
self.assertEqual((ast.ImportFrom,),
self.get_import_chunk_types(ImportType.PACKAGE))
self.assertEqual((ast.ImportFrom, ast.Import),
self.get_import_chunk_types(ImportType.THIRD_PARTY))
def test_classify_import(self):
for import_type, chunk in IMPORT_CHUNKS.items():
io = self.get_plugin(chunk)
import_chunks = list(io.iter_import_chunks())
self.assertEqual(1, len(import_chunks))
module_types, chunk_errors = io.classify_imports(import_chunks[0])
self.assertEqual(1, len(module_types))
self.assertEqual(import_type, module_types.pop())
self.assertEqual([], chunk_errors)
PAIRS = (
(ImportType.STDLIB, ImportType.TWITTER),
(ImportType.TWITTER, ImportType.GEN),
(ImportType.PACKAGE, ImportType.THIRD_PARTY),
)
def test_pairwise_classify(self):
for first, second in self.PAIRS:
io = self.get_plugin(stitch_chunks(1, first, second))
import_chunks = list(io.iter_import_chunks())
self.assertEqual(2, len(import_chunks))
module_types, chunk_errors = io.classify_imports(import_chunks[0])
self.assertEqual(1, len(module_types))
self.assertEqual(0, len(chunk_errors))
self.assertEqual(first, module_types.pop())
module_types, chunk_errors = io.classify_imports(import_chunks[1])
self.assertEqual(1, len(module_types))
self.assertEqual(0, len(chunk_errors))
self.assertEqual(second, module_types.pop())
for second, first in self.PAIRS:
io = self.get_plugin(stitch_chunks(1, first, second))
import_chunks = list(io.iter_import_chunks())
self.assertEqual(2, len(import_chunks))
nits = list(io.nits())
self.assertEqual(1, len(nits))
self.assertEqual('T406', nits[0].code)
self.assertEqual(Nit.ERROR, nits[0].severity)
def test_multiple_imports_error(self):
io = self.get_plugin(stitch_chunks(0, ImportType.STDLIB, ImportType.TWITTER))
import_chunks = list(io.iter_import_chunks())
self.assertEqual(1, len(import_chunks))
module_types, chunk_errors = io.classify_imports(import_chunks[0])
self.assertEqual(1, len(chunk_errors))
self.assertEqual('T405', chunk_errors[0].code)
self.assertEqual(Nit.ERROR, chunk_errors[0].severity)
self.assertEqual(sorted([ImportType.STDLIB, ImportType.TWITTER]), sorted(module_types))
io = self.get_plugin("""
import io, pkg_resources
""")
import_chunks = list(io.iter_import_chunks())
self.assertEqual(1, len(import_chunks))
module_types, chunk_errors = io.classify_imports(import_chunks[0])
self.assertEqual(3, len(chunk_errors))
self.assertEqual(sorted(['T403', 'T405', 'T402']),
sorted([chunk_error.code for chunk_error in chunk_errors]))
self.assertEqual(sorted([ImportType.STDLIB, ImportType.THIRD_PARTY]), sorted(module_types))
def test_import_lexical_order(self):
imp = """
from twitter.common.dirutil import safe_rmtree, safe_mkdtemp
"""
self.assertNit(imp, 'T401')
def test_import_wildcard(self):
imp = """
from twitter.common.dirutil import *
"""
self.assertNit(imp, 'T400')
|
py | 1a47878212b3595ff270c00b293e089293af5bc5 | # xxxswf.py was created by alexander dot hanel at gmail dot com
# version 0.1
# Date - 12-07-2011
# To do list
# - Tag Parser
# - ActionScript Decompiler
import fnmatch
import hashlib
import imp
import math
import os
import re
import struct
import sys
import time
from StringIO import StringIO
from optparse import OptionParser
import zlib
def checkMD5(md5):
# checks if MD5 has been seen in MD5 Dictionary
# MD5Dict contains the MD5 and the CVE
# For { 'MD5':'CVE', 'MD5-1':'CVE-1', 'MD5-2':'CVE-2'}
MD5Dict = {'c46299a5015c6d31ad5766cb49e4ab4b':'CVE-XXXX-XXXX'}
if MD5Dict.get(md5):
print '\t[BAD] MD5 Match on', MD5Dict.get(md5)
return
def bad(f):
for idx, x in enumerate(findSWF(f)):
tmp = verifySWF(f,x)
if tmp != None:
yaraScan(tmp)
checkMD5(hashBuff(tmp))
return
def yaraScan(d):
# d = buffer of the read file
# Scans SWF using Yara
# test if yara module is installed
# if not Yara can be downloaded from http://code.google.com/p/yara-project/
try:
imp.find_module('yara')
import yara
except ImportError:
print '\t[ERROR] Yara module not installed - aborting scan'
return
# test for yara compile errors
try:
r = yara.compile(r'rules.yar')
except:
pass
print '\t[ERROR] Yara compile error - aborting scan'
return
# get matches
m = r.match(data=d)
# print matches
for X in m:
print '\t[BAD] Yara Signature Hit:', X
return
def findSWF(d):
# d = buffer of the read file
# Search for SWF Header Sigs in files
return [tmp.start() for tmp in re.finditer('CWS|FWS', d.read())]
def hashBuff(d):
# d = buffer of the read file
# This function hashes the buffer
# source: http://stackoverflow.com/q/5853830
if type(d) is str:
d = StringIO(d)
md5 = hashlib.md5()
while True:
data = d.read(128)
if not data:
break
md5.update(data)
return md5.hexdigest()
def verifySWF(f,addr):
# Start of SWF
f.seek(addr)
# Read Header
header = f.read(3)
# Read Version
ver = struct.unpack('<b', f.read(1))[0]
# Read SWF Size
size = struct.unpack('<i', f.read(4))[0]
# Start of SWF
f.seek(addr)
try:
# Read SWF into buffer. If compressed read uncompressed size.
t = f.read(size)
except:
pass
# Error check for invalid SWF
print ' - [ERROR] Invalid SWF Size'
return None
if type(t) is str:
f = StringIO(t)
# Error check for version above 20
if ver > 20:
print ' - [ERROR] Invalid SWF Version'
return None
if 'CWS' in header:
try:
f.read(3)
tmp = 'FWS' + f.read(5) + zlib.decompress(f.read())
print ' - CWS Header'
return tmp
except:
pass
print '- [ERROR]: Zlib decompression error. Invalid CWS SWF'
return None
elif 'FWS' in header:
try:
tmp = f.read(size)
print ' - FWS Header'
return tmp
except:
pass
print ' - [ERROR] Invalid SWF Size'
return None
else:
print ' - [Error] Logic Error Blame Programmer'
return None
def headerInfo(f):
# f is the already opended file handle
    # Yes, the format is a rip-off of SWFDump. Can you blame me? Their tool is awesome.
# SWFDump FORMAT
# [HEADER] File version: 8
# [HEADER] File is zlib compressed. Ratio: 52%
# [HEADER] File size: 37536
# [HEADER] Frame rate: 18.000000
# [HEADER] Frame count: 323
# [HEADER] Movie width: 217.00
# [HEADER] Movie height: 85.00
if type(f) is str:
f = StringIO(f)
sig = f.read(3)
print '\t[HEADER] File header:', sig
if 'C' in sig:
print '\t[HEADER] File is zlib compressed.'
version = struct.unpack('<b', f.read(1))[0]
print '\t[HEADER] File version:', version
size = struct.unpack('<i', f.read(4))[0]
print '\t[HEADER] File size:', size
# deflate compressed SWF
if 'C' in sig:
f = verifySWF(f,0)
if type(f) is str:
f = StringIO(f)
f.seek(0, 0)
x = f.read(8)
ta = f.tell()
tmp = struct.unpack('<b', f.read(1))[0]
nbit = tmp >> 3
print '\t[HEADER] Rect Nbit:', nbit
    # Currently the nbit is static at 15. This could be modified in the
    # future. If larger than 9 this will break the struct unpack. Will have
    # to revisit; there must be a more effective way to deal with bits. Tried to keep
# the algo but damn this is ugly...
f.seek(ta)
rect = struct.unpack('>Q', f.read(int(math.ceil((nbit*4)/8.0))))[0]
tmp = struct.unpack('<b', f.read(1))[0]
tmp = bin(tmp>>7)[2:].zfill(1)
# bin requires Python 2.6 or higher
# skips string '0b' and the nbit
rect = bin(rect)[7:]
xmin = int(rect[0:nbit-1],2)
print '\t[HEADER] Rect Xmin:', xmin
xmax = int(rect[nbit:(nbit*2)-1],2)
print '\t[HEADER] Rect Xmax:', xmax
ymin = int(rect[nbit*2:(nbit*3)-1],2)
print '\t[HEADER] Rect Ymin:', ymin
# one bit needs to be added, my math might be off here
ymax = int(rect[nbit*3:(nbit*4)-1] + str(tmp) ,2)
print '\t[HEADER] Rect Ymax:', ymax
framerate = struct.unpack('<H', f.read(2))[0]
print '\t[HEADER] Frame Rate:', framerate
framecount = struct.unpack('<H', f.read(2))[0]
print '\t[HEADER] Frame Count:', framecount
def walk4SWF(path):
# returns a list of [folder-path, [addr1,addrw2]]
# Don't ask, will come back to this code.
p = ['',[]]
r = p*0
if os.path.isdir(path) != True and path != '':
print '\t[ERROR] walk4SWF path must be a dir.'
return
for root, dirs, files in os.walk(path):
for name in files:
try:
x = open(os.path.join(root, name), 'rb')
except:
pass
break
y = findSWF(x)
if len(y) != 0:
# Path of file SWF
p[0] = os.path.join(root, name)
# contains list of the file offset of SWF header
p[1] = y
r.insert(len(r),p)
p = ['',[]]
y = ''
x.close()
return r
def tagsInfo(f):
return
def fileExist(n, ext):
# Checks the working dir to see if the file is
# already in the dir. If exists the file will
# be named name.count.ext (n.c.ext). No more than
# 50 matching MD5s will be written to the dir.
if os.path.exists( n + '.' + ext):
c = 2
while os.path.exists(n + '.' + str(c) + '.' + ext):
c = c + 1
if c == 50:
print '\t[ERROR] Skipped 50 Matching MD5 SWFs'
break
n = n + '.' + str(c)
return n + '.' + ext
def CWSize(f):
# The file size in the header is of the uncompressed SWF.
# To estimate the size of the compressed data, we can grab
# the length, read that amount, deflate the data, then
# compress the data again, and then call len(). This will
# give us the length of the compressed SWF.
return
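# Illustrative sketch (not part of the original tool) of the estimation
# approach described in CWSize() above: read the uncompressed length from
# the header, inflate the payload, recompress it and measure the result.
# "f" is assumed to be a file-like object positioned at an SWF header; the
# recompressed length only approximates the on-disk size because the zlib
# compression level used by the original producer is unknown.
def _estimate_cws_size(f):
    start = f.tell()
    sig = f.read(3)                              # 'FWS' or 'CWS'
    f.read(1)                                    # version byte
    size = struct.unpack('<i', f.read(4))[0]     # uncompressed size from header
    if 'C' not in sig:
        f.seek(start)
        return size                              # FWS: header size is the real size
    body = zlib.decompress(f.read())             # same pattern verifySWF() uses
    f.seek(start)
    return 8 + len(zlib.compress(body))          # 8 header bytes + recompressed payload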
def compressSWF(f):
if type(f) is str:
f = StringIO(f)
try:
f.read(3)
tmp = 'CWS' + f.read(5) + zlib.compress(f.read())
return tmp
except:
pass
print '\t[ERROR] SWF Zlib Compression Failed'
return None
def disneyland(f,filename, options):
# because this is where the magic happens
# but seriously I did the recursion part last..
retfindSWF = findSWF(f)
f.seek(0)
print '\n[SUMMARY] %d SWF(s) in MD5:%s:%s' % ( len(retfindSWF),hashBuff(f), filename )
# for each SWF in file
for idx, x in enumerate(retfindSWF):
print '\t[ADDR] SWF %d at %s' % (idx+1, hex(x)),
f.seek(x)
h = f.read(1)
f.seek(x)
swf = verifySWF(f,x)
if swf == None:
continue
if options.extract != None:
name = fileExist(hashBuff(swf), 'swf')
print '\t\t[FILE] Carved SWF MD5: %s' % name
try:
o = open(name, 'wb+')
except IOError, e:
print '\t[ERROR] Could Not Create %s ' % e
continue
o.write(swf)
o.close()
if options.yara != None:
yaraScan(swf)
if options.md5scan != None:
checkMD5(hashBuff(swf))
if options.decompress != None:
name = fileExist(hashBuff(swf), 'swf')
print '\t\t[FILE] Carved SWF MD5: %s' % name
try:
o = open(name, 'wb+')
except IOError, e:
print '\t[ERROR] Could Not Create %s ' % e
continue
o.write(swf)
o.close()
if options.header != None:
headerInfo(swf)
if options.compress != None:
swf = compressSWF(swf)
if swf == None:
continue
name = fileExist(hashBuff(swf), 'swf')
print '\t\t[FILE] Compressed SWF MD5: %s' % name
try:
o = open(name, 'wb+')
except IOError, e:
print '\t[ERROR] Could Not Create %s ' % e
continue
o.write(swf)
o.close()
def main():
# Scenarios:
# Scan file for SWF(s)
# Scan file for SWF(s) and extract them
# Scan file for SWF(s) and scan them with Yara
# Scan file for SWF(s), extract them and scan with Yara
# Scan directory recursively for files that contain SWF(s)
# Scan directory recursively for files that contain SWF(s) and extract them
parser = OptionParser()
usage = 'usage: %prog [options] <file.bad>'
parser = OptionParser(usage=usage)
parser.add_option('-x', '--extract', action='store_true', dest='extract', help='Extracts the embedded SWF(s), names it MD5HASH.swf & saves it in the working dir. No addition args needed')
parser.add_option('-y', '--yara', action='store_true', dest='yara', help='Scans the SWF(s) with yara. If the SWF(s) is compressed it will be deflated. No addition args needed')
parser.add_option('-s', '--md5scan', action='store_true', dest='md5scan', help='Scans the SWF(s) for MD5 signatures. Please see func checkMD5 to define hashes. No addition args needed')
parser.add_option('-H', '--header', action='store_true', dest='header', help='Displays the SWFs file header. No addition args needed')
parser.add_option('-d', '--decompress', action='store_true', dest='decompress', help='Deflates compressed SWFS(s)')
parser.add_option('-r', '--recdir', dest='PATH', type='string', help='Will recursively scan a directory for files that contain SWFs. Must provide path in quotes')
parser.add_option('-c', '--compress', action='store_true', dest='compress', help='Compresses the SWF using Zlib')
(options, args) = parser.parse_args()
# Print help if no argurments are passed
if len(sys.argv) < 2:
parser.print_help()
return
# Note files can't start with '-'
if '-' in sys.argv[len(sys.argv)-1][0] and options.PATH == None:
parser.print_help()
return
# Recusive Search
if options.PATH != None:
paths = walk4SWF(options.PATH)
for y in paths:
#if sys.argv[0] not in y[0]:
try:
t = open(y[0], 'rb+')
disneyland(t, y[0],options)
except IOError:
pass
return
# try to open file
try:
f = open(sys.argv[len(sys.argv)-1],'rb+')
filename = sys.argv[len(sys.argv)-1]
except Exception:
        print '[ERROR] File cannot be opened/accessed'
        return
disneyland(f,filename,options)
f.close()
return
if __name__ == '__main__':
main()
|
py | 1a4787a051d6a7ee27d68ad43b461a3379a49f17 | import numpy as np
def value_iteration(env, gamma, theta, max_iterations, value=None):
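    # Value iteration: repeatedly apply the Bellman optimality backup until the
    # largest per-state update falls below theta, then extract the greedy policy.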
if value is None:
value = np.zeros(env.n_states)
else:
        value = np.array(value, dtype=float)
for _ in range(max_iterations):
delta = 0.
for s in range(env.n_states):
v = value[s]
value[s] = max([sum([env.p(next_s, s, a) * (env.r(next_s, s, a) + gamma * value[next_s]) for next_s in range(env.n_states)]) for a in range(env.n_actions)])
delta = max(delta, np.abs(v - value[s]))
if delta < theta:
break
policy = np.zeros(env.n_states, dtype=int)
for s in range(env.n_states):
policy[s] = np.argmax([sum([env.p(next_s, s, a) * (env.r(next_s, s, a) + gamma * value[next_s]) for next_s in range(env.n_states)]) for a in range(env.n_actions)])
return policy, value
def policy_evaluation(env, policy, gamma, theta, max_iterations):
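    # Iterative policy evaluation: apply the Bellman expectation backup for the
    # fixed policy until the value function changes by less than theta.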
    value = np.zeros(env.n_states, dtype=float)
for _ in range(max_iterations):
delta = 0
for s in range(env.n_states):
v = value[s]
value[s] = sum([env.p(next_s, s, policy[s]) * (env.r(next_s, s, policy[s]) + gamma * value[next_s]) for next_s in range(env.n_states)])
delta = max(delta, abs(v - value[s]))
if delta < theta:
break
return value
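# Minimal usage sketch (not part of the original module): a two-state toy MDP
# exercising the interface assumed above (env.n_states, env.n_actions,
# env.p(next_s, s, a) and env.r(next_s, s, a)).
if __name__ == "__main__":
    class _ToyEnv:
        n_states = 2
        n_actions = 2
        def p(self, next_s, s, a):
            # action 0 stays put, action 1 moves to the other state
            target = s if a == 0 else 1 - s
            return 1.0 if next_s == target else 0.0
        def r(self, next_s, s, a):
            # reward 1 for landing in state 1
            return 1.0 if next_s == 1 else 0.0
    policy, value = value_iteration(_ToyEnv(), gamma=0.9, theta=1e-8, max_iterations=1000)
    print("policy: %s  values: %s" % (policy, value))
    print("evaluated: %s" % policy_evaluation(_ToyEnv(), policy, 0.9, 1e-8, 1000))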
|
py | 1a47887373361eced83e41a9c65bc654a9d0badf | import csv
from django.http import HttpResponse
class ExportCsvMixin:
def export_as_csv(self, request, queryset):
meta = self.model._meta
field_names = [field.name for field in meta.fields]
response = HttpResponse(content_type="text/csv")
response["Content-Disposition"] = "attachment; filename={}.csv".format(meta)
writer = csv.writer(response)
writer.writerow(field_names)
# for obj in queryset:
# row = writer.writerow([getattr(obj, field) for field in field_names])
return response
export_as_csv.short_description = "Export to csv"
def all_complete(self, request, queryset):
self.model.objects.all().update(completed=True)
self.message_user(request, "All task are set as completed now")
def all_not_complete(self, request, queryset):
self.model.objects.all().update(completed=False)
self.message_user(request, "All task are set as uncompleted now")
|
py | 1a47887e5923218ec7a6c9416bff9e95ab2c62ef | # -*- coding: utf-8 -*-
'''
The music21 Framework is Copyright © 2006-2015 Michael Scott Cuthbert
and the music21 Project
(Michael Scott Cuthbert, principal investigator; [email protected])
Some Rights Reserved
Released under the Lesser GNU Public License (LGPL) or the BSD (3-clause) license.
See license.txt file for the full license which represents your legal
obligations in using, modifying, or distributing music21.
Roughly speaking, this means that anyone can use this software for
free, they can distribute it to anyone, so long as this acknowledgment
of copyright and ownership remain publicly accessible. You may also
modify this software or use it in your own programs so long as you do
so long as you make your product available
under the same license. You may also link to this code as a library
from your sold, proprietary commercial product so long as this code
remains open and accessible, this license is made accessible,
and the developers are credited.
The development of music21 was supported by grants
from the Seaver Institute and the NEH/Digging into Data Challenge,
with the support of the MIT
Music and Theater Arts section and the School of Humanities, Arts,
and Social Sciences. Portions of music21 were originally part of
the PMusic (Perl) library, developed by Cuthbert prior to arriving at MIT.
music21 outputs a subset of XML data defined by the MusicXML 2.0
standard, Copyright © Recordare LLC; License available at
http://www.recordare.com/dtds/license.html, now transferred to MakeMusic
music21 incorporates Microsoft Excel reading via the included
xlrd library:
Portions copyright (c) 2005-2006, Stephen John Machin, Lingfo Pty Ltd
All rights reserved.
see ext/xlrd/licenses.py for the complete disclaimer and conditions
Files in the ext/ folder are not copyright music21 Project but whose distribution
is compatible with music21. The corpus files have copyrights retained by their
owners who have allowed them to be included with music21.
'''
# this defines what is loaded when importing __all__
# put these in alphabetical order FIRST dirs then modules
# but: base must come first; in some cases other modules depend on
# definitions in base
__all__ = [
'base',
'sites', # important
# sub folders
'abcFormat',
'analysis',
'audioSearch',
'braille',
'capella',
'composition',
'counterpoint',
'corpus',
'demos',
'features',
'figuredBass',
'humdrum',
'ipython21',
'languageExcerpts',
'lily',
'mei',
'midi',
'musedata',
'musicxml',
'noteworthy',
'omr',
'romanText',
'scala',
'search',
'test',
'theoryAnalysis',
'timespans',
'trecento',
'vexflow',
'webapps',
# individual modules
# KEEP ALPHABETICAL unless necessary for load reasons, if so
# put a note. Keep one letter per line.
'articulations',
'bar',
# base listed above
'beam',
'chant',
'chord',
'chordTables',
'clef',
'common',
'configure',
'contour',
'converter',
'defaults',
'derivation',
'duration',
'dynamics',
'editorial',
'environment',
'exceptions21',
'expressions',
'freezeThaw',
'graph',
'harmony',
'instrument',
'interval',
'intervalNetwork',
'key',
'layout',
'medren',
'metadata',
'meter',
'note',
'pitch',
'repeat',
'roman',
'scale',
'serial',
'sieve',
'spanner',
'stream',
'tempo',
'text',
'tie',
'tinyNotation',
'variant',
'voiceLeading',
'volume',
'xmlnode',
]
#__all__.reverse()
#print __all__
# skipped purposely, "base", "xmlnode"
#-------------------------------------------------------------------------------
# for sub packages, need to manually add the modules in these subpackages
#from music21.analysis import *
#import sys
#x = sys.stdout
#-------------------------------------------------------------------------------
# base Music21Object -- all objects should inherit from this!
from music21 import base
from music21.base import VERSION
from music21.base import VERSION_STR
from music21.base import VERSION_STR as __version__
from music21.base import Music21Exception
from music21.base import SitesException
from music21.base import Music21ObjectException
from music21.base import ElementException
from music21.base import Groups
from music21.base import SiteRef
from music21.base import Sites
from music21.base import Music21Object
from music21.base import ElementWrapper
from music21.base import mainTest
from music21.base import *
#del(types)
#del(sys)
#del(imp)
#del(doctest)
#del(copy)
#del(codecs)
#del(unittest)
#-------------------------------------------------------------------------------
# place the parse function directly in the music21 namespace
# this cannot go in music21/base.py
#import converter
#parse = converter.parse
#------------------------------------------------------------------------------
# this bring all of the __all__ names into the music21 package namespace
from music21 import * # @UnresolvedImport
#------------------------------------------------------------------------------
# eof
|
py | 1a478917be8677edb26d739e94ac42248318f842 | import warnings
import pytest
import flask
from flask.sessions import SecureCookieSessionInterface
from flask.sessions import SessionInterface
try:
from greenlet import greenlet
except ImportError:
greenlet = None
def test_teardown_on_pop(app):
buffer = []
@app.teardown_request
def end_of_request(exception):
buffer.append(exception)
ctx = app.test_request_context()
ctx.push()
assert buffer == []
ctx.pop()
assert buffer == [None]
def test_teardown_with_previous_exception(app):
buffer = []
@app.teardown_request
def end_of_request(exception):
buffer.append(exception)
try:
raise Exception("dummy")
except Exception:
pass
with app.test_request_context():
assert buffer == []
assert buffer == [None]
def test_teardown_with_handled_exception(app):
buffer = []
@app.teardown_request
def end_of_request(exception):
buffer.append(exception)
with app.test_request_context():
assert buffer == []
try:
raise Exception("dummy")
except Exception:
pass
assert buffer == [None]
def test_proper_test_request_context(app):
app.config.update(SERVER_NAME="localhost.localdomain:5000")
@app.route("/")
def index():
return None
@app.route("/", subdomain="foo")
def sub():
return None
with app.test_request_context("/"):
assert (
flask.url_for("index", _external=True)
== "http://localhost.localdomain:5000/"
)
with app.test_request_context("/"):
assert (
flask.url_for("sub", _external=True)
== "http://foo.localhost.localdomain:5000/"
)
# suppress Werkzeug 0.15 warning about name mismatch
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", "Current server name", UserWarning, "flask.app"
)
with app.test_request_context(
"/", environ_overrides={"HTTP_HOST": "localhost"}
):
pass
app.config.update(SERVER_NAME="localhost")
with app.test_request_context("/", environ_overrides={"SERVER_NAME": "localhost"}):
pass
app.config.update(SERVER_NAME="localhost:80")
with app.test_request_context(
"/", environ_overrides={"SERVER_NAME": "localhost:80"}
):
pass
def test_context_binding(app):
@app.route("/")
def index():
return f"Hello {flask.request.args['name']}!"
@app.route("/meh")
def meh():
return flask.request.url
with app.test_request_context("/?name=World"):
assert index() == "Hello World!"
with app.test_request_context("/meh"):
assert meh() == "http://localhost/meh"
assert flask._request_ctx_stack.top is None
def test_context_test(app):
assert not flask.request
assert not flask.has_request_context()
ctx = app.test_request_context()
ctx.push()
try:
assert flask.request
assert flask.has_request_context()
finally:
ctx.pop()
def test_manual_context_binding(app):
@app.route("/")
def index():
return f"Hello {flask.request.args['name']}!"
ctx = app.test_request_context("/?name=World")
ctx.push()
assert index() == "Hello World!"
ctx.pop()
with pytest.raises(RuntimeError):
index()
@pytest.mark.skipif(greenlet is None, reason="greenlet not installed")
class TestGreenletContextCopying:
def test_greenlet_context_copying(self, app, client):
greenlets = []
@app.route("/")
def index():
flask.session["fizz"] = "buzz"
reqctx = flask._request_ctx_stack.top.copy()
def g():
assert not flask.request
assert not flask.current_app
with reqctx:
assert flask.request
assert flask.current_app == app
assert flask.request.path == "/"
assert flask.request.args["foo"] == "bar"
assert flask.session.get("fizz") == "buzz"
assert not flask.request
return 42
greenlets.append(greenlet(g))
return "Hello World!"
rv = client.get("/?foo=bar")
assert rv.data == b"Hello World!"
result = greenlets[0].run()
assert result == 42
def test_greenlet_context_copying_api(self, app, client):
greenlets = []
@app.route("/")
def index():
flask.session["fizz"] = "buzz"
@flask.copy_current_request_context
def g():
assert flask.request
assert flask.current_app == app
assert flask.request.path == "/"
assert flask.request.args["foo"] == "bar"
assert flask.session.get("fizz") == "buzz"
return 42
greenlets.append(greenlet(g))
return "Hello World!"
rv = client.get("/?foo=bar")
assert rv.data == b"Hello World!"
result = greenlets[0].run()
assert result == 42
def test_session_error_pops_context():
class SessionError(Exception):
pass
class FailingSessionInterface(SessionInterface):
def open_session(self, app, request):
raise SessionError()
class CustomFlask(flask.Flask):
session_interface = FailingSessionInterface()
app = CustomFlask(__name__)
@app.route("/")
def index():
# shouldn't get here
        raise AssertionError()
response = app.test_client().get("/")
assert response.status_code == 500
assert not flask.request
assert not flask.current_app
def test_session_dynamic_cookie_name():
# This session interface will use a cookie with a different name if the
# requested url ends with the string "dynamic_cookie"
class PathAwareSessionInterface(SecureCookieSessionInterface):
def get_cookie_name(self, app):
if flask.request.url.endswith("dynamic_cookie"):
return "dynamic_cookie_name"
else:
return super().get_cookie_name(app)
class CustomFlask(flask.Flask):
session_interface = PathAwareSessionInterface()
app = CustomFlask(__name__)
app.secret_key = "secret_key"
@app.route("/set", methods=["POST"])
def set():
flask.session["value"] = flask.request.form["value"]
return "value set"
@app.route("/get")
def get():
v = flask.session.get("value", "None")
return v
@app.route("/set_dynamic_cookie", methods=["POST"])
def set_dynamic_cookie():
flask.session["value"] = flask.request.form["value"]
return "value set"
@app.route("/get_dynamic_cookie")
def get_dynamic_cookie():
v = flask.session.get("value", "None")
return v
test_client = app.test_client()
# first set the cookie in both /set urls but each with a different value
assert test_client.post("/set", data={"value": "42"}).data == b"value set"
assert (
test_client.post("/set_dynamic_cookie", data={"value": "616"}).data
== b"value set"
)
# now check that the relevant values come back - meaning that different
# cookies are being used for the urls that end with "dynamic cookie"
assert test_client.get("/get").data == b"42"
assert test_client.get("/get_dynamic_cookie").data == b"616"
def test_bad_environ_raises_bad_request():
app = flask.Flask(__name__)
from flask.testing import EnvironBuilder
builder = EnvironBuilder(app)
environ = builder.get_environ()
# use a non-printable character in the Host - this is key to this test
environ["HTTP_HOST"] = "\x8a"
with app.request_context(environ):
response = app.full_dispatch_request()
assert response.status_code == 400
def test_environ_for_valid_idna_completes():
app = flask.Flask(__name__)
@app.route("/")
def index():
return "Hello World!"
from flask.testing import EnvironBuilder
builder = EnvironBuilder(app)
environ = builder.get_environ()
# these characters are all IDNA-compatible
environ["HTTP_HOST"] = "ąśźäüжŠßя.com"
with app.request_context(environ):
response = app.full_dispatch_request()
assert response.status_code == 200
def test_normal_environ_completes():
app = flask.Flask(__name__)
@app.route("/")
def index():
return "Hello World!"
response = app.test_client().get("/", headers={"host": "xn--on-0ia.com"})
assert response.status_code == 200
|
py | 1a47891828ef98aa9fb730a68e103ff4f22ced9e | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Public Python API of TensorFlow Debugger (tfdbg).
See the [TFDBG](https://www.tensorflow.org/guide/debugger) guide.
@@add_debug_tensor_watch
@@watch_graph
@@watch_graph_with_blacklists
@@DebugTensorDatum
@@DebugDumpDir
@@load_tensor_from_event
@@load_tensor_from_event_file
@@has_inf_or_nan
@@DumpingDebugHook
@@DumpingDebugWrapperSession
@@GrpcDebugHook
@@GrpcDebugWrapperSession
@@LocalCLIDebugHook
@@LocalCLIDebugWrapperSession
@@TensorBoardDebugHook
@@TensorBoardDebugWrapperSession
@@WatchOptions
@@reconstruct_non_debug_graph_def
@@GradientsDebugger
@@clear_gradient_debuggers
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-imports
from tensorflow.python.debug.lib.debug_data import DebugDumpDir
from tensorflow.python.debug.lib.debug_data import DebugTensorDatum
from tensorflow.python.debug.lib.debug_data import has_inf_or_nan
from tensorflow.python.debug.lib.debug_data import load_tensor_from_event
from tensorflow.python.debug.lib.debug_data import load_tensor_from_event_file
from tensorflow.python.debug.lib.debug_gradients import GradientsDebugger
from tensorflow.python.debug.lib.debug_graphs import reconstruct_non_debug_graph_def
from tensorflow.python.debug.lib.debug_utils import add_debug_tensor_watch
from tensorflow.python.debug.lib.debug_utils import watch_graph
from tensorflow.python.debug.lib.debug_utils import watch_graph_with_blacklists
from tensorflow.python.debug.wrappers.dumping_wrapper import DumpingDebugWrapperSession
from tensorflow.python.debug.wrappers.framework import WatchOptions
from tensorflow.python.debug.wrappers.grpc_wrapper import GrpcDebugWrapperSession
from tensorflow.python.debug.wrappers.grpc_wrapper import TensorBoardDebugWrapperSession
from tensorflow.python.debug.wrappers.hooks import DumpingDebugHook
from tensorflow.python.debug.wrappers.hooks import GrpcDebugHook
from tensorflow.python.debug.wrappers.hooks import LocalCLIDebugHook
from tensorflow.python.debug.wrappers.hooks import TensorBoardDebugHook
from tensorflow.python.debug.wrappers.local_cli_wrapper import LocalCLIDebugWrapperSession
from tensorflow.python.util import all_util as _all_util
_all_util.remove_undocumented(__name__)
|
py | 1a4789513d39cec957eec51ee37a10746d3dd86c | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from . import cudnn
|
py | 1a47895d3602ef550c99a566309e9dba95cb80b3 | import json
from django.views.generic import TemplateView
from django.contrib.auth import authenticate, login, logout
from django.http import JsonResponse
from rest_framework import viewsets
from rest_framework.views import APIView
from django.views.decorators.csrf import ensure_csrf_cookie, csrf_protect
from django.utils.decorators import method_decorator
from rest_framework.permissions import AllowAny
from rest_framework.generics import ListAPIView
from .serializers import CityPopulationSerializer, ProfitableBuildingSerializer, \
MaintainableBuildingSerializer, MassiliaSettingsSerializer, ArmyUnitSerializer, \
NavyUnitSerializer, BalanceSheetSerializer, UniqueEventSerializer
from .models import CityPopulation, ProfitableBuilding, MaintainableBuilding, \
MassiliaSettings, ArmyUnit, NavyUnit, BalanceSheet, UniqueEvent
class CityPopulationView(viewsets.ModelViewSet):
serializer_class = CityPopulationSerializer
queryset = CityPopulation.objects.all()
class MassiliaSettingsView(viewsets.ModelViewSet):
serializer_class = MassiliaSettingsSerializer
queryset = MassiliaSettings.objects.filter(pk=1)
class ProfitableBuildingView(viewsets.ModelViewSet):
serializer_class = ProfitableBuildingSerializer
queryset = ProfitableBuilding.objects.all()
class MaintainableBuildingView(viewsets.ModelViewSet):
serializer_class = MaintainableBuildingSerializer
queryset = MaintainableBuilding.objects.all()
class ArmyUnitView(viewsets.ModelViewSet):
serializer_class = ArmyUnitSerializer
queryset = ArmyUnit.objects.all()
class NavyUnitView(viewsets.ModelViewSet):
serializer_class = NavyUnitSerializer
queryset = NavyUnit.objects.all()
class BalanceSheetView(viewsets.ModelViewSet):
serializer_class = BalanceSheetSerializer
queryset = BalanceSheet.objects.all()
class UniqueEventView(viewsets.ModelViewSet):
serializer_class = UniqueEventSerializer
queryset = UniqueEvent.objects.all()
class IndexView(TemplateView):
""" Return the ReactJS frontend. """
template_name = 'build/index.html'
@method_decorator(csrf_protect, name='dispatch')
class LoginView(APIView):
permission_classes = (AllowAny, )
def post(self, request):
data = json.loads(request.body)
username = data['username']
password = data['password']
# Check user credentials
if username is None or password is None:
return JsonResponse({'detail': 'Please provide username and password.'}, status=400)
# Authenticate the user
user = authenticate(username=username, password=password)
if user is None:
return JsonResponse({'detail': 'Invalid credentials.'}, status=400)
# Login
login(request, user)
return JsonResponse({'detail': 'Successfully logged in.'})
class LogoutView(APIView):
def get(self, request):
if not request.user.is_authenticated:
return JsonResponse({'detail': 'User is not authenticated.'}, status=400)
logout(request)
return JsonResponse({'detail': 'Successfully logged out.'})
@method_decorator(ensure_csrf_cookie, name='dispatch')
class SessionView(APIView):
permission_classes = (AllowAny, )
def get(self, request, format=None):
if request.user.is_authenticated:
return JsonResponse({'isAuthenticated': True})
return JsonResponse({'isAuthenticated': False})
class LatestBalanceSheetView(APIView):
""" Find the latest balance sheet and send it to the user. """
def get(self, request, format=None):
settings = MassiliaSettings.objects.get(pk=1)
current_year = BalanceSheet.objects.get(year=settings.year)
serializer = BalanceSheetSerializer(current_year)
return JsonResponse(serializer.data, safe=False)
class YearsEventsView(ListAPIView):
""" Get the events of a specific year. """
serializer_class = UniqueEventSerializer
def get_queryset(self):
year = self.kwargs['year']
return UniqueEvent.objects.filter(year=year)
class NetDifferenceView(APIView):
""" Calculate and return the net difference of year's balance sheet. """
def get(self, request, format=None, *args, **kwargs):
year = kwargs['year']
matched_sheets = BalanceSheet.objects.filter(year=year)
if len(matched_sheets) > 0:
# Calculate the net difference
net_diff = matched_sheets[0].calculate_net_difference()
return JsonResponse({
'isProfit': net_diff[0],
'netDiff': net_diff[1],
})
return JsonResponse({'detail', 'No balance sheet for such year found.'}, status=400)
class EndYearView(APIView):
""" Progress to the next year. """
def get(self, request, format=None):
# Create the new balance sheet
settings = MassiliaSettings.objects.get(pk=1)
settings.end_year()
# Send a positive answer back
return JsonResponse({'details': 'Changed year successfully.'})
|
py | 1a478b4a3857d6c3d380ae0f7781e6f10a66081e | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import re
import subprocess
import time
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.kubernetes import kube_client
from airflow.utils.log.logging_mixin import LoggingMixin
class SparkSubmitHook(BaseHook, LoggingMixin):
"""
This hook is a wrapper around the spark-submit binary to kick off a spark-submit job.
It requires that the "spark-submit" binary is in the PATH or the spark_home to be
supplied.
:param conf: Arbitrary Spark configuration properties
:type conf: dict
:param conn_id: The connection id as configured in Airflow administration. When an
invalid connection_id is supplied, it will default to yarn.
:type conn_id: str
:param files: Upload additional files to the executor running the job, separated by a
comma. Files will be placed in the working directory of each executor.
For example, serialized objects.
:type files: str
:param py_files: Additional python files used by the job, can be .zip, .egg or .py.
:type py_files: str
    :param archives: Archives that spark should unzip (and possibly tag with #ALIAS) into
        the application working directory.
    :type archives: str
:param driver_class_path: Additional, driver-specific, classpath settings.
:type driver_class_path: str
:param jars: Submit additional jars to upload and place them in executor classpath.
:type jars: str
:param java_class: the main class of the Java application
:type java_class: str
:param packages: Comma-separated list of maven coordinates of jars to include on the
driver and executor classpaths
:type packages: str
:param exclude_packages: Comma-separated list of maven coordinates of jars to exclude
while resolving the dependencies provided in 'packages'
:type exclude_packages: str
:param repositories: Comma-separated list of additional remote repositories to search
for the maven coordinates given with 'packages'
:type repositories: str
:param total_executor_cores: (Standalone & Mesos only) Total cores for all executors
(Default: all the available cores on the worker)
:type total_executor_cores: int
:param executor_cores: (Standalone, YARN and Kubernetes only) Number of cores per
executor (Default: 2)
:type executor_cores: int
:param executor_memory: Memory per executor (e.g. 1000M, 2G) (Default: 1G)
:type executor_memory: str
:param driver_memory: Memory allocated to the driver (e.g. 1000M, 2G) (Default: 1G)
:type driver_memory: str
:param keytab: Full path to the file that contains the keytab
:type keytab: str
:param principal: The name of the kerberos principal used for keytab
:type principal: str
:param proxy_user: User to impersonate when submitting the application
:type proxy_user: str
:param name: Name of the job (default airflow-spark)
:type name: str
:param num_executors: Number of executors to launch
:type num_executors: int
:param application_args: Arguments for the application being submitted
:type application_args: list
:param env_vars: Environment variables for spark-submit. It
supports yarn and k8s mode too.
:type env_vars: dict
:param verbose: Whether to pass the verbose flag to spark-submit process for debugging
:type verbose: bool
:param spark_binary: The command to use for spark submit.
Some distros may use spark2-submit.
:type spark_binary: str
"""
def __init__(self,
conf=None,
conn_id='spark_default',
files=None,
py_files=None,
archives=None,
driver_class_path=None,
jars=None,
java_class=None,
packages=None,
exclude_packages=None,
repositories=None,
total_executor_cores=None,
executor_cores=None,
executor_memory=None,
driver_memory=None,
keytab=None,
principal=None,
proxy_user=None,
name='default-name',
num_executors=None,
application_args=None,
env_vars=None,
verbose=False,
spark_binary=None):
self._conf = conf
self._conn_id = conn_id
self._files = files
self._py_files = py_files
self._archives = archives
self._driver_class_path = driver_class_path
self._jars = jars
self._java_class = java_class
self._packages = packages
self._exclude_packages = exclude_packages
self._repositories = repositories
self._total_executor_cores = total_executor_cores
self._executor_cores = executor_cores
self._executor_memory = executor_memory
self._driver_memory = driver_memory
self._keytab = keytab
self._principal = principal
self._proxy_user = proxy_user
self._name = name
self._num_executors = num_executors
self._application_args = application_args
self._env_vars = env_vars
self._verbose = verbose
self._submit_sp = None
self._yarn_application_id = None
self._kubernetes_driver_pod = None
self._spark_binary = spark_binary
self._connection = self._resolve_connection()
self._is_yarn = 'yarn' in self._connection['master']
self._is_kubernetes = 'k8s' in self._connection['master']
if self._is_kubernetes and kube_client is None:
raise RuntimeError(
"{} specified by kubernetes dependencies are not installed!".format(
self._connection['master']))
self._should_track_driver_status = self._resolve_should_track_driver_status()
self._driver_id = None
self._driver_status = None
self._spark_exit_code = None
def _resolve_should_track_driver_status(self):
"""
Determines whether or not this hook should poll the spark driver status through
subsequent spark-submit status requests after the initial spark-submit request
:return: if the driver status should be tracked
"""
return ('spark://' in self._connection['master'] and
self._connection['deploy_mode'] == 'cluster')
def _resolve_connection(self):
# Build from connection master or default to yarn if not available
conn_data = {'master': 'yarn',
'queue': None,
'deploy_mode': None,
'spark_home': None,
'spark_binary': self._spark_binary or "spark-submit",
'namespace': None}
try:
# Master can be local, yarn, spark://HOST:PORT, mesos://HOST:PORT and
# k8s://https://<HOST>:<PORT>
conn = self.get_connection(self._conn_id)
if conn.port:
conn_data['master'] = "{}:{}".format(conn.host, conn.port)
else:
conn_data['master'] = conn.host
# Determine optional yarn queue from the extra field
extra = conn.extra_dejson
conn_data['queue'] = extra.get('queue', None)
conn_data['deploy_mode'] = extra.get('deploy-mode', None)
conn_data['spark_home'] = extra.get('spark-home', None)
conn_data['spark_binary'] = self._spark_binary or \
extra.get('spark-binary', "spark-submit")
conn_data['namespace'] = extra.get('namespace')
except AirflowException:
self.log.info(
"Could not load connection string %s, defaulting to %s",
self._conn_id, conn_data['master']
)
return conn_data
def get_conn(self):
pass
def _get_spark_binary_path(self):
# If the spark_home is passed then build the spark-submit executable path using
# the spark_home; otherwise assume that spark-submit is present in the path to
# the executing user
if self._connection['spark_home']:
connection_cmd = [os.path.join(self._connection['spark_home'], 'bin',
self._connection['spark_binary'])]
else:
connection_cmd = [self._connection['spark_binary']]
return connection_cmd
def _build_spark_submit_command(self, application):
"""
Construct the spark-submit command to execute.
:param application: command to append to the spark-submit command
:type application: str
:return: full command to be executed
"""
connection_cmd = self._get_spark_binary_path()
# The url of the spark master
connection_cmd += ["--master", self._connection['master']]
if self._conf:
for key in self._conf:
connection_cmd += ["--conf", "{}={}".format(key, str(self._conf[key]))]
if self._env_vars and (self._is_kubernetes or self._is_yarn):
if self._is_yarn:
tmpl = "spark.yarn.appMasterEnv.{}={}"
# Allow dynamic setting of hadoop/yarn configuration environments
self._env = self._env_vars
else:
tmpl = "spark.kubernetes.driverEnv.{}={}"
for key in self._env_vars:
connection_cmd += [
"--conf",
tmpl.format(key, str(self._env_vars[key]))]
elif self._env_vars and self._connection['deploy_mode'] != "cluster":
self._env = self._env_vars # Do it on Popen of the process
elif self._env_vars and self._connection['deploy_mode'] == "cluster":
raise AirflowException(
"SparkSubmitHook env_vars is not supported in standalone-cluster mode.")
if self._is_kubernetes and self._connection['namespace']:
connection_cmd += ["--conf", "spark.kubernetes.namespace={}".format(
self._connection['namespace'])]
if self._files:
connection_cmd += ["--files", self._files]
if self._py_files:
connection_cmd += ["--py-files", self._py_files]
if self._archives:
connection_cmd += ["--archives", self._archives]
if self._driver_class_path:
connection_cmd += ["--driver-class-path", self._driver_class_path]
if self._jars:
connection_cmd += ["--jars", self._jars]
if self._packages:
connection_cmd += ["--packages", self._packages]
if self._exclude_packages:
connection_cmd += ["--exclude-packages", self._exclude_packages]
if self._repositories:
connection_cmd += ["--repositories", self._repositories]
if self._num_executors:
connection_cmd += ["--num-executors", str(self._num_executors)]
if self._total_executor_cores:
connection_cmd += ["--total-executor-cores", str(self._total_executor_cores)]
if self._executor_cores:
connection_cmd += ["--executor-cores", str(self._executor_cores)]
if self._executor_memory:
connection_cmd += ["--executor-memory", self._executor_memory]
if self._driver_memory:
connection_cmd += ["--driver-memory", self._driver_memory]
if self._keytab:
connection_cmd += ["--keytab", self._keytab]
if self._principal:
connection_cmd += ["--principal", self._principal]
if self._proxy_user:
connection_cmd += ["--proxy-user", self._proxy_user]
if self._name:
connection_cmd += ["--name", self._name]
if self._java_class:
connection_cmd += ["--class", self._java_class]
if self._verbose:
connection_cmd += ["--verbose"]
if self._connection['queue']:
connection_cmd += ["--queue", self._connection['queue']]
if self._connection['deploy_mode']:
connection_cmd += ["--deploy-mode", self._connection['deploy_mode']]
# The actual script to execute
connection_cmd += [application]
# Append any application arguments
if self._application_args:
connection_cmd += self._application_args
self.log.info("Spark-Submit cmd: %s", connection_cmd)
return connection_cmd
def _build_track_driver_status_command(self):
"""
Construct the command to poll the driver status.
:return: full command to be executed
"""
connection_cmd = self._get_spark_binary_path()
# The url ot the spark master
connection_cmd += ["--master", self._connection['master']]
# The driver id so we can poll for its status
if self._driver_id:
connection_cmd += ["--status", self._driver_id]
else:
raise AirflowException(
"Invalid status: attempted to poll driver " +
"status but no driver id is known. Giving up.")
self.log.debug("Poll driver status cmd: %s", connection_cmd)
return connection_cmd
def submit(self, application="", **kwargs):
"""
Remote Popen to execute the spark-submit job
:param application: Submitted application, jar or py file
:type application: str
:param kwargs: extra arguments to Popen (see subprocess.Popen)
"""
spark_submit_cmd = self._build_spark_submit_command(application)
if hasattr(self, '_env'):
env = os.environ.copy()
env.update(self._env)
kwargs["env"] = env
self._submit_sp = subprocess.Popen(spark_submit_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
bufsize=-1,
universal_newlines=True,
**kwargs)
self._process_spark_submit_log(iter(self._submit_sp.stdout))
returncode = self._submit_sp.wait()
# Check spark-submit return code. In Kubernetes mode, also check the value
# of exit code in the log, as it may differ.
if returncode or (self._is_kubernetes and self._spark_exit_code != 0):
raise AirflowException(
"Cannot execute: {}. Error code is: {}.".format(
spark_submit_cmd, returncode
)
)
self.log.debug("Should track driver: {}".format(self._should_track_driver_status))
# We want the Airflow job to wait until the Spark driver is finished
if self._should_track_driver_status:
if self._driver_id is None:
raise AirflowException(
"No driver id is known: something went wrong when executing " +
"the spark submit command"
)
# We start with the SUBMITTED status as initial status
self._driver_status = "SUBMITTED"
# Start tracking the driver status (blocking function)
self._start_driver_status_tracking()
if self._driver_status != "FINISHED":
raise AirflowException(
"ERROR : Driver {} badly exited with status {}"
.format(self._driver_id, self._driver_status)
)
def _process_spark_submit_log(self, itr):
"""
Processes the spark-submit log output and extracts useful information from it.
If the deploy-mode is 'client', log the output of the submit command as those
are the output logs of the Spark worker directly.
Remark: If the driver needs to be tracked for its status, the log-level of the
spark deploy needs to be at least INFO (log4j.logger.org.apache.spark.deploy=INFO)
:param itr: An iterator which iterates over the input of the subprocess
"""
# Consume the iterator
for line in itr:
line = line.strip()
# If we run yarn cluster mode, we want to extract the application id from
# the logs so we can kill the application when we stop it unexpectedly
if self._is_yarn and self._connection['deploy_mode'] == 'cluster':
match = re.search('(application[0-9_]+)', line)
if match:
self._yarn_application_id = match.groups()[0]
self.log.info("Identified spark driver id: %s",
self._yarn_application_id)
# If we run Kubernetes cluster mode, we want to extract the driver pod id
# from the logs so we can kill the application when we stop it unexpectedly
elif self._is_kubernetes:
match = re.search(r'\s*pod name: ((.+?)-([a-z0-9]+)-driver)', line)
if match:
self._kubernetes_driver_pod = match.groups()[0]
self.log.info("Identified spark driver pod: %s",
self._kubernetes_driver_pod)
# Store the Spark Exit code
match_exit_code = re.search(r'\s*Exit code: (\d+)', line)
if match_exit_code:
self._spark_exit_code = int(match_exit_code.groups()[0])
# if we run in standalone cluster mode and we want to track the driver status
# we need to extract the driver id from the logs. This allows us to poll for
# the status using the driver id. Also, we can kill the driver when needed.
elif self._should_track_driver_status and not self._driver_id:
match_driver_id = re.search(r'(driver-[0-9\-]+)', line)
if match_driver_id:
self._driver_id = match_driver_id.groups()[0]
self.log.info("identified spark driver id: {}"
.format(self._driver_id))
self.log.info(line)
def _process_spark_status_log(self, itr):
"""
parses the logs of the spark driver status query process
:param itr: An iterator which iterates over the input of the subprocess
"""
# Consume the iterator
for line in itr:
line = line.strip()
# Check if the log line is about the driver status and extract the status.
if "driverState" in line:
self._driver_status = line.split(' : ')[1] \
.replace(',', '').replace('\"', '').strip()
self.log.debug("spark driver status log: {}".format(line))
def _start_driver_status_tracking(self):
"""
Polls the driver based on self._driver_id to get the status.
Finish successfully when the status is FINISHED.
Finish failed when the status is ERROR/UNKNOWN/KILLED/FAILED.
Possible status:
SUBMITTED
Submitted but not yet scheduled on a worker
RUNNING
Has been allocated to a worker to run
FINISHED
Previously ran and exited cleanly
RELAUNCHING
Exited non-zero or due to worker failure, but has not yet
started running again
UNKNOWN
The status of the driver is temporarily not known due to
master failure recovery
KILLED
A user manually killed this driver
FAILED
The driver exited non-zero and was not supervised
ERROR
Unable to run or restart due to an unrecoverable error
(e.g. missing jar file)
"""
# When your Spark Standalone cluster is not performing well
# due to misconfiguration or heavy loads.
# it is possible that the polling request will timeout.
# Therefore we use a simple retry mechanism.
missed_job_status_reports = 0
max_missed_job_status_reports = 10
# Keep polling as long as the driver is processing
while self._driver_status not in ["FINISHED", "UNKNOWN",
"KILLED", "FAILED", "ERROR"]:
# Sleep for 1 second as we do not want to spam the cluster
time.sleep(1)
self.log.debug("polling status of spark driver with id {}"
.format(self._driver_id))
poll_driver_status_cmd = self._build_track_driver_status_command()
status_process = subprocess.Popen(poll_driver_status_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
bufsize=-1,
universal_newlines=True)
self._process_spark_status_log(iter(status_process.stdout))
returncode = status_process.wait()
if returncode:
if missed_job_status_reports < max_missed_job_status_reports:
missed_job_status_reports = missed_job_status_reports + 1
else:
raise AirflowException(
"Failed to poll for the driver status {} times: returncode = {}"
.format(max_missed_job_status_reports, returncode)
)
def _build_spark_driver_kill_command(self):
"""
Construct the spark-submit command to kill a driver.
:return: full command to kill a driver
"""
# If the spark_home is passed then build the spark-submit executable path using
# the spark_home; otherwise assume that spark-submit is present in the path to
# the executing user
if self._connection['spark_home']:
connection_cmd = [os.path.join(self._connection['spark_home'],
'bin',
self._connection['spark_binary'])]
else:
connection_cmd = [self._connection['spark_binary']]
# The url of the spark master
connection_cmd += ["--master", self._connection['master']]
# The actual kill command
connection_cmd += ["--kill", self._driver_id]
self.log.debug("Spark-Kill cmd: %s", connection_cmd)
return connection_cmd
def on_kill(self):
self.log.debug("Kill Command is being called")
if self._should_track_driver_status:
if self._driver_id:
self.log.info('Killing driver {} on cluster'
.format(self._driver_id))
kill_cmd = self._build_spark_driver_kill_command()
driver_kill = subprocess.Popen(kill_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.log.info("Spark driver {} killed with return code: {}"
.format(self._driver_id, driver_kill.wait()))
if self._submit_sp and self._submit_sp.poll() is None:
self.log.info('Sending kill signal to %s', self._connection['spark_binary'])
self._submit_sp.kill()
if self._yarn_application_id:
self.log.info('Killing application {} on YARN'
.format(self._yarn_application_id))
kill_cmd = "yarn application -kill {}" \
.format(self._yarn_application_id).split()
yarn_kill = subprocess.Popen(kill_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.log.info("YARN killed with return code: %s", yarn_kill.wait())
if self._kubernetes_driver_pod:
self.log.info('Killing pod %s on Kubernetes', self._kubernetes_driver_pod)
# Currently only instantiate Kubernetes client for killing a spark pod.
try:
import kubernetes
client = kube_client.get_kube_client()
api_response = client.delete_namespaced_pod(
self._kubernetes_driver_pod,
self._connection['namespace'],
body=kubernetes.client.V1DeleteOptions(),
pretty=True)
self.log.info("Spark on K8s killed with response: %s", api_response)
except kube_client.ApiException as e:
self.log.info("Exception when attempting to kill Spark on K8s:")
self.log.exception(e)
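# Illustrative sketch (not part of the hook): a minimal, standalone version of the
# flag-assembly pattern used by _build_spark_submit_command above. The parameter
# names below are simplified placeholders, not the hook's real attributes.
def _example_build_submit_cmd(master, application, conf=None, files=None,
                              application_args=None):
    cmd = ["spark-submit", "--master", master]
    for key, value in (conf or {}).items():
        # Arbitrary spark configuration is passed as repeated --conf key=value pairs
        cmd += ["--conf", "{}={}".format(key, value)]
    if files:
        cmd += ["--files", files]
    # The application itself comes last, followed by its own arguments
    cmd += [application]
    cmd += list(application_args or [])
    return cmd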
|
py | 1a478ba3d957cee13a3dd1393ae3625a9015dfc9 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class LoadBalancerProbesOperations:
"""LoadBalancerProbesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2017_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
load_balancer_name: str,
**kwargs: Any
) -> AsyncIterable["_models.LoadBalancerProbeListResult"]:
"""Gets all the load balancer probes.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LoadBalancerProbeListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2017_11_01.models.LoadBalancerProbeListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LoadBalancerProbeListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-11-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('LoadBalancerProbeListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/probes'} # type: ignore
async def get(
self,
resource_group_name: str,
load_balancer_name: str,
probe_name: str,
**kwargs: Any
) -> "_models.Probe":
"""Gets load balancer probe.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param probe_name: The name of the probe.
:type probe_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Probe, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2017_11_01.models.Probe
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Probe"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-11-01"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'probeName': self._serialize.url("probe_name", probe_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Probe', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/probes/{probeName}'} # type: ignore
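# Illustrative usage sketch (not part of the generated operations class): iterating
# probes with an already-authenticated async management client. The attribute name
# `load_balancer_probes` on the client is an assumption made for this example.
async def _example_print_probe_names(network_client, resource_group_name, load_balancer_name):
    # AsyncItemPaged yields individual Probe models across pages
    async for probe in network_client.load_balancer_probes.list(
            resource_group_name, load_balancer_name):
        print(probe.name)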
|
py | 1a478cbdc6fa0915c1cae3c2b67d6e199c358269 | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class PyMoltemplate(PythonPackage):
"""Moltemplate is a general cross-platform text-based molecule builder for
LAMMPS."""
homepage = "https://moltemplate.org"
url = "https://github.com/jewettaij/moltemplate/archive/v2.5.8.tar.gz"
version('2.5.8', sha256='f1e2d52249e996d85f5b1b7b50f50037da9e4b9c252cdfc622b21e79aa21162f')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-setuptools', type=('build', 'run'))
|
py | 1a478ec19db1922bd43b4eca048921798889ce11 | # -*- coding: utf-8 -*-
"""Public section, including homepage and signup."""
from flask import (
Blueprint,
current_app,
flash,
redirect,
render_template,
request,
url_for,
)
from flask_login import login_required, login_user, logout_user
from flask_blog_api.extensions import login_manager
from flask_blog_api.public.forms import LoginForm
from flask_blog_api.user.forms import RegisterForm
from flask_blog_api.user.models import User
from flask_blog_api.utils import flash_errors
blueprint = Blueprint("public", __name__, static_folder="../static")
@login_manager.user_loader
def load_user(user_id):
"""Load user by ID."""
return User.get_by_id(int(user_id))
@blueprint.route("/", methods=["GET", "POST"])
def home():
"""Home page."""
form = LoginForm(request.form)
current_app.logger.info("Hello from the home page!")
# Handle logging in
if request.method == "POST":
if form.validate_on_submit():
login_user(form.user)
flash("You are logged in.", "success")
redirect_url = request.args.get("next") or url_for("user.members")
return redirect(redirect_url)
else:
flash_errors(form)
return render_template("public/home.html", form=form)
@blueprint.route("/logout/")
@login_required
def logout():
"""Logout."""
logout_user()
flash("You are logged out.", "info")
return redirect(url_for("public.home"))
@blueprint.route("/register/", methods=["GET", "POST"])
def register():
"""Register new user."""
form = RegisterForm(request.form)
if form.validate_on_submit():
User.create(
username=form.username.data,
email=form.email.data,
first_name=form.first_name.data,
last_name=form.last_name.data,
password=form.password.data,
active=True,
)
flash("Thank you for registering. You can now log in.", "success")
return redirect(url_for("public.home"))
else:
flash_errors(form)
return render_template("public/register.html", form=form)
@blueprint.route("/about/")
def about():
"""About page."""
form = LoginForm(request.form)
return render_template("public/about.html", form=form)
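# Illustrative sketch (not part of this module): how the blueprint above is typically
# attached to the application. The factory below is a simplified, hypothetical
# stand-in; a real factory would also initialize extensions such as login_manager.
def _example_create_app(config_object=None):
    from flask import Flask
    app = Flask(__name__)
    if config_object is not None:
        app.config.from_object(config_object)
    app.register_blueprint(blueprint)
    return app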
|
py | 1a478f76b54895860aeff7425d03a594ed22af6a | # -*- coding: utf-8 -*-
#
# sphinx-nbexamples documentation build configuration file, created by
# sphinx-quickstart on Mon Jul 20 18:01:33 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import os.path as osp
import re
import six
import sphinx_nbexamples
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(osp.dirname(__file__)))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'autodocsumm',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.extlinks',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
'sphinx_nbexamples',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# on_rtd is whether we are on readthedocs.org, this line of code grabbed from
# docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
napoleon_use_admonition_for_examples = True
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = '.rst'
not_document_data = 'sphinx_nbexamples.gallery_config'
example_gallery_config = dict(
dont_preprocess=['../examples/Subgallery/example_bokeh.ipynb'],
insert_bokeh='0.12.1',
urls='https://github.com/Chilipp/sphinx-nbexamples/blob/master/examples',
binder_url='https://mybinder.org/v2/gh/Chilipp/sphinx-nbexamples/master?filepath=examples',
)
process_examples = not osp.exists(osp.join(osp.dirname(__file__), 'examples'))
if on_rtd:
import subprocess as spr
spr.call([sys.executable] +
('-m ipykernel install --user --name python3 '
'--display-name python3').split())
spr.call([sys.executable, '-m', 'bash_kernel.install'])
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
autodoc_default_flags = ['show_inheritance', 'autosummary']
autoclass_content = 'both'
autodata_content = 'call'
add_module_names = False
# General information about the project.
project = u'sphinx-nbexamples'
copyright = u'2016, Philipp Sommer'
author = u'Philipp Sommer'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = re.match(r'\d+\.\d+\.\d+', sphinx_nbexamples.__version__).group()
# The full version, including alpha/beta/rc tags.
release = sphinx_nbexamples.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# otherwise, readthedocs.org uses their theme by default, so no need to specify
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'sphinx-nbexamplesdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
'preamble': r'\setcounter{tocdepth}{10}'
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'sphinx-nbexamples.tex', u'sphinx-nbexamples Documentation',
u'Philipp Sommer', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'sphinx-nbexamples', u'sphinx-nbexamples Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'sphinx-nbexamples', u'sphinx-nbexamples Documentation',
author, 'sphinx-nbexamples', 'Extending your autodoc API docs with a summary',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'sphinx': ('http://www.sphinx-doc.org/en/stable/', None),
'sphinx_nbexamples_doc': (
'http://sphinx-nbexamples.readthedocs.io/en/latest/', None),
'psyplot': ('http://psyplot.readthedocs.io/en/latest/', None),
'nbconvert': ('https://nbconvert.readthedocs.io/en/latest/', None),
}
if six.PY3:
intersphinx_mapping['python'] = ('https://docs.python.org/3.4/', None)
else:
intersphinx_mapping['python'] = ('https://docs.python.org/2.7/', None)
extlinks = {'dudir': ('http://docutils.sourceforge.net/docs/ref/rst/'
'directives.html#%s', '')}
# -- Extension interface ------------------------------------------------------
# taken from sphinx conf.py
from sphinx import addnodes # noqa
event_sig_re = re.compile(r'([a-zA-Z-]+)\s*\((.*)\)')
def parse_event(env, sig, signode):
m = event_sig_re.match(sig)
if not m:
signode += addnodes.desc_name(sig, sig)
return sig
name, args = m.groups()
signode += addnodes.desc_name(name, name)
plist = addnodes.desc_parameterlist()
for arg in args.split(','):
arg = arg.strip()
plist += addnodes.desc_parameter(arg, arg)
signode += plist
return name
def setup(app):
from sphinx.util.docfields import GroupedField
app.add_object_type('confval', 'confval',
objname='configuration value',
indextemplate='pair: %s; configuration value')
fdesc = GroupedField('parameter', label='Parameters',
names=['param'], can_collapse=True)
app.add_object_type('event', 'event', 'pair: %s; event', parse_event,
doc_field_types=[fdesc])
|
py | 1a478fceb530193dbc07940aa9efe00e256db566 | #################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
from pandas import DataFrame
from collections import OrderedDict
from pyomo.environ import value
from pyomo.network import Arc, Port
import idaes.logger as idaeslog
from idaes.core.util.units_of_measurement import report_quantity
_log = idaeslog.getLogger(__name__)
__author__ = "John Eslick, Andrew Lee"
def arcs_to_stream_dict(
blk, additional=None, descend_into=True, sort=False, prepend=None, s=None
):
"""
Creates a stream dictionary from the Arcs in a model, using the Arc names as
keys. This can be used to automate the creation of the streams dictionary
needed for the ``create_stream_table_dataframe()`` and ``stream_states_dict()``
functions.
Args:
blk (pyomo.environ._BlockData): Pyomo model to search for Arcs
additional (dict): Additional states to add to the stream dictionary,
which aren't represented by arcs in blk, for example feed or
product streams without Arcs attached or states internal to a unit
model.
descend_into (bool): If True, search subblocks for Arcs as well. The
default is True.
sort (bool): If True sort keys and return an OrderedDict
prepend (str): Prepend a string to the arc name joined with a '.'.
This can be useful to prevent conflicting names when sub blocks
contain Arcs that have the same names when used in combination
with descend_into=False.
s (dict): Add streams to an existing stream dict.
Returns:
Dictionary with Arc names as keys and the Arcs as values.
"""
if s is None:
s = {}
for c in blk.component_objects(Arc, descend_into=descend_into):
key = c.getname()
if prepend is not None:
key = ".".join([prepend, key])
s[key] = c
if additional is not None:
s.update(additional)
if sort:
s = OrderedDict(sorted(s.items()))
return s
def stream_states_dict(streams, time_point=0):
"""
Method to create a dictionary of state blocks representing stream states.
This takes a dict with stream name keys and stream values.
Args:
streams : dict with name keys and stream values. Names will be used as
display names for stream table, and streams may be Arcs, Ports or
StateBlocks.
time_point : point in the time domain at which to generate stream table
(default = 0)
Returns:
A dictionary with stream names as keys and the corresponding StateBlock data objects (at the given time point) as values.
"""
stream_dict = OrderedDict()
def _stream_dict_add(sb, n, i=None):
"""add a line to the stream table"""
if i is None:
key = n
else:
key = "{}[{}]".format(n, i)
stream_dict[key] = sb
for n in streams.keys():
if isinstance(streams[n], Arc):
for i, a in streams[n].items():
try:
# if getting the StateBlock from the destination port
# fails for any reason try the source port. This could
# happen if a port does not have an associated
# StateBlock. For example a surrogate model may not
# use state blocks, unit models may handle physical
# properties without state blocks, or the port could
# be used to serve the purpose of a translator block.
sb = _get_state_from_port(a.ports[1], time_point)
except:
sb = _get_state_from_port(a.ports[0], time_point)
_stream_dict_add(sb, n, i)
elif isinstance(streams[n], Port):
sb = _get_state_from_port(streams[n], time_point)
_stream_dict_add(sb, n)
else:
# _IndexedStateBlock is a private class, so cannot directly test
# whether streams[n] is one or not.
try:
sb = streams[n][time_point]
except KeyError as err:
raise TypeError(
f"Either component type of stream argument {streams[n]} "
f"is unindexed or {time_point} is not a member of its "
f"indexing set."
) from err
_stream_dict_add(sb, n)
return stream_dict
def tag_state_quantities(blocks, attributes, labels, exception=False):
"""Take a stream states dictionary, and return a tag dictionary for stream
quantities. This takes a dictionary (blk) that has state block labels as
keys and state blocks as values. The attributes are a list of attributes to
tag. If an element of the attribute list is list-like, the first element is
the attribute and the remaining elements are indexes. Labels provides a list
of attribute labels to be used to create the tag. Tags are blk_key + label
for the attribute.
Args:
blocks (dict): Dictionary of state blocks. The key is the block label to
be used in the tag, and the value is a state block.
attributes (list-like): A list of attributes to tag. It is okay if a
particular attribute does not exist in a state block. This allows
you to mix state blocks with different sets of attributes. If an
attribute is indexed, the attribute can be specified as a list or
tuple where the first element is the attribute and the remaining
elements are indexes.
labels (list-like): These are attribute labels. The order corresponds to the
attribute list. They are used to create the tags. Tags are in the
form blk.key + label.
exception (bool): If True, raise exceptions related to invalid or
missing indexes. If False, missing or bad indexes are ignored and
None is used for the table value. Setting this to False allows
tables where some state blocks have the same attributes with different
indexing. (default is True)
Return:
(dict): Dictionary where the keys are tags and the values are model
attributes, usually Pyomo component data objects.
"""
tags = {}
if labels is None:
labels = attributes
for a in attributes:
if isinstance(a, (tuple, list)):
if len(a) == 2:
# in case there are multiple indexes and user gives tuple
label = f"{a[0]}[{a[1]}]"
if len(a) > 2:
label = f"{a[0]}[{a[1:]}]"
else:
label = a[0]
for key, s in blocks.items():
for i, a in enumerate(attributes):
j = None
if isinstance(a, (list, tuple)):
# if a is list or tuple, the first element should be the
# attribute and the remaining elements should be indexes.
if len(a) == 2:
j = a[1] # catch user supplying list-like of indexes
if len(a) > 2:
j = a[1:]
# if len(a) == 1, we'll say that's fine here. Don't know why you
# would put the attribute in a list-like if not indexed, but I'll
# allow it.
a = a[0]
v = getattr(s, a, None)
if j is not None and v is not None:
try:
v = v[j]
except KeyError:
if not exception:
v = None
else:
_log.error(f"{j} is not a valid index of {a}")
raise KeyError(f"{j} is not a valid index of {a}")
try:
value(v, exception=False)
except TypeError:
if not exception:
v = None
else:
_log.error(f"Cannot calculate value of {a} (may be subscriptable)")
raise TypeError(
f"Cannot calculate value of {a} (may be subscriptable)"
)
except ZeroDivisionError:
pass # this one is okay
if v is not None:
tags[f"{key}{labels[i]}"] = v
return tags
def create_stream_table_dataframe(
streams, true_state=False, time_point=0, orient="columns"
):
"""
Method to create a stream table in the form of a pandas dataframe. Method
takes a dict with name keys and stream values. Use an OrderedDict to list
the streams in a specific order, otherwise the dataframe can be sorted
later.
Args:
streams : dict with name keys and stream values. Names will be used as
display names for stream table, and streams may be Arcs, Ports or
StateBlocks.
true_state : indicates whether the stream table should contain the
display variables define in the StateBlock (False, default) or the
state variables (True).
time_point : point in the time domain at which to generate stream table
(default = 0)
orient : orientation of stream table. Accepted values are 'columns'
(default) where streams are displayed as columns, or 'index' where
streams are displayed as rows.
Returns:
A pandas DataFrame containing the stream table data.
"""
stream_attributes = OrderedDict()
stream_states = stream_states_dict(streams=streams, time_point=time_point)
full_keys = [] # List of all rows in dataframe to fill in missing data
stream_attributes["Units"] = {}
for key, sb in stream_states.items():
stream_attributes[key] = {}
if true_state:
disp_dict = sb.define_state_vars()
else:
disp_dict = sb.define_display_vars()
for k in disp_dict:
for i in disp_dict[k]:
stream_key = k if i is None else f"{k} {i}"
quant = report_quantity(disp_dict[k][i])
stream_attributes[key][stream_key] = quant.m
# TODO: Only need to do this once, as otherwise we are just
# repeatedly overwriting this
stream_attributes["Units"][stream_key] = quant.u
if stream_key not in full_keys:
full_keys.append(stream_key)
# Check for missing rows in any stream, and fill with "-" if needed
for k, v in stream_attributes.items():
for r in full_keys:
if r not in v.keys():
# Missing row, fill with placeholder
v[r] = "-"
return DataFrame.from_dict(stream_attributes, orient=orient)
def stream_table_dataframe_to_string(stream_table, **kwargs):
"""
Method to print a stream table from a dataframe. Method takes any argument
understood by DataFrame.to_string
"""
# Set some default values for keyword arguments
na_rep = kwargs.pop("na_rep", "-")
justify = kwargs.pop("justify", "center")
float_format = kwargs.pop("float_format", lambda x: "{:#.5g}".format(x))
# Print stream table
return stream_table.to_string(
na_rep=na_rep, justify=justify, float_format=float_format, **kwargs
)
def _get_state_from_port(port, time_point):
"""
Attempt to find a StateBlock-like object connected to a Port. If the
object is indexed both in space and time, assume that the time index
comes first. If no components are assigned to the Port, raise a
ValueError. If the first component's parent block has no index, raise an
AttributeError. If different variables on the port appear to be connected
to different state blocks, raise a RuntimeError.
Args:
port (pyomo.network.Port): a port with variables derived from some
single StateBlock
time_point : point in the time domain at which to index StateBlock
(default = 0)
Returns:
(StateBlock-like) : an object containing all the components contained
in the port.
"""
vlist = list(port.iter_vars())
states = [v.parent_block().parent_component() for v in vlist]
if len(vlist) == 0:
raise ValueError(
f"No block could be retrieved from Port {port.name} "
f"because it contains no components."
)
# Check the number of indices of the parent property block. If its indexed
# both in space and time, keep the second, spatial index and throw out the
# first, temporal index. If that ordering is changed, this method will
# need to be changed as well.
try:
idx = vlist[0].parent_block().index()
except AttributeError as err:
raise AttributeError(
f"No block could be retrieved from Port {port.name} "
f"because block {vlist[0].parent_block().name} has no index."
) from err
# Assuming the time index is always first and the spatial indices are all
# the same
if isinstance(idx, tuple):
idx = (time_point, vlist[0].parent_block().index()[1:])
else:
idx = (time_point,)
# This method also assumes that ports with different spatial indices won't
# end up at the same port. Otherwise this check is insufficient.
if all(states[0] is s for s in states):
return states[0][idx]
raise RuntimeError(
f"No block could be retrieved from Port {port.name} "
f"because components are derived from multiple blocks."
)
def generate_table(blocks, attributes, heading=None, exception=True):
"""
Create a Pandas DataFrame that contains a list of user-defined attributes
from a set of Blocks.
Args:
blocks (dict): A dictionary with name keys and BlockData objects for
values. Any name can be associated with a block. Use an OrderedDict
to show the blocks in a specific order, otherwise the dataframe can
be sorted later.
attributes (list or tuple of strings): Attributes to report from a
Block, can be a Var, Param, or Expression. If an attribute doesn't
exist or doesn't have a valid value, it will be treated as missing
data.
heading (list or tuple of strings): A list of strings that will be used
as column headings. If None, the attribute names will be used.
exception (bool): If True, raise exceptions related to invalid or
missing indexes. If False, missing or bad indexes are ignored and
None is used for the table value. Setting this to False allows
tables where some state blocks have the same attributes with different
indexing. (default is True)
Returns:
(DataFrame): A Pandas dataframe containing a data table
"""
if heading is None:
heading = attributes
st = DataFrame(columns=heading)
row = [None] * len(attributes) # not a big deal but save time on realloc
for key, s in blocks.items():
for i, a in enumerate(attributes):
j = None
if isinstance(a, (list, tuple)):
# if a is list or tuple, assume index supplied
try:
assert len(a) > 1
except AssertionError:
_log.error(f"An index must be supplided for attribute {a[0]}")
raise AssertionError(
f"An index must be supplided for attribute {a[0]}"
)
j = a[1:]
a = a[0]
v = getattr(s, a, None)
if j is not None and v is not None:
try:
v = v[j]
except KeyError:
if not exception:
v = None
else:
_log.error(f"{j} is not a valid index of {a}")
raise KeyError(f"{j} is not a valid index of {a}")
try:
v = value(v, exception=False)
except TypeError:
if not exception:
v = None
else:
_log.error(f"Cannot calculate value of {a} (may be subscriptable)")
raise TypeError(
f"Cannot calculate value of {a} (may be subscriptable)"
)
except ZeroDivisionError:
v = None
row[i] = v
st.loc[key] = row
return st
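# Illustrative usage sketch (not part of the module): combining the helpers above to
# render a stream table. `flowsheet` is a hypothetical, already-built flowsheet block
# containing Arcs; nothing here executes at import time.
def _example_stream_table(flowsheet):
    streams = arcs_to_stream_dict(flowsheet, sort=True)
    table = create_stream_table_dataframe(streams, time_point=0)
    return stream_table_dataframe_to_string(table)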
|
py | 1a47900ba695d42d5b4e4db71ac20606e22179b6 | import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.LIN/Sun-ExtA_16/udhr_Latn.LIN_Sun-ExtA_16.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
|
py | 1a47901a3cd7fd3d4428d98f67b98c4167fcbb7b | from django.contrib import admin
from .models import Post, PostFile, Comment, Like, Follow
# Register your models here.
admin.site.register(PostFile)
admin.site.register(Post)
admin.site.register(Comment)
admin.site.register(Like)
admin.site.register(Follow)
|
py | 1a47904f89ba4489d0e388b31be290a02f2aadc0 | # -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Fermatum - lightweight IoP client
# Copyright (C) 2011 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import hashlib
import base64
import os
import re
import hmac
import version
from util import print_error, InvalidPassword
import ecdsa
import pyaes
# IoP network constants
TESTNET = False
# NOLNET = False
ADDRTYPE_P2PKH = 0x75
ADDRTYPE_P2SH = 0xAE
WIF_BYTE = 0x31
#ADDRTYPE_P2WPKH = 6 # Segwit Addresses
XPRV_HEADER = 0xAE3416F6
XPUB_HEADER = 0x2780915F
HEADERS_URL = "https://headers.fermatum.org/blockchain_headers" # TODO Change this
def set_testnet():
global ADDRTYPE_P2PKH, ADDRTYPE_P2SH, WIF_BYTE#, ADDRTYPE_P2WPKH
global XPRV_HEADER, XPUB_HEADER
global TESTNET, HEADERS_URL
TESTNET = True
ADDRTYPE_P2PKH = 0x82
ADDRTYPE_P2SH = 0x31
WIF_BYTE = 0x4C
#ADDRTYPE_P2WPKH = 3 # Segwit Addresses
XPRV_HEADER = 0x2B7FA42A
XPUB_HEADER = 0xBB8F4852
HEADERS_URL = "https://headers.fermatum.org/testnet_headers"
#def set_nolnet():
# global ADDRTYPE_P2PKH, ADDRTYPE_P2SH, ADDRTYPE_P2WPKH
# global XPRV_HEADER, XPUB_HEADER
# global NOLNET, HEADERS_URL
# NOLNET = True
# ADDRTYPE_P2PKH = 0
# ADDRTYPE_P2SH = 5
# ADDRTYPE_P2WPKH = 6
# XPRV_HEADER = 0x0488ade4
# XPUB_HEADER = 0x0488b21e
# HEADERS_URL = "https://headers.fermatum.org/nolnet_headers"
################################## transactions
FEE_STEP = 10000
MAX_FEE_RATE = 300000
FEE_TARGETS = [25, 10, 5, 2]
COINBASE_MATURITY = 100
COIN = 100000000
# supported types of transction outputs
TYPE_ADDRESS = 0
TYPE_PUBKEY = 1
TYPE_SCRIPT = 2
# AES encryption
try:
from Cryptodome.Cipher import AES
except:
AES = None
def aes_encrypt_with_iv(key, iv, data):
if AES:
padlen = 16 - (len(data) % 16)
if padlen == 0:
padlen = 16
data += chr(padlen) * padlen
e = AES.new(key, AES.MODE_CBC, iv).encrypt(data)
return e
else:
aes_cbc = pyaes.AESModeOfOperationCBC(key, iv=iv)
aes = pyaes.Encrypter(aes_cbc)
e = aes.feed(data) + aes.feed() # empty aes.feed() appends pkcs padding
return e
def aes_decrypt_with_iv(key, iv, data):
if AES:
cipher = AES.new(key, AES.MODE_CBC, iv)
data = cipher.decrypt(data)
padlen = ord(data[-1])
for i in data[-padlen:]:
if ord(i) != padlen:
raise InvalidPassword()
return data[0:-padlen]
else:
aes_cbc = pyaes.AESModeOfOperationCBC(key, iv=iv)
aes = pyaes.Decrypter(aes_cbc)
s = aes.feed(data) + aes.feed() # empty aes.feed() strips pkcs padding
return s
def EncodeAES(secret, s):
iv = bytes(os.urandom(16))
ct = aes_encrypt_with_iv(secret, iv, s)
e = iv + ct
return base64.b64encode(e)
def DecodeAES(secret, e):
e = bytes(base64.b64decode(e))
iv, e = e[:16], e[16:]
s = aes_decrypt_with_iv(secret, iv, e)
return s
def pw_encode(s, password):
if password:
secret = Hash(password)
return EncodeAES(secret, s.encode("utf8"))
else:
return s
def pw_decode(s, password):
if password is not None:
secret = Hash(password)
try:
d = DecodeAES(secret, s).decode("utf8")
except Exception:
raise InvalidPassword()
return d
else:
return s
def rev_hex(s):
return s.decode('hex')[::-1].encode('hex')
def int_to_hex(i, length=1):
s = hex(i)[2:].rstrip('L')
s = "0"*(2*length - len(s)) + s
return rev_hex(s)
def var_int(i):
# https://en.iop.it/wiki/Protocol_specification#Variable_length_integer
if i<0xfd:
return int_to_hex(i)
elif i<=0xffff:
return "fd"+int_to_hex(i,2)
elif i<=0xffffffff:
return "fe"+int_to_hex(i,4)
else:
return "ff"+int_to_hex(i,8)
def op_push(i):
if i<0x4c:
return int_to_hex(i)
elif i<0xff:
return '4c' + int_to_hex(i)
elif i<0xffff:
return '4d' + int_to_hex(i,2)
else:
return '4e' + int_to_hex(i,4)
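# Illustrative sketch (not part of the library): the Bitcoin-style variable-length
# integer encoding implemented above. The expected strings are little-endian hex as
# produced by int_to_hex/rev_hex.
def _demo_var_int():
    assert var_int(0x10) == '10'                 # < 0xfd: single byte
    assert var_int(0x1000) == 'fd0010'           # <= 0xffff: 'fd' + 2 bytes LE
    assert var_int(0x10000000) == 'fe00000010'   # <= 0xffffffff: 'fe' + 4 bytes LE
    return True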
def sha256(x):
return hashlib.sha256(x).digest()
def Hash(x):
if type(x) is unicode: x=x.encode('utf-8')
return sha256(sha256(x))
hash_encode = lambda x: x[::-1].encode('hex')
hash_decode = lambda x: x.decode('hex')[::-1]
hmac_sha_512 = lambda x,y: hmac.new(x, y, hashlib.sha512).digest()
def is_new_seed(x, prefix=version.SEED_PREFIX):
import mnemonic
x = mnemonic.normalize_text(x)
s = hmac_sha_512("Seed version", x.encode('utf8')).encode('hex')
return s.startswith(prefix)
def is_old_seed(seed):
import old_mnemonic
words = seed.strip().split()
try:
old_mnemonic.mn_decode(words)
uses_fermatum_words = True
except Exception:
uses_fermatum_words = False
try:
seed.decode('hex')
is_hex = (len(seed) == 32 or len(seed) == 64)
except Exception:
is_hex = False
return is_hex or (uses_fermatum_words and (len(words) == 12 or len(words) == 24))
def seed_type(x):
if is_old_seed(x):
return 'old'
elif is_new_seed(x):
return 'standard'
elif TESTNET and is_new_seed(x, version.SEED_PREFIX_SW):
return 'segwit'
elif is_new_seed(x, version.SEED_PREFIX_2FA):
return '2fa'
return ''
is_seed = lambda x: bool(seed_type(x))
# pywallet openssl private key implementation
def i2o_ECPublicKey(pubkey, compressed=False):
# public keys are 65 bytes long (520 bits)
# 0x04 + 32-byte X-coordinate + 32-byte Y-coordinate
# 0x00 = point at infinity, 0x02 and 0x03 = compressed, 0x04 = uncompressed
# compressed keys: <sign> <x> where <sign> is 0x02 if y is even and 0x03 if y is odd
if compressed:
if pubkey.point.y() & 1:
key = '03' + '%064x' % pubkey.point.x()
else:
key = '02' + '%064x' % pubkey.point.x()
else:
key = '04' + \
'%064x' % pubkey.point.x() + \
'%064x' % pubkey.point.y()
return key.decode('hex')
# end pywallet openssl private key implementation
############ functions from pywallet #####################
def hash_160(public_key):
if 'ANDROID_DATA' in os.environ:
from Crypto.Hash import RIPEMD
md = RIPEMD.new()
else:
md = hashlib.new('ripemd')
md.update(sha256(public_key))
return md.digest()
def hash_160_to_bc_address(h160, addrtype, witness_program_version=1):
s = chr(addrtype)
#if addrtype == ADDRTYPE_P2WPKH:
# s += chr(witness_program_version) + chr(0)
s += h160
return base_encode(s+Hash(s)[0:4], base=58)
def bc_address_to_hash_160(addr):
bytes = base_decode(addr, 25, base=58)
return ord(bytes[0]), bytes[1:21]
def hash160_to_p2pkh(h160):
return hash_160_to_bc_address(h160, ADDRTYPE_P2PKH)
def hash160_to_p2sh(h160):
return hash_160_to_bc_address(h160, ADDRTYPE_P2SH)
def public_key_to_p2pkh(public_key):
return hash160_to_p2pkh(hash_160(public_key))
def public_key_to_p2wpkh(public_key):
return hash_160_to_bc_address(hash_160(public_key), ADDRTYPE_P2WPKH)
__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
assert len(__b58chars) == 58
__b43chars = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ$*+-./:'
assert len(__b43chars) == 43
def base_encode(v, base):
""" encode v, which is a string of bytes, to base58."""
if base == 58:
chars = __b58chars
elif base == 43:
chars = __b43chars
long_value = 0L
for (i, c) in enumerate(v[::-1]):
long_value += (256**i) * ord(c)
result = ''
while long_value >= base:
div, mod = divmod(long_value, base)
result = chars[mod] + result
long_value = div
result = chars[long_value] + result
# IoP does a little leading-zero-compression:
# leading 0-bytes in the input become leading-1s
nPad = 0
for c in v:
if c == '\0': nPad += 1
else: break
return (chars[0]*nPad) + result
def base_decode(v, length, base):
""" decode v into a string of len bytes."""
if base == 58:
chars = __b58chars
elif base == 43:
chars = __b43chars
long_value = 0L
for (i, c) in enumerate(v[::-1]):
long_value += chars.find(c) * (base**i)
result = ''
while long_value >= 256:
div, mod = divmod(long_value, 256)
result = chr(mod) + result
long_value = div
result = chr(long_value) + result
nPad = 0
for c in v:
if c == chars[0]: nPad += 1
else: break
result = chr(0)*nPad + result
if length is not None and len(result) != length:
return None
return result
def EncodeBase58Check(vchIn):
hash = Hash(vchIn)
return base_encode(vchIn + hash[0:4], base=58)
def DecodeBase58Check(psz):
vchRet = base_decode(psz, None, base=58)
key = vchRet[0:-4]
csum = vchRet[-4:]
hash = Hash(key)
cs32 = hash[0:4]
if cs32 != csum:
return None
else:
return key
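# Illustrative sketch (not part of the library): Base58Check round-trip using the
# helpers above. The 32-byte payload is an arbitrary example value, not a real key.
def _demo_base58check_roundtrip():
    payload = chr(WIF_BYTE & 255) + '\x01' * 32
    encoded = EncodeBase58Check(payload)
    assert DecodeBase58Check(encoded) == payload
    return encoded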
def PrivKeyToSecret(privkey):
return privkey[9:9+32]
def SecretToASecret(secret, compressed=False):
#addrtype = ADDRTYPE_P2PKH
vchIn = chr(WIF_BYTE&255) + secret
if compressed: vchIn += '\01'
return EncodeBase58Check(vchIn)
def ASecretToSecret(key):
#addrtype = ADDRTYPE_P2PKH
vch = DecodeBase58Check(key)
if vch and vch[0] == chr(WIF_BYTE&255):
return vch[1:]
elif is_minikey(key):
return minikey_to_private_key(key)
else:
return False
def regenerate_key(sec):
b = ASecretToSecret(sec)
if not b:
return False
b = b[0:32]
return EC_KEY(b)
def GetPubKey(pubkey, compressed=False):
return i2o_ECPublicKey(pubkey, compressed)
def GetSecret(pkey):
return ('%064x' % pkey.secret).decode('hex')
def is_compressed(sec):
b = ASecretToSecret(sec)
return len(b) == 33
def public_key_from_private_key(sec):
# rebuild public key from private key, compressed or uncompressed
pkey = regenerate_key(sec)
assert pkey
compressed = is_compressed(sec)
public_key = GetPubKey(pkey.pubkey, compressed)
return public_key.encode('hex')
def address_from_private_key(sec):
public_key = public_key_from_private_key(sec)
address = public_key_to_p2pkh(public_key.decode('hex'))
return address
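# Illustrative sketch (not part of the library): deriving an address from a freshly
# encoded WIF secret. The 32-byte secret below is an arbitrary example, not a real key.
def _demo_address_from_secret():
    sec = SecretToASecret('\x01' * 32, compressed=True)
    return address_from_private_key(sec)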
def is_valid(addr):
return is_address(addr)
def is_address(addr):
try:
addrtype, h = bc_address_to_hash_160(addr)
except Exception:
return False
if addrtype not in [ADDRTYPE_P2PKH, ADDRTYPE_P2SH]:
return False
return addr == hash_160_to_bc_address(h, addrtype)
def is_p2pkh(addr):
if is_address(addr):
addrtype, h = bc_address_to_hash_160(addr)
return addrtype == ADDRTYPE_P2PKH
def is_p2sh(addr):
if is_address(addr):
addrtype, h = bc_address_to_hash_160(addr)
return addrtype == ADDRTYPE_P2SH
def is_private_key(key):
try:
k = ASecretToSecret(key)
return k is not False
except:
return False
########### end pywallet functions #######################
def is_minikey(text):
# Minikeys are typically 22 or 30 characters, but this routine
# permits any length of 20 or more provided the minikey is valid.
# A valid minikey must begin with an 'S', be in base58, and when
# suffixed with '?' have its SHA256 hash begin with a zero byte.
    # They are widely used in Casascius physical bitcoins.
return (len(text) >= 20 and text[0] == 'S'
and all(c in __b58chars for c in text)
and ord(sha256(text + '?')[0]) == 0)
def minikey_to_private_key(text):
return sha256(text)
from ecdsa.ecdsa import curve_secp256k1, generator_secp256k1
from ecdsa.curves import SECP256k1
from ecdsa.ellipticcurve import Point
from ecdsa.util import string_to_number, number_to_string
def msg_magic(message):
varint = var_int(len(message))
encoded_varint = "".join([chr(int(varint[i:i+2], 16)) for i in xrange(0, len(varint), 2)])
return "\x18IoP Signed Message:\n" + encoded_varint + message
def verify_message(address, sig, message):
try:
public_key, compressed = pubkey_from_signature(sig, message)
# check public key using the address
pubkey = point_to_ser(public_key.pubkey.point, compressed)
addr = public_key_to_p2pkh(pubkey)
if address != addr:
raise Exception("Bad signature")
# check message
h = Hash(msg_magic(message))
public_key.verify_digest(sig[1:], h, sigdecode = ecdsa.util.sigdecode_string)
return True
except Exception as e:
print_error("Verification error: {0}".format(e))
return False
def encrypt_message(message, pubkey):
return EC_KEY.encrypt_message(message, pubkey.decode('hex'))
def chunks(l, n):
return [l[i:i+n] for i in xrange(0, len(l), n)]
def ECC_YfromX(x,curved=curve_secp256k1, odd=True):
_p = curved.p()
_a = curved.a()
_b = curved.b()
for offset in range(128):
Mx = x + offset
My2 = pow(Mx, 3, _p) + _a * pow(Mx, 2, _p) + _b % _p
My = pow(My2, (_p+1)/4, _p )
if curved.contains_point(Mx,My):
if odd == bool(My&1):
return [My,offset]
return [_p-My,offset]
raise Exception('ECC_YfromX: No Y found')
def negative_point(P):
return Point( P.curve(), P.x(), -P.y(), P.order() )
def point_to_ser(P, comp=True ):
if comp:
return ( ('%02x'%(2+(P.y()&1)))+('%064x'%P.x()) ).decode('hex')
return ( '04'+('%064x'%P.x())+('%064x'%P.y()) ).decode('hex')
def ser_to_point(Aser):
curve = curve_secp256k1
generator = generator_secp256k1
_r = generator.order()
assert Aser[0] in ['\x02','\x03','\x04']
if Aser[0] == '\x04':
return Point( curve, string_to_number(Aser[1:33]), string_to_number(Aser[33:]), _r )
Mx = string_to_number(Aser[1:])
return Point( curve, Mx, ECC_YfromX(Mx, curve, Aser[0]=='\x03')[0], _r )
class MyVerifyingKey(ecdsa.VerifyingKey):
@classmethod
def from_signature(klass, sig, recid, h, curve):
""" See http://www.secg.org/download/aid-780/sec1-v2.pdf, chapter 4.1.6 """
from ecdsa import util, numbertheory
import msqr
curveFp = curve.curve
G = curve.generator
order = G.order()
# extract r,s from signature
r, s = util.sigdecode_string(sig, order)
# 1.1
x = r + (recid/2) * order
# 1.3
alpha = ( x * x * x + curveFp.a() * x + curveFp.b() ) % curveFp.p()
beta = msqr.modular_sqrt(alpha, curveFp.p())
y = beta if (beta - recid) % 2 == 0 else curveFp.p() - beta
# 1.4 the constructor checks that nR is at infinity
R = Point(curveFp, x, y, order)
# 1.5 compute e from message:
e = string_to_number(h)
minus_e = -e % order
# 1.6 compute Q = r^-1 (sR - eG)
inv_r = numbertheory.inverse_mod(r,order)
Q = inv_r * ( s * R + minus_e * G )
return klass.from_public_point( Q, curve )
def pubkey_from_signature(sig, message):
if len(sig) != 65:
raise Exception("Wrong encoding")
nV = ord(sig[0])
if nV < 27 or nV >= 35:
raise Exception("Bad encoding")
if nV >= 31:
compressed = True
nV -= 4
else:
compressed = False
recid = nV - 27
h = Hash(msg_magic(message))
return MyVerifyingKey.from_signature(sig[1:], recid, h, curve = SECP256k1), compressed
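# Added layout note: the 65-byte compact signature consumed above is
#   sig[0]  = 27 + recid, plus 4 if the signing key is compressed
#   sig[1:] = 32-byte r followed by 32-byte s
# so ord(sig[0]) == 31, for example, means recid 0 with a compressed key, which
# is why sig[0] is stripped before handing sig[1:] to sigdecode_string().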
class MySigningKey(ecdsa.SigningKey):
"""Enforce low S values in signatures"""
def sign_number(self, number, entropy=None, k=None):
curve = SECP256k1
G = curve.generator
order = G.order()
r, s = ecdsa.SigningKey.sign_number(self, number, entropy, k)
if s > order/2:
s = order - s
return r, s
class EC_KEY(object):
def __init__( self, k ):
secret = string_to_number(k)
self.pubkey = ecdsa.ecdsa.Public_key( generator_secp256k1, generator_secp256k1 * secret )
self.privkey = ecdsa.ecdsa.Private_key( self.pubkey, secret )
self.secret = secret
def get_public_key(self, compressed=True):
return point_to_ser(self.pubkey.point, compressed).encode('hex')
def sign(self, msg_hash):
private_key = MySigningKey.from_secret_exponent(self.secret, curve = SECP256k1)
public_key = private_key.get_verifying_key()
signature = private_key.sign_digest_deterministic(msg_hash, hashfunc=hashlib.sha256, sigencode = ecdsa.util.sigencode_string)
assert public_key.verify_digest(signature, msg_hash, sigdecode = ecdsa.util.sigdecode_string)
return signature
def sign_message(self, message, is_compressed):
signature = self.sign(Hash(msg_magic(message)))
for i in range(4):
sig = chr(27 + i + (4 if is_compressed else 0)) + signature
try:
self.verify_message(sig, message)
return sig
except Exception:
continue
else:
raise Exception("error: cannot sign message")
def verify_message(self, sig, message):
public_key, compressed = pubkey_from_signature(sig, message)
# check public key
if point_to_ser(public_key.pubkey.point, compressed) != point_to_ser(self.pubkey.point, compressed):
raise Exception("Bad signature")
# check message
h = Hash(msg_magic(message))
public_key.verify_digest(sig[1:], h, sigdecode = ecdsa.util.sigdecode_string)
# ECIES encryption/decryption methods; AES-128-CBC with PKCS7 is used as the cipher; hmac-sha256 is used as the mac
@classmethod
def encrypt_message(self, message, pubkey):
pk = ser_to_point(pubkey)
if not ecdsa.ecdsa.point_is_valid(generator_secp256k1, pk.x(), pk.y()):
raise Exception('invalid pubkey')
ephemeral_exponent = number_to_string(ecdsa.util.randrange(pow(2,256)), generator_secp256k1.order())
ephemeral = EC_KEY(ephemeral_exponent)
ecdh_key = point_to_ser(pk * ephemeral.privkey.secret_multiplier)
key = hashlib.sha512(ecdh_key).digest()
iv, key_e, key_m = key[0:16], key[16:32], key[32:]
ciphertext = aes_encrypt_with_iv(key_e, iv, message)
ephemeral_pubkey = ephemeral.get_public_key(compressed=True).decode('hex')
encrypted = 'BIE1' + ephemeral_pubkey + ciphertext
mac = hmac.new(key_m, encrypted, hashlib.sha256).digest()
return base64.b64encode(encrypted + mac)
def decrypt_message(self, encrypted):
encrypted = base64.b64decode(encrypted)
if len(encrypted) < 85:
raise Exception('invalid ciphertext: length')
magic = encrypted[:4]
ephemeral_pubkey = encrypted[4:37]
ciphertext = encrypted[37:-32]
mac = encrypted[-32:]
if magic != 'BIE1':
raise Exception('invalid ciphertext: invalid magic bytes')
try:
ephemeral_pubkey = ser_to_point(ephemeral_pubkey)
except AssertionError, e:
raise Exception('invalid ciphertext: invalid ephemeral pubkey')
if not ecdsa.ecdsa.point_is_valid(generator_secp256k1, ephemeral_pubkey.x(), ephemeral_pubkey.y()):
raise Exception('invalid ciphertext: invalid ephemeral pubkey')
ecdh_key = point_to_ser(ephemeral_pubkey * self.privkey.secret_multiplier)
key = hashlib.sha512(ecdh_key).digest()
iv, key_e, key_m = key[0:16], key[16:32], key[32:]
if mac != hmac.new(key_m, encrypted[:-32], hashlib.sha256).digest():
raise InvalidPassword()
return aes_decrypt_with_iv(key_e, iv, ciphertext)
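# Added layout note: a blob produced by encrypt_message() is
#   base64( "BIE1" || 33-byte compressed ephemeral pubkey || AES-128-CBC ciphertext || 32-byte HMAC-SHA256 )
# hence the >= 85 byte check in decrypt_message(): 4 + 33 + one 16-byte AES block + 32.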
###################################### BIP32 ##############################
random_seed = lambda n: "%032x"%ecdsa.util.randrange( pow(2,n) )
BIP32_PRIME = 0x80000000
def get_pubkeys_from_secret(secret):
# public key
private_key = ecdsa.SigningKey.from_string( secret, curve = SECP256k1 )
public_key = private_key.get_verifying_key()
K = public_key.to_string()
K_compressed = GetPubKey(public_key.pubkey,True)
return K, K_compressed
# Child private key derivation function (from master private key)
# k = master private key (32 bytes)
# c = master chain code (extra entropy for key derivation) (32 bytes)
# n = the index of the key we want to derive. (only 32 bits will be used)
# If n is negative (i.e. the 32nd bit is set), the resulting private key's
# corresponding public key can NOT be determined without the master private key.
# However, if n is positive, the resulting private key's corresponding
# public key can be determined without the master private key.
def CKD_priv(k, c, n):
is_prime = n & BIP32_PRIME
return _CKD_priv(k, c, rev_hex(int_to_hex(n,4)).decode('hex'), is_prime)
def _CKD_priv(k, c, s, is_prime):
order = generator_secp256k1.order()
keypair = EC_KEY(k)
cK = GetPubKey(keypair.pubkey,True)
data = chr(0) + k + s if is_prime else cK + s
I = hmac.new(c, data, hashlib.sha512).digest()
k_n = number_to_string( (string_to_number(I[0:32]) + string_to_number(k)) % order , order )
c_n = I[32:]
return k_n, c_n
# Child public key derivation function (from public key only)
# K = master public key
# c = master chain code
# n = index of key we want to derive
# This function allows us to find the nth public key, as long as n is
# non-negative. If n is negative, we need the master private key to find it.
def CKD_pub(cK, c, n):
    if n & BIP32_PRIME:
        raise Exception("CKD_pub error: cannot derive a hardened child from a public key")
return _CKD_pub(cK, c, rev_hex(int_to_hex(n,4)).decode('hex'))
# helper function, callable with arbitrary string
def _CKD_pub(cK, c, s):
order = generator_secp256k1.order()
I = hmac.new(c, cK + s, hashlib.sha512).digest()
curve = SECP256k1
pubkey_point = string_to_number(I[0:32])*curve.generator + ser_to_point(cK)
public_key = ecdsa.VerifyingKey.from_public_point( pubkey_point, curve = SECP256k1 )
c_n = I[32:]
cK_n = GetPubKey(public_key.pubkey,True)
return cK_n, c_n
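# Added consistency sketch (the master_* names below are hypothetical locals):
# for a non-hardened index the private and public derivations must agree, e.g.
#
#   k0, c0 = CKD_priv(master_k, master_c, 0)
#   cK0, c0_pub = CKD_pub(master_cK, master_c, 0)
#   assert c0 == c0_pub
#   assert GetPubKey(EC_KEY(k0).pubkey, True) == cK0
#
# whereas CKD_pub() raises for any index with the BIP32_PRIME bit set, since
# hardened children cannot be derived from the public key alone.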
def xprv_header(xtype):
return ("%08x"%(XPRV_HEADER + xtype)).decode('hex')
def xpub_header(xtype):
return ("%08x"%(XPUB_HEADER + xtype)).decode('hex')
def serialize_xprv(xtype, c, k, depth=0, fingerprint=chr(0)*4, child_number=chr(0)*4):
xprv = xprv_header(xtype) + chr(depth) + fingerprint + child_number + c + chr(0) + k
return EncodeBase58Check(xprv)
def serialize_xpub(xtype, c, cK, depth=0, fingerprint=chr(0)*4, child_number=chr(0)*4):
xpub = xpub_header(xtype) + chr(depth) + fingerprint + child_number + c + cK
return EncodeBase58Check(xpub)
def deserialize_xkey(xkey, prv):
xkey = DecodeBase58Check(xkey)
if len(xkey) != 78:
raise BaseException('Invalid length')
depth = ord(xkey[4])
fingerprint = xkey[5:9]
child_number = xkey[9:13]
c = xkey[13:13+32]
header = XPRV_HEADER if prv else XPUB_HEADER
xtype = int('0x' + xkey[0:4].encode('hex'), 16) - header
if xtype not in ([0, 1] if TESTNET else [0]):
raise BaseException('Invalid header')
n = 33 if prv else 32
K_or_k = xkey[13+n:]
return xtype, depth, fingerprint, child_number, c, K_or_k
def deserialize_xpub(xkey):
return deserialize_xkey(xkey, False)
def deserialize_xprv(xkey):
return deserialize_xkey(xkey, True)
def is_xpub(text):
try:
deserialize_xpub(text)
return True
except:
return False
def is_xprv(text):
try:
deserialize_xprv(text)
return True
except:
return False
def xpub_from_xprv(xprv):
xtype, depth, fingerprint, child_number, c, k = deserialize_xprv(xprv)
K, cK = get_pubkeys_from_secret(k)
return serialize_xpub(xtype, c, cK, depth, fingerprint, child_number)
def bip32_root(seed, xtype):
I = hmac.new("IoP seed", seed, hashlib.sha512).digest()
master_k = I[0:32]
master_c = I[32:]
K, cK = get_pubkeys_from_secret(master_k)
xprv = serialize_xprv(xtype, master_c, master_k)
xpub = serialize_xpub(xtype, master_c, cK)
return xprv, xpub
def xpub_from_pubkey(xtype, cK):
assert cK[0] in ['\x02','\x03']
return serialize_xpub(xtype, chr(0)*32, cK)
def bip32_private_derivation(xprv, branch, sequence):
assert sequence.startswith(branch)
if branch == sequence:
return xprv, xpub_from_xprv(xprv)
xtype, depth, fingerprint, child_number, c, k = deserialize_xprv(xprv)
sequence = sequence[len(branch):]
for n in sequence.split('/'):
if n == '': continue
i = int(n[:-1]) + BIP32_PRIME if n[-1] == "'" else int(n)
parent_k = k
k, c = CKD_priv(k, c, i)
depth += 1
_, parent_cK = get_pubkeys_from_secret(parent_k)
fingerprint = hash_160(parent_cK)[0:4]
child_number = ("%08X"%i).decode('hex')
K, cK = get_pubkeys_from_secret(k)
xpub = serialize_xpub(xtype, c, cK, depth, fingerprint, child_number)
xprv = serialize_xprv(xtype, c, k, depth, fingerprint, child_number)
return xprv, xpub
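# Added illustrative call: with branch "m/" and sequence "m/0'/1", the loop above
# derives the hardened child 0' first (index 0 + BIP32_PRIME, because of the
# trailing apostrophe) and then the normal child 1, returning the serialized
# xprv/xpub pair for m/0'/1.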
def bip32_public_derivation(xpub, branch, sequence):
xtype, depth, fingerprint, child_number, c, cK = deserialize_xpub(xpub)
assert sequence.startswith(branch)
sequence = sequence[len(branch):]
for n in sequence.split('/'):
if n == '': continue
i = int(n)
parent_cK = cK
cK, c = CKD_pub(cK, c, i)
depth += 1
fingerprint = hash_160(parent_cK)[0:4]
child_number = ("%08X"%i).decode('hex')
return serialize_xpub(xtype, c, cK, depth, fingerprint, child_number)
def bip32_private_key(sequence, k, chain):
for i in sequence:
k, chain = CKD_priv(k, chain, i)
return SecretToASecret(k, True)
def xkeys_from_seed(seed, passphrase, derivation):
from mnemonic import Mnemonic
xprv, xpub = bip32_root(Mnemonic.mnemonic_to_seed(seed, passphrase), 0)
xprv, xpub = bip32_private_derivation(xprv, "m/", derivation)
return xprv, xpub
|
py | 1a479059b586d2e14d3adbf0e49341d0900c02f4 | #!/usr/bin/env python
"""Execute a Rekall plugin on the client memory.
This module implements the Rekall enabled client actions.
"""
import json
import os
import pdb
import sys
# Initialize the Rekall plugins, so pylint: disable=unused-import
from rekall import addrspace
from rekall import constants
from rekall import io_manager
from rekall import obj
from rekall import plugins
from rekall import session
from rekall.plugins.addrspaces import standard
from rekall.plugins.renderers import data_export
# pylint: enable=unused-import
import logging
from grr.client import actions
from grr.client import vfs
from grr.client.client_actions import tempfiles
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib import utils
class Error(Exception):
pass
class ProfileNotFoundError(ValueError):
pass
class GRRObjectRenderer(data_export.NativeDataExportObjectRenderer):
"""A default object renderer for the GRRRekallRenderer.
GRR Renders all Rekall objects using the Rekall DataExportRenderer. By default
we just delegate everything to DataExportRenderer.
"""
renders_type = "object"
renderers = ["GRRRekallRenderer"]
def _GetDelegateObjectRenderer(self, item):
return self.FromEncoded(item, "DataExportRenderer")(
renderer=self.renderer)
def EncodeToJsonSafe(self, item, **options):
return self._GetDelegateObjectRenderer(item).EncodeToJsonSafe(
item, **options)
def DecodeFromJsonSafe(self, value, options):
return self._GetDelegateObjectRenderer(value).DecodeFromJsonSafe(
value, options)
def RawHTML(self, item, **options):
return self._GetDelegateObjectRenderer(item).Summary(item, **options)
def Summary(self, item, **options):
return self._GetDelegateObjectRenderer(item).Summary(item, **options)
class GRRRekallRenderer(data_export.DataExportRenderer):
"""This renderer sends all messages to the server encoded as JSON.
Note that this renderer is used to encode and deliver Rekall objects to the
server. Additionally Rekall ObjectRenderer implementations specific to GRR
will be attached to this renderer.
"""
name = None
# Maximum number of statements to queue before sending a reply.
RESPONSE_CHUNK_SIZE = 1000
def __init__(self, rekall_session=None, action=None):
"""Collect Rekall rendering commands and send to the server.
Args:
rekall_session: The Rekall session object.
action: The GRR Client Action which owns this renderer. We will use it to
actually send messages back to the server.
"""
try:
sys.stdout.isatty()
except AttributeError:
sys.stdout.isatty = lambda: False
super(GRRRekallRenderer, self).__init__(session=rekall_session)
# A handle to the client action we can use for sending responses.
self.action = action
# The current plugin we are running.
self.plugin = None
self.context_messages = {}
self.new_context_messages = {}
def start(self, plugin_name=None, kwargs=None):
self.plugin = plugin_name
return super(GRRRekallRenderer, self).start(plugin_name=plugin_name,
kwargs=kwargs)
def write_data_stream(self):
"""Prepares a RekallResponse and send to the server."""
if self.data:
response_msg = rdfvalue.RekallResponse(
json_messages=json.dumps(self.data, separators=(",", ":")),
json_context_messages=json.dumps(self.context_messages.items(),
separators=(",", ":")),
plugin=self.plugin)
self.context_messages = self.new_context_messages
self.new_context_messages = {}
# Queue the response to the server.
self.action.SendReply(response_msg)
def SendMessage(self, statement):
super(GRRRekallRenderer, self).SendMessage(statement)
if statement[0] in ["s", "t"]:
self.new_context_messages[statement[0]] = statement[1]
if len(self.data) > self.RESPONSE_CHUNK_SIZE:
self.flush()
def open(self, directory=None, filename=None, mode="rb"):
result = tempfiles.CreateGRRTempFile(filename=filename, mode=mode)
# The tempfile library created an os path, we pass it through vfs to
# normalize it.
with vfs.VFSOpen(rdfvalue.PathSpec(
path=result.name,
pathtype=rdfvalue.PathSpec.PathType.OS)) as vfs_fd:
dict_pathspec = vfs_fd.pathspec.ToPrimitiveDict()
self.SendMessage(["file", dict_pathspec])
return result
def report_error(self, message):
super(GRRRekallRenderer, self).report_error(message)
if flags.FLAGS.debug:
pdb.post_mortem()
class GrrRekallSession(session.Session):
"""A GRR Specific Rekall session."""
def __init__(self, fhandle=None, action=None, **session_args):
super(GrrRekallSession, self).__init__(**session_args)
self.action = action
# Ensure the action's Progress() method is called when Rekall reports
# progress.
self.progress.Register(id(self), lambda *_, **__: self.action.Progress())
def LoadProfile(self, filename):
"""Wraps the Rekall profile's LoadProfile to fetch profiles from GRR."""
# If the user specified a special profile path we use their choice.
profile = super(GrrRekallSession, self).LoadProfile(filename)
if profile:
return profile
    # Can't load the profile; we need to ask the server for it.
logging.debug("Asking server for profile %s" % filename)
self.action.SendReply(
rdfvalue.RekallResponse(
missing_profile="%s/%s" % (
constants.PROFILE_REPOSITORY_VERSION, filename)))
# Wait for the server to wake us up. When we wake up the server should
# have sent the profile over by calling the WriteRekallProfile.
self.action.Suspend()
# Now the server should have sent the data already. We try to load the
# profile one more time.
return super(GrrRekallSession, self).LoadProfile(
filename, use_cache=False)
def GetRenderer(self):
# We will use this renderer to push results to the server.
return GRRRekallRenderer(rekall_session=self, action=self.action)
class WriteRekallProfile(actions.ActionPlugin):
"""A client action to write a Rekall profile to the local cache."""
in_rdfvalue = rdfvalue.RekallProfile
def Run(self, args):
output_filename = utils.JoinPath(
config_lib.CONFIG["Client.rekall_profile_cache_path"], args.name)
try:
os.makedirs(os.path.dirname(output_filename))
except OSError:
pass
with open(output_filename, "wb") as fd:
fd.write(args.data)
class RekallAction(actions.SuspendableAction):
"""Runs a Rekall command on live memory."""
in_rdfvalue = rdfvalue.RekallRequest
out_rdfvalue = rdfvalue.RekallResponse
def Iterate(self):
"""Run a Rekall plugin and return the result."""
# Open the device pathspec as requested by the server.
with vfs.VFSOpen(self.request.device,
progress_callback=self.Progress) as fhandle:
# Create a session and run all the plugins with it.
session_args = self.request.session.ToDict()
# If the user has not specified a special profile path, we use the local
# cache directory.
if "profile_path" not in session_args:
session_args["profile_path"] = [config_lib.CONFIG[
"Client.rekall_profile_cache_path"]]
session_args.update(fhandle.GetMetadata())
rekal_session = GrrRekallSession(action=self, **session_args)
# Wrap GRR's VFS handler for the device in a Rekall FDAddressSpace so we
# can pass it directly to the Rekall session as the physical address
# space. This avoids the AS voting mechanism for Rekall's image format
# detection.
with rekal_session:
rekal_session.physical_address_space = standard.FDAddressSpace(
session=rekal_session, fhandle=fhandle)
# Autodetect the profile. Valid plugins for this profile will become
# available now.
rekal_session.GetParameter("profile")
for plugin_request in self.request.plugins:
# Get the keyword args to this plugin.
plugin_args = plugin_request.args.ToDict()
try:
rekal_session.RunPlugin(plugin_request.plugin, **plugin_args)
except Exception: # pylint: disable=broad-except
# Just ignore errors, and run the next plugin. Errors will be reported
# through the renderer.
pass
|
py | 1a4791cff91ab24c99cb9d9bcd8d8a6df98614a8 | import argparse
import cv2
import numpy
import PIL.Image
import torch
import torchvision.transforms as transforms
from PIL import Image
from torch.autograd import Variable
from models import *
from tools.canny import processing
from tools.picture2texture import estimate
def sample_images(generator,Tensor,imgs):
"""
save the processed pictures
Args:
generator: trained model
Tensor: tensor format
imgs: real picture
Author: Zhongqi Wang
"""
real_A = Variable(imgs.type(Tensor))
real_A = real_A.unsqueeze(0)
fake_B = generator(real_A)
cv2.imwrite("generate.png" ,255*fake_B[0].squeeze(0).cpu().swapaxes(0,2).swapaxes(0,1).numpy())
def process(opt,file_path):
"""
    get the HED edge map of the input picture
    Args:
        opt: opt file
        file_path: the file path you want to process
Author: Zhongqi Wang
"""
arguments_strOut = "HED.jpg"
src = cv2.imread(file_path, 0)
src = cv2.resize(src, (opt.img_width,opt.img_height))
src_RGB = cv2.cvtColor(src, cv2.COLOR_GRAY2RGB)
a = PIL.Image.fromarray(src_RGB)
b = numpy.array(a)[:, :]
tenInput = torch.FloatTensor(numpy.ascontiguousarray(b.transpose(2, 0, 1).astype(numpy.float32) * (1.0 / 255.0)))
tenOutput = estimate(tenInput)
PIL.Image.fromarray((tenOutput.clip(0.0, 1.0).numpy().transpose(1, 2, 0)[:, :, 0] * 255.0).astype(numpy.uint8)).save(arguments_strOut)
def main(path):
parser = argparse.ArgumentParser()
parser.add_argument("--img_height", type=int, default=512, help="size of image height")
parser.add_argument("--img_width", type=int, default=512, help="size of image width")
opt = parser.parse_args()
transform=transforms.Compose([
transforms.ToTensor(),
])
cuda = True if torch.cuda.is_available() else False
generator = GeneratorUNet()
if cuda:
        generator = generator.cuda()  # use GPU
generator.load_state_dict(torch.load("generator_45_canny.pth"))
Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
    process(opt,path)  # produce the HED edge image
    img = processing(path)  # produce the Canny edge image
cv2.imwrite("canny.jpg",img)
pic1 = cv2.imread("HED.jpg")
pic1 = cv2.resize(pic1, (opt.img_width,opt.img_height))
pic2 = cv2.imread("canny.jpg")
pic2 = cv2.resize(pic2, (opt.img_width,opt.img_height))
train_data = pic1+pic2
cv2.imwrite("canny&HED.jpg",train_data) #得到二者叠加
frame = cv2.resize(train_data,(opt.img_width,opt.img_height))
frame = Image.fromarray(cv2.cvtColor(frame,cv2.COLOR_BGR2RGB))
frame = transform(frame)
    sample_images(generator,Tensor,frame)  # feed the combined edges into the pix2pix model
if __name__ == "__main__":
path = "test_pic/6.jpg" # 要处理的图片
main(path)
|
py | 1a47934c7f7ba1869da643239d39282cd9c2da22 | """Package used for testing the webapp / api for the project."""
|
py | 1a4793d20e2b3d234c7a7dea4856ff075482e007 | """
Given an array of ints length 3, return a new array with the elements in reverse order, so {1, 2, 3} becomes {3, 2, 1}.
reverse3([1, 2, 3]) → [3, 2, 1]
reverse3([5, 11, 9]) → [9, 11, 5]
reverse3([7, 0, 0]) → [0, 0, 7]
@author unobatbayar
"""
def reverse3(nums):
reversed = [nums[2], nums[1], nums[0]]
return reversed
|
py | 1a479456fdf1c387e249644ddaf2d4f14f64a2c4 | import os
import functools
from flask import Flask
from flask import request
import redis
import hn_feeds
import logger_config
app = Flask(__name__)
logger = logger_config.get_logger()
@functools.lru_cache(None)
def _get_feed_generator():
redis_server = os.environ.get("REDIS_SERVER", None)
if redis_server:
host, port = redis_server.split(":")
redis_db = os.environ.get("REDIS_DB", 0)
redis_client = redis.Redis(host=host, port=int(port), db=redis_db)
redis_client.ping() # test connection
logger.info(f"Connected to Redis at {host}:{port}")
else:
redis_client = None
logger.warning("Not using Redis")
return hn_feeds.HNFeedsGenerator(
timeout_secs=int(os.environ.get("TIMEOUT_SECS", 5)),
max_workers=int(os.environ.get("MAX_WORKERS", 5)),
redis_client=redis_client,
redis_expire_secs=int(os.environ.get("REDIS_EXPIRE_SECS", 172800)),
fulltext_rss_url=os.environ.get("FULLTEXT_RSS_URL", None))
# global feed generator
_feed_generator = _get_feed_generator()
@app.route('/')
def base():
    return f'<p>Must pass a URL with a feed to parse!</p>'
@app.route('/favicon.ico')
def no_favicon():
"""Returns 404 if we pass a favicon request."""
return '', 404
@app.route('/<path:url>')
def main_entry(url):
del url # Unused since we need full path anyway.
full_path = request.full_path[1:] # Strip leading /.
base_rss = f'http://{full_path}'
logger.info(f'Got request for "{base_rss}". Creating feed.')
fg = _feed_generator.create_feed(base_rss=base_rss)
if not fg:
return '', 404
xml = fg.atom_str(pretty=True)
return xml, 200, {'Content-Type': 'text/xml; charset=utf-8'}
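# Added usage sketch (the module name passed to FLASK_APP is assumed, not given
# in the original file):
#   REDIS_SERVER=localhost:6379 FLASK_APP=app flask run
# A request such as http://127.0.0.1:5000/news.ycombinator.com/rss then reaches
# main_entry(), which rebuilds an http:// feed URL from the request path and
# proxies it through create_feed().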
|
py | 1a47960630abb7b33fb91d5caf22a5652060a40f | #!/usr/bin/env python
import optparse
import os
import sys
chplenv_dir = os.path.dirname(__file__)
sys.path.insert(0, os.path.abspath(chplenv_dir))
import chpl_comm, chpl_compiler, chpl_platform, overrides
from compiler_utils import CompVersion, get_compiler_version
from utils import error, memoize
@memoize
def get(flag='target'):
if flag == 'network':
atomics_val = overrides.get('CHPL_NETWORK_ATOMICS')
if not atomics_val:
if chpl_comm.get() == 'ugni' and get('target') != 'locks':
atomics_val = 'ugni'
else:
atomics_val = 'none'
elif flag == 'target':
atomics_val = overrides.get('CHPL_ATOMICS')
if not atomics_val:
compiler_val = chpl_compiler.get('target')
platform_val = chpl_platform.get('target')
# we currently support intrinsics for gcc, intel, cray and clang.
# gcc added initial support in 4.1, and added support for 64 bit
# atomics on 32 bit platforms with 4.8. clang and intel also
# support 64 bit atomics on 32 bit platforms and the cray compiler
# will never run on a 32 bit machine. For pgi or 32 bit platforms
# with an older gcc, we fall back to locks
if compiler_val in ['gnu', 'cray-prgenv-gnu', 'mpi-gnu']:
version = get_compiler_version('gnu')
if version >= CompVersion('4.8'):
atomics_val = 'intrinsics'
elif version >= CompVersion('4.1') and not platform_val.endswith('32'):
atomics_val = 'intrinsics'
elif compiler_val == 'aarch64-gnu':
atomics_val = 'cstdlib'
elif compiler_val == 'intel' or compiler_val == 'cray-prgenv-intel':
atomics_val = 'intrinsics'
elif compiler_val == 'cray-prgenv-cray':
atomics_val = 'intrinsics'
elif compiler_val == 'clang':
atomics_val = 'intrinsics'
elif compiler_val == 'clang-included':
atomics_val = 'intrinsics'
# we can't use intrinsics, fall back to locks
if not atomics_val:
atomics_val = 'locks'
else:
error("Invalid flag: '{0}'".format(flag), ValueError)
return atomics_val
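# Added usage note (illustrative): running this script with no flags prints the
# target atomics choice ("intrinsics", "cstdlib", or "locks" unless overridden),
# while --network prints the network atomics value ("ugni" or "none").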
def _main():
parser = optparse.OptionParser(usage='usage: %prog [--network|target])')
parser.add_option('--target', dest='flag', action='store_const',
const='target', default='target')
parser.add_option('--network', dest='flag', action='store_const',
const='network')
(options, args) = parser.parse_args()
atomics_val = get(options.flag)
sys.stdout.write("{0}\n".format(atomics_val))
if __name__ == '__main__':
_main()
|
py | 1a47964b99178449eee7bcb9d628ae146bd1d430 | class Solution:
"""
@param digits: a number represented as an array of digits
@return: the result
"""
def plusOne(self, digits):
if len(digits) == 0:
return digits
digits[-1] += 1
for i in range(len(digits) - 1, 0, -1):
if digits[i] == 10:
digits[i] = 0
digits[i - 1] += 1
if digits[0] == 10:
digits[0] = 0
digits.insert(0, 1)
return digits |
py | 1a47993047c9e18019da41ff9eec110393d95f96 | import os
import random
from dotenv import load_dotenv
from fastapi import FastAPI
load_dotenv()
os.environ["LOCAL_DB"] = "False"
# Set random seed, for random team matches
random.seed(5511)
app = FastAPI()
from src.process.process_main import process_main # noqa: E402
@app.get("/")
def hello_world():
return {"message": "Hello World!"}
@app.get("/data_refresh")
def data_refresh():
process_main(start_year=2022, end_year=2022, clean=False, fake_matches=False)
|
py | 1a4799bbb92bb933e4db093f891c15eef22ddec2 | from django.apps import AppConfig
class StatisticsAppConfig(AppConfig):
name = 'statistics_app'
|
py | 1a4799c699d5cd9abe1a88219f3c4af29087a370 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.test_utils."""
import numpy as np
import tensorflow as tf
from object_detection.utils import test_utils
class TestUtilsTest(tf.test.TestCase):
def test_diagonal_gradient_image(self):
"""Tests if a good pyramid image is created."""
pyramid_image = test_utils.create_diagonal_gradient_image(3, 4, 2)
# Test which is easy to understand.
expected_first_channel = np.array([[3, 2, 1, 0],
[4, 3, 2, 1],
[5, 4, 3, 2]], dtype=np.float32)
self.assertAllEqual(np.squeeze(pyramid_image[:, :, 0]),
expected_first_channel)
# Actual test.
expected_image = np.array([[[3, 30],
[2, 20],
[1, 10],
[0, 0]],
[[4, 40],
[3, 30],
[2, 20],
[1, 10]],
[[5, 50],
[4, 40],
[3, 30],
[2, 20]]], dtype=np.float32)
self.assertAllEqual(pyramid_image, expected_image)
def test_random_boxes(self):
"""Tests if valid random boxes are created."""
num_boxes = 1000
max_height = 3
max_width = 5
boxes = test_utils.create_random_boxes(num_boxes,
max_height,
max_width)
true_column = np.ones(shape=(num_boxes)) == 1
self.assertAllEqual(boxes[:, 0] < boxes[:, 2], true_column)
self.assertAllEqual(boxes[:, 1] < boxes[:, 3], true_column)
self.assertTrue(boxes[:, 0].min() >= 0)
self.assertTrue(boxes[:, 1].min() >= 0)
self.assertTrue(boxes[:, 2].max() <= max_height)
self.assertTrue(boxes[:, 3].max() <= max_width)
if __name__ == '__main__':
tf.test.main()
|
py | 1a4799e4680275d0ace16ab93ce86fda3952c077 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Merge source maps to build composite sources
"""
from __future__ import absolute_import, division, print_function
import os
import sys
import yaml
from astropy.io import fits
from fermipy.skymap import HpxMap
from fermipy.utils import load_yaml
from fermipy.jobs.scatter_gather import ScatterGather
from fermipy.jobs.slac_impl import make_nfs_path
from fermipy.jobs.link import Link
from fermipy.jobs.chain import Chain
from fermipy.diffuse.binning import Component
from fermipy.diffuse.name_policy import NameFactory
from fermipy.diffuse import defaults as diffuse_defaults
from fermipy.diffuse.model_manager import make_library
NAME_FACTORY = NameFactory()
class InitModel(Link):
"""Small class to preprate files fermipy analysis.
Specifically this create the srcmap_manifest and fermipy_config_yaml files
"""
appname = 'fermipy-init-model'
linkname_default = 'init-model'
usage = '%s [options]' % (appname)
description = "Initialize model fitting directory"
default_options = dict(comp=diffuse_defaults.diffuse['comp'],
data=diffuse_defaults.diffuse['data'],
library=diffuse_defaults.diffuse['library'],
models=diffuse_defaults.diffuse['models'],
hpx_order=diffuse_defaults.diffuse['hpx_order_fitting'])
def run_analysis(self, argv):
""" Build the manifest for all the models
"""
args = self._parser.parse_args(argv)
components = Component.build_from_yamlfile(args.comp)
NAME_FACTORY.update_base_dict(args.data)
model_dict = make_library(**args.__dict__)
model_manager = model_dict['ModelManager']
models = load_yaml(args.models)
data = args.data
hpx_order = args.hpx_order
for modelkey in models:
model_manager.make_srcmap_manifest(modelkey, components, data)
model_manager.make_fermipy_config_yaml(modelkey, components, data,
hpx_order=hpx_order,
irf_ver=NAME_FACTORY.irf_ver())
class AssembleModel(Link):
"""Small class to assemple source map files for fermipy analysis.
This is useful for re-merging after parallelizing source map creation.
"""
appname = 'fermipy-assemble-model'
linkname_default = 'assemble-model'
usage = '%s [options]' % (appname)
description = "Assemble sourcemaps for model fitting"
default_options = dict(input=(None, 'Input yaml file', str),
compname=(None, 'Component name.', str),
hpx_order=diffuse_defaults.diffuse['hpx_order_fitting'])
@staticmethod
def copy_ccube(ccube, outsrcmap, hpx_order):
"""Copy a counts cube into outsrcmap file
reducing the HEALPix order to hpx_order if needed.
"""
sys.stdout.write(" Copying counts cube from %s to %s\n" % (ccube, outsrcmap))
try:
hdulist_in = fits.open(ccube)
except IOError:
hdulist_in = fits.open("%s.gz" % ccube)
hpx_order_in = hdulist_in[1].header['ORDER']
if hpx_order_in > hpx_order:
hpxmap = HpxMap.create_from_hdulist(hdulist_in)
hpxmap_out = hpxmap.ud_grade(hpx_order, preserve_counts=True)
hpxlist_out = hdulist_in
#hpxlist_out['SKYMAP'] = hpxmap_out.create_image_hdu()
hpxlist_out[1] = hpxmap_out.create_image_hdu()
hpxlist_out[1].name = 'SKYMAP'
hpxlist_out.writeto(outsrcmap)
return hpx_order
else:
os.system('cp %s %s' % (ccube, outsrcmap))
#os.system('cp %s.gz %s.gz' % (ccube, outsrcmap))
#os.system('gunzip -f %s.gz' % (outsrcmap))
return None
@staticmethod
def open_outsrcmap(outsrcmap):
"""Open and return the outsrcmap file in append mode """
outhdulist = fits.open(outsrcmap, 'append')
return outhdulist
@staticmethod
def append_hdus(hdulist, srcmap_file, source_names, hpx_order):
"""Append HEALPix maps to a list
Parameters
----------
hdulist : list
The list being appended to
srcmap_file : str
Path to the file containing the HDUs
source_names : list of str
Names of the sources to extract from srcmap_file
hpx_order : int
Maximum order for maps
"""
sys.stdout.write(" Extracting %i sources from %s" % (len(source_names), srcmap_file))
try:
hdulist_in = fits.open(srcmap_file)
except IOError:
try:
hdulist_in = fits.open('%s.gz' % srcmap_file)
except IOError:
sys.stdout.write(" Missing file %s\n" % srcmap_file)
return
for source_name in source_names:
sys.stdout.write('.')
sys.stdout.flush()
if hpx_order is None:
hdulist.append(hdulist_in[source_name])
else:
try:
hpxmap = HpxMap.create_from_hdulist(hdulist_in, hdu=source_name)
except IndexError:
print(" Index error on source %s in file %s" % (source_name, srcmap_file))
continue
except KeyError:
print(" Key error on source %s in file %s" % (source_name, srcmap_file))
continue
hpxmap_out = hpxmap.ud_grade(hpx_order, preserve_counts=True)
hdulist.append(hpxmap_out.create_image_hdu(name=source_name))
sys.stdout.write("\n")
hdulist.flush()
hdulist_in.close()
@staticmethod
def assemble_component(compname, compinfo, hpx_order):
"""Assemble the source map file for one binning component
Parameters
----------
compname : str
The key for this component (e.g., E0_PSF3)
compinfo : dict
Information about this component
hpx_order : int
Maximum order for maps
"""
sys.stdout.write("Working on component %s\n" % compname)
ccube = compinfo['ccube']
outsrcmap = compinfo['outsrcmap']
source_dict = compinfo['source_dict']
hpx_order = AssembleModel.copy_ccube(ccube, outsrcmap, hpx_order)
hdulist = AssembleModel.open_outsrcmap(outsrcmap)
for comp_name in sorted(source_dict.keys()):
source_info = source_dict[comp_name]
source_names = source_info['source_names']
srcmap_file = source_info['srcmap_file']
AssembleModel.append_hdus(hdulist, srcmap_file,
source_names, hpx_order)
sys.stdout.write("Done!\n")
def run_analysis(self, argv):
"""Assemble the source map file for one binning component
FIXME
"""
args = self._parser.parse_args(argv)
manifest = yaml.safe_load(open(args.input))
compname = args.compname
value = manifest[compname]
self.assemble_component(compname, value, args.hpx_order)
class AssembleModel_SG(ScatterGather):
"""Small class to generate configurations for this script
Parameters
----------
--compname : binning component definition yaml file
    --data : dataset definition yaml file
    --models : model definition yaml file
args : Names of models to assemble source maps for
"""
appname = 'fermipy-assemble-model-sg'
usage = "%s [options]" % (appname)
description = "Copy source maps from the library to a analysis directory"
clientclass = AssembleModel
job_time = 300
default_options = dict(comp=diffuse_defaults.diffuse['comp'],
data=diffuse_defaults.diffuse['data'],
hpx_order=diffuse_defaults.diffuse['hpx_order_fitting'],
models=diffuse_defaults.diffuse['models'])
def build_job_configs(self, args):
"""Hook to build job configurations
"""
job_configs = {}
components = Component.build_from_yamlfile(args['comp'])
NAME_FACTORY.update_base_dict(args['data'])
models = load_yaml(args['models'])
for modelkey in models:
manifest = os.path.join('analysis', 'model_%s' % modelkey,
'srcmap_manifest_%s.yaml' % modelkey)
for comp in components:
key = comp.make_key('{ebin_name}_{evtype_name}')
fullkey = "%s_%s" % (modelkey, key)
outfile = NAME_FACTORY.merged_srcmaps(modelkey=modelkey,
component=key,
coordsys=comp.coordsys,
mktime='none',
irf_ver=NAME_FACTORY.irf_ver())
logfile = make_nfs_path(outfile.replace('.fits', '.log'))
job_configs[fullkey] = dict(input=manifest,
compname=key,
hpx_order=args['hpx_order'],
logfile=logfile)
return job_configs
class AssembleModelChain(Chain):
"""Small class to split, apply mktime and bin data according to some user-provided specification
"""
appname = 'fermipy-assemble-model-chain'
linkname_default = 'assemble-model-chain'
usage = '%s [options]' % (appname)
description = 'Run init-model and assemble-model'
default_options = dict(data=diffuse_defaults.diffuse['data'],
comp=diffuse_defaults.diffuse['comp'],
library=diffuse_defaults.diffuse['library'],
models=diffuse_defaults.diffuse['models'],
hpx_order=diffuse_defaults.diffuse['hpx_order_fitting'],
dry_run=diffuse_defaults.diffuse['dry_run'])
def __init__(self, **kwargs):
"""C'tor
"""
super(AssembleModelChain, self).__init__(**kwargs)
self.comp_dict = None
def _register_link_classes(self):
InitModel.register_class()
AssembleModel_SG.register_class()
def _map_arguments(self, input_dict):
"""Map from the top-level arguments to the arguments provided to
        the individual links """
data = input_dict.get('data')
comp = input_dict.get('comp')
library = input_dict.get('library')
models = input_dict.get('models')
hpx_order = input_dict.get('hpx_order')
dry_run = input_dict.get('dry_run', False)
self._set_link('init-model', InitModel,
comp=comp, data=data,
library=library,
models=models,
hpx_order=hpx_order,
dry_run=dry_run)
self._set_link('assemble-model', AssembleModel_SG,
comp=comp, data=data,
hpx_order=hpx_order,
models=models)
def register_classes():
"""Register these classes with the `LinkFactory` """
InitModel.register_class()
AssembleModel.register_class()
AssembleModel_SG.register_class()
AssembleModelChain.register_class()
|
py | 1a479a7b39e1ce1dacb7351b285039704236e2b8 | # coding: utf-8
"""
EVE Swagger Interface
An OpenAPI for EVE Online # noqa: E501
OpenAPI spec version: 0.8.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class GetCharactersCharacterIdStatsOrbital(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'strike_characters_killed': 'int',
'strike_damage_to_players_armor_amount': 'int',
'strike_damage_to_players_shield_amount': 'int'
}
attribute_map = {
'strike_characters_killed': 'strike_characters_killed',
'strike_damage_to_players_armor_amount': 'strike_damage_to_players_armor_amount',
'strike_damage_to_players_shield_amount': 'strike_damage_to_players_shield_amount'
}
def __init__(self, strike_characters_killed=None, strike_damage_to_players_armor_amount=None, strike_damage_to_players_shield_amount=None): # noqa: E501
"""GetCharactersCharacterIdStatsOrbital - a model defined in Swagger""" # noqa: E501
self._strike_characters_killed = None
self._strike_damage_to_players_armor_amount = None
self._strike_damage_to_players_shield_amount = None
self.discriminator = None
if strike_characters_killed is not None:
self.strike_characters_killed = strike_characters_killed
if strike_damage_to_players_armor_amount is not None:
self.strike_damage_to_players_armor_amount = strike_damage_to_players_armor_amount
if strike_damage_to_players_shield_amount is not None:
self.strike_damage_to_players_shield_amount = strike_damage_to_players_shield_amount
@property
def strike_characters_killed(self):
"""Gets the strike_characters_killed of this GetCharactersCharacterIdStatsOrbital. # noqa: E501
strike_characters_killed integer # noqa: E501
:return: The strike_characters_killed of this GetCharactersCharacterIdStatsOrbital. # noqa: E501
:rtype: int
"""
return self._strike_characters_killed
@strike_characters_killed.setter
def strike_characters_killed(self, strike_characters_killed):
"""Sets the strike_characters_killed of this GetCharactersCharacterIdStatsOrbital.
strike_characters_killed integer # noqa: E501
:param strike_characters_killed: The strike_characters_killed of this GetCharactersCharacterIdStatsOrbital. # noqa: E501
:type: int
"""
self._strike_characters_killed = strike_characters_killed
@property
def strike_damage_to_players_armor_amount(self):
"""Gets the strike_damage_to_players_armor_amount of this GetCharactersCharacterIdStatsOrbital. # noqa: E501
strike_damage_to_players_armor_amount integer # noqa: E501
:return: The strike_damage_to_players_armor_amount of this GetCharactersCharacterIdStatsOrbital. # noqa: E501
:rtype: int
"""
return self._strike_damage_to_players_armor_amount
@strike_damage_to_players_armor_amount.setter
def strike_damage_to_players_armor_amount(self, strike_damage_to_players_armor_amount):
"""Sets the strike_damage_to_players_armor_amount of this GetCharactersCharacterIdStatsOrbital.
strike_damage_to_players_armor_amount integer # noqa: E501
:param strike_damage_to_players_armor_amount: The strike_damage_to_players_armor_amount of this GetCharactersCharacterIdStatsOrbital. # noqa: E501
:type: int
"""
self._strike_damage_to_players_armor_amount = strike_damage_to_players_armor_amount
@property
def strike_damage_to_players_shield_amount(self):
"""Gets the strike_damage_to_players_shield_amount of this GetCharactersCharacterIdStatsOrbital. # noqa: E501
strike_damage_to_players_shield_amount integer # noqa: E501
:return: The strike_damage_to_players_shield_amount of this GetCharactersCharacterIdStatsOrbital. # noqa: E501
:rtype: int
"""
return self._strike_damage_to_players_shield_amount
@strike_damage_to_players_shield_amount.setter
def strike_damage_to_players_shield_amount(self, strike_damage_to_players_shield_amount):
"""Sets the strike_damage_to_players_shield_amount of this GetCharactersCharacterIdStatsOrbital.
strike_damage_to_players_shield_amount integer # noqa: E501
:param strike_damage_to_players_shield_amount: The strike_damage_to_players_shield_amount of this GetCharactersCharacterIdStatsOrbital. # noqa: E501
:type: int
"""
self._strike_damage_to_players_shield_amount = strike_damage_to_players_shield_amount
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, GetCharactersCharacterIdStatsOrbital):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
py | 1a479b58292704df1e7d37191e7fa9d240012d1c | # -*- coding: utf-8 -*-
"""
database.py
~~~~~~~~~~~~~~~~~~~~~~~~~~~
BatteryDataBase data structures.
"""
from chemdataextractor_batteries.chemdataextractor import Document
import json
import copy
class BatteryDataBase():
def __init__(self, paper_root, save_root, filename):
self.dic = None
self.filename = filename
self.paper_root = paper_root
self.count = 0
self.save_root = save_root
def write_into_file(self):
with open('{}/{}.json'.format(self.save_root, self.filename), 'a', encoding='utf-8') as json_file:
json.dump(self.dic, json_file, ensure_ascii=False)
json_file.write('\n')
return
def extract(self, file):
"""
        :param file: the file to parse (HTML/XML/...)
        :return: None; the extracted records are written to the output JSON file
"""
# try:
f = open(file, 'rb')
d = Document.from_file(f)
print('parsing ' + file)
rough = d.records.serialize()
print(rough)
data = []
for dic in rough:
if 'Compound' in dic:
continue
try:
dic['metadata'] = d.metadata[0].serialize()
if dic['metadata']['doi'] == "None":
pass
except BaseException:
pass
self.count += 1
if self.is_valid(dic):
dic_list = self.distribute(dic)
data += dic_list
if len(data) <= 3:
for i in data:
i['warning'] = 1
for new_dic in data:
self.dic = new_dic
self.write_into_file()
print(str(self.count) + ' relations in total')
print(file + ' is done')
f.close()
# except BaseException:
# pass
def is_valid(self, dic):
"""
Check if the data record is valid or not
:param dic:
:return:
"""
if "BatteryVolumeCapacity" in dic:
return False
else:
try:
if 'names' in next(iter(dic.values()))['compound']['Compound']:
return True
except BaseException:
return False
def distribute(self, dic):
"""
:param dic: A dictionary returned by CDE
:return: A list of dictionaries with valid records
"""
"""
        Extract chemical names when the list of names has length > 1
Create a new key: 'names' (list)
"""
# Create a key 'names' (list)
name_length = next(iter(dic.values()))['compound']['Compound']['names']
next(iter(dic.values()))['names'] = [name_length[0]]
if len(name_length) > 1:
for j in name_length[1:]:
if j.lower() not in [x.lower()
for x in next(iter(dic.values()))['names']]:
next(iter(dic.values()))['names'].append(j)
# Update the key 'value' as a list of float
next(iter(dic.values()))['value'] = json.loads(
next(iter(dic.values()))['value'])
# Distribute
dic_lists = self.distribute_value_and_names(dic)
return dic_lists
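    # Added illustrative example (the field name 'BatteryCapacity' is hypothetical):
    # a record such as {'BatteryCapacity': {'names': ['LiFePO4', 'LFP'], 'value': [150.0], ...}}
    # is expanded by distribute_value_and_names() into two records, one per name,
    # each carrying the single float 150.0; when both lists are longer, every
    # (name, value) combination becomes its own record.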
def distribute_value_and_names(self, dic):
"""
:param dic: A single dictionary, with keys 'names' and 'value' as 2 lists
:return: A list of dictionaries with single name and value
"""
dic_list = []
len_names = len(next(iter(dic.values()))['names'])
len_values = len(next(iter(dic.values()))['value'])
copydic = copy.deepcopy(dic)
if len_names == 1 and len_values == 1:
next(iter(copydic.values()))['value'] = next(
iter(dic.values()))['value'][0]
next(iter(copydic.values()))['names'] = next(
iter(dic.values()))['names'][0]
dic_list.append(copydic)
elif len_names == 1 and len_values > 1:
for j in range(len_values):
next(iter(copydic.values()))['value'] = float(
next(iter(dic.values()))['value'][j])
next(iter(copydic.values()))['names'] = next(
iter(dic.values()))['names'][0]
dic_list.append(copydic)
elif len_names > 1 and len_values == 1:
for j in range(len_names):
next(iter(copydic.values()))['value'] = float(
next(iter(dic.values()))['value'][0])
next(iter(copydic.values()))['names'] = next(
iter(dic.values()))['names'][j]
dic_list.append(copydic)
elif len_names == len_values and len_names > 1:
for j in range(len_names):
next(iter(copydic.values()))['value'] = float(
next(iter(dic.values()))['value'][j])
next(iter(copydic.values()))['names'] = next(
iter(dic.values()))['names'][j]
dic_list.append(copydic)
else:
for j in range(len_names):
for k in range(len_values):
next(iter(copydic.values()))['value'] = float(
next(iter(dic.values()))['value'][k])
next(
iter(
copydic.values()))['names'] = next(
iter(
dic.values()))['names'][j]
dic_list.append(copydic)
return dic_list
|
py | 1a479bc4f0331ec3fa2eaf7a0541d6913c6457bb | """snow URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
from django.conf.urls.static import static
from django.conf import settings
from django.views.generic.base import RedirectView
urlpatterns = [
path('admin/', admin.site.urls),
    path('favicon.ico', RedirectView.as_view(url='/static/images/favicon.ico')),
#path('snow\.snow', RedirectView.as_view(url='/static/images/snow.png')),
path('', include('main.urls')),
]
#if settings.DEBUG:
# urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) |
py | 1a479c6372025f1f3abef88b62a3b6cfe3abbcd7 | #!/usr/bin/env python
# encoding: utf-8
"""
sms-recovery.py
Created by Brian DeRenzi on 2010-04-27.
Copyright (c) 2010 __MyCompanyName__. All rights reserved.
"""
import sys
import os
import MySQLdb
from datetime import datetime, timedelta
DB_HOST = "localhost"
DB_USER = "changeme"
DB_PASSWORD = "changeme"
DB_NAME = "changeme"
INSERT = "insert into logger_message set connection_id='%s', is_incoming='1', text='%s', date='%s'"
def german_to_est_time(input_string):
format_string = "%Y-%m-%d %H:%M:%S"
german_date = datetime.strptime(input_string, format_string)
delta = timedelta(hours=6)
est_date = german_date - delta
output_string = est_date.strftime(format_string)
print "%s to %s" % (input_string, output_string)
return output_string
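# Added example (values are illustrative): the conversion simply subtracts six
# hours, so german_to_est_time("2010-04-27 12:00:00") returns "2010-04-27 06:00:00".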
def main():
# connect to DB
db = MySQLdb.connect(DB_HOST, DB_USER, DB_PASSWORD, DB_NAME)
cursor = db.cursor()
counter = 0
error_count = 0
fin = open("sms-logs.txt", 'r')
for line in fin:
parts = line.partition(":")
values = parts[2].split("|")
# hardcode to ignore one we don't care about. this is a one
# time script, it's ok
if values[3] == '123':
continue
# values are in the format:
# timestamp, 0(?), dest#, from, message\n
message = values[4].strip()
date = german_to_est_time(values[0])
print "Adding message '%s' to db" % message
try:
sql = "select id from reporters_persistantconnection \
where identity='%s'" % values[3]
cursor.execute(sql)
results = cursor.fetchall()
conn_id = results[0][0] # first row, first column
sql = INSERT % (conn_id, message, date)
# print " sql: %s" % sql
cursor.execute(sql)
counter = counter + 1
except Exception, e:
print " ERROR adding record '%s' to db.\n %s" % (message, unicode(e))
error_count = error_count + 1
print "SUMMARY"
print "%s of 207 incoming messages added" % counter
print "%s errors logged" % error_count
if __name__ == '__main__':
main()
|
py | 1a479c807718870b13314d82a2a49cb95cf39986 | # coding:utf8
import warnings
class DefaultConfig(object):
#visualization parameter
env = 'default' # visdom environment
vis_port =8097 # visdom port
#load file parameter
train_data_root = './data/train'
test_data_root = './data/test'
load_model_path = None
pre_load_model_path = None
save_test_root = './results'
save_train_root = './checkpoints'
weights = './weights/weights_cr16.txt'
#training parameter
batch_size = 10 # batch size
num_workers = 4 # how many workers for loading data
print_freq = 20 # print info every N batch
max_epoch = 10
lr = 0.001 # initial learning rate
momentum = 0.9
lr_decay = 0.5 # when val_loss increase, lr = lr*lr_decay
lr_decay_ever = 3
    weight_decay = 0  # weight decay (L2 regularization) term in the loss function
#test related parameter
frame_num = 32
#model related parameter
cr = 1/16
height = 160
width = 160
blk_size = 16
ref_size = 32
alpha = 0.5
noise_snr = 0
device = 'cuda'
#refresh config
def _parse(self, kwargs):
for k, v in kwargs.items():
if not hasattr(self, k):
warnings.warn("Warning: opt has not attribut %s" % k)
setattr(self, k, v)
print('user config:')
for k, v in self.__class__.__dict__.items():
if not k.startswith('_'):
print(k, getattr(self, k))
#save config when training
def write_config(self,kwargs,save_root):
f = open(save_root+"/"+"config.txt","w")
for k,v in self.__class__.__dict__.items():
if not k.startswith('_'):
#print(k, getattr(self, k))
config_info = k + str(getattr(self,k))
f.write("%s"%config_info)
f.write("\n")
f.close()
opt = DefaultConfig()
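# Added usage sketch (the keyword values are hypothetical): training scripts
# typically override defaults and then snapshot the effective config, e.g.
#   opt._parse({'lr': 0.01, 'batch_size': 4})
#   opt.write_config({}, opt.save_train_root)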
|
py | 1a479c92ae0091997347c7a5a48fa6562c8502bd | ##Elias Howell | 10/24/2019 | Homework #3
#Compares two lists and returns a list of items shared by the two
def similar_items(list1, list2):
listOfItems = []
for item in list1:
if item in list2:
listOfItems.append(item)
return listOfItems
#Compares two lists and returns a list of items not shared by the two
def unique_items(list1, list2):
listOfItems = []
for item in list1:
if item not in list2:
listOfItems.append(item)
return listOfItems
#Takes the sum of all items in a list
def sum_items(myList):
summationOfItems = 0
for item in myList:
summationOfItems += item
return summationOfItems
#Takes the product of all items in a list
def multiply_items(myList):
productOfItems = 1
for item in myList:
productOfItems *= item
return productOfItems
#Finds and returns the smallest value in a list
def minimum_item(myList):
minValue = myList[0]
for item in myList:
if item < minValue:
minValue = item
return minValue
#Finds and returns the largest value in a list
def maximum_item(myList):
maxValue = myList[0]
for item in myList:
if item > maxValue:
maxValue = item
return maxValue
|
py | 1a479cb1130b621b4aa7c8eed0beaca26a0c7b98 | from datetime import datetime
from datetime import date
from typing import Optional
from sqlalchemy.orm.attributes import InstrumentedAttribute
from sqlalchemy.orm import dynamic
from flask_atomic.orm.database import db
from flask_atomic.orm.mixins.core import CoreMixin

logger = logging.getLogger(__name__)
def extract(model, fields=None, exclude: Optional[set] = None) -> dict:
resp = dict()
if exclude is None:
exclude = set()
if fields is None:
fields = model.keys()
restricted_fields = getattr(model, 'RESTRICTED_FIELDS', set())
if restricted_fields:
        fields = set(fields).difference(restricted_fields)
exclude = exclude.union(restricted_fields or set())
for column in set(fields).difference(set(exclude)):
if isinstance(getattr(model, column), datetime) or isinstance(getattr(model, column), date):
resp[column] = str(getattr(model, column))
else:
resp[column] = getattr(model, column)
return resp
class DeclarativeBase(db.Model, CoreMixin):
"""
Base model to be extended for use with Flask projects.
Core concept of the model is common functions to help wrap up database
interaction into a single interface. Testing can be rolled up easier this
way also. Inheriting from this class automatically sets id field and db
soft deletion field managed by active using the DYNA pattern (D, Y, N, A).
Basic usage::
from flask_atomic.sqlalchemy.declarative import DeclarativeBase
class MyNewModel(DeclarativeBase):
field_a = db.Column(db.String(256), nullable=True)
"""
__abstract__ = True
# active = db.Column(db.String(5), default='Y')
def __str__(self):
return self.whatami()
@classmethod
def identify_primary_key(cls):
return list(cls.__table__.primary_key).pop().name
@classmethod
def checkfilters(cls, filters):
resp = {}
for k, v in filters.items():
resp[cls.normalise(k)] = v
return resp
@classmethod
def getquery(cls):
return db.session.query
@classmethod
def makequery(cls, fields=None):
try:
# return db.session.query(cls, fields)
if not fields:
return cls.query
return db.session.query(cls, *fields)
except Exception as e:
logger.error(str(e))
db.session.rollback()
return db.session.query(cls, *fields)
@classmethod
def relations(cls, flag):
if flag == True:
return set(cls.__mapper__.relationships.keys())
elif isinstance(flag, list):
return set(flag)
return set()
@classmethod
def relationattrs(cls):
return set(cls.__mapper__.relationships.keys())
@classmethod
def objectcolumns(cls, include_relationships=False):
bound_columns = set(cls.__mapper__.columns)
        if include_relationships:
            rels = cls.__mapper__.relationships
            return bound_columns.union({rel.class_attribute for rel in rels})
return bound_columns
@classmethod
def keys(cls):
return set(cls.__table__.columns.keys())
@classmethod
def schema(cls, rel=True, exclude=None):
if exclude is None:
exclude = []
schema = []
for item in [key for key in cls.keys() if key not in exclude]:
schema.append(dict(name=item.replace('_', ' '), key=item))
return schema
@classmethod
def getkey(cls, field):
if isinstance(field, InstrumentedAttribute):
return getattr(cls, field.key)
return getattr(cls, field)
def relationships(self, root=''):
return list(filter(lambda r: r != root, self.__mapper__.relationships.keys()))
def columns(self, exc: Optional[list] = None) -> list:
"""
Gets a list of columns to work with, minus the excluded sublist (exc).
:param exc:
:return:
"""
if exc is None:
exc = list()
return [key for key in list(self.__table__.columns.keys()) if key not in exc]
def whatami(self) -> str:
"""
Self-describe the model.
:return: Descriptive name based on the tablename used at declaration.
"""
# I am not a number :)
return self.__tablename__
def process_relationships(self, root: str, exclude: set = None, rels=None):
resp = dict()
if rels is None or isinstance(rels, bool):
rels = self.relationships(root)
for idx, item in enumerate(rels):
# First check if it is a sub lookup
_lookup = None
if hasattr(self, '__i__' + item):
resp[item] = getattr(self, '__i__' + item)
continue
sublookup = False
if '.' in item:
sublookup = True
lookup = item.split('.')
_lookup = lookup.copy()
relationship_instance = getattr(getattr(self, lookup.pop(0), None), lookup.pop())
else:
relationship_instance = getattr(self, item, None)
if isinstance(relationship_instance, dynamic.AppenderMixin):
# TO handle dynamic relationships (lazy=dynamic)
fields = set(map(lambda x: x.key, relationship_instance._entity_zero().column_attrs)).difference(exclude)
resp[item] = []
if hasattr(self, '__i__' + item):
resp[item] = getattr(self, '__i__' + item)
else:
for index, entry in enumerate(relationship_instance.all()):
resp[item].append(extract(entry, fields))
elif isinstance(relationship_instance, list):
# if relationship_instance.uselist:
if sublookup:
parent = _lookup.pop(0)
attr = _lookup.pop()
else:
resp[item] = []
for index, entry in enumerate(relationship_instance):
fields = set(entry.keys()).difference(exclude)
if sublookup:
if not resp.get(parent, None):
resp[parent] = dict()
resp[parent].setdefault(attr, []).append(entry.extract(fields))
else:
resp[item].append(entry.extract(set(entry.keys()).difference(exclude)))
elif relationship_instance:
fields = set(relationship_instance.keys()).difference(exclude)
if _lookup:
resp[_lookup.pop(0)][_lookup.pop()] = relationship_instance.extract(fields)
else:
resp[item] = relationship_instance.extract(fields)
return resp
def extract(self, fields=None, exclude: Optional[set] = None, **kwargs) -> dict:
resp = dict()
if exclude is None:
exclude = set()
if fields is None:
fields = self.keys()
restricted_fields = getattr(self, 'RESTRICTED_FIELDS', set())
if restricted_fields and not kwargs.get('private', None):
            fields = set(fields).difference(restricted_fields)
            exclude = exclude.union(restricted_fields)
for column in set(fields).difference(set(exclude)):
if isinstance(getattr(self, column), datetime) or isinstance(getattr(self, column), date):
resp[column] = str(getattr(self, column))
else:
resp[column] = getattr(self, column)
return resp
def serialize(self, fields=None, exc: Optional[set] = None, rels=False, root=None, exclude=None, functions=None,
**kwargs):
"""
        This utility function dynamically converts SQLAlchemy model classes into a
        dict using introspective lookups. This saves on manually mapping each
        model and all the fields. However, exclusions should be noted, such as
        passwords and protected properties.
        :param functions: Mapping of field name to a callable applied to that field's value.
        :param fields: More of a whitelist of fields to include (preferred way)
        :param rels: Whether or not to introspect relationships
        :param exc: Fields to exclude from the query result set
        :param root: Root model for processing relationships. This acts as a
        recursive sentinel to prevent infinite recursion caused by selecting a
        related model and then endlessly traversing the root's own relationships
        from itself. The only other remedy is to use one-way relationships,
        avoiding any back-referencing of models.
        :param exclude: Exclusions in set form. Currently superseded by the exc param.
        :return: json data structure of model
        :rtype: dict
        """
if functions is None:
functions = {}
if exclude is None:
exclude = set()
else:
exclude = set(exclude)
if not fields:
fields = set(self.fields())
if root is None:
root = self.whatami()
if exc is None:
exc = {'password'}
        exclude = exclude.union(exc)
# Define our model properties here. Columns and Schema relationships
resp = self.extract(fields, exc, **kwargs)
if functions:
for key, value in functions.items():
resp[f'_{key}'] = value(getattr(self, key))
        restricted_fields = set(fields).intersection(getattr(self, 'RESTRICTED_FIELDS', set()))
        if restricted_fields:
            fields = set(fields).difference(restricted_fields)
            exclude = exclude.union(restricted_fields)
rels = rels or set(self.relationships()).intersection(fields)
if not rels or len(set(self.relationships())) < 1:
return resp
# for rel in rels:
# if rel in [i.split('__i__').pop() for i in self.__dict__ if '__i__' in i]:
# rels.remove(rel)
resp.update(self.process_relationships(root, rels=rels, exclude=exclude))
return resp
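    # A hedged usage sketch for serialize(), assuming a mapped model `MyNewModel`
    # (as in the class docstring above) with a `field_a` column and Flask-SQLAlchemy's
    # query attribute available:
    #   instance = MyNewModel.query.first()
    #   payload = instance.serialize(fields={'id', 'field_a'}, rels=False)
    #   # -> {'id': ..., 'field_a': ...}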
def __eq__(self, comparison):
if type(self) != type(comparison):
raise ValueError('Objects are not the same. Cannot compare')
base = self.columns()
base_dictionary = self.__dict__
        comp_dictionary = comparison.__dict__
flag = True
for column_name in base:
if base_dictionary[column_name] != comp_dictionary[column_name]:
flag = False
break
return flag
|
py | 1a479d4fc1542b6ea7ba4755e6d13e5fad7a0835 | # -*- coding: utf-8 -*-
from collections import Counter
from typing import List
class Solution:
def canBeEqual(self, target: List[int], arr: List[int]) -> bool:
return Counter(target) == Counter(arr)
if __name__ == '__main__':
solution = Solution()
assert solution.canBeEqual([1, 2, 3, 4], [2, 4, 1, 3])
assert solution.canBeEqual([7], [7])
assert solution.canBeEqual([1, 12], [12, 1])
assert not solution.canBeEqual([3, 7, 9], [3, 7, 11])
assert solution.canBeEqual([1, 1, 1, 1, 1], [1, 1, 1, 1, 1])
|
py | 1a479d7f818ba870738c32d3a4b8277d18c38179 | from marshmallow import fields, Schema
class GetPhoneNumberRequestSchema(Schema):
address = fields.String(required=True, allow_none=False, validate=fields.validate.Length(min=1))
class GetPhoneNumberResponseSchema(Schema):
formatted_phone_number = fields.String()
|
py | 1a479dada083e46f1d588453f6fb22922f88b921 | """models.cipher
This module contains the ciphers that are stored in the database
"""
import json
from app import db
from models import funcs
from sqlalchemy import sql
class Cipher(db.Model):
"""
The Cipher class stores the cipher string for an individual site's info.
This also contains an enumeration of the different types of cipher
Attributes:
id (int): The id of this cipher
user_id (Foreign Key): The user associated with this cipher
folder_id (Foreign Key): The folder that contains this cipher
organization_id (str): ID of the organization this is associated with
cipher_type (int): The type of cipher
favorite (bool): If this cipher is a favorite or not
data (str): JSON serialized data contained in this cipher
fields (str): JSON serialized fields contained in this cipher
name (str): JSON serialized name of cipher
notes (str): JSON serialized note on cipher
login (str): JSON serialized login
secure_note (str): JSON serialized secure note
card (str): JSON serialized card
identity (str): JSON serialized identity
attachments (str): JSON serialized attachments
create_date (DateTime): The creation time of this cipher
update_date (DateTime): The time of the last update to this cipher
"""
# Type enumeration
TYPE_LOGIN = 1
TYPE_NOTE = 2
TYPE_CARD = 3
TYPE_IDENTITY = 4
# Member variables
id = db.Column(
db.String(64), name='id', primary_key=True,
default=funcs.generateSecureUUID
)
user_id = db.Column(
db.String(64), db.ForeignKey('user.id', ondelete='CASCADE')
)
folder_id = db.Column(
db.String(64), db.ForeignKey('folder.id', ondelete='CASCADE'),
nullable=True
)
organization_id = db.Column(db.String(64), nullable=True)
cipher_type = db.Column(db.Integer, nullable=False)
favorite = db.Column(db.Boolean(), default=False, nullable=False)
data = db.Column(db.JSON(), nullable=True)
name = db.Column(db.JSON(), nullable=True)
notes = db.Column(db.JSON(), nullable=True)
fields = db.Column(db.JSON(), nullable=True)
login = db.Column(db.JSON(), nullable=True)
secure_note = db.Column(db.JSON(), nullable=True)
card = db.Column(db.JSON(), nullable=True)
identity = db.Column(db.JSON(), nullable=True)
attachments = db.Column(db.JSON(), nullable=True)
create_date = db.Column(db.DateTime(), server_default=sql.func.now())
update_date = db.Column(
db.DateTime(), server_default=sql.func.now(), onupdate=sql.func.now()
)
# Functions
    @staticmethod
    def type_str(in_type):
        """
        Returns a string representation of the input type
        Args:
            :param in_type: The input type
        Returns:
            str: The string representation
        """
if(in_type is Cipher.TYPE_LOGIN):
return 'login'
elif(in_type is Cipher.TYPE_NOTE):
return 'note'
elif(in_type is Cipher.TYPE_CARD):
return 'card'
elif(in_type is Cipher.TYPE_IDENTITY):
return 'identity'
else:
return str(in_type)
def updateFromParams(self, params):
"""
This function will update a cipher based on the passed in parameters
Args:
:param self: This object
:param params: A dictionary of params
"""
self.folder_id = params['folderid']
self.organization_id = params['organizationid']
self.favorite = bool(params['favorite'])
        self.cipher_type = int(params['type'])
self.name = params['name']
self.notes = params['notes']
self.fields = funcs.uppercaseFirstHash(params['fields'])
# Parse additional data based on cipher type
if(self.cipher_type is Cipher.TYPE_LOGIN):
login_data = funcs.uppercaseFirstHash(params['login'])
if(login_data['Uris'] and isinstance(login_data['Uris'], dict)):
login_data['Uris'] = funcs.uppercaseFirstHash(
login_data['Uris']
)
self.login = login_data
elif(self.cipher_type is Cipher.TYPE_NOTE):
self.secure_note = funcs.uppercaseFirstHash(params['securenote'])
elif(self.cipher_type is Cipher.TYPE_CARD):
self.card = funcs.uppercaseFirstHash(params['card'])
        elif(self.cipher_type is Cipher.TYPE_IDENTITY):
            # TODO: Implement more types
            self.identity = funcs.uppercaseFirstHash(params['identity'])
def toHash(self):
"""
Returns the cipher as a hash.
Args:
:param self: The object
Returns:
dict: The hash representation of the object
"""
return {
'Id': self.id,
'Type': self.cipher_type,
'RevisionDate': self.update_date.strftime(
'%Y-%m-%dT%H:%M:%S.000000Z'
),
'FolderId': self.folder_id,
'Favorite': self.favorite,
'OrganizationId': self.organization_id,
'Attachments': self.attachments,
'OrganizationUserTotp': False,
'Object': 'cipher',
'Name': self.name,
'Notes': self.notes,
'Fields': self.fields,
'Login': self.login,
'Card': self.card,
'Identity': self.identity,
'SecureNote': self.secure_note
}
def migrateData(self):
"""
This function will migrate data from being an all in one and split it
into separate fields.
If there is no data, we will just return false. If the data is not able
to be turned into a JSON, we will raise a ValueError. If the data is
not a dict or a string, we will raise a TypeError.
Args:
:param self: The object
Raises:
TypeError: If this object's data is not a dict or string
ValueError: If this object can not become a JSON
NotImplementedError: If we try to migrate from a nonsupported type
"""
if(self.data is None):
return False
if(isinstance(self.data, str)):
try:
data = json.loads(self.data)
except(Exception):
raise ValueError
elif(isinstance(self.data, dict)):
data = self.data
else:
raise TypeError
self.name = data['Name']
del data['Name']
self.notes = data['Notes']
del data['Notes']
self.fields = data['Fields']
del data['Fields']
if(self.cipher_type is self.TYPE_LOGIN):
data['Uris'] = {
'Uri': data['Uri'],
'Match': None
}
del data['Uri']
self.login = data
elif(self.cipher_type is self.TYPE_NOTE):
self.secure_note = data
elif(self.cipher_type is self.TYPE_CARD):
self.card = data
elif(self.cipher_type is self.TYPE_IDENTITY):
self.identity = data
else:
raise NotImplementedError
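    # Hedged sketch of the migration flow described above (the JSON layout below is
    # an assumption about the legacy all-in-one format expected by migrateData):
    #   legacy = Cipher(cipher_type=Cipher.TYPE_LOGIN,
    #                   data='{"Name": "site", "Notes": null, "Fields": null, "Uri": "https://example.com"}')
    #   legacy.migrateData()   # splits data into name / notes / fields / login
    #   legacy.toHash()        # now serialises from the dedicated columns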
|
py | 1a479e0f9cc58d2cc566300cfa7053d392934dd0 | # Autogenerated from KST: please remove this line if doing any edits by hand!
import unittest
from process_coerce_usertype1 import ProcessCoerceUsertype1
class TestProcessCoerceUsertype1(unittest.TestCase):
def test_process_coerce_usertype1(self):
with ProcessCoerceUsertype1.from_file('src/process_coerce_bytes.bin') as r:
self.assertEqual(r.records[0].flag, 0)
self.assertEqual(r.records[0].buf.value, 1094795585)
self.assertEqual(r.records[1].flag, 1)
self.assertEqual(r.records[1].buf.value, 1111638594)
|
py | 1a479e35cf685eef717ef6b850a0fecc715e25f0 | """
Interpolation Search
An algorithm that improves on the inefficiency of binary search. Binary search
narrows the range by half on every step regardless of where the target lies,
whereas interpolation search probes near the target's estimated position
(e.g. toward the front if the target is judged to sit relatively early), so it
is typically faster than binary search because the probe lands close to the
data being searched for.
"""
from __future__ import print_function
try:
raw_input # Python 2
except NameError:
raw_input = input # Python 3
# interpolation search
def interpolation_search(sorted_collection, item):
"""
    The input must already be sorted;
    otherwise the result may be incorrect.
    :param sorted_collection: sorted array to search
    :param item: the key value to search for
    :return: position (index) of the key, or None if it is not present
"""
left = 0
right = len(sorted_collection) - 1
while left <= right:
point = left + ((item - sorted_collection[left]) * (right - left)) // (sorted_collection[right] - sorted_collection[left])
#out of range check
if point<0 or point>=len(sorted_collection):
return None
current_item = sorted_collection[point]
if current_item == item:
return point
else:
if item < current_item:
right = point - 1
else:
left = point + 1
return None
# interpolation search using recursion
def interpolation_search_by_recursion(sorted_collection, item, left, right):
"""
    The first (outermost) call must use left = 0 and right = (len(sorted_collection) - 1) as initial values.
    :param left: start of the search range
    :param right: end of the search range
"""
point = left + ((item - sorted_collection[left]) * (right - left)) // (sorted_collection[right] - sorted_collection[left])
#out of range check
if point<0 or point>=len(sorted_collection):
return None
if sorted_collection[point] == item:
return point
elif sorted_collection[point] > item:
return interpolation_search_by_recursion(sorted_collection, item, left, point-1)
else:
return interpolation_search_by_recursion(sorted_collection, item, point+1, right)
# helper function that checks whether the input is sorted
def __assert_sorted(collection):
if collection != sorted(collection):
print('error: Collection must be sorted')
raise ValueError('Collection must be sorted')
return True
if __name__ == '__main__':
import sys
user_input = raw_input('Enter numbers separated by comma:\n').strip()
collection = [int(item) for item in user_input.split(',')]
try:
__assert_sorted(collection)
except ValueError:
sys.exit('Sequence must be sorted to apply interpolation search')
target_input = raw_input('Enter a single number to be found in the list:\n')
target = int(target_input)
    # using the interpolation_search function
result = interpolation_search(collection, target)
if result is not None:
print('{} interpolation search found at positions: {}'.format(target, result))
else:
print('Not found')
    # using the interpolation_search_by_recursion function
result = interpolation_search_by_recursion(collection, target, 0, len(collection)-1)
if result is not None:
print('{} interpolation search by recursion found at positions: {}'.format(target, result))
else:
print('Not found')
|
py | 1a479f9f12566afd235ebfb3014343817874d2a1 | x,y=list(map(int,input().split()))
z=abs(x-y)
if(z%2==0):
print("even")
else:
print("odd")
|
py | 1a479fbd17aca41c6c1d22c6d269d00060714466 | def calculate():
operation = input('''
---------------------------------------------------------------------------------------------
Calculadora do Killian San aka Matilha San V3.2 (em andamento)
Update de cores!!! (sem cores por enquanto :/)
Versão ainda em andamento, se ocorrer bugs, por favor, deixe um comentário no GitHub.
---------------------------------------------------------------------------------------------
Por favor, escolha uma das operações e digite o simbolo dela e depois aperte ENTER:
(+) adição | (3+) adição de 3 números | (4+) adição de 4 números
(-) subtração | (3-) subtração de 3 números | (4-) subtração de 4 números
(*) multiplicação | (3*) multiplicação de 3 números | (4*) multiplicação de 4 números
(/) divisão | (3/) divisão de 3 números | (4/) divisão de 4 números
(1) cm para polegadas | (2) polegadas para cm |
para as operações (1) e (2) digite 0 no "Primeiro Número" e no "Segundo Número".
---------------------------------------------------------------------------------------------
Para reiniciar o programa, digite denovo().
''')
number_1 = int(input('Primeiro Número: '))
number_2 = int(input('Segundo Número: '))
if operation == '3+':
number_3 = int(input('Terceiro Número: '))
if operation == '3-':
number_3 = int(input('Terceiro Número: '))
if operation == '3*':
number_3 = int(input('Terceiro Número: '))
if operation == '3/':
number_3 = int(input('Terceiro Número: '))
if operation == '4+':
number_3 = int(input('Terceiro Número :'))
number_4 = int(input('Quarto Número: '))
if operation == '4-':
number_3 = int(input('Terceiro Número :'))
number_4 = int(input('Quarto Número: '))
if operation == '4*':
number_3 = int(input('Terceiro Número :'))
number_4 = int(input('Quarto Número: '))
if operation == '4/':
number_3 = int(input('Terceiro Número :'))
number_4 = int(input('Quarto Número: '))
if operation == '1':
number_1 = int(input('Quantos Cm ? '))
if operation == '2':
number_1 = int(input('Quantas polegadas ? '))
if operation == '+':
print('{} + {} = '.format(number_1, number_2))
print(number_1 + number_2)
calculate()
elif operation == '1':
print('{} / 2.54'.format(number_1, 2.54))
print(number_1 / 2.54)
calculate()
elif operation == '2':
print('{} * 2.54'.format(number_1, 2.54))
print(number_1 * 2.54)
calculate()
elif operation == '3+':
print('{} + {} + {} = '.format(number_1, number_2, number_3))
print(number_1 + number_2 + number_3)
calculate()
elif operation == '3-':
print('{} - {} - {} = '.format(number_1, number_2, number_3))
print(number_1 - number_2 - number_3)
calculate()
elif operation == '3*':
print('{} * {} * {} = '.format(number_1, number_2, number_3))
print(number_1 * number_2 * number_3)
calculate()
elif operation == '3/':
print('{} / {} / {} = '.format(number_1, number_2, number_3))
print(number_1 / number_2 / number_3)
calculate()
elif operation == '4+':
print('{} + {} + {} + {} = '.format(number_1, number_2, number_3, number_4))
print(number_1 + number_2 + number_3 + number_4)
calculate()
elif operation == '4-':
print('{} - {} - {} - {} = '.format(number_1, number_2, number_3, number_4))
print(number_1 - number_2 - number_3 - number_4)
calculate()
elif operation == '4*':
print('{} * {} * {} * {} = '.format(number_1, number_2, number_3, number_4))
print(number_1 * number_2 * number_3 * number_4)
calculate()
elif operation == '4/':
print('{} / {} / {} / {} = '.format(number_1, number_2, number_3, number_4))
print(number_1 / number_2 / number_3 / number_4)
calculate()
elif operation == '-':
print('{} - {} = '.format(number_1, number_2))
print(number_1 - number_2)
calculate()
elif operation == '*':
print('{} * {} = '.format(number_1, number_2))
print(number_1 * number_2)
calculate()
elif operation == '/':
print('{} / {} = '.format(number_1, number_2))
print(number_1 / number_2)
calculate()
elif operation == 'adição':
print('{} + {} = '.format(number_1, number_2))
print(number_1 + number_2)
calculate()
elif operation == 'subtração':
print('{} - {} = '.format(number_1, number_2))
print(number_1 - number_2)
calculate()
elif operation == 'multiplicação':
print('{} * {} = '.format(number_1, number_2))
print(number_1 * number_2)
calculate()
elif operation == 'divisão':
print('{} / {} = '.format(number_1, number_2))
print(number_1 / number_2)
calculate()
else:
print('')
print('Você digitou uma operação inválida, reiniciando...')
calculate()
def denovo():
    calc_denovo = input('''
    Quer calcular denovo ?
    SIM ou NAO ?
    ''')
    resposta = calc_denovo.strip().upper()
    if resposta in ('SIM', 'S'):
        calculate()
    elif resposta in ('NAO', 'NÃO', 'N'):
        print('Obrigado por baixar e usar meu primeiro software que funciona!')
        quit()
    else:
        denovo()
calculate()
#Still a work in progress; if bugs occur, please leave a comment on GitHub
|
py | 1a47a0308c3ce23c5436d59206cc1e73b3a97783 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkrds.endpoint import endpoint_data
class AllocateReadWriteSplittingConnectionRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Rds', '2014-08-15', 'AllocateReadWriteSplittingConnection','rds')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_ConnectionStringPrefix(self):
return self.get_query_params().get('ConnectionStringPrefix')
def set_ConnectionStringPrefix(self,ConnectionStringPrefix):
self.add_query_param('ConnectionStringPrefix',ConnectionStringPrefix)
def get_DistributionType(self):
return self.get_query_params().get('DistributionType')
def set_DistributionType(self,DistributionType):
self.add_query_param('DistributionType',DistributionType)
def get_DBInstanceId(self):
return self.get_query_params().get('DBInstanceId')
def set_DBInstanceId(self,DBInstanceId):
self.add_query_param('DBInstanceId',DBInstanceId)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_Weight(self):
return self.get_query_params().get('Weight')
def set_Weight(self,Weight):
self.add_query_param('Weight',Weight)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_Port(self):
return self.get_query_params().get('Port')
def set_Port(self,Port):
self.add_query_param('Port',Port)
def get_NetType(self):
return self.get_query_params().get('NetType')
def set_NetType(self,NetType):
self.add_query_param('NetType',NetType)
def get_MaxDelayTime(self):
return self.get_query_params().get('MaxDelayTime')
def set_MaxDelayTime(self,MaxDelayTime):
self.add_query_param('MaxDelayTime',MaxDelayTime) |
py | 1a47a039920abdaf31fb9c834a765f1f76cbf1e1 | import _thread
import contextlib
import socketserver
import time
from http.server import BaseHTTPRequestHandler
from onlinepayments.sdk.communicator import Communicator
from onlinepayments.sdk.defaultimpl.default_authenticator import DefaultAuthenticator
from onlinepayments.sdk.defaultimpl.default_connection import DefaultConnection
from onlinepayments.sdk.endpoint_configuration import EndpointConfiguration
from onlinepayments.sdk.factory import Factory
from onlinepayments.sdk.meta_data_provider import MetaDataProvider
def create_handler(call_able):
"""Creates a handler that serves requests by calling the callable object
with this handler as argument
"""
class RequestHandler(BaseHTTPRequestHandler):
def do_GET(self):
call_able(self)
time.sleep(0.1) # sleep to avoid dropping the client before it can read the response
def do_POST(self):
call_able(self)
time.sleep(0.1) # sleep to avoid dropping the client before it can read the response
def do_HEAD(self):
pass
def do_DELETE(self):
call_able(self)
time.sleep(0.1) # sleep to avoid dropping the client before it can read the response
return RequestHandler
@contextlib.contextmanager
def create_server_listening(call_able):
"""Context manager that creates a thread with a server at localhost which listens for requests
and responds by calling the *call_able* function.
:param call_able: a callable function to handle incoming requests, when a request comes in
the function will be called with a SimpleHTTPRequestHandler to handle the request
:return the url where the server is listening (http://localhost:port)
"""
server = socketserver.TCPServer(('localhost', 0), create_handler(call_able), bind_and_activate=True)
try:
# frequent polling server for a faster server shutdown and faster tests
_thread.start_new(server.serve_forever, (0.1,))
yield 'http://localhost:' + str(server.server_address[1])
finally:
server.shutdown()
server.server_close()
def create_client(http_host, connect_timeout=0.500, socket_timeout=0.500,
max_connections=EndpointConfiguration.DEFAULT_MAX_CONNECTIONS):
connection = DefaultConnection(connect_timeout, socket_timeout, max_connections)
authenticator = DefaultAuthenticator("apiKey", "secret")
meta_data_provider = MetaDataProvider("OnlinePayments")
communicator = Communicator(
api_endpoint=http_host,
authenticator=authenticator,
meta_data_provider=meta_data_provider,
connection=connection)
return Factory.create_client_from_communicator(communicator)
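# A hedged sketch of how the helpers above compose in a test (the handler body
# is an assumption, not part of this module):
#   def ok_handler(request):
#       request.send_response(200)
#       request.send_header('Content-Type', 'application/json')
#       request.end_headers()
#       request.wfile.write(b'{}')
#
#   with create_server_listening(ok_handler) as host:
#       client = create_client(host)   # client now points at the local test server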
|
py | 1a47a2a6fed40459c2943f784709560b5a73f000 | '''Count the number of occurrences of each word in the input. Not very smart;
mostly useful as example/testing.'''
# Copyright (c) Los Alamos National Security, LLC, and others.
from . import base
class Job(base.Line_Input_Job, base.Line_Output_Job):
def map(self, line):
for word in line.split():
yield (word, None)
def reduce(self, word, nones):
yield '%d %s' % (len(list(nones)) * self.params['factor'], word)
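# Illustrative behaviour (hedged; assumes params contains the 'factor' key used
# above): for the input line "a b a" and factor == 1, the reducer emits
# "2 a" and "1 b".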
|
py | 1a47a2ad5de8ad3101c3321de8eeff680b07c733 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.service_client import SDKClient
from msrest import Serializer, Deserializer
from ._configuration import MonitorManagementClientConfiguration
from .operations import GuestDiagnosticsSettingsAssociationOperations
from .operations import GuestDiagnosticsSettingsOperations
from . import models
class MonitorManagementClient(SDKClient):
"""Monitor Management Client
:ivar config: Configuration for client.
:vartype config: MonitorManagementClientConfiguration
:ivar guest_diagnostics_settings_association: GuestDiagnosticsSettingsAssociation operations
:vartype guest_diagnostics_settings_association: azure.mgmt.monitor.v2018_06_01_preview.operations.GuestDiagnosticsSettingsAssociationOperations
:ivar guest_diagnostics_settings: GuestDiagnosticsSettings operations
:vartype guest_diagnostics_settings: azure.mgmt.monitor.v2018_06_01_preview.operations.GuestDiagnosticsSettingsOperations
:param credentials: Credentials needed for the client to connect to Azure.
:type credentials: :mod:`A msrestazure Credentials
object<msrestazure.azure_active_directory>`
:param subscription_id: The Azure subscription Id.
:type subscription_id: str
:param str base_url: Service URL
"""
def __init__(
self, credentials, subscription_id, base_url=None):
self.config = MonitorManagementClientConfiguration(credentials, subscription_id, base_url)
super(MonitorManagementClient, self).__init__(self.config.credentials, self.config)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self.api_version = '2018-06-01-preview'
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self.guest_diagnostics_settings_association = GuestDiagnosticsSettingsAssociationOperations(
self._client, self.config, self._serialize, self._deserialize)
self.guest_diagnostics_settings = GuestDiagnosticsSettingsOperations(
self._client, self.config, self._serialize, self._deserialize)
|
py | 1a47a30038c581329281d9ed32db856960d9bfde | """
Maximum likelihood covariance estimator.
"""
# Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
# avoid division truncation
import warnings
import numpy as np
from scipy import linalg
from ..base import BaseEstimator
from ..utils import check_array
from ..utils.extmath import fast_logdet
from ..metrics.pairwise import pairwise_distances
from ..utils.validation import _deprecate_positional_args
def log_likelihood(emp_cov, precision):
"""Computes the sample mean of the log_likelihood under a covariance model
computes the empirical expected log-likelihood (accounting for the
normalization terms and scaling), allowing for universal comparison (beyond
this software package)
Parameters
----------
emp_cov : ndarray of shape (n_features, n_features)
Maximum Likelihood Estimator of covariance.
precision : ndarray of shape (n_features, n_features)
The precision matrix of the covariance model to be tested.
Returns
-------
log_likelihood_ : float
Sample mean of the log-likelihood.
"""
p = precision.shape[0]
log_likelihood_ = - np.sum(emp_cov * precision) + fast_logdet(precision)
log_likelihood_ -= p * np.log(2 * np.pi)
log_likelihood_ /= 2.
return log_likelihood_
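# In closed form, the value returned above is
#   (-trace(emp_cov @ precision) + log det(precision) - p * log(2 * pi)) / 2
# i.e. the per-sample Gaussian log-likelihood evaluated at the empirical covariance.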
@_deprecate_positional_args
def empirical_covariance(X, *, assume_centered=False):
"""Computes the Maximum likelihood covariance estimator
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Data from which to compute the covariance estimate
assume_centered : bool, default=False
If True, data will not be centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False, data will be centered before computation.
Returns
-------
covariance : ndarray of shape (n_features, n_features)
Empirical covariance (Maximum Likelihood Estimator).
Examples
--------
>>> from sklearn.covariance import empirical_covariance
>>> X = [[1,1,1],[1,1,1],[1,1,1],
... [0,0,0],[0,0,0],[0,0,0]]
>>> empirical_covariance(X)
array([[0.25, 0.25, 0.25],
[0.25, 0.25, 0.25],
[0.25, 0.25, 0.25]])
"""
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (1, -1))
if X.shape[0] == 1:
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
if assume_centered:
covariance = np.dot(X.T, X) / X.shape[0]
else:
covariance = np.cov(X.T, bias=1)
if covariance.ndim == 0:
covariance = np.array([[covariance]])
return covariance
class EmpiricalCovariance(BaseEstimator):
"""Maximum likelihood covariance estimator
Read more in the :ref:`User Guide <covariance>`.
Parameters
----------
store_precision : bool, default=True
Specifies if the estimated precision is stored.
assume_centered : bool, default=False
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False (default), data are centered before computation.
Attributes
----------
location_ : ndarray of shape (n_features,)
Estimated location, i.e. the estimated mean.
covariance_ : ndarray of shape (n_features, n_features)
Estimated covariance matrix
precision_ : ndarray of shape (n_features, n_features)
Estimated pseudo-inverse matrix.
(stored only if store_precision is True)
Examples
--------
>>> import numpy as np
>>> from sklearn.covariance import EmpiricalCovariance
>>> from sklearn.datasets import make_gaussian_quantiles
>>> real_cov = np.array([[.8, .3],
... [.3, .4]])
>>> rng = np.random.RandomState(0)
>>> X = rng.multivariate_normal(mean=[0, 0],
... cov=real_cov,
... size=500)
>>> cov = EmpiricalCovariance().fit(X)
>>> cov.covariance_
array([[0.7569..., 0.2818...],
[0.2818..., 0.3928...]])
>>> cov.location_
array([0.0622..., 0.0193...])
"""
@_deprecate_positional_args
def __init__(self, *, store_precision=True, assume_centered=False):
self.store_precision = store_precision
self.assume_centered = assume_centered
def _set_covariance(self, covariance):
"""Saves the covariance and precision estimates
Storage is done accordingly to `self.store_precision`.
Precision stored only if invertible.
Parameters
----------
covariance : array-like of shape (n_features, n_features)
Estimated covariance matrix to be stored, and from which precision
is computed.
"""
covariance = check_array(covariance)
# set covariance
self.covariance_ = covariance
# set precision
if self.store_precision:
self.precision_ = linalg.pinvh(covariance)
else:
self.precision_ = None
def get_precision(self):
"""Getter for the precision matrix.
Returns
-------
precision_ : array-like of shape (n_features, n_features)
The precision matrix associated to the current covariance object.
"""
if self.store_precision:
precision = self.precision_
else:
precision = linalg.pinvh(self.covariance_)
return precision
def fit(self, X, y=None):
"""Fits the Maximum Likelihood Estimator covariance model
according to the given training data and parameters.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where n_samples is the number of samples and
n_features is the number of features.
y : Ignored
Not used, present for API consistence purpose.
Returns
-------
self : object
"""
X = self._validate_data(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
covariance = empirical_covariance(
X, assume_centered=self.assume_centered)
self._set_covariance(covariance)
return self
def score(self, X_test, y=None):
"""Computes the log-likelihood of a Gaussian data set with
`self.covariance_` as an estimator of its covariance matrix.
Parameters
----------
X_test : array-like of shape (n_samples, n_features)
Test data of which we compute the likelihood, where n_samples is
the number of samples and n_features is the number of features.
X_test is assumed to be drawn from the same distribution than
the data used in fit (including centering).
y : Ignored
Not used, present for API consistence purpose.
Returns
-------
res : float
The likelihood of the data set with `self.covariance_` as an
estimator of its covariance matrix.
"""
# compute empirical covariance of the test set
test_cov = empirical_covariance(
X_test - self.location_, assume_centered=True)
# compute log likelihood
res = log_likelihood(test_cov, self.get_precision())
return res
def error_norm(self, comp_cov, norm='frobenius', scaling=True,
squared=True):
"""Computes the Mean Squared Error between two covariance estimators.
(In the sense of the Frobenius norm).
Parameters
----------
comp_cov : array-like of shape (n_features, n_features)
The covariance to compare with.
norm : {"frobenius", "spectral"}, default="frobenius"
The type of norm used to compute the error. Available error types:
- 'frobenius' (default): sqrt(tr(A^t.A))
- 'spectral': sqrt(max(eigenvalues(A^t.A))
where A is the error ``(comp_cov - self.covariance_)``.
scaling : bool, default=True
If True (default), the squared error norm is divided by n_features.
If False, the squared error norm is not rescaled.
squared : bool, default=True
Whether to compute the squared error norm or the error norm.
If True (default), the squared error norm is returned.
If False, the error norm is returned.
Returns
-------
result : float
The Mean Squared Error (in the sense of the Frobenius norm) between
`self` and `comp_cov` covariance estimators.
"""
# compute the error
error = comp_cov - self.covariance_
# compute the error norm
if norm == "frobenius":
squared_norm = np.sum(error ** 2)
elif norm == "spectral":
squared_norm = np.amax(linalg.svdvals(np.dot(error.T, error)))
else:
raise NotImplementedError(
"Only spectral and frobenius norms are implemented")
# optionally scale the error norm
if scaling:
squared_norm = squared_norm / error.shape[0]
# finally get either the squared norm or the norm
if squared:
result = squared_norm
else:
result = np.sqrt(squared_norm)
return result
def mahalanobis(self, X):
"""Computes the squared Mahalanobis distances of given observations.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The observations, the Mahalanobis distances of the which we
compute. Observations are assumed to be drawn from the same
distribution than the data used in fit.
Returns
-------
dist : ndarray of shape (n_samples,)
Squared Mahalanobis distances of the observations.
"""
precision = self.get_precision()
# compute mahalanobis distances
dist = pairwise_distances(X, self.location_[np.newaxis, :],
metric='mahalanobis', VI=precision)
return np.reshape(dist, (len(X),)) ** 2
|
py | 1a47a619909d8c68e7ff3f55a7292b76dc36728b | # encoding: utf-8
# author: BrikerMan
# contact: [email protected]
# blog: https://eliyar.biz
# file: abs_task_model.py
# time: 1:43 下午
import json
import os
import pathlib
from abc import ABC, abstractmethod
from typing import Dict, Any, TYPE_CHECKING, Union
import tensorflow as tf
import kashgari
from kashgari.embeddings import ABCEmbedding
from kashgari.logger import logger
from kashgari.processors.abc_processor import ABCProcessor
from kashgari.utils import load_data_object
from kashgari.layers import KConditionalRandomField
if TYPE_CHECKING:
from kashgari.tasks.labeling import ABCLabelingModel
from kashgari.tasks.classification import ABCClassificationModel
class ABCTaskModel(ABC):
def __init__(self) -> None:
self.tf_model: tf.keras.Model = None
self.embedding: ABCEmbedding = None
self.hyper_parameters: Dict[str, Any]
self.sequence_length: int
self.text_processor: ABCProcessor
self.label_processor: ABCProcessor
def to_dict(self) -> Dict[str, Any]:
model_json_str = self.tf_model.to_json()
return {
'tf_version': tf.__version__, # type: ignore
'kashgari_version': kashgari.__version__,
'__class_name__': self.__class__.__name__,
'__module__': self.__class__.__module__,
'config': {
'hyper_parameters': self.hyper_parameters, # type: ignore
'sequence_length': self.sequence_length # type: ignore
},
'embedding': self.embedding.to_dict(), # type: ignore
'text_processor': self.text_processor.to_dict(),
'label_processor': self.label_processor.to_dict(),
'tf_model': json.loads(model_json_str)
}
@classmethod
def default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
"""
The default hyper parameters of the model dict, **all models must implement this function.**
You could easily change model's hyper-parameters.
For example, change the LSTM unit in BiLSTM_Model from 128 to 32.
>>> from kashgari.tasks.classification import BiLSTM_Model
>>> hyper = BiLSTM_Model.default_hyper_parameters()
>>> print(hyper)
{'layer_bi_lstm': {'units': 128, 'return_sequences': False}, 'layer_output': {}}
>>> hyper['layer_bi_lstm']['units'] = 32
>>> model = BiLSTM_Model(hyper_parameters=hyper)
Returns:
hyper params dict
"""
raise NotImplementedError
def save(self, model_path: str, encoding='utf-8') -> str:
pathlib.Path(model_path).mkdir(exist_ok=True, parents=True)
model_path = os.path.abspath(model_path)
with open(os.path.join(model_path, 'model_config.json'), 'w', encoding=encoding) as f:
f.write(json.dumps(self.to_dict(), indent=2, ensure_ascii=False))
f.close()
self.embedding.embed_model.save_weights(os.path.join(model_path, 'embed_model_weights.h5'))
self.tf_model.save_weights(os.path.join(model_path, 'model_weights.h5')) # type: ignore
logger.info('model saved to {}'.format(os.path.abspath(model_path)))
return model_path
@classmethod
def load_model(cls, model_path: str, encoding='utf-8') -> Union["ABCLabelingModel", "ABCClassificationModel"]:
model_config_path = os.path.join(model_path, 'model_config.json')
model_config = json.loads(open(model_config_path, 'r', encoding=encoding).read())
model = load_data_object(model_config)
model.embedding = load_data_object(model_config['embedding'])
model.text_processor = load_data_object(model_config['text_processor'])
model.label_processor = load_data_object(model_config['label_processor'])
tf_model_str = json.dumps(model_config['tf_model'])
model.tf_model = tf.keras.models.model_from_json(tf_model_str,
custom_objects=kashgari.custom_objects)
if isinstance(model.tf_model.layers[-1], KConditionalRandomField):
model.crf_layer = model.tf_model.layers[-1]
model.tf_model.load_weights(os.path.join(model_path, 'model_weights.h5'))
model.embedding.embed_model.load_weights(os.path.join(model_path, 'embed_model_weights.h5'))
return model
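    # Hedged round-trip sketch (assumes `model` is an already-built task model,
    # e.g. a classification model from kashgari.tasks.classification):
    #   path = model.save('saved_model')           # writes model_config.json + weight files
    #   restored = model.__class__.load_model(path)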
@abstractmethod
def build_model(self,
x_data: Any,
y_data: Any) -> None:
raise NotImplementedError
|
py | 1a47a62920a2b304f4c57cd5d0c4b42cb01f1c46 | #!/usr/bin/env python
import networkx as nx
import subprocess as sp
import numpy as np
from eden.converter.fasta import seq_to_networkx
from eden.converter.rna import sequence_dotbracket_to_graph
from eden.util import is_iterable
def difference(seq_a, seq_b):
''' Compute the number of characters that are different between the two sequences.'''
return sum(1 if a != b else 0 for a, b in zip(seq_a, seq_b))
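# Worked example: difference('ACGT', 'ACGA') == 1 and difference('AAAA', 'AAAA') == 0.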
def difference_matrix(seqs):
''' Compute the matrix of differences between all pairs of sequences in input.'''
size = len(seqs)
diff_matrix = np.zeros((size, size))
for i in range(size):
for j in range(i + 1, size):
diff_matrix[i, j] = difference(seqs[i], seqs[j])
return diff_matrix + diff_matrix.T
def max_difference_subselection(seqs, scores=None, max_num=None):
# extract difference matrix
diff_matrix = difference_matrix(seqs)
size = len(seqs)
m = np.max(diff_matrix) + 1
# iterate size - k times, i.e. until only k instances are left
for t in range(size - max_num):
# find pairs with smallest difference
(min_i, min_j) = np.unravel_index(np.argmin(diff_matrix), diff_matrix.shape)
# choose instance with highest score
if scores[min_i] > scores[min_j]:
id = min_i
else:
id = min_j
# remove instance with highest score by setting all its pairwise differences to max value
diff_matrix[id, :] = m
diff_matrix[:, id] = m
# extract surviving elements, i.e. element that have 0 on the diagonal
return np.array([i for i, x in enumerate(np.diag(diff_matrix)) if x == 0])
def rnasubopt_wrapper(sequence, energy_range=None, max_num=None, max_num_subopts=None):
# command line
cmd = 'echo "%s" | RNAsubopt -e %d' % (sequence, energy_range)
out = sp.check_output(cmd, shell=True)
# parse output
text = out.strip().split('\n')
seq_struct_list = [line.split()[0] for line in text[1:max_num_subopts]]
energy_list = [line.split()[1] for line in text[1:max_num_subopts]]
selected_ids = max_difference_subselection(seq_struct_list, scores=energy_list, max_num=max_num)
np_seq_struct_list = np.array(seq_struct_list)
selected_seq_struct_list = list(np_seq_struct_list[selected_ids])
selected_energy_list = list(np.array(energy_list)[selected_ids])
return selected_seq_struct_list, selected_energy_list
def string_to_networkx(header, sequence, **options):
# defaults
energy_range = options.get('energy_range', 10)
max_num = options.get('max_num', 3)
max_num_subopts = options.get('max_num_subopts', 100)
split_components = options.get('split_components', False)
seq_struct_list, energy_list = rnasubopt_wrapper(sequence, energy_range=energy_range, max_num=max_num, max_num_subopts=max_num_subopts)
if split_components:
for seq_struct, energy in zip(seq_struct_list, energy_list):
graph = sequence_dotbracket_to_graph(seq_info=sequence, seq_struct=seq_struct)
graph.graph['info'] = 'RNAsubopt energy=%s max_num=%s' % (energy, max_num)
if graph.number_of_nodes() < 2:
graph = seq_to_networkx(header, sequence, **options)
graph.graph['id'] = header
graph.graph['sequence'] = sequence
graph.graph['structure'] = seq_struct
yield graph
else:
graph_global = nx.Graph()
graph_global.graph['id'] = header
graph_global.graph['info'] = 'RNAsubopt energy_range=%s max_num=%s' % (energy_range, max_num)
graph_global.graph['sequence'] = sequence
for seq_struct in seq_struct_list:
graph = sequence_dotbracket_to_graph(seq_info=sequence, seq_struct=seq_struct)
graph_global = nx.disjoint_union(graph_global, graph)
if graph_global.number_of_nodes() < 2:
graph_global = seq_to_networkx(header, sequence, **options)
yield graph_global
def rnasubopt_to_eden(iterable, **options):
assert(is_iterable(iterable)), 'Not iterable'
for header, seq in iterable:
try:
for graph in string_to_networkx(header, seq, **options):
yield graph
except Exception as e:
            print(e.__doc__)
            print(str(e))
            print('Error in: %s' % seq)
graph = seq_to_networkx(header, seq, **options)
yield graph
|
py | 1a47a8035cb477ad5bf18498f1c617c23c80f5fb | # coding: utf-8
import pprint
import re
import six
class DeleteEdgeCloudRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'edgecloud_id': 'str'
}
attribute_map = {
'edgecloud_id': 'edgecloud_id'
}
def __init__(self, edgecloud_id=None):
"""DeleteEdgeCloudRequest - a model defined in huaweicloud sdk"""
self._edgecloud_id = None
self.discriminator = None
self.edgecloud_id = edgecloud_id
@property
def edgecloud_id(self):
"""Gets the edgecloud_id of this DeleteEdgeCloudRequest.
:return: The edgecloud_id of this DeleteEdgeCloudRequest.
:rtype: str
"""
return self._edgecloud_id
@edgecloud_id.setter
def edgecloud_id(self, edgecloud_id):
"""Sets the edgecloud_id of this DeleteEdgeCloudRequest.
:param edgecloud_id: The edgecloud_id of this DeleteEdgeCloudRequest.
:type: str
"""
self._edgecloud_id = edgecloud_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DeleteEdgeCloudRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
py | 1a47a87086927cb5cdd0e93ac5bcad4b0d143954 | #!/usr/bin/env python3
#
# Copyright (c) 2022 Project CHIP Authors
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Commissioning test.
import os
import sys
from optparse import OptionParser
from base import TestFail, TestTimeout, BaseTestHelper, FailIfNot, logger
from cluster_objects import NODE_ID, ClusterObjectTests
from network_commissioning import NetworkCommissioningTests
import asyncio
# The Thread network dataset TLV for testing, split into T-L-V.
TEST_THREAD_NETWORK_DATASET_TLV = "0e080000000000010000" + \
"000300000c" + \
"35060004001fffe0" + \
"0208fedcba9876543210" + \
"0708fd00000000001234" + \
"0510ffeeddccbbaa99887766554433221100" + \
"030e54657374696e674e6574776f726b" + \
"0102d252" + \
"041081cb3b2efa781cc778397497ff520fa50c0302a0ff"
# Network id for the Thread network; currently a constant value, to be changed to the XPANID of the Thread network.
TEST_THREAD_NETWORK_ID = "fedcba9876543210"
TEST_DISCRIMINATOR = 3840
ENDPOINT_ID = 0
LIGHTING_ENDPOINT_ID = 1
GROUP_ID = 0
def main():
optParser = OptionParser()
optParser.add_option(
"-t",
"--timeout",
action="store",
dest="testTimeout",
default=75,
type='int',
help="The program will return with timeout after specified seconds.",
metavar="<timeout-second>",
)
optParser.add_option(
"-a",
"--address",
action="store",
dest="deviceAddress1",
default='',
type='str',
help="Address of the first device",
)
optParser.add_option(
'--paa-trust-store-path',
dest="paaPath",
default='',
type='str',
help="Path that contains valid and trusted PAA Root Certificates."
)
optParser.add_option(
'--fail-on-report',
action="store_true",
dest="report",
default=False,
help='Use this flag to simulate a failure handling the report. Without this flag, failure is simulated on the stage'
)
(options, remainingArgs) = optParser.parse_args(sys.argv[1:])
timeoutTicker = TestTimeout(options.testTimeout)
timeoutTicker.start()
test = BaseTestHelper(nodeid=112233, testCommissioner=True,
paaTrustStorePath=options.paaPath)
FailIfNot(test.SetNetworkCommissioningParameters(dataset=TEST_THREAD_NETWORK_DATASET_TLV),
"Failed to set network commissioning parameters")
logger.info("Testing PASE connection to device")
# TODO: Start at stage 2 once handling for arming failsafe on pase is done.
if options.report:
for testFailureStage in range(3, 17):
FailIfNot(test.TestPaseOnly(ip=options.deviceAddress1,
setuppin=20202021,
nodeid=1),
"Failed to establish PASE connection with device")
FailIfNot(test.TestCommissionFailureOnReport(1, testFailureStage),
"Commissioning failure tests failed for simulated report failure on stage {}".format(testFailureStage))
else:
for testFailureStage in range(3, 17):
FailIfNot(test.TestPaseOnly(ip=options.deviceAddress1,
setuppin=20202021,
nodeid=1),
"Failed to establish PASE connection with device")
FailIfNot(test.TestCommissionFailure(1, testFailureStage),
"Commissioning failure tests failed for simulated stage failure on stage {}".format(testFailureStage))
# Ensure we can still commission for real
FailIfNot(test.TestPaseOnly(ip=options.deviceAddress1,
setuppin=20202021,
nodeid=1),
"Failed to establish PASE connection with device")
FailIfNot(test.TestCommissionFailure(1, 0), "Failed to commission device")
logger.info("Testing on off cluster")
FailIfNot(test.TestOnOffCluster(nodeid=1,
endpoint=LIGHTING_ENDPOINT_ID,
group=GROUP_ID), "Failed to test on off cluster")
timeoutTicker.stop()
logger.info("Test finished")
# TODO: Python device controller cannot be shutdown clean sometimes and will block on AsyncDNSResolverSockets shutdown.
# Call os._exit(0) to force close it.
os._exit(0)
if __name__ == "__main__":
try:
main()
except Exception as ex:
logger.exception(ex)
TestFail("Exception occurred when running tests.")
|
py | 1a47a876c373d930208083f45dc0b72fca8ba169 | from model.contact import Contact
testdata = [
Contact(firstname="firstname1", middlename="middlename1", lastname="lastname1", nickname="nickname1",
email="email1", email2="email21", email3="email3", homephone="homephone",
workphone="workphone"),
Contact(firstname="firstname2", middlename="middlename2", lastname="lastname2", nickname="nickname2",
email="email12", email2="email22", email3="email32", homephone="homephone2",
workphone="workphone2")
]
|
py | 1a47aae658a12367ed10c69a5babf85fa3d69a7d | import base64
import datetime
import json
import urllib
import flask
import requests
import src.config
redirectdownloadBP = flask.Blueprint(
"redirectdownload", __name__, url_prefix="/api/v1/redirectdownload"
)
@redirectdownloadBP.route("/<name>")
async def redirectdownloadFunction(name):
id = flask.request.args.get("id")
itag = flask.request.args.get("itag")
config = src.config.readConfig()
if config.get("kill_switch") == True:
return
if (
datetime.datetime.strptime(
config.get("token_expiry", datetime.datetime.utcnow()),
"%Y-%m-%d %H:%M:%S.%f",
)
<= datetime.datetime.utcnow()
):
config, drive = src.credentials.refreshCredentials(config)
with open("config.json", "w+") as w:
json.dump(obj=config, fp=w, sort_keys=True, indent=4)
tmp_metadata = src.metadata.jsonExtract(
src.metadata.readMetadata(config), "id", id, False
)
if tmp_metadata:
name = tmp_metadata.get("name", name)
args = "?"
for arg in flask.request.args:
args += "%s=%s&" % (
arg,
urllib.parse.quote(flask.request.args.get(arg, "").encode("utf-8")),
)
session = {"access_token": config.get("access_token")}
session["url"] = "https://www.googleapis.com/drive/v3/files/%s?alt=media" % (id)
if itag and itag != "" and config.get("transcoded") == True:
req = requests.get(
"https://drive.google.com/get_video_info?docid=%s" % (id),
headers={"Authorization": "Bearer %s" % (config.get("access_token"))},
)
parsed = urllib.parse.parse_qs(urllib.parse.unquote(req.text))
if parsed.get("status") == ["ok"]:
for stream in parsed["url"]:
if ("itag=%s" % (itag)) in stream:
url = stream
break
cookie_string = "; ".join(
[str(x) + "=" + str(y) for x, y in req.cookies.items()]
)
session["cookie"] = cookie_string
session["transcoded"] = config.get("transcoded")
session["url"] = url
sessionB64 = base64.b64encode(json.dumps(session).encode("ascii")).decode("ascii")
print(
"/api/v1/download/%s%ssession=%s&"
% (urllib.parse.quote(name.encode("utf-8")), args, sessionB64)
)
if config.get("cloudflare") and config.get("cloudflare") != "":
return flask.redirect(
config.get("cloudflare")
+ "/api/v1/download/%s%ssession=%s&" % (name, args, sessionB64),
code=302,
)
else:
return flask.redirect(
"/api/v1/download/%s%ssession=%s&"
% (urllib.parse.quote(name.encode("utf-8")), args, sessionB64),
code=302,
)
|
py | 1a47ac5b5e9b78e20114afa1841fe8546df96979 | """
This creates and populates directories for ROMS runs on gaggle. It is
designed to work with the "BLANK" version of the .in file,
replacing things like $whatever$ with meaningful values.
"""
import os
import sys
fpth = os.path.abspath('../../')
if fpth not in sys.path:
sys.path.append(fpth)
import forcing_functions as ffun
Ldir, Lfun = ffun.intro()
#import netCDF4 as nc
#import numpy as np
from datetime import datetime, timedelta
fdt = datetime.strptime(Ldir['date_string'], '%Y.%m.%d')
fdt_yesterday = fdt - timedelta(1)
print('- dot_in.py creating files for LiveOcean for ' + Ldir['date_string'])
gtag = Ldir['gtag']
gtagex = gtag + '_' + Ldir['ex_name']
EX_NAME = Ldir['ex_name'].upper()
#### USER DEFINED VALUES ####
# which ROMS code to use
roms_name = 'LO_ROMS'
# account for differences when using biology
do_bio = False
multi_core = True # use more than one core
if Ldir['run_type'] == 'backfill':
days_to_run = 1.0
elif Ldir['run_type'] == 'forecast':
days_to_run = float(Ldir['forecast_days'])
# time step in seconds (should fit evenly into 3600 sec)
if Ldir['blow_ups'] == 0:
dtsec = 60
elif Ldir['blow_ups'] == 1:
dtsec = 50
elif Ldir['blow_ups'] == 2:
dtsec = 40
elif Ldir['blow_ups'] == 3:
dtsec = 30
elif Ldir['blow_ups'] == 4:
dtsec = 20
elif Ldir['blow_ups'] == 5:
dtsec = 10
elif Ldir['blow_ups'] == 6:
dtsec = 8
elif Ldir['blow_ups'] == 7:
dtsec = 5
else:
print('Unsupported number of blow ups: %d' % (Ldir['blow_ups']))
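# NOTE: when blow_ups exceeds 7 only the warning above is printed; dtsec is left
# undefined and the derived time-step quantities below will raise a NameError.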
ndtfast = 20
restart_nrrec = '-1' # '-1' for a non-crash restart file, otherwise '1' or '2'
his_interval = 3600 # seconds to define and write to history files
rst_interval = 10 # days between writing to the restart file (e.g. 5)
# which forcings to look for
atm_dir = 'BLANK/' # which atm forcing files to use
ocn_dir = 'ocnA/' # which ocn forcing files to use
riv_dir = 'rivE/' # which riv forcing files to use
tide_dir = 'tideA/' # which tide forcing files to use
#### END USER DEFINED VALUES ####
# DERIVED VALUES
if multi_core:
if Ldir['np_num'] == 64: # for new mox nodes 2*32=64 2019_02
ntilei = '8' # number of tiles in I-direction
ntilej = '8' # number of tiles in J-direction
elif Ldir['np_num'] == 72:
ntilei = '6' # number of tiles in I-direction
ntilej = '12' # number of tiles in J-direction
elif Ldir['np_num'] == 144:
ntilei = '8' # number of tiles in I-direction
ntilej = '18' # number of tiles in J-direction
elif Ldir['np_num'] == 196:
ntilei = '14' # number of tiles in I-direction
ntilej = '14' # number of tiles in J-direction
elif Ldir['np_num'] == 392:
ntilei = '14' # number of tiles in I-direction
ntilej = '28' # number of tiles in J-direction
elif Ldir['np_num'] == 588:
ntilei = '21' # number of tiles in I-direction
ntilej = '28' # number of tiles in J-direction
else:
print('Unsupported number of processors: %d' % (Ldir['np_num']))
else:
ntilei = '1'
ntilej = '1'
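# In every multi-core case above ntilei * ntilej equals Ldir['np_num'], since
# ROMS requires the tile decomposition to match the number of MPI processes.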
# if np.mod(3600,dtsec) != 0:
# print('** WARNING: dtsec does not fit evenly into 1 hour **')
if dtsec == int(dtsec):
dt = str(dtsec) + '.0d0' # a string version of dtsec, for the .in file
else:
dt = str(dtsec) + 'd0' # a string version of dtsec, for the .in file
ninfo = int(his_interval/dtsec) # how often to write info to the log file (# of time steps)
nhis = int(his_interval/dtsec) # how often to write to the history files
ndefhis = int(nhis) # how often to create new history files
nrst = int(rst_interval*86400/dtsec)
ntimes = int(days_to_run*86400/dtsec)
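# Worked example: with dtsec = 60, his_interval = 3600, rst_interval = 10 and a
# 1-day backfill run, this gives ninfo = nhis = ndefhis = 60, nrst = 14400 and
# ntimes = 1440 time steps.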
# file location stuff
date_string = Ldir['date_string']
date_string_yesterday = fdt_yesterday.strftime('%Y.%m.%d')
dstart = str(int(Lfun.datetime_to_modtime(fdt) / 86400.))
f_string = 'f' + date_string
f_string_yesterday = 'f'+ date_string_yesterday
# where forcing files live (fjord, as seen from gaggle)
# NOTE: eventually this should not be hard-wired.
lo_dir = Ldir['parent'] + 'LiveOcean/'
loo_dir = Ldir['parent'] + 'LiveOcean_output/'
grid_dir = Ldir['parent'] + 'LiveOcean_data/grids/' + Ldir['gridname'] + '/'
force_dir = loo_dir + gtag + '/' + f_string + '/'
roms_dir = Ldir['parent'] + 'LiveOcean_roms/'
# determine grid size
# gfn = grid_dir + 'grid.nc'
# ds = nc.Dataset(gfn)
# h = ds['h'][:]
# nrows0, ncols0 = h.shape
# nrows = nrows0 - 2
# ncols = ncols0 - 2
#ds.close()
# hardwired because we don't have netCDF4
nrows = 385 - 2
ncols = 142 - 2
# determine number of layers
s_dict = Lfun.csv_to_dict(grid_dir + 'S_COORDINATE_INFO.csv')
nlayers = str(s_dict['N'])
if do_bio:
bio_tag = ''
else:
bio_tag = ''
# the .in file
dot_in_name = 'liveocean.in' # name of the .in file
dot_in_dir00 = Ldir['roms'] + 'output/'
Lfun.make_dir(dot_in_dir00) # make sure it exists
dot_in_dir0 = Ldir['roms'] + 'output/' + gtagex + '/'
Lfun.make_dir(dot_in_dir0) # make sure it exists
dot_in_dir = dot_in_dir0 + f_string +'/'
Lfun.make_dir(dot_in_dir, clean=True) # make sure it exists and is empty
# where to put the output files according to the .in file
out_dir0 = roms_dir + 'output/' + gtagex + '/'
out_dir = out_dir0 + f_string + '/'
if Ldir['start_type'] == 'continuation':
nrrec = '0' # '-1' for a hot restart
#ininame = 'ocean_rst.nc' # for a hot perfect restart
ininame = 'ocean_his_0025.nc' # for a hot restart
ini_fullname = out_dir0 + f_string_yesterday + '/' + ininame
elif Ldir['start_type'] == 'new':
nrrec = '0' # '0' for a history or ini file
ininame = 'ocean_ini' + bio_tag + '.nc' # could be an ini or history file
ini_fullname = force_dir + ocn_dir + ininame
# END DERIVED VALUES
## create .in ##########################
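# Each line of BLANK.in may contain placeholders of the form $name$; any
# placeholder whose name appears in in_varlist is replaced with the string
# value of the local variable of the same name (looked up via eval).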
f = open('BLANK.in','r')
f2 = open(dot_in_dir + dot_in_name,'w')
in_varlist = ['base_dir','ntilei','ntilej','ntimes','dt','nrrec','ninfo',
'nhis','dstart','ndefhis','nrst','force_dir','grid_dir','roms_dir',
'atm_dir','ocn_dir','riv_dir','tide_dir','dot_in_dir',
'ini_fullname','out_dir','EX_NAME','roms_name','bio_tag',
'nrows','ncols', 'nlayers', 'ndtfast']
for line in f:
for var in in_varlist:
if '$'+var+'$' in line:
line2 = line.replace('$'+var+'$', str(eval(var)))
line = line2
else:
line2 = line
f2.write(line2)
f.close()
f2.close()
## npzd2o_Banas.in ###########
f = open('npzd2o_Banas_BLANK.in','r')
bio_dot_in_name = 'npzd2o_Banas.in'
f3 = open(dot_in_dir + bio_dot_in_name,'w')
in_varlist = ['force_dir','riv_dir','bio_tag']
for line in f:
for var in in_varlist:
if '$'+var+'$' in line:
line2 = line.replace('$'+var+'$', str(eval(var)))
line = line2
else:
line2 = line
f3.write(line2)
f.close()
f3.close()
|
py | 1a47accc9daf71210b4a8e926cfe0352242e631a | import numpy
import sympy
from sympy.diffgeom import Manifold, Patch
from pystein import geodesic, metric, coords
from pystein.utilities import tensor_pow as tpow
class TestGeodesic:
def test_numerical(self):
M = Manifold('M', dim=2)
P = Patch('origin', M)
rho, phi, a = sympy.symbols('rho phi a', nonnegative=True)
cs = coords.CoordSystem('schw', P, [rho, phi])
drho, dphi = cs.base_oneforms()
ds2 = a ** 2 * ((1 / (1 - rho ** 2)) * tpow(drho, 2) + rho ** 2 * tpow(dphi, 2))
g = metric.Metric(twoform=ds2)
init = (0.01, 0.01, 0.000001, 0.1)
ts = numpy.arange(0, 1000, 0.1)
df = geodesic.numerical_geodesic(g, init, ts)
print('yay')
def test_parallel(self):
M = Manifold('M', dim=2)
P = Patch('origin', M)
theta, phi, a = sympy.symbols('theta phi a', nonnegative=True)
cs = coords.CoordSystem('spherical', P, [theta, phi])
dtheta, dphi = cs.base_oneforms()
ds2 = a ** 2 * (tpow(dtheta, 2) + sympy.sin(theta) ** 2 * tpow(dphi, 2))
g2 = metric.Metric(twoform=ds2)
param = sympy.symbols('lambda')
curve = [
2 * sympy.pi * param,
sympy.pi / 4,
]
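        # The curve holds phi fixed at pi/4 while theta sweeps one full
        # revolution as the affine parameter runs from 0 to 1.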
lhs_0 = geodesic.parallel_transport_equation(0, curve, param, g2)
print(lhs_0)
|
py | 1a47ad71e2bd0f2782e6f1ba5b7ce14f33354d5e |
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
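# Generated metadata table: maps each enum and class path of the
# Cisco-IOS-XR-infra-objmgr-cfg YANG model to its _MetaInfoEnum /
# _MetaInfoClass descriptor.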
_meta_table = {
'EndPortEnum' : _MetaInfoEnum('EndPortEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg',
{
'echo':'echo',
'discard':'discard',
'daytime':'daytime',
'chargen':'chargen',
'ftp-data':'ftp_data',
'ftp':'ftp',
'ssh':'ssh',
'telnet':'telnet',
'smtp':'smtp',
'time':'time',
'nicname':'nicname',
'tacacs':'tacacs',
'domain':'domain',
'gopher':'gopher',
'finger':'finger',
'www':'www',
'host-name':'host_name',
'pop2':'pop2',
'pop3':'pop3',
'sun-rpc':'sun_rpc',
'ident':'ident',
'nntp':'nntp',
'bgp':'bgp',
'irc':'irc',
'pim-auto-rp':'pim_auto_rp',
'exec':'exec_',
'login':'login',
'cmd':'cmd',
'lpd':'lpd',
'uucp':'uucp',
'klogin':'klogin',
'kshell':'kshell',
'talk':'talk',
'ldp':'ldp',
}, 'Cisco-IOS-XR-infra-objmgr-cfg', _yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-cfg']),
'PortOperatorEnum' : _MetaInfoEnum('PortOperatorEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg',
{
'equal':'equal',
'not-equal':'not_equal',
'greater-than':'greater_than',
'less-than':'less_than',
}, 'Cisco-IOS-XR-infra-objmgr-cfg', _yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-cfg']),
'PortEnum' : _MetaInfoEnum('PortEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg',
{
'echo':'echo',
'discard':'discard',
'daytime':'daytime',
'chargen':'chargen',
'ftp-data':'ftp_data',
'ftp':'ftp',
'ssh':'ssh',
'telnet':'telnet',
'smtp':'smtp',
'time':'time',
'nicname':'nicname',
'tacacs':'tacacs',
'domain':'domain',
'gopher':'gopher',
'finger':'finger',
'www':'www',
'host-name':'host_name',
'pop2':'pop2',
'pop3':'pop3',
'sun-rpc':'sun_rpc',
'ident':'ident',
'nntp':'nntp',
'bgp':'bgp',
'irc':'irc',
'pim-auto-rp':'pim_auto_rp',
'exec':'exec_',
'login':'login',
'cmd':'cmd',
'lpd':'lpd',
'uucp':'uucp',
'klogin':'klogin',
'kshell':'kshell',
'talk':'talk',
'ldp':'ldp',
}, 'Cisco-IOS-XR-infra-objmgr-cfg', _yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-cfg']),
'StartPortEnum' : _MetaInfoEnum('StartPortEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg',
{
'echo':'echo',
'discard':'discard',
'daytime':'daytime',
'chargen':'chargen',
'ftp-data':'ftp_data',
'ftp':'ftp',
'ssh':'ssh',
'telnet':'telnet',
'smtp':'smtp',
'time':'time',
'nicname':'nicname',
'tacacs':'tacacs',
'domain':'domain',
'gopher':'gopher',
'finger':'finger',
'www':'www',
'host-name':'host_name',
'pop2':'pop2',
'pop3':'pop3',
'sun-rpc':'sun_rpc',
'ident':'ident',
'nntp':'nntp',
'bgp':'bgp',
'irc':'irc',
'pim-auto-rp':'pim_auto_rp',
'exec':'exec_',
'login':'login',
'cmd':'cmd',
'lpd':'lpd',
'uucp':'uucp',
'klogin':'klogin',
'kshell':'kshell',
'talk':'talk',
'ldp':'ldp',
}, 'Cisco-IOS-XR-infra-objmgr-cfg', _yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-cfg']),
'ObjectGroup.Port.Objects.Object.Operators.Operator' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Port.Objects.Object.Operators.Operator',
False,
[
_MetaInfoClassMember('operator-type', REFERENCE_ENUM_CLASS, 'PortOperatorEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg', 'PortOperatorEnum',
[], [],
''' operation for ports
''',
'operator_type',
'Cisco-IOS-XR-infra-objmgr-cfg', True),
_MetaInfoClassMember('port', REFERENCE_UNION, 'str' , None, None,
[], [],
''' Port number
''',
'port',
'Cisco-IOS-XR-infra-objmgr-cfg', True, [
_MetaInfoClassMember('port', REFERENCE_ENUM_CLASS, 'PortEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg', 'PortEnum',
[], [],
''' Port number
''',
'port',
'Cisco-IOS-XR-infra-objmgr-cfg', True),
_MetaInfoClassMember('port', ATTRIBUTE, 'int' , None, None,
[('0', '65535')], [],
''' Port number
''',
'port',
'Cisco-IOS-XR-infra-objmgr-cfg', True),
]),
],
'Cisco-IOS-XR-infra-objmgr-cfg',
'operator',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg'
),
},
'ObjectGroup.Port.Objects.Object.Operators' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Port.Objects.Object.Operators',
False,
[
_MetaInfoClassMember('operator', REFERENCE_LIST, 'Operator' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg', 'ObjectGroup.Port.Objects.Object.Operators.Operator',
[], [],
''' op class
''',
'operator',
'Cisco-IOS-XR-infra-objmgr-cfg', False),
],
'Cisco-IOS-XR-infra-objmgr-cfg',
'operators',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg'
),
},
'ObjectGroup.Port.Objects.Object.NestedGroups.NestedGroup' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Port.Objects.Object.NestedGroups.NestedGroup',
False,
[
_MetaInfoClassMember('nested-group-name', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Name of a nested object group
''',
'nested_group_name',
'Cisco-IOS-XR-infra-objmgr-cfg', True),
],
'Cisco-IOS-XR-infra-objmgr-cfg',
'nested-group',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg'
),
},
'ObjectGroup.Port.Objects.Object.NestedGroups' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Port.Objects.Object.NestedGroups',
False,
[
_MetaInfoClassMember('nested-group', REFERENCE_LIST, 'NestedGroup' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg', 'ObjectGroup.Port.Objects.Object.NestedGroups.NestedGroup',
[], [],
''' nested object group
''',
'nested_group',
'Cisco-IOS-XR-infra-objmgr-cfg', False),
],
'Cisco-IOS-XR-infra-objmgr-cfg',
'nested-groups',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg'
),
},
'ObjectGroup.Port.Objects.Object.PortRanges.PortRange' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Port.Objects.Object.PortRanges.PortRange',
False,
[
_MetaInfoClassMember('end-port', REFERENCE_UNION, 'str' , None, None,
[], [],
''' Port number
''',
'end_port',
'Cisco-IOS-XR-infra-objmgr-cfg', True, [
_MetaInfoClassMember('end-port', REFERENCE_ENUM_CLASS, 'EndPortEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg', 'EndPortEnum',
[], [],
''' Port number
''',
'end_port',
'Cisco-IOS-XR-infra-objmgr-cfg', True),
_MetaInfoClassMember('end-port', ATTRIBUTE, 'int' , None, None,
[('0', '65535')], [],
''' Port number
''',
'end_port',
'Cisco-IOS-XR-infra-objmgr-cfg', True),
]),
_MetaInfoClassMember('start-port', REFERENCE_UNION, 'str' , None, None,
[], [],
''' Port number
''',
'start_port',
'Cisco-IOS-XR-infra-objmgr-cfg', True, [
_MetaInfoClassMember('start-port', REFERENCE_ENUM_CLASS, 'StartPortEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg', 'StartPortEnum',
[], [],
''' Port number
''',
'start_port',
'Cisco-IOS-XR-infra-objmgr-cfg', True),
_MetaInfoClassMember('start-port', ATTRIBUTE, 'int' , None, None,
[('0', '65535')], [],
''' Port number
''',
'start_port',
'Cisco-IOS-XR-infra-objmgr-cfg', True),
]),
],
'Cisco-IOS-XR-infra-objmgr-cfg',
'port-range',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg'
),
},
'ObjectGroup.Port.Objects.Object.PortRanges' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Port.Objects.Object.PortRanges',
False,
[
_MetaInfoClassMember('port-range', REFERENCE_LIST, 'PortRange' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg', 'ObjectGroup.Port.Objects.Object.PortRanges.PortRange',
[], [],
''' Match only packets on a given port range
''',
'port_range',
'Cisco-IOS-XR-infra-objmgr-cfg', False),
],
'Cisco-IOS-XR-infra-objmgr-cfg',
'port-ranges',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg'
),
},
'ObjectGroup.Port.Objects.Object' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Port.Objects.Object',
False,
[
_MetaInfoClassMember('object-name', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Port object group name - maximum 64
characters
''',
'object_name',
'Cisco-IOS-XR-infra-objmgr-cfg', True),
_MetaInfoClassMember('description', ATTRIBUTE, 'str' , None, None,
[(0, 100)], [],
''' Up to 100 characters describing this object
''',
'description',
'Cisco-IOS-XR-infra-objmgr-cfg', False),
_MetaInfoClassMember('nested-groups', REFERENCE_CLASS, 'NestedGroups' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg', 'ObjectGroup.Port.Objects.Object.NestedGroups',
[], [],
''' Table of nested port object groups
''',
'nested_groups',
'Cisco-IOS-XR-infra-objmgr-cfg', False),
_MetaInfoClassMember('operators', REFERENCE_CLASS, 'Operators' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg', 'ObjectGroup.Port.Objects.Object.Operators',
[], [],
''' Table of port operators
''',
'operators',
'Cisco-IOS-XR-infra-objmgr-cfg', False),
_MetaInfoClassMember('port-ranges', REFERENCE_CLASS, 'PortRanges' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg', 'ObjectGroup.Port.Objects.Object.PortRanges',
[], [],
''' Table of port range addresses
''',
'port_ranges',
'Cisco-IOS-XR-infra-objmgr-cfg', False),
],
'Cisco-IOS-XR-infra-objmgr-cfg',
'object',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg'
),
},
'ObjectGroup.Port.Objects' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Port.Objects',
False,
[
_MetaInfoClassMember('object', REFERENCE_LIST, 'Object' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg', 'ObjectGroup.Port.Objects.Object',
[], [],
''' Port object group
''',
'object',
'Cisco-IOS-XR-infra-objmgr-cfg', False),
],
'Cisco-IOS-XR-infra-objmgr-cfg',
'objects',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg'
),
},
'ObjectGroup.Port' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Port',
False,
[
_MetaInfoClassMember('objects', REFERENCE_CLASS, 'Objects' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg', 'ObjectGroup.Port.Objects',
[], [],
''' Table of port objects groups
''',
'objects',
'Cisco-IOS-XR-infra-objmgr-cfg', False),
],
'Cisco-IOS-XR-infra-objmgr-cfg',
'port',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg'
),
},
'ObjectGroup.Network.Ipv6.Objects.Object.NestedGroups.NestedGroup' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Network.Ipv6.Objects.Object.NestedGroups.NestedGroup',
False,
[
_MetaInfoClassMember('nested-group-name', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Enter the name of a nested object group
''',
'nested_group_name',
'Cisco-IOS-XR-infra-objmgr-cfg', True),
],
'Cisco-IOS-XR-infra-objmgr-cfg',
'nested-group',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg'
),
},
'ObjectGroup.Network.Ipv6.Objects.Object.NestedGroups' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Network.Ipv6.Objects.Object.NestedGroups',
False,
[
_MetaInfoClassMember('nested-group', REFERENCE_LIST, 'NestedGroup' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg', 'ObjectGroup.Network.Ipv6.Objects.Object.NestedGroups.NestedGroup',
[], [],
''' nested object group
''',
'nested_group',
'Cisco-IOS-XR-infra-objmgr-cfg', False),
],
'Cisco-IOS-XR-infra-objmgr-cfg',
'nested-groups',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg'
),
},
'ObjectGroup.Network.Ipv6.Objects.Object.AddressRanges.AddressRange' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Network.Ipv6.Objects.Object.AddressRanges.AddressRange',
False,
[
_MetaInfoClassMember('end-address', REFERENCE_UNION, 'str' , None, None,
[], [],
''' IPv6 address
''',
'end_address',
'Cisco-IOS-XR-infra-objmgr-cfg', True, [
_MetaInfoClassMember('end-address', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IPv6 address
''',
'end_address',
'Cisco-IOS-XR-infra-objmgr-cfg', True),
_MetaInfoClassMember('end-address', ATTRIBUTE, 'str' , None, None,
[], ['((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' IPv6 address
''',
'end_address',
'Cisco-IOS-XR-infra-objmgr-cfg', True),
]),
_MetaInfoClassMember('start-address', REFERENCE_UNION, 'str' , None, None,
[], [],
''' IPv6 address
''',
'start_address',
'Cisco-IOS-XR-infra-objmgr-cfg', True, [
_MetaInfoClassMember('start-address', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IPv6 address
''',
'start_address',
'Cisco-IOS-XR-infra-objmgr-cfg', True),
_MetaInfoClassMember('start-address', ATTRIBUTE, 'str' , None, None,
[], ['((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' IPv6 address
''',
'start_address',
'Cisco-IOS-XR-infra-objmgr-cfg', True),
]),
],
'Cisco-IOS-XR-infra-objmgr-cfg',
'address-range',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg'
),
},
'ObjectGroup.Network.Ipv6.Objects.Object.AddressRanges' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Network.Ipv6.Objects.Object.AddressRanges',
False,
[
_MetaInfoClassMember('address-range', REFERENCE_LIST, 'AddressRange' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg', 'ObjectGroup.Network.Ipv6.Objects.Object.AddressRanges.AddressRange',
[], [],
''' Range of host addresses
''',
'address_range',
'Cisco-IOS-XR-infra-objmgr-cfg', False),
],
'Cisco-IOS-XR-infra-objmgr-cfg',
'address-ranges',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg'
),
},
'ObjectGroup.Network.Ipv6.Objects.Object.Addresses.Address' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Network.Ipv6.Objects.Object.Addresses.Address',
False,
[
_MetaInfoClassMember('prefix', REFERENCE_UNION, 'str' , None, None,
[], [],
''' IPv6 prefix x:x::x/y
''',
'prefix',
'Cisco-IOS-XR-infra-objmgr-cfg', True, [
_MetaInfoClassMember('prefix', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IPv6 prefix x:x::x/y
''',
'prefix',
'Cisco-IOS-XR-infra-objmgr-cfg', True),
_MetaInfoClassMember('prefix', ATTRIBUTE, 'str' , None, None,
[], ['((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' IPv6 prefix x:x::x/y
''',
'prefix',
'Cisco-IOS-XR-infra-objmgr-cfg', True),
]),
_MetaInfoClassMember('prefix-length', ATTRIBUTE, 'int' , None, None,
[('0', '128')], [],
''' Prefix of the IP Address
''',
'prefix_length',
'Cisco-IOS-XR-infra-objmgr-cfg', True),
],
'Cisco-IOS-XR-infra-objmgr-cfg',
'address',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg'
),
},
'ObjectGroup.Network.Ipv6.Objects.Object.Addresses' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Network.Ipv6.Objects.Object.Addresses',
False,
[
_MetaInfoClassMember('address', REFERENCE_LIST, 'Address' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg', 'ObjectGroup.Network.Ipv6.Objects.Object.Addresses.Address',
[], [],
''' IPv6 address
''',
'address',
'Cisco-IOS-XR-infra-objmgr-cfg', False),
],
'Cisco-IOS-XR-infra-objmgr-cfg',
'addresses',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg'
),
},
'ObjectGroup.Network.Ipv6.Objects.Object.Hosts.Host' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Network.Ipv6.Objects.Object.Hosts.Host',
False,
[
_MetaInfoClassMember('host-address', REFERENCE_UNION, 'str' , None, None,
[], [],
''' host ipv6 address
''',
'host_address',
'Cisco-IOS-XR-infra-objmgr-cfg', True, [
_MetaInfoClassMember('host-address', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' host ipv6 address
''',
'host_address',
'Cisco-IOS-XR-infra-objmgr-cfg', True),
_MetaInfoClassMember('host-address', ATTRIBUTE, 'str' , None, None,
[], ['((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' host ipv6 address
''',
'host_address',
'Cisco-IOS-XR-infra-objmgr-cfg', True),
]),
],
'Cisco-IOS-XR-infra-objmgr-cfg',
'host',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg'
),
},
'ObjectGroup.Network.Ipv6.Objects.Object.Hosts' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Network.Ipv6.Objects.Object.Hosts',
False,
[
_MetaInfoClassMember('host', REFERENCE_LIST, 'Host' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg', 'ObjectGroup.Network.Ipv6.Objects.Object.Hosts.Host',
[], [],
''' A single host address
''',
'host',
'Cisco-IOS-XR-infra-objmgr-cfg', False),
],
'Cisco-IOS-XR-infra-objmgr-cfg',
'hosts',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg'
),
},
'ObjectGroup.Network.Ipv6.Objects.Object' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Network.Ipv6.Objects.Object',
False,
[
_MetaInfoClassMember('object-name', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' IPv6 object group name - maximum 64
characters
''',
'object_name',
'Cisco-IOS-XR-infra-objmgr-cfg', True),
_MetaInfoClassMember('address-ranges', REFERENCE_CLASS, 'AddressRanges' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg', 'ObjectGroup.Network.Ipv6.Objects.Object.AddressRanges',
[], [],
''' Table of ipv6 address ranges
''',
'address_ranges',
'Cisco-IOS-XR-infra-objmgr-cfg', False),
_MetaInfoClassMember('addresses', REFERENCE_CLASS, 'Addresses' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg', 'ObjectGroup.Network.Ipv6.Objects.Object.Addresses',
[], [],
''' Table of ipv6 addresses
''',
'addresses',
'Cisco-IOS-XR-infra-objmgr-cfg', False),
_MetaInfoClassMember('description', ATTRIBUTE, 'str' , None, None,
[(0, 100)], [],
''' Up to 100 characters describing this object
''',
'description',
'Cisco-IOS-XR-infra-objmgr-cfg', False),
_MetaInfoClassMember('hosts', REFERENCE_CLASS, 'Hosts' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg', 'ObjectGroup.Network.Ipv6.Objects.Object.Hosts',
[], [],
''' Table of ipv6 host addresses
''',
'hosts',
'Cisco-IOS-XR-infra-objmgr-cfg', False),
_MetaInfoClassMember('nested-groups', REFERENCE_CLASS, 'NestedGroups' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg', 'ObjectGroup.Network.Ipv6.Objects.Object.NestedGroups',
[], [],
''' Table of nested ipv6 object groups
''',
'nested_groups',
'Cisco-IOS-XR-infra-objmgr-cfg', False),
],
'Cisco-IOS-XR-infra-objmgr-cfg',
'object',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg'
),
},
'ObjectGroup.Network.Ipv6.Objects' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Network.Ipv6.Objects',
False,
[
_MetaInfoClassMember('object', REFERENCE_LIST, 'Object' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg', 'ObjectGroup.Network.Ipv6.Objects.Object',
[], [],
''' IPv6 object group
''',
'object',
'Cisco-IOS-XR-infra-objmgr-cfg', False),
],
'Cisco-IOS-XR-infra-objmgr-cfg',
'objects',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg'
),
},
'ObjectGroup.Network.Ipv6' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Network.Ipv6',
False,
[
_MetaInfoClassMember('objects', REFERENCE_CLASS, 'Objects' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg', 'ObjectGroup.Network.Ipv6.Objects',
[], [],
''' Table of ipv6 object groups
''',
'objects',
'Cisco-IOS-XR-infra-objmgr-cfg', False),
],
'Cisco-IOS-XR-infra-objmgr-cfg',
'ipv6',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg'
),
},
'ObjectGroup.Network.Ipv4.Objects.Object.NestedGroups.NestedGroup' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Network.Ipv4.Objects.Object.NestedGroups.NestedGroup',
False,
[
_MetaInfoClassMember('nested-group-name', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Nested object group
''',
'nested_group_name',
'Cisco-IOS-XR-infra-objmgr-cfg', True),
],
'Cisco-IOS-XR-infra-objmgr-cfg',
'nested-group',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg'
),
},
'ObjectGroup.Network.Ipv4.Objects.Object.NestedGroups' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Network.Ipv4.Objects.Object.NestedGroups',
False,
[
_MetaInfoClassMember('nested-group', REFERENCE_LIST, 'NestedGroup' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg', 'ObjectGroup.Network.Ipv4.Objects.Object.NestedGroups.NestedGroup',
[], [],
''' Nested object group
''',
'nested_group',
'Cisco-IOS-XR-infra-objmgr-cfg', False),
],
'Cisco-IOS-XR-infra-objmgr-cfg',
'nested-groups',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg'
),
},
'ObjectGroup.Network.Ipv4.Objects.Object.AddressRanges.AddressRange' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Network.Ipv4.Objects.Object.AddressRanges.AddressRange',
False,
[
_MetaInfoClassMember('end-address', REFERENCE_UNION, 'str' , None, None,
[], [],
''' IPv4 address
''',
'end_address',
'Cisco-IOS-XR-infra-objmgr-cfg', True, [
_MetaInfoClassMember('end-address', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IPv4 address
''',
'end_address',
'Cisco-IOS-XR-infra-objmgr-cfg', True),
_MetaInfoClassMember('end-address', ATTRIBUTE, 'str' , None, None,
[], ['((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' IPv4 address
''',
'end_address',
'Cisco-IOS-XR-infra-objmgr-cfg', True),
]),
_MetaInfoClassMember('start-address', REFERENCE_UNION, 'str' , None, None,
[], [],
''' IPv4 address
''',
'start_address',
'Cisco-IOS-XR-infra-objmgr-cfg', True, [
_MetaInfoClassMember('start-address', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IPv4 address
''',
'start_address',
'Cisco-IOS-XR-infra-objmgr-cfg', True),
_MetaInfoClassMember('start-address', ATTRIBUTE, 'str' , None, None,
[], ['((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' IPv4 address
''',
'start_address',
'Cisco-IOS-XR-infra-objmgr-cfg', True),
]),
],
'Cisco-IOS-XR-infra-objmgr-cfg',
'address-range',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg'
),
},
'ObjectGroup.Network.Ipv4.Objects.Object.AddressRanges' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Network.Ipv4.Objects.Object.AddressRanges',
False,
[
_MetaInfoClassMember('address-range', REFERENCE_LIST, 'AddressRange' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg', 'ObjectGroup.Network.Ipv4.Objects.Object.AddressRanges.AddressRange',
[], [],
''' Range of host addresses
''',
'address_range',
'Cisco-IOS-XR-infra-objmgr-cfg', False),
],
'Cisco-IOS-XR-infra-objmgr-cfg',
'address-ranges',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg'
),
},
'ObjectGroup.Network.Ipv4.Objects.Object.Addresses.Address' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Network.Ipv4.Objects.Object.Addresses.Address',
False,
[
_MetaInfoClassMember('prefix', REFERENCE_UNION, 'str' , None, None,
[], [],
''' IPv4 address/prefix
''',
'prefix',
'Cisco-IOS-XR-infra-objmgr-cfg', True, [
_MetaInfoClassMember('prefix', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IPv4 address/prefix
''',
'prefix',
'Cisco-IOS-XR-infra-objmgr-cfg', True),
_MetaInfoClassMember('prefix', ATTRIBUTE, 'str' , None, None,
[], ['((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' IPv4 address/prefix
''',
'prefix',
'Cisco-IOS-XR-infra-objmgr-cfg', True),
]),
_MetaInfoClassMember('prefix-length', ATTRIBUTE, 'int' , None, None,
[('0', '32')], [],
''' Prefix of the IP Address
''',
'prefix_length',
'Cisco-IOS-XR-infra-objmgr-cfg', True),
],
'Cisco-IOS-XR-infra-objmgr-cfg',
'address',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg'
),
},
'ObjectGroup.Network.Ipv4.Objects.Object.Addresses' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Network.Ipv4.Objects.Object.Addresses',
False,
[
_MetaInfoClassMember('address', REFERENCE_LIST, 'Address' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg', 'ObjectGroup.Network.Ipv4.Objects.Object.Addresses.Address',
[], [],
''' IPv4 address
''',
'address',
'Cisco-IOS-XR-infra-objmgr-cfg', False),
],
'Cisco-IOS-XR-infra-objmgr-cfg',
'addresses',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg'
),
},
'ObjectGroup.Network.Ipv4.Objects.Object.Hosts.Host' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Network.Ipv4.Objects.Object.Hosts.Host',
False,
[
_MetaInfoClassMember('host-address', REFERENCE_UNION, 'str' , None, None,
[], [],
''' Host ipv4 address
''',
'host_address',
'Cisco-IOS-XR-infra-objmgr-cfg', True, [
_MetaInfoClassMember('host-address', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' Host ipv4 address
''',
'host_address',
'Cisco-IOS-XR-infra-objmgr-cfg', True),
_MetaInfoClassMember('host-address', ATTRIBUTE, 'str' , None, None,
[], ['((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' Host ipv4 address
''',
'host_address',
'Cisco-IOS-XR-infra-objmgr-cfg', True),
]),
],
'Cisco-IOS-XR-infra-objmgr-cfg',
'host',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg'
),
},
'ObjectGroup.Network.Ipv4.Objects.Object.Hosts' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Network.Ipv4.Objects.Object.Hosts',
False,
[
_MetaInfoClassMember('host', REFERENCE_LIST, 'Host' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg', 'ObjectGroup.Network.Ipv4.Objects.Object.Hosts.Host',
[], [],
''' A single host address
''',
'host',
'Cisco-IOS-XR-infra-objmgr-cfg', False),
],
'Cisco-IOS-XR-infra-objmgr-cfg',
'hosts',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg'
),
},
'ObjectGroup.Network.Ipv4.Objects.Object' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Network.Ipv4.Objects.Object',
False,
[
_MetaInfoClassMember('object-name', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' IPv4 object group name - maximum 64
characters
''',
'object_name',
'Cisco-IOS-XR-infra-objmgr-cfg', True),
_MetaInfoClassMember('address-ranges', REFERENCE_CLASS, 'AddressRanges' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg', 'ObjectGroup.Network.Ipv4.Objects.Object.AddressRanges',
[], [],
''' Table of ipv4 host address ranges
''',
'address_ranges',
'Cisco-IOS-XR-infra-objmgr-cfg', False),
_MetaInfoClassMember('addresses', REFERENCE_CLASS, 'Addresses' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg', 'ObjectGroup.Network.Ipv4.Objects.Object.Addresses',
[], [],
''' Table of addresses
''',
'addresses',
'Cisco-IOS-XR-infra-objmgr-cfg', False),
_MetaInfoClassMember('description', ATTRIBUTE, 'str' , None, None,
[(0, 100)], [],
''' Up to 100 characters describing this object
''',
'description',
'Cisco-IOS-XR-infra-objmgr-cfg', False),
_MetaInfoClassMember('hosts', REFERENCE_CLASS, 'Hosts' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg', 'ObjectGroup.Network.Ipv4.Objects.Object.Hosts',
[], [],
''' Table of host addresses
''',
'hosts',
'Cisco-IOS-XR-infra-objmgr-cfg', False),
_MetaInfoClassMember('nested-groups', REFERENCE_CLASS, 'NestedGroups' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg', 'ObjectGroup.Network.Ipv4.Objects.Object.NestedGroups',
[], [],
''' Table of nested ipv4 object groups
''',
'nested_groups',
'Cisco-IOS-XR-infra-objmgr-cfg', False),
],
'Cisco-IOS-XR-infra-objmgr-cfg',
'object',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg'
),
},
'ObjectGroup.Network.Ipv4.Objects' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Network.Ipv4.Objects',
False,
[
_MetaInfoClassMember('object', REFERENCE_LIST, 'Object' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg', 'ObjectGroup.Network.Ipv4.Objects.Object',
[], [],
''' IPv4 object group
''',
'object',
'Cisco-IOS-XR-infra-objmgr-cfg', False),
],
'Cisco-IOS-XR-infra-objmgr-cfg',
'objects',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg'
),
},
'ObjectGroup.Network.Ipv4' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Network.Ipv4',
False,
[
_MetaInfoClassMember('objects', REFERENCE_CLASS, 'Objects' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg', 'ObjectGroup.Network.Ipv4.Objects',
[], [],
''' Table of ipv4 object groups
''',
'objects',
'Cisco-IOS-XR-infra-objmgr-cfg', False),
],
'Cisco-IOS-XR-infra-objmgr-cfg',
'ipv4',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg'
),
},
'ObjectGroup.Network' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Network',
False,
[
_MetaInfoClassMember('ipv4', REFERENCE_CLASS, 'Ipv4' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg', 'ObjectGroup.Network.Ipv4',
[], [],
''' IPv4 object group
''',
'ipv4',
'Cisco-IOS-XR-infra-objmgr-cfg', False),
_MetaInfoClassMember('ipv6', REFERENCE_CLASS, 'Ipv6' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg', 'ObjectGroup.Network.Ipv6',
[], [],
''' IPv6 object group
''',
'ipv6',
'Cisco-IOS-XR-infra-objmgr-cfg', False),
],
'Cisco-IOS-XR-infra-objmgr-cfg',
'network',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg'
),
},
'ObjectGroup' : {
'meta_info' : _MetaInfoClass('ObjectGroup',
False,
[
_MetaInfoClassMember('network', REFERENCE_CLASS, 'Network' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg', 'ObjectGroup.Network',
[], [],
''' Network object group
''',
'network',
'Cisco-IOS-XR-infra-objmgr-cfg', False),
_MetaInfoClassMember('port', REFERENCE_CLASS, 'Port' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg', 'ObjectGroup.Port',
[], [],
''' Port object group
''',
'port',
'Cisco-IOS-XR-infra-objmgr-cfg', False),
],
'Cisco-IOS-XR-infra-objmgr-cfg',
'object-group',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg'
),
},
}
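# Wire up container relationships: each nested class's meta info points back to
# the meta info of its enclosing class.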
_meta_table['ObjectGroup.Port.Objects.Object.Operators.Operator']['meta_info'].parent =_meta_table['ObjectGroup.Port.Objects.Object.Operators']['meta_info']
_meta_table['ObjectGroup.Port.Objects.Object.NestedGroups.NestedGroup']['meta_info'].parent =_meta_table['ObjectGroup.Port.Objects.Object.NestedGroups']['meta_info']
_meta_table['ObjectGroup.Port.Objects.Object.PortRanges.PortRange']['meta_info'].parent =_meta_table['ObjectGroup.Port.Objects.Object.PortRanges']['meta_info']
_meta_table['ObjectGroup.Port.Objects.Object.Operators']['meta_info'].parent =_meta_table['ObjectGroup.Port.Objects.Object']['meta_info']
_meta_table['ObjectGroup.Port.Objects.Object.NestedGroups']['meta_info'].parent =_meta_table['ObjectGroup.Port.Objects.Object']['meta_info']
_meta_table['ObjectGroup.Port.Objects.Object.PortRanges']['meta_info'].parent =_meta_table['ObjectGroup.Port.Objects.Object']['meta_info']
_meta_table['ObjectGroup.Port.Objects.Object']['meta_info'].parent =_meta_table['ObjectGroup.Port.Objects']['meta_info']
_meta_table['ObjectGroup.Port.Objects']['meta_info'].parent =_meta_table['ObjectGroup.Port']['meta_info']
_meta_table['ObjectGroup.Network.Ipv6.Objects.Object.NestedGroups.NestedGroup']['meta_info'].parent =_meta_table['ObjectGroup.Network.Ipv6.Objects.Object.NestedGroups']['meta_info']
_meta_table['ObjectGroup.Network.Ipv6.Objects.Object.AddressRanges.AddressRange']['meta_info'].parent =_meta_table['ObjectGroup.Network.Ipv6.Objects.Object.AddressRanges']['meta_info']
_meta_table['ObjectGroup.Network.Ipv6.Objects.Object.Addresses.Address']['meta_info'].parent =_meta_table['ObjectGroup.Network.Ipv6.Objects.Object.Addresses']['meta_info']
_meta_table['ObjectGroup.Network.Ipv6.Objects.Object.Hosts.Host']['meta_info'].parent =_meta_table['ObjectGroup.Network.Ipv6.Objects.Object.Hosts']['meta_info']
_meta_table['ObjectGroup.Network.Ipv6.Objects.Object.NestedGroups']['meta_info'].parent =_meta_table['ObjectGroup.Network.Ipv6.Objects.Object']['meta_info']
_meta_table['ObjectGroup.Network.Ipv6.Objects.Object.AddressRanges']['meta_info'].parent =_meta_table['ObjectGroup.Network.Ipv6.Objects.Object']['meta_info']
_meta_table['ObjectGroup.Network.Ipv6.Objects.Object.Addresses']['meta_info'].parent =_meta_table['ObjectGroup.Network.Ipv6.Objects.Object']['meta_info']
_meta_table['ObjectGroup.Network.Ipv6.Objects.Object.Hosts']['meta_info'].parent =_meta_table['ObjectGroup.Network.Ipv6.Objects.Object']['meta_info']
_meta_table['ObjectGroup.Network.Ipv6.Objects.Object']['meta_info'].parent =_meta_table['ObjectGroup.Network.Ipv6.Objects']['meta_info']
_meta_table['ObjectGroup.Network.Ipv6.Objects']['meta_info'].parent =_meta_table['ObjectGroup.Network.Ipv6']['meta_info']
_meta_table['ObjectGroup.Network.Ipv4.Objects.Object.NestedGroups.NestedGroup']['meta_info'].parent =_meta_table['ObjectGroup.Network.Ipv4.Objects.Object.NestedGroups']['meta_info']
_meta_table['ObjectGroup.Network.Ipv4.Objects.Object.AddressRanges.AddressRange']['meta_info'].parent =_meta_table['ObjectGroup.Network.Ipv4.Objects.Object.AddressRanges']['meta_info']
_meta_table['ObjectGroup.Network.Ipv4.Objects.Object.Addresses.Address']['meta_info'].parent =_meta_table['ObjectGroup.Network.Ipv4.Objects.Object.Addresses']['meta_info']
_meta_table['ObjectGroup.Network.Ipv4.Objects.Object.Hosts.Host']['meta_info'].parent =_meta_table['ObjectGroup.Network.Ipv4.Objects.Object.Hosts']['meta_info']
_meta_table['ObjectGroup.Network.Ipv4.Objects.Object.NestedGroups']['meta_info'].parent =_meta_table['ObjectGroup.Network.Ipv4.Objects.Object']['meta_info']
_meta_table['ObjectGroup.Network.Ipv4.Objects.Object.AddressRanges']['meta_info'].parent =_meta_table['ObjectGroup.Network.Ipv4.Objects.Object']['meta_info']
_meta_table['ObjectGroup.Network.Ipv4.Objects.Object.Addresses']['meta_info'].parent =_meta_table['ObjectGroup.Network.Ipv4.Objects.Object']['meta_info']
_meta_table['ObjectGroup.Network.Ipv4.Objects.Object.Hosts']['meta_info'].parent =_meta_table['ObjectGroup.Network.Ipv4.Objects.Object']['meta_info']
_meta_table['ObjectGroup.Network.Ipv4.Objects.Object']['meta_info'].parent =_meta_table['ObjectGroup.Network.Ipv4.Objects']['meta_info']
_meta_table['ObjectGroup.Network.Ipv4.Objects']['meta_info'].parent =_meta_table['ObjectGroup.Network.Ipv4']['meta_info']
_meta_table['ObjectGroup.Network.Ipv6']['meta_info'].parent =_meta_table['ObjectGroup.Network']['meta_info']
_meta_table['ObjectGroup.Network.Ipv4']['meta_info'].parent =_meta_table['ObjectGroup.Network']['meta_info']
_meta_table['ObjectGroup.Port']['meta_info'].parent =_meta_table['ObjectGroup']['meta_info']
_meta_table['ObjectGroup.Network']['meta_info'].parent =_meta_table['ObjectGroup']['meta_info']
|
py | 1a47ae1157f678e09ce3c11dac73a03cbc8ffac0 | r"""
`\ZZ`-Filtered Vector Spaces
This module implements filtered vector spaces, that is, a descending
sequence of vector spaces
.. math::
\cdots \supset F_d \supset F_{d+1} \supset F_{d+2} \supset \cdots
with degrees `d\in \ZZ`. It is not required that `F_d` is the entire
ambient space for `d\ll 0` (see
:meth:`~FilteredVectorSpace_class.is_exhaustive`) nor that `F_d=0` for
`d\gg 0` (see :meth:`~FilteredVectorSpace_class.is_separating`). To
construct a filtered vector space, use the :func:`FilteredVectorSpace`
command. It supports easy creation of simple filtrations, for example
the trivial one::
sage: FilteredVectorSpace(2, base_ring=RDF)
RDF^2
The next-simplest filtration has a single non-trivial inclusion
between `V_d` and `V_{d+1}`::
sage: d = 1
sage: V = FilteredVectorSpace(2, d); V
QQ^2 >= 0
sage: [V.get_degree(i).dimension() for i in range(0,4)]
[2, 2, 0, 0]
To construct general filtrations, you need to tell Sage about generating
vectors for the nested subspaces. For example, a dictionary whose keys
are the degrees and values are a list of generators::
sage: r1 = (1, 0, 5)
sage: r2 = (0, 1, 2)
sage: r3 = (1, 2, 1)
sage: V = FilteredVectorSpace({0:[r1, r2, r3], 1:[r1, r2], 3:[r1]}); V
QQ^3 >= QQ^2 >= QQ^1 >= QQ^1 >= 0
For degrees `d` that are not specified, the associated vector subspace
is the same as the next-lower degree, that is, `V_d \simeq
V_{d-1}`. In the above example, this means that
* `V_d \simeq \QQ^3` for `d<0`
* `V_0 = \mathop{span}(r_1, r_2) \simeq \QQ^2`
* `V_1 = V_2 = \mathop{span}(r_3) \simeq \QQ`
* `V_d = 0` for `d \geq 3`
That is::
sage: V.get_degree(0) == V
True
sage: V.get_degree(1) == V.span([r1, r2])
True
sage: V.get_degree(2) == V.get_degree(3) == V.span([r1])
True
sage: V.get_degree(4) == V.get_degree(5) == V.span([])
True
If you have many generators you can just pass the generators once and
then refer to them by index::
sage: FilteredVectorSpace([r1, r2, r3], {0:[0,1,2], 1:[1,2], 3:[1]})
QQ^3 >= QQ^2 >= QQ^1 >= QQ^1 >= 0
Note that generators for the degree-`d` subspace of the filtration are
automatically generators for all lower degrees. For example, here we
do not have to specify the ray `r_2` separately in degree 1::
sage: FilteredVectorSpace([r1, r2, r3], {0:[0 ], 1:[1]})
QQ^2 >= QQ^1 >= 0 in QQ^3
sage: FilteredVectorSpace([r1, r2, r3], {0:[0, 1], 1:[1]})
QQ^2 >= QQ^1 >= 0 in QQ^3
The degree can be infinite (plus infinity), this allows construction
of filtered vector spaces that are not eventually zero in high
degree::
sage: FilteredVectorSpace([r1, r2, r3], {0:[0,1], oo:[1]})
QQ^2 >= QQ^1 in QQ^3
Any field can be used as the vector space base. For example a finite
field::
sage: F.<a> = GF(5^3)
sage: r1 = (a, 0, F(5)); r1
(a, 0, 0)
sage: FilteredVectorSpace([r1, r2, r3], {0:[0,1], oo:[1]}, base_ring=F)
GF(125)^2 >= GF(125)^1 in GF(125)^3
Or the algebraic field::
sage: r1 = (1, 0, 1+QQbar(I)); r1
(1, 0, I + 1)
sage: FilteredVectorSpace([r1, r2, r3], {0:[0,1], oo:[1]}, base_ring=QQbar)
Vector space of dimension 2 over Algebraic Field
>= Vector space of dimension 1 over Algebraic Field
in Vector space of dimension 3 over Algebraic Field
"""
#*****************************************************************************
# Copyright (C) 2013 Volker Braun <[email protected]>
#
# Distributed under the terms of the GNU General Public License (GPL)
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.rings.all import QQ, ZZ, RDF, RR, Integer
from sage.rings.infinity import InfinityRing, infinity, minus_infinity
from sage.categories.fields import Fields
from sage.modules.free_module import FreeModule_ambient_field, VectorSpace
from sage.matrix.constructor import vector, matrix
from sage.misc.all import uniq, cached_method
def is_FilteredVectorSpace(X):
"""
Test whether ``X`` is a filtered vector space.
This function is for library use only.
INPUT:
- ``X`` -- anything.
OUTPUT:
Boolean.
EXAMPLES::
sage: from sage.modules.filtered_vector_space import is_FilteredVectorSpace
sage: V = FilteredVectorSpace(2, 1)
sage: is_FilteredVectorSpace(V)
True
sage: is_FilteredVectorSpace('ceci n\'est pas une pipe')
False
"""
return isinstance(X, FilteredVectorSpace_class)
def FilteredVectorSpace(arg1, arg2=None, base_ring=QQ, check=True):
"""
Construct a filtered vector space.
INPUT:
This function accepts various input that determines the vector space and filtration.
- Just the dimension, ``FilteredVectorSpace(dimension)``: return the trivial filtration
(where all vector spaces are isomorphic).
- Dimension and maximal degree, see
:func:`constructor_from_dim_degree` for arguments. Construct a
filtration with only one non-trivial step `V\supset 0` at the
given cutoff degree.
- A dictionary containing the degrees as keys and a list of vector
space generators as values, see
:func:`FilteredVectorSpace_from_generators`
- Generators and a dictionary containing the degrees as keys and
the indices of vector space generators as values, see
:func:`FilteredVectorSpace_from_generators_indices`
In addition, the following keyword arguments are supported:
- ``base_ring`` -- a field (optional, default `\QQ`). The base
field of the vector space. Must be a field.
EXAMPLES:
Just the dimension for the trivial filtration::
sage: FilteredVectorSpace(2)
QQ^2
Dimension and degree::
sage: FilteredVectorSpace(2, 1)
QQ^2 >= 0
Dictionary of generators::
sage: FilteredVectorSpace({1:[(1,0), (0,1)], 3:[(1,0)]})
QQ^2 >= QQ^1 >= QQ^1 >= 0
Generators and a dictionary referring to them by index::
sage: FilteredVectorSpace([(1,0), (0,1)], {1:[0,1], 3:[0]})
QQ^2 >= QQ^1 >= QQ^1 >= 0
"""
if base_ring not in Fields():
raise ValueError('the base_ring argument must be a field')
if arg1 in ZZ:
return construct_from_dim_degree(arg1, arg2, base_ring, check)
elif arg2 is None:
return construct_from_generators(arg1, base_ring, check)
else:
return construct_from_generators_indices(arg1, arg2, base_ring, check)
def normalize_degree(deg):
"""
Normalize the degree.
INPUT:
- ``deg`` -- something that defines the degree (either integer or
infinity).
OUTPUT:
Plus/minus infinity or a Sage integer.
EXAMPLES::
sage: from sage.modules.filtered_vector_space import normalize_degree
sage: type(normalize_degree(int(1)))
<type 'sage.rings.integer.Integer'>
sage: normalize_degree(oo)
+Infinity
"""
try:
return ZZ(deg)
except TypeError:
pass
deg = InfinityRing(deg)
if deg == infinity:
return infinity
if deg == minus_infinity:
return minus_infinity
raise ValueError('not integer or infinity')
def construct_from_dim_degree(dim, max_degree, base_ring, check):
"""
Construct a filtered vector space.
INPUT:
- ``dim`` -- integer. The dimension.
- ``max_degree`` -- integer or infinity. The maximal degree where
the vector subspace of the filtration is still the entire space.
EXAMPLES::
sage: V = FilteredVectorSpace(2, 5); V
QQ^2 >= 0
sage: V.get_degree(5)
Vector space of degree 2 and dimension 2 over Rational Field
Basis matrix:
[1 0]
[0 1]
sage: V.get_degree(6)
Vector space of degree 2 and dimension 0 over Rational Field
Basis matrix:
[]
sage: FilteredVectorSpace(2, oo)
QQ^2
sage: FilteredVectorSpace(2, -oo)
0 in QQ^2
TESTS::
sage: from sage.modules.filtered_vector_space import construct_from_dim_degree
sage: V = construct_from_dim_degree(2, 5, QQ, True); V
QQ^2 >= 0
"""
if dim not in ZZ:
raise ValueError('dimension must be an integer')
dim = ZZ(dim)
from sage.matrix.constructor import identity_matrix
generators = identity_matrix(base_ring, dim).columns()
filtration = dict()
if max_degree is None:
max_degree = infinity
filtration[normalize_degree(max_degree)] = range(dim)
return construct_from_generators_indices(generators, filtration, base_ring, check)
def construct_from_generators(filtration, base_ring, check):
"""
Construct a filtered vector space.
INPUT:
- ``filtration`` -- a dictionary of filtration steps. Each
filtration step is a pair consisting of an integer degree and a
list/tuple/iterable of vector space generators. The integer
``degree`` stipulates that all filtration steps of degree higher
than or equal to ``degree`` (up to the next filtration step) equal
that subspace.
EXAMPLES::
sage: from sage.modules.filtered_vector_space import construct_from_generators
sage: r = [1, 2]
sage: construct_from_generators({1:[r]}, QQ, True)
QQ^1 >= 0 in QQ^2
"""
def normalize_gen(v):
return tuple(map(base_ring, v))
# convert generator notation to generator+indices
if len(filtration) == 0:
raise ValueError('you need to specify at least one ray to deduce the dimension')
generators = []
for gens in filtration.values():
generators += map(normalize_gen, gens)
generators = tuple(uniq(generators))
# normalize filtration data
normalized = dict()
for deg, gens_deg in filtration.iteritems():
indices = [generators.index(normalize_gen(v)) for v in gens_deg]
normalized[deg] = tuple(indices)
return construct_from_generators_indices(generators, normalized, base_ring, check)
def construct_from_generators_indices(generators, filtration, base_ring, check):
"""
Construct a filtered vector space.
INPUT:
- ``generators`` -- a list/tuple/iterable of vectors, or something
convertible to them. The generators spanning various
subspaces.
- ``filtration`` -- a list or iterable of filtration steps. Each
filtration step is a pair ``(degree, ray_indices)``. The
``ray_indices`` are a list or iterable of ray indices, which
span a subspace of the vector space. The integer ``degree``
stipulates that all filtration steps of degree higher than or equal
to ``degree`` (up to the next filtration step) equal that
subspace.
EXAMPLES::
sage: from sage.modules.filtered_vector_space import construct_from_generators_indices
sage: gens = [(1,0), (0,1), (-1,-1)]
sage: V = construct_from_generators_indices(gens, {1:[0,1], 3:[1]}, QQ, True); V
QQ^2 >= QQ^1 >= QQ^1 >= 0
TESTS::
sage: gens = [(int(1),int(0)), (0,1), (-1,-1)]
sage: construct_from_generators_indices(iter(gens), {int(0):[0, int(1)], 2:[2]}, QQ, True)
QQ^2 >= QQ^1 >= QQ^1 >= 0
"""
# normalize generators
generators = map(list, generators)
# deduce dimension
if len(generators) == 0:
dim = ZZ(0)
else:
dim = ZZ(len(generators[0]))
ambient = VectorSpace(base_ring, dim)
# complete generators to a generating set
if matrix(base_ring, generators).rank() < dim:
complement = ambient.span(generators).complement()
generators = generators + list(complement.gens())
# normalize generators II
generators = tuple(ambient(v) for v in generators)
for v in generators:
v.set_immutable()
# normalize filtration data
normalized = dict()
for deg, gens in filtration.iteritems():
deg = normalize_degree(deg)
gens = map(ZZ, gens)
if any(i < 0 or i >= len(generators) for i in gens):
raise ValueError('generator index out of bounds')
normalized[deg] = tuple(sorted(gens))
try:
del normalized[minus_infinity]
except KeyError:
pass
filtration = normalized
return FilteredVectorSpace_class(base_ring, dim, generators, filtration, check=check)
class FilteredVectorSpace_class(FreeModule_ambient_field):
def __init__(self, base_ring, dim, generators, filtration, check=True):
r"""
A descending filtration of a vector space
INPUT:
- ``base_ring`` -- a field. The base field of the ambient vector space.
- ``dim`` -- integer. The dimension of the ambient vector space.
- ``generators`` -- tuple of generators for the ambient vector
space. These will be used to span the subspaces of the
filtration.
- ``filtration`` -- a dictionary of filtration steps in ray
index notation. See
:func:`construct_from_generators_indices` for details.
- ``check`` -- boolean (optional; default: ``True``). Whether
to perform consistency checks.
TESTS::
sage: from sage.modules.filtered_vector_space import FilteredVectorSpace_class
sage: gens = [(1,0,0), (1,1,0), (1,2,0), (-1,-1, 0), (0,0,1)]
sage: FilteredVectorSpace_class(QQ, 3, gens, {2:(0,1), oo:(4,)})
QQ^3 >= QQ^1
sage: FilteredVectorSpace_class(QQ, 3, gens, {2:(0,1), 3:(4,)})
QQ^3 >= QQ^1 >= 0
The trivial filtration::
sage: FilteredVectorSpace_class(QQ, 3, gens, {}, QQ)
0 in QQ^3
The empty vector space::
sage: FilteredVectorSpace_class(QQ, 0, [], {})
0
Higher-degree generators are automatically generators in lower degrees::
sage: FilteredVectorSpace_class(QQ, 3, gens, {2:(4,), 3:(1,)})
QQ^2 >= QQ^1 >= 0 in QQ^3
"""
if check:
assert isinstance(dim, Integer)
assert base_ring in Fields()
super(FilteredVectorSpace_class, self).__init__(base_ring, dim)
if check:
assert matrix(generators).rank() == self.dimension()
assert isinstance(filtration, dict)
for degree, indices in filtration.iteritems():
assert isinstance(degree, Integer) or degree == infinity
assert isinstance(indices, tuple)
assert all(isinstance(r, Integer) for r in indices)
# Construct subspaces from the generators and store in self._filt
def make_subspace(indices):
return self.span([generators[i] for i in indices])
indices = set(filtration.pop(infinity, []))
V = make_subspace(indices)
filtered_subspaces = [(infinity, V)]
for deg in reversed(sorted(filtration.keys())):
next_V = V
indices.update(filtration[deg])
V = make_subspace(indices)
if V == next_V: # skip trivial filtrations
continue
filtered_subspaces.append((deg, V))
filtered_subspaces.append((minus_infinity, V))
filtered_subspaces.reverse()
self._filt = tuple(filtered_subspaces)
assert self._filt[0][0] is minus_infinity
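        # The resulting self._filt is a tuple of (degree, subspace) pairs in
        # strictly increasing degree, starting at -Infinity with the largest
        # subspace and ending at +Infinity with the intersection of all steps.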
def change_ring(self, base_ring):
"""
Return the same filtration over a different base ring.
INPUT:
- ``base_ring`` -- a ring. The new base ring.
OUTPUT:
This method returns a new filtered vector space whose
subspaces are defined by the same generators but over a
different base ring.
EXAMPLES::
sage: V = FilteredVectorSpace(1, 0); V
QQ^1 >= 0
sage: V.change_ring(RDF)
RDF^1 >= 0
"""
generators, filtration = self.presentation()
return FilteredVectorSpace(generators, filtration, base_ring=base_ring)
def ambient_vector_space(self):
"""
Return the ambient (unfiltered) vector space.
OUTPUT:
A vector space.
EXAMPLES::
sage: V = FilteredVectorSpace(1, 0)
sage: V.ambient_vector_space()
Vector space of dimension 1 over Rational Field
"""
return VectorSpace(self.base_ring(), self.dimension())
@cached_method
def is_constant(self):
"""
Return whether the filtration is constant.
OUTPUT:
Boolean. Whether the filtered vector spaces are identical in
all degrees.
EXAMPLES::
sage: V = FilteredVectorSpace(2); V
QQ^2
sage: V.is_constant()
True
sage: V = FilteredVectorSpace(1, 0); V
QQ^1 >= 0
sage: V.is_constant()
False
sage: V = FilteredVectorSpace({0:[(1,)]}); V
QQ^1 >= 0
sage: V.is_constant()
False
"""
f = self._filt
return (len(f) == 1) or (len(f) == 2 and f[1][0] == infinity)
def is_exhaustive(self):
"""
Return whether the filtration is exhaustive.
A filtration $\{F_d\}$ in an ambient vector space $V$ is
exhaustive if $\cup F_d = V$. See also :meth:`is_separating`.
OUTPUT:
Boolean.
EXAMPLES::
sage: F = FilteredVectorSpace({0:[(1,1)]}); F
QQ^1 >= 0 in QQ^2
sage: F.is_exhaustive()
False
sage: G = FilteredVectorSpace(2, 0); G
QQ^2 >= 0
sage: G.is_exhaustive()
True
"""
return self.get_degree(minus_infinity).dimension() == \
self.ambient_vector_space().dimension()
def is_separating(self):
"""
Return whether the filtration is separating.
A filtration $\{F_d\}$ in an ambient vector space $V$ is
        separating if $\cap F_d = 0$. See also :meth:`is_exhaustive`.
OUTPUT:
Boolean.
EXAMPLES::
sage: F = FilteredVectorSpace({0:[(1,1)]}); F
QQ^1 >= 0 in QQ^2
sage: F.is_separating()
True
sage: G = FilteredVectorSpace({0:[(1,1,0)], oo:[(0,0,1)]}); G
QQ^2 >= QQ^1 in QQ^3
sage: G.is_separating()
False
"""
return self.get_degree(infinity).dimension() == 0
@cached_method
def support(self):
"""
Return the degrees in which there are non-trivial generators.
OUTPUT:
A tuple of integers (and plus infinity) in ascending
order. The last entry is plus infinity if and only if the
filtration is not separating (see :meth:`is_separating`).
EXAMPLES::
sage: G = FilteredVectorSpace({0:[(1,1,0)], 3:[(0,1,0)]}); G
QQ^2 >= QQ^1 >= QQ^1 >= QQ^1 >= 0 in QQ^3
sage: G.support()
(0, 3)
sage: G = FilteredVectorSpace({0:[(1,1,0)], 3:[(0,1,0)], oo:[(0,0,1)]}); G
QQ^3 >= QQ^2 >= QQ^2 >= QQ^2 >= QQ^1
sage: G.support()
(0, 3, +Infinity)
"""
if self.is_separating():
filt = self._filt[1:-1]
else:
filt = self._filt[1:]
return tuple(f[0] for f in filt)
@cached_method
def min_degree(self):
r"""
Return the lowest degree of the filtration.
OUTPUT:
Integer or plus infinity. The largest degree `d` of the
(descending) filtration such that the filtered vector space
`F_d` is still equal to `F_{-\infty}`.
EXAMPLES::
sage: FilteredVectorSpace(1, 3).min_degree()
3
sage: FilteredVectorSpace(2).min_degree()
+Infinity
"""
if self.is_constant():
return infinity
return self._filt[1][0]
@cached_method
def max_degree(self):
r"""
Return the highest degree of the filtration.
OUTPUT:
Integer or minus infinity. The smallest degree of the
filtration such that the filtration is constant to the right.
EXAMPLES::
sage: FilteredVectorSpace(1, 3).max_degree()
4
sage: FilteredVectorSpace({0:[[1]]}).max_degree()
1
sage: FilteredVectorSpace(3).max_degree()
-Infinity
"""
f = self._filt
if len(f) == 1:
return minus_infinity
        d = f[-1][0]
        if d == infinity:
            # the constant case (len(f) == 1) was handled above, so there is
            # at least one finite degree before the +infinity entry
            return f[-2][0] + 1
        else:
            return d + 1
def get_degree(self, d):
r"""
Return the degree-``d`` entry of the filtration.
INPUT:
- ``d`` -- Integer. The desired degree of the filtration.
OUTPUT:
The degree-``d`` vector space in the filtration as subspace of
the ambient space.
EXAMPLES::
sage: rays = [(1,0), (1,1), (1,2), (-1,-1)]
sage: F = FilteredVectorSpace(rays, {3:[1], 1:[1,2]})
sage: F.get_degree(2)
Vector space of degree 2 and dimension 1 over Rational Field
Basis matrix:
[1 1]
sage: F.get_degree(oo)
Vector space of degree 2 and dimension 0 over Rational Field
Basis matrix:
[]
sage: F.get_degree(-oo)
Vector space of degree 2 and dimension 2 over Rational Field
Basis matrix:
[1 0]
[0 1]
"""
d = normalize_degree(d)
for deg, Vdeg in self._filt:
if d <= deg:
return Vdeg
assert False # unreachable
def graded(self, d):
r"""
        Return the associated graded vector space.
INPUT:
- ``d`` -- integer. The degree.
OUTPUT:
The quotient `G_d = F_d / F_{d+1}`.
EXAMPLES::
sage: rays = [(1,0), (1,1), (1,2)]
sage: F = FilteredVectorSpace(rays, {3:[1], 1:[1,2]})
sage: F.graded(1)
Vector space quotient V/W of dimension 1 over Rational Field where
V: Vector space of degree 2 and dimension 2 over Rational Field
Basis matrix:
[1 0]
[0 1]
W: Vector space of degree 2 and dimension 1 over Rational Field
Basis matrix:
[1 1]
"""
return self.get_degree(d).quotient(self.get_degree(d+1))
def presentation(self):
"""
        Return a presentation in terms of generators of various degrees.
OUTPUT:
A pair consisting of generators and a filtration suitable as
input to :func:`~construct_from_generators_indices`.
EXAMPLES::
sage: rays = [(1,0), (1,1), (1,2), (-1,-1)]
sage: F = FilteredVectorSpace(rays, {0:[1, 2], 2:[3]}); F
QQ^2 >= QQ^1 >= QQ^1 >= 0
sage: F.presentation()
(((0, 1), (1, 0), (1, 1)), {0: (1, 0), 2: (2,), +Infinity: ()})
"""
        # this could be done more efficiently with (potentially) fewer generators
generators = set()
filt = self._filt[1:]
for d, V in filt:
generators.update(V.echelonized_basis())
generators = tuple(generators)
filtration = dict()
for d, V in filt:
indices = [ZZ(generators.index(v)) for v in V.echelonized_basis()]
filtration[d] = tuple(indices)
return generators, filtration
def _repr_field_name(self):
"""
        Return an abbreviated field name as a string.
RAISES:
``NotImplementedError``: The field does not have an
abbreviated name defined.
EXAMPLES::
sage: FilteredVectorSpace(2, base_ring=QQ)._repr_field_name()
'QQ'
sage: F.<a> = GF(9)
sage: FilteredVectorSpace(2, base_ring=F)._repr_field_name()
'GF(9)'
sage: FilteredVectorSpace(2, base_ring=AA)._repr_field_name()
Traceback (most recent call last):
...
NotImplementedError
"""
if self.base_ring() == QQ:
return 'QQ'
elif self.base_ring() == RDF:
return 'RDF'
elif self.base_ring() == RR:
return 'RR'
from sage.categories.finite_fields import FiniteFields
if self.base_ring() in FiniteFields():
return 'GF({0})'.format(len(self.base_ring()))
else:
raise NotImplementedError()
def _repr_vector_space(self, dim):
"""
        Return a string representation of the vector space of the given dimension.
INPUT:
- ``dim`` -- integer.
OUTPUT:
String representation of the vector space of dimension ``dim``.
EXAMPLES::
sage: F = FilteredVectorSpace(3, base_ring=RDF)
sage: F._repr_vector_space(1234)
'RDF^1234'
sage: F3 = FilteredVectorSpace(3, base_ring=GF(3))
sage: F3._repr_vector_space(1234)
'GF(3)^1234'
sage: F3 = FilteredVectorSpace(3, base_ring=AA)
sage: F3._repr_vector_space(1234)
'Vector space of dimension 1234 over Algebraic Real Field'
"""
if dim == 0:
return '0'
try:
return self._repr_field_name() + '^' + str(dim)
except NotImplementedError:
return repr(VectorSpace(self.base_ring(), dim))
def _repr_degrees(self, min_deg, max_deg):
"""
        Return a string representation.
This method is like :meth:`_repr_` except that the user can
select the range of degrees to be shown in the output.
INPUT:
- ``min_deg``, ``max_deg`` -- two integers.
EXAMPLES::
sage: rays = [(1,0), (1,1), (1,2), (-1,-1)]
sage: F = FilteredVectorSpace(rays, {0:[1, 2], 2:[3]})
sage: F._repr_degrees(-2, 4)
['QQ^2', 'QQ^2', 'QQ^2', 'QQ^1', 'QQ^1', '0', '0', '0']
"""
degrees = range(min_deg, max_deg+1)
dims = []
for i in degrees + [infinity]:
d = self.get_degree(i).dimension()
dims.append(self._repr_vector_space(d))
return dims
def _repr_(self):
r"""
        Return a string representation of ``self``.
OUTPUT:
A string.
EXAMPLES::
sage: rays = [(1,0), (1,1), (1,2), (-1,-1)]
sage: FilteredVectorSpace(rays, {0:[1, 2], 2:[3]})._repr_()
'QQ^2 >= QQ^1 >= QQ^1 >= 0'
sage: FilteredVectorSpace(rays, {0:[1, 2], oo:[3]})
QQ^2 >= QQ^1
sage: FilteredVectorSpace(rays, {oo:[3]})
QQ^1 in QQ^2
sage: FilteredVectorSpace(rays, {0:[3]})
QQ^1 >= 0 in QQ^2
sage: FilteredVectorSpace({1:[(1,0), (-1,1)], 3:[(1,0)]}, base_ring=GF(3))
GF(3)^2 >= GF(3)^1 >= GF(3)^1 >= 0
sage: FilteredVectorSpace({1:[(1,0), (-1,1)], 3:[(1,0)]}, base_ring=AA)
Vector space of dimension 2 over Algebraic Real Field
>= Vector space of dimension 1 over Algebraic Real Field
>= Vector space of dimension 1 over Algebraic Real Field >= 0
"""
finite_support = [d for d in self.support() if d != infinity]
if len(finite_support) == 0:
dims = self._repr_degrees(0, -1)
else:
min_deg = finite_support[0]
max_deg = finite_support[-1]
dims = self._repr_degrees(min_deg, max_deg)
s = ' >= '.join(dims)
if not self.is_exhaustive():
s += ' in ' + self._repr_vector_space(self.degree())
return s
def __cmp__(self, other):
"""
Compare two filtered vector spaces.
EXAMPLES::
sage: V = FilteredVectorSpace(2, 0)
sage: W = FilteredVectorSpace([(1,0),(0,1)], {0:[0, 1]})
sage: V == W
True
sage: V is W
False
sage: W = FilteredVectorSpace([(1,0),(1,1)], {0:[1]})
sage: V == W
False
TESTS::
sage: P = toric_varieties.P2()
sage: T_P = P.sheaves.tangent_bundle()
sage: O_P = P.sheaves.trivial_bundle(1)
sage: S1 = T_P + O_P
sage: S2 = O_P + T_P
sage: S1._filt[0].is_isomorphic(S2._filt[0]) # known bug
True
sage: FilteredVectorSpace(2, base_ring=QQ) == FilteredVectorSpace(2, base_ring=GF(5))
False
"""
c = cmp(type(self), type(other))
if c!=0: return c
c = cmp(self.base_ring(), other.base_ring())
if c!=0: return c
c = cmp(self.dimension(), other.dimension())
if c!=0: return c
c = cmp(len(self._filt), len(other._filt))
if c!=0: return c
for self_filt, other_filt in zip(self._filt, other._filt):
c = cmp(self_filt[0], other_filt[0]) # compare degree
if c!=0: return c
c = cmp(self_filt[1].echelonized_basis_matrix(), # compare vector subspace
other_filt[1].echelonized_basis_matrix())
if c!=0: return c
return 0
def direct_sum(self, other):
"""
Return the direct sum.
INPUT:
- ``other`` -- a filtered vector space.
OUTPUT:
The direct sum as a filtered vector space.
EXAMPLES::
sage: V = FilteredVectorSpace(2, 0)
sage: W = FilteredVectorSpace({0:[(1,-1),(2,1)], 1:[(1,1)]})
sage: V.direct_sum(W)
QQ^4 >= QQ^1 >= 0
sage: V + W # syntactic sugar
QQ^4 >= QQ^1 >= 0
sage: V + V == FilteredVectorSpace(4, 0)
True
sage: W = FilteredVectorSpace([(1,-1),(2,1)], {1:[0,1], 2:[1]})
sage: V + W
QQ^4 >= QQ^2 >= QQ^1 >= 0
A suitable base ring is chosen if they do not match::
sage: v = [(1,0), (0,1)]
sage: F1 = FilteredVectorSpace(v, {0:[0], 1:[1]}, base_ring=QQ)
sage: F2 = FilteredVectorSpace(v, {0:[0], 1:[1]}, base_ring=RDF)
sage: F1 + F2
RDF^4 >= RDF^2 >= 0
"""
from sage.structure.element import get_coercion_model
base_ring = get_coercion_model().common_parent(self.base_ring(), other.base_ring())
# construct the generators
self_gens, self_filt = self.presentation()
other_gens, other_filt = other.presentation()
generators = \
[ list(v) + [base_ring.zero()]*other.dimension() for v in self_gens ] + \
[ [base_ring.zero()]*self.dimension() + list(v) for v in other_gens ]
# construct the filtration dictionary
def join_indices(self_indices, other_indices):
self_indices = tuple(self_indices)
other_indices = tuple(i + len(self_gens) for i in other_indices)
return self_indices + other_indices
filtration = dict()
self_indices = set()
other_indices = set()
for deg in reversed(uniq(self_filt.keys() + other_filt.keys())):
self_indices.update(self_filt.get(deg, []))
other_indices.update(other_filt.get(deg, []))
gens = join_indices(self_indices, other_indices)
filtration[deg] = gens
return FilteredVectorSpace(generators, filtration, base_ring=base_ring)
__add__ = direct_sum
def tensor_product(self, other):
r"""
Return the graded tensor product.
INPUT:
- ``other`` -- a filtered vector space.
OUTPUT:
The graded tensor product, that is, the tensor product of a
generator of degree `d_1` with a generator in degree `d_2` has
degree `d_1 + d_2`.
EXAMPLES::
sage: F1 = FilteredVectorSpace(1, 1)
sage: F2 = FilteredVectorSpace(1, 2)
sage: F1.tensor_product(F2)
QQ^1 >= 0
sage: F1 * F2
QQ^1 >= 0
sage: F1.min_degree()
1
sage: F2.min_degree()
2
sage: (F1*F2).min_degree()
3
A suitable base ring is chosen if they do not match::
sage: v = [(1,0), (0,1)]
sage: F1 = FilteredVectorSpace(v, {0:[0], 1:[1]}, base_ring=QQ)
sage: F2 = FilteredVectorSpace(v, {0:[0], 1:[1]}, base_ring=RDF)
sage: F1 * F2
RDF^4 >= RDF^3 >= RDF^1 >= 0
"""
V = self
W = other
from sage.structure.element import get_coercion_model
base_ring = get_coercion_model().common_parent(V.base_ring(), W.base_ring())
from sage.modules.tensor_operations import VectorCollection, TensorOperation
V_generators, V_indices = V.presentation()
W_generators, W_indices = W.presentation()
V_coll = VectorCollection(V_generators, base_ring, V.dimension())
W_coll = VectorCollection(W_generators, base_ring, W.dimension())
T = TensorOperation([V_coll, W_coll], 'product')
filtration = dict()
for V_deg in V.support():
for W_deg in W.support():
deg = V_deg + W_deg
indices = filtration.get(deg, set())
for i in V_indices[V_deg]:
for j in W_indices[W_deg]:
i_tensor_j = T.index_map(i, j)
indices.add(i_tensor_j)
filtration[deg] = indices
return FilteredVectorSpace(T.vectors(), filtration, base_ring=base_ring)
__mul__ = tensor_product
def _power_operation(self, n, operation):
"""
Return tensor power operation.
INPUT:
        - ``n`` -- integer. The number of factors of ``self``.
- ``operation`` -- string. See
:class:`~sage.modules.tensor_operations.TensorOperation` for
details.
EXAMPLES::
sage: F = FilteredVectorSpace(1, 1) + FilteredVectorSpace(1, 2); F
QQ^2 >= QQ^1 >= 0
sage: F._power_operation(2, 'symmetric')
QQ^3 >= QQ^2 >= QQ^1 >= 0
sage: F._power_operation(2, 'antisymmetric')
QQ^1 >= 0
"""
from sage.modules.tensor_operations import VectorCollection, TensorOperation
generators, indices = self.presentation()
V = VectorCollection(generators, self.base_ring(), self.dimension())
T = TensorOperation([V] * n, operation)
iters = [self.support()] * n
filtration = dict()
from sage.categories.cartesian_product import cartesian_product
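        # Every n-tuple of degrees from the support contributes generators of
        # total degree sum(degrees): each tuple of generator indices is pushed
        # through ``T.index_map``, which returns the index of the resulting
        # vector in ``T.vectors()`` (or ``None`` when the result is zero, e.g.
        # repeated factors in an antisymmetric power), and the surviving
        # indices are collected per total degree.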
for degrees in cartesian_product(iters):
deg = sum(degrees)
filt_deg = filtration.get(deg, set())
for i in cartesian_product([indices.get(d) for d in degrees]):
pow_i = T.index_map(*i)
if pow_i is not None:
filt_deg.add(pow_i)
filtration[deg] = filt_deg
return FilteredVectorSpace(T.vectors(), filtration, base_ring=self.base_ring())
def exterior_power(self, n):
"""
Return the `n`-th graded exterior power.
INPUT:
- ``n`` -- integer. Exterior product of how many copies of
``self``.
OUTPUT:
The graded exterior product, that is, the wedge product of a
generator of degree `d_1` with a generator in degree `d_2` has
degree `d_1 + d_2`.
EXAMPLES::
sage: F = FilteredVectorSpace(1, 1) + FilteredVectorSpace(1, 2); F
QQ^2 >= QQ^1 >= 0
sage: F.exterior_power(1)
QQ^2 >= QQ^1 >= 0
sage: F.exterior_power(2)
QQ^1 >= 0
sage: F.exterior_power(3)
0
sage: F.wedge(2)
QQ^1 >= 0
"""
return self._power_operation(n, 'antisymmetric')
wedge = exterior_power
def symmetric_power(self, n):
"""
Return the `n`-th graded symmetric power.
INPUT:
- ``n`` -- integer. Symmetric product of how many copies of
``self``.
OUTPUT:
The graded symmetric product, that is, the symmetrization of a
generator of degree `d_1` with a generator in degree `d_2` has
degree `d_1 + d_2`.
EXAMPLES::
sage: F = FilteredVectorSpace(1, 1) + FilteredVectorSpace(1, 2); F
QQ^2 >= QQ^1 >= 0
sage: F.symmetric_power(2)
QQ^3 >= QQ^2 >= QQ^1 >= 0
"""
return self._power_operation(n, 'symmetric')
def dual(self):
"""
Return the dual filtered vector space.
OUTPUT:
The graded dual, that is, the dual of a degree-`d` subspace is
a set of linear constraints in degree `-d+1`. That is, the
dual generators live in degree `-d`.
EXAMPLES::
sage: gens = identity_matrix(3).rows()
sage: F = FilteredVectorSpace(gens, {0:[0,1,2], 2:[0]}); F
QQ^3 >= QQ^1 >= QQ^1 >= 0
sage: F.support()
(0, 2)
sage: F.dual()
QQ^3 >= QQ^2 >= QQ^2 >= 0
sage: F.dual().support()
(-2, 0)
"""
filtration = dict()
prev_deg = minus_infinity
for deg, V in self._filt[1:]:
filtration[-prev_deg] = V.complement().echelonized_basis()
prev_deg = deg
return FilteredVectorSpace(filtration, base_ring=self.base_ring())
def shift(self, deg):
"""
Return a filtered vector space with degrees shifted by a constant.
EXAMPLES::
sage: gens = identity_matrix(3).rows()
sage: F = FilteredVectorSpace(gens, {0:[0,1,2], 2:[0]}); F
QQ^3 >= QQ^1 >= QQ^1 >= 0
sage: F.support()
(0, 2)
sage: F.shift(-5).support()
(-5, -3)
"""
generators, filtration = self.presentation()
shifted = dict()
for d, indices in filtration.iteritems():
shifted[d + deg] = indices
return FilteredVectorSpace(generators, shifted, base_ring=self.base_ring())
def random_deformation(self, epsilon=None):
"""
        Return a random deformation.
INPUT:
- ``epsilon`` -- a number in the base ring.
OUTPUT:
A new filtered vector space where the generators of the
subspaces are moved by ``epsilon`` times a random vector.
EXAMPLES::
sage: gens = identity_matrix(3).rows()
sage: F = FilteredVectorSpace(gens, {0:[0,1,2], 2:[0]}); F
QQ^3 >= QQ^1 >= QQ^1 >= 0
sage: F.get_degree(2)
Vector space of degree 3 and dimension 1 over Rational Field
Basis matrix:
[1 0 0]
sage: G = F.random_deformation(1/50); G
QQ^3 >= QQ^1 >= QQ^1 >= 0
sage: G.get_degree(2)
Vector space of degree 3 and dimension 1 over Rational Field
Basis matrix:
[ 1 -15/304 0]
"""
from sage.modules.free_module_element import random_vector
R = self.base_ring()
if epsilon is None:
epsilon = R.one()
filtration = dict()
for deg, filt in self._filt[1:]:
generators = [v + epsilon * random_vector(R, self.rank())
for v in filt.echelonized_basis()]
filtration[deg] = generators
return FilteredVectorSpace(filtration, base_ring=R, check=True)
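# A minimal, hypothetical sketch (plain Python, not part of the Sage class
# above) of the index bookkeeping used in ``direct_sum``: the generators of
# ``other`` are appended after those of ``self``, so the filtration indices
# that refer to ``other`` must be shifted by the number of generators of
# ``self`` before the two index tuples are concatenated.
def _sketch_join_indices(self_indices, other_indices, num_self_gens):
    # shift ``other``'s indices into the combined generator list and merge
    return tuple(self_indices) + tuple(i + num_self_gens for i in other_indices)
# For example, _sketch_join_indices((0, 1), (0,), 2) returns (0, 1, 2).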
|
py | 1a47af85d51fe7efd5f76b6efb1e3931a1aa70ed | quantity = int(input("Type and Enter how many heights will be entered: "))
heightsSum = 0
for i in range(quantity):
heightsSum += float(input("Type and Enter the height in meters: "))
print("The average height of all", quantity, "people is", (heightsSum/quantity), "meters.")
##height = 0
##sumHeights = 0
##total = 0
##
##while height >= 0:
## height = float(input("Type and Enter the height in meters: "))
## if height >= 0:
## sumHeights += height
## total += 1
##
##print("The average height of all",total,"people is",(sumHeights/total),"meters.")
##chandler_stevens_final_prob3.txt
##Type and Enter how many heights will be entered: 4
##Type and Enter the height in meters: 1.88
##Type and Enter the height in meters: 2.03
##Type and Enter the height in meters: 2.28
##Type and Enter the height in meters: 1.8
##The average height of all 4 people is 1.9974999999999998 meters.
##Type and Enter the height in meters: 1.88
##Type and Enter the height in meters: 2.03
##Type and Enter the height in meters: 2.28
##Type and Enter the height in meters: 1.8
##Type and Enter the height in meters: -1
##The average height of all 4 people is 1.9974999999999998 meters.
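# A minimal sketch added for illustration (an assumption, not part of the
# original assignment above): the same average expressed as a reusable helper.
def average_height(heights):
    """Return the arithmetic mean of a non-empty list of heights in meters."""
    return sum(heights) / len(heights)
# e.g. average_height([1.88, 2.03, 2.28, 1.8]) is approximately 1.9975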
|
py | 1a47b140e96d10a3be7a3b8aec8cc45ce817de0a | """
eZmax API Definition (Full)
This API expose all the functionnalities for the eZmax and eZsign applications. # noqa: E501
The version of the OpenAPI document: 1.1.7
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import eZmaxApi
from eZmaxApi.model.ezsigntemplatesignature_request_compound import EzsigntemplatesignatureRequestCompound
globals()['EzsigntemplatesignatureRequestCompound'] = EzsigntemplatesignatureRequestCompound
from eZmaxApi.model.ezsigntemplatedocument_edit_ezsigntemplatesignatures_v1_request import EzsigntemplatedocumentEditEzsigntemplatesignaturesV1Request
class TestEzsigntemplatedocumentEditEzsigntemplatesignaturesV1Request(unittest.TestCase):
"""EzsigntemplatedocumentEditEzsigntemplatesignaturesV1Request unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testEzsigntemplatedocumentEditEzsigntemplatesignaturesV1Request(self):
"""Test EzsigntemplatedocumentEditEzsigntemplatesignaturesV1Request"""
# FIXME: construct object with mandatory attributes with example values
# model = EzsigntemplatedocumentEditEzsigntemplatesignaturesV1Request() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
py | 1a47b2cd640f65a9bdfd7575d6a653e707d32865 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Init imports for easy access."""
from explainable_ai_sdk.metadata.tf.v2.saved_model_metadata_builder import SavedModelMetadataBuilder
|
py | 1a47b5b37e3d2541419a23dbca0ceceb2ad0b39f | """ test to_datetime """
import calendar
from collections import deque
from datetime import (
datetime,
timedelta,
)
from decimal import Decimal
import locale
from dateutil.parser import parse
from dateutil.tz.tz import tzoffset
import numpy as np
import pytest
import pytz
from pandas._libs import tslib
from pandas._libs.tslibs import (
iNaT,
parsing,
)
from pandas.errors import (
OutOfBoundsDatetime,
OutOfBoundsTimedelta,
)
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_datetime64_ns_dtype
import pandas as pd
from pandas import (
DataFrame,
DatetimeIndex,
Index,
NaT,
Series,
Timestamp,
date_range,
isna,
to_datetime,
)
import pandas._testing as tm
from pandas.core.arrays import DatetimeArray
from pandas.core.tools import datetimes as tools
class TestTimeConversionFormats:
@pytest.mark.parametrize("readonly", [True, False])
def test_to_datetime_readonly(self, readonly):
# GH#34857
arr = np.array([], dtype=object)
if readonly:
arr.setflags(write=False)
result = to_datetime(arr)
expected = to_datetime([])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_format(self, cache):
values = ["1/1/2000", "1/2/2000", "1/3/2000"]
results1 = [Timestamp("20000101"), Timestamp("20000201"), Timestamp("20000301")]
results2 = [Timestamp("20000101"), Timestamp("20000102"), Timestamp("20000103")]
for vals, expecteds in [
(values, (Index(results1), Index(results2))),
(Series(values), (Series(results1), Series(results2))),
(values[0], (results1[0], results2[0])),
(values[1], (results1[1], results2[1])),
(values[2], (results1[2], results2[2])),
]:
for i, fmt in enumerate(["%d/%m/%Y", "%m/%d/%Y"]):
result = to_datetime(vals, format=fmt, cache=cache)
expected = expecteds[i]
if isinstance(expected, Series):
tm.assert_series_equal(result, Series(expected))
elif isinstance(expected, Timestamp):
assert result == expected
else:
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_format_YYYYMMDD(self, cache):
s = Series([19801222, 19801222] + [19810105] * 5)
expected = Series([Timestamp(x) for x in s.apply(str)])
result = to_datetime(s, format="%Y%m%d", cache=cache)
tm.assert_series_equal(result, expected)
result = to_datetime(s.apply(str), format="%Y%m%d", cache=cache)
tm.assert_series_equal(result, expected)
# with NaT
expected = Series(
[Timestamp("19801222"), Timestamp("19801222")] + [Timestamp("19810105")] * 5
)
expected[2] = np.nan
s[2] = np.nan
result = to_datetime(s, format="%Y%m%d", cache=cache)
tm.assert_series_equal(result, expected)
# string with NaT
s = s.apply(str)
s[2] = "nat"
result = to_datetime(s, format="%Y%m%d", cache=cache)
tm.assert_series_equal(result, expected)
# coercion
# GH 7930
s = Series([20121231, 20141231, 99991231])
result = pd.to_datetime(s, format="%Y%m%d", errors="ignore", cache=cache)
expected = Series(
[datetime(2012, 12, 31), datetime(2014, 12, 31), datetime(9999, 12, 31)],
dtype=object,
)
tm.assert_series_equal(result, expected)
result = pd.to_datetime(s, format="%Y%m%d", errors="coerce", cache=cache)
expected = Series(["20121231", "20141231", "NaT"], dtype="M8[ns]")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"input_s",
[
# Null values with Strings
["19801222", "20010112", None],
["19801222", "20010112", np.nan],
["19801222", "20010112", pd.NaT],
["19801222", "20010112", "NaT"],
# Null values with Integers
[19801222, 20010112, None],
[19801222, 20010112, np.nan],
[19801222, 20010112, pd.NaT],
[19801222, 20010112, "NaT"],
],
)
def test_to_datetime_format_YYYYMMDD_with_none(self, input_s):
# GH 30011
# format='%Y%m%d'
# with None
expected = Series([Timestamp("19801222"), Timestamp("20010112"), pd.NaT])
result = Series(pd.to_datetime(input_s, format="%Y%m%d"))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"input_s, expected",
[
# NaN before strings with invalid date values
[
Series(["19801222", np.nan, "20010012", "10019999"]),
Series([Timestamp("19801222"), np.nan, np.nan, np.nan]),
],
# NaN after strings with invalid date values
[
Series(["19801222", "20010012", "10019999", np.nan]),
Series([Timestamp("19801222"), np.nan, np.nan, np.nan]),
],
# NaN before integers with invalid date values
[
Series([20190813, np.nan, 20010012, 20019999]),
Series([Timestamp("20190813"), np.nan, np.nan, np.nan]),
],
# NaN after integers with invalid date values
[
Series([20190813, 20010012, np.nan, 20019999]),
Series([Timestamp("20190813"), np.nan, np.nan, np.nan]),
],
],
)
def test_to_datetime_format_YYYYMMDD_overflow(self, input_s, expected):
# GH 25512
# format='%Y%m%d', errors='coerce'
result = pd.to_datetime(input_s, format="%Y%m%d", errors="coerce")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_format_integer(self, cache):
# GH 10178
s = Series([2000, 2001, 2002])
expected = Series([Timestamp(x) for x in s.apply(str)])
result = to_datetime(s, format="%Y", cache=cache)
tm.assert_series_equal(result, expected)
s = Series([200001, 200105, 200206])
expected = Series([Timestamp(x[:4] + "-" + x[4:]) for x in s.apply(str)])
result = to_datetime(s, format="%Y%m", cache=cache)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"int_date, expected",
[
# valid date, length == 8
[20121030, datetime(2012, 10, 30)],
# short valid date, length == 6
[199934, datetime(1999, 3, 4)],
# long integer date partially parsed to datetime(2012,1,1), length > 8
[2012010101, 2012010101],
# invalid date partially parsed to datetime(2012,9,9), length == 8
[20129930, 20129930],
# short integer date partially parsed to datetime(2012,9,9), length < 8
[2012993, 2012993],
# short invalid date, length == 4
[2121, 2121],
],
)
def test_int_to_datetime_format_YYYYMMDD_typeerror(self, int_date, expected):
# GH 26583
result = to_datetime(int_date, format="%Y%m%d", errors="ignore")
assert result == expected
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_format_microsecond(self, cache):
# these are locale dependent
lang, _ = locale.getlocale()
month_abbr = calendar.month_abbr[4]
val = f"01-{month_abbr}-2011 00:00:01.978"
format = "%d-%b-%Y %H:%M:%S.%f"
result = to_datetime(val, format=format, cache=cache)
exp = datetime.strptime(val, format)
assert result == exp
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_format_time(self, cache):
data = [
["01/10/2010 15:20", "%m/%d/%Y %H:%M", Timestamp("2010-01-10 15:20")],
["01/10/2010 05:43", "%m/%d/%Y %I:%M", Timestamp("2010-01-10 05:43")],
[
"01/10/2010 13:56:01",
"%m/%d/%Y %H:%M:%S",
Timestamp("2010-01-10 13:56:01"),
] # ,
# ['01/10/2010 08:14 PM', '%m/%d/%Y %I:%M %p',
# Timestamp('2010-01-10 20:14')],
# ['01/10/2010 07:40 AM', '%m/%d/%Y %I:%M %p',
# Timestamp('2010-01-10 07:40')],
# ['01/10/2010 09:12:56 AM', '%m/%d/%Y %I:%M:%S %p',
# Timestamp('2010-01-10 09:12:56')]
]
for s, format, dt in data:
assert to_datetime(s, format=format, cache=cache) == dt
@td.skip_if_has_locale
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_with_non_exact(self, cache):
# GH 10834
# 8904
# exact kw
s = Series(
["19MAY11", "foobar19MAY11", "19MAY11:00:00:00", "19MAY11 00:00:00Z"]
)
result = to_datetime(s, format="%d%b%y", exact=False, cache=cache)
expected = to_datetime(
s.str.extract(r"(\d+\w+\d+)", expand=False), format="%d%b%y", cache=cache
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_parse_nanoseconds_with_formula(self, cache):
# GH8989
# truncating the nanoseconds when a format was provided
for v in [
"2012-01-01 09:00:00.000000001",
"2012-01-01 09:00:00.000001",
"2012-01-01 09:00:00.001",
"2012-01-01 09:00:00.001000",
"2012-01-01 09:00:00.001000000",
]:
expected = pd.to_datetime(v, cache=cache)
result = pd.to_datetime(v, format="%Y-%m-%d %H:%M:%S.%f", cache=cache)
assert result == expected
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_format_weeks(self, cache):
data = [
["2009324", "%Y%W%w", Timestamp("2009-08-13")],
["2013020", "%Y%U%w", Timestamp("2013-01-13")],
]
for s, format, dt in data:
assert to_datetime(s, format=format, cache=cache) == dt
@pytest.mark.parametrize(
"fmt,dates,expected_dates",
[
[
"%Y-%m-%d %H:%M:%S %Z",
["2010-01-01 12:00:00 UTC"] * 2,
[Timestamp("2010-01-01 12:00:00", tz="UTC")] * 2,
],
[
"%Y-%m-%d %H:%M:%S %Z",
[
"2010-01-01 12:00:00 UTC",
"2010-01-01 12:00:00 GMT",
"2010-01-01 12:00:00 US/Pacific",
],
[
Timestamp("2010-01-01 12:00:00", tz="UTC"),
Timestamp("2010-01-01 12:00:00", tz="GMT"),
Timestamp("2010-01-01 12:00:00", tz="US/Pacific"),
],
],
[
"%Y-%m-%d %H:%M:%S%z",
["2010-01-01 12:00:00+0100"] * 2,
[Timestamp("2010-01-01 12:00:00", tzinfo=pytz.FixedOffset(60))] * 2,
],
[
"%Y-%m-%d %H:%M:%S %z",
["2010-01-01 12:00:00 +0100"] * 2,
[Timestamp("2010-01-01 12:00:00", tzinfo=pytz.FixedOffset(60))] * 2,
],
[
"%Y-%m-%d %H:%M:%S %z",
["2010-01-01 12:00:00 +0100", "2010-01-01 12:00:00 -0100"],
[
Timestamp("2010-01-01 12:00:00", tzinfo=pytz.FixedOffset(60)),
Timestamp("2010-01-01 12:00:00", tzinfo=pytz.FixedOffset(-60)),
],
],
[
"%Y-%m-%d %H:%M:%S %z",
["2010-01-01 12:00:00 Z", "2010-01-01 12:00:00 Z"],
[
Timestamp(
"2010-01-01 12:00:00", tzinfo=pytz.FixedOffset(0)
), # pytz coerces to UTC
Timestamp("2010-01-01 12:00:00", tzinfo=pytz.FixedOffset(0)),
],
],
],
)
def test_to_datetime_parse_tzname_or_tzoffset(self, fmt, dates, expected_dates):
# GH 13486
result = pd.to_datetime(dates, format=fmt)
expected = Index(expected_dates)
tm.assert_equal(result, expected)
def test_to_datetime_parse_tzname_or_tzoffset_different_tz_to_utc(self):
# GH 32792
dates = [
"2010-01-01 12:00:00 +0100",
"2010-01-01 12:00:00 -0100",
"2010-01-01 12:00:00 +0300",
"2010-01-01 12:00:00 +0400",
]
expected_dates = [
"2010-01-01 11:00:00+00:00",
"2010-01-01 13:00:00+00:00",
"2010-01-01 09:00:00+00:00",
"2010-01-01 08:00:00+00:00",
]
fmt = "%Y-%m-%d %H:%M:%S %z"
result = pd.to_datetime(dates, format=fmt, utc=True)
expected = DatetimeIndex(expected_dates)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"offset", ["+0", "-1foo", "UTCbar", ":10", "+01:000:01", ""]
)
def test_to_datetime_parse_timezone_malformed(self, offset):
fmt = "%Y-%m-%d %H:%M:%S %z"
date = "2010-01-01 12:00:00 " + offset
msg = "does not match format|unconverted data remains"
with pytest.raises(ValueError, match=msg):
pd.to_datetime([date], format=fmt)
def test_to_datetime_parse_timezone_keeps_name(self):
# GH 21697
fmt = "%Y-%m-%d %H:%M:%S %z"
arg = Index(["2010-01-01 12:00:00 Z"], name="foo")
result = pd.to_datetime(arg, format=fmt)
expected = DatetimeIndex(["2010-01-01 12:00:00"], tz="UTC", name="foo")
tm.assert_index_equal(result, expected)
class TestToDatetime:
@pytest.mark.parametrize(
"s, _format, dt",
[
["2015-1-1", "%G-%V-%u", datetime(2014, 12, 29, 0, 0)],
["2015-1-4", "%G-%V-%u", datetime(2015, 1, 1, 0, 0)],
["2015-1-7", "%G-%V-%u", datetime(2015, 1, 4, 0, 0)],
],
)
def test_to_datetime_iso_week_year_format(self, s, _format, dt):
# See GH#16607
assert to_datetime(s, format=_format) == dt
@pytest.mark.parametrize(
"msg, s, _format",
[
[
"ISO week directive '%V' must be used with the ISO year directive "
"'%G' and a weekday directive '%A', '%a', '%w', or '%u'.",
"1999 50",
"%Y %V",
],
[
"ISO year directive '%G' must be used with the ISO week directive "
"'%V' and a weekday directive '%A', '%a', '%w', or '%u'.",
"1999 51",
"%G %V",
],
[
"ISO year directive '%G' must be used with the ISO week directive "
"'%V' and a weekday directive '%A', '%a', '%w', or '%u'.",
"1999 Monday",
"%G %A",
],
[
"ISO year directive '%G' must be used with the ISO week directive "
"'%V' and a weekday directive '%A', '%a', '%w', or '%u'.",
"1999 Mon",
"%G %a",
],
[
"ISO year directive '%G' must be used with the ISO week directive "
"'%V' and a weekday directive '%A', '%a', '%w', or '%u'.",
"1999 6",
"%G %w",
],
[
"ISO year directive '%G' must be used with the ISO week directive "
"'%V' and a weekday directive '%A', '%a', '%w', or '%u'.",
"1999 6",
"%G %u",
],
[
"ISO year directive '%G' must be used with the ISO week directive "
"'%V' and a weekday directive '%A', '%a', '%w', or '%u'.",
"2051",
"%G",
],
[
"Day of the year directive '%j' is not compatible with ISO year "
"directive '%G'. Use '%Y' instead.",
"1999 51 6 256",
"%G %V %u %j",
],
[
"ISO week directive '%V' is incompatible with the year directive "
"'%Y'. Use the ISO year '%G' instead.",
"1999 51 Sunday",
"%Y %V %A",
],
[
"ISO week directive '%V' is incompatible with the year directive "
"'%Y'. Use the ISO year '%G' instead.",
"1999 51 Sun",
"%Y %V %a",
],
[
"ISO week directive '%V' is incompatible with the year directive "
"'%Y'. Use the ISO year '%G' instead.",
"1999 51 1",
"%Y %V %w",
],
[
"ISO week directive '%V' is incompatible with the year directive "
"'%Y'. Use the ISO year '%G' instead.",
"1999 51 1",
"%Y %V %u",
],
[
"ISO week directive '%V' must be used with the ISO year directive "
"'%G' and a weekday directive '%A', '%a', '%w', or '%u'.",
"20",
"%V",
],
],
)
def test_error_iso_week_year(self, msg, s, _format):
# See GH#16607
# This test checks for errors thrown when giving the wrong format
# However, as discussed on PR#25541, overriding the locale
# causes a different error to be thrown due to the format being
            # locale specific, but the test data is in English.
# Therefore, the tests only run when locale is not overwritten,
# as a sort of solution to this problem.
if locale.getlocale() != ("zh_CN", "UTF-8") and locale.getlocale() != (
"it_IT",
"UTF-8",
):
with pytest.raises(ValueError, match=msg):
to_datetime(s, format=_format)
@pytest.mark.parametrize("tz", [None, "US/Central"])
def test_to_datetime_dtarr(self, tz):
# DatetimeArray
dti = date_range("1965-04-03", periods=19, freq="2W", tz=tz)
arr = DatetimeArray(dti)
result = to_datetime(arr)
assert result is arr
result = to_datetime(arr)
assert result is arr
def test_to_datetime_pydatetime(self):
actual = pd.to_datetime(datetime(2008, 1, 15))
assert actual == datetime(2008, 1, 15)
def test_to_datetime_YYYYMMDD(self):
actual = pd.to_datetime("20080115")
assert actual == datetime(2008, 1, 15)
def test_to_datetime_unparseable_ignore(self):
# unparseable
s = "Month 1, 1999"
assert pd.to_datetime(s, errors="ignore") == s
@td.skip_if_windows # `tm.set_timezone` does not work in windows
def test_to_datetime_now(self):
# See GH#18666
with tm.set_timezone("US/Eastern"):
npnow = np.datetime64("now").astype("datetime64[ns]")
pdnow = pd.to_datetime("now")
pdnow2 = pd.to_datetime(["now"])[0]
# These should all be equal with infinite perf; this gives
# a generous margin of 10 seconds
assert abs(pdnow.value - npnow.astype(np.int64)) < 1e10
assert abs(pdnow2.value - npnow.astype(np.int64)) < 1e10
assert pdnow.tzinfo is None
assert pdnow2.tzinfo is None
@td.skip_if_windows # `tm.set_timezone` does not work in windows
def test_to_datetime_today(self):
# See GH#18666
# Test with one timezone far ahead of UTC and another far behind, so
# one of these will _almost_ always be in a different day from UTC.
        # Unfortunately, between 12 and 1 AM Samoa time both of these
        # timezones _and_ UTC will all be in the same day, so this test will
        # not detect the regression introduced in #18666.
with tm.set_timezone("Pacific/Auckland"): # 12-13 hours ahead of UTC
nptoday = np.datetime64("today").astype("datetime64[ns]").astype(np.int64)
pdtoday = pd.to_datetime("today")
pdtoday2 = pd.to_datetime(["today"])[0]
tstoday = Timestamp("today")
tstoday2 = Timestamp.today()
# These should all be equal with infinite perf; this gives
# a generous margin of 10 seconds
assert abs(pdtoday.normalize().value - nptoday) < 1e10
assert abs(pdtoday2.normalize().value - nptoday) < 1e10
assert abs(pdtoday.value - tstoday.value) < 1e10
assert abs(pdtoday.value - tstoday2.value) < 1e10
assert pdtoday.tzinfo is None
assert pdtoday2.tzinfo is None
with tm.set_timezone("US/Samoa"): # 11 hours behind UTC
nptoday = np.datetime64("today").astype("datetime64[ns]").astype(np.int64)
pdtoday = pd.to_datetime("today")
pdtoday2 = pd.to_datetime(["today"])[0]
# These should all be equal with infinite perf; this gives
# a generous margin of 10 seconds
assert abs(pdtoday.normalize().value - nptoday) < 1e10
assert abs(pdtoday2.normalize().value - nptoday) < 1e10
assert pdtoday.tzinfo is None
assert pdtoday2.tzinfo is None
def test_to_datetime_today_now_unicode_bytes(self):
to_datetime(["now"])
to_datetime(["today"])
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_dt64s(self, cache):
in_bound_dts = [np.datetime64("2000-01-01"), np.datetime64("2000-01-02")]
for dt in in_bound_dts:
assert pd.to_datetime(dt, cache=cache) == Timestamp(dt)
@pytest.mark.parametrize(
"dt", [np.datetime64("1000-01-01"), np.datetime64("5000-01-02")]
)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_dt64s_out_of_bounds(self, cache, dt):
msg = f"Out of bounds nanosecond timestamp: {dt}"
with pytest.raises(OutOfBoundsDatetime, match=msg):
pd.to_datetime(dt, errors="raise")
with pytest.raises(OutOfBoundsDatetime, match=msg):
Timestamp(dt)
assert pd.to_datetime(dt, errors="coerce", cache=cache) is NaT
@pytest.mark.parametrize("cache", [True, False])
@pytest.mark.parametrize("unit", ["s", "D"])
def test_to_datetime_array_of_dt64s(self, cache, unit):
# https://github.com/pandas-dev/pandas/issues/31491
# Need at least 50 to ensure cache is used.
dts = [
np.datetime64("2000-01-01", unit),
np.datetime64("2000-01-02", unit),
] * 30
# Assuming all datetimes are in bounds, to_datetime() returns
# an array that is equal to Timestamp() parsing
tm.assert_index_equal(
pd.to_datetime(dts, cache=cache),
DatetimeIndex([Timestamp(x).asm8 for x in dts]),
)
# A list of datetimes where the last one is out of bounds
dts_with_oob = dts + [np.datetime64("9999-01-01")]
msg = "Out of bounds nanosecond timestamp: 9999-01-01 00:00:00"
with pytest.raises(OutOfBoundsDatetime, match=msg):
pd.to_datetime(dts_with_oob, errors="raise")
tm.assert_index_equal(
pd.to_datetime(dts_with_oob, errors="coerce", cache=cache),
DatetimeIndex(
[Timestamp(dts_with_oob[0]).asm8, Timestamp(dts_with_oob[1]).asm8] * 30
+ [pd.NaT],
),
)
# With errors='ignore', out of bounds datetime64s
# are converted to their .item(), which depending on the version of
# numpy is either a python datetime.datetime or datetime.date
tm.assert_index_equal(
pd.to_datetime(dts_with_oob, errors="ignore", cache=cache),
Index([dt.item() for dt in dts_with_oob]),
)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_tz(self, cache):
# xref 8260
# uniform returns a DatetimeIndex
arr = [
Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
Timestamp("2013-01-02 14:00:00-0800", tz="US/Pacific"),
]
result = pd.to_datetime(arr, cache=cache)
expected = DatetimeIndex(
["2013-01-01 13:00:00", "2013-01-02 14:00:00"], tz="US/Pacific"
)
tm.assert_index_equal(result, expected)
# mixed tzs will raise
arr = [
Timestamp("2013-01-01 13:00:00", tz="US/Pacific"),
Timestamp("2013-01-02 14:00:00", tz="US/Eastern"),
]
msg = (
"Tz-aware datetime.datetime cannot be "
"converted to datetime64 unless utc=True"
)
with pytest.raises(ValueError, match=msg):
pd.to_datetime(arr, cache=cache)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_different_offsets(self, cache):
# inspired by asv timeseries.ToDatetimeNONISO8601 benchmark
# see GH-26097 for more
ts_string_1 = "March 1, 2018 12:00:00+0400"
ts_string_2 = "March 1, 2018 12:00:00+0500"
arr = [ts_string_1] * 5 + [ts_string_2] * 5
expected = Index([parse(x) for x in arr])
result = pd.to_datetime(arr, cache=cache)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_tz_pytz(self, cache):
# see gh-8260
us_eastern = pytz.timezone("US/Eastern")
arr = np.array(
[
us_eastern.localize(
datetime(year=2000, month=1, day=1, hour=3, minute=0)
),
us_eastern.localize(
datetime(year=2000, month=6, day=1, hour=3, minute=0)
),
],
dtype=object,
)
result = pd.to_datetime(arr, utc=True, cache=cache)
expected = DatetimeIndex(
["2000-01-01 08:00:00+00:00", "2000-06-01 07:00:00+00:00"],
dtype="datetime64[ns, UTC]",
freq=None,
)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
@pytest.mark.parametrize(
"init_constructor, end_constructor, test_method",
[
(Index, DatetimeIndex, tm.assert_index_equal),
(list, DatetimeIndex, tm.assert_index_equal),
(np.array, DatetimeIndex, tm.assert_index_equal),
(Series, Series, tm.assert_series_equal),
],
)
def test_to_datetime_utc_true(
self, cache, init_constructor, end_constructor, test_method
):
# See gh-11934 & gh-6415
data = ["20100102 121314", "20100102 121315"]
expected_data = [
Timestamp("2010-01-02 12:13:14", tz="utc"),
Timestamp("2010-01-02 12:13:15", tz="utc"),
]
result = pd.to_datetime(
init_constructor(data), format="%Y%m%d %H%M%S", utc=True, cache=cache
)
expected = end_constructor(expected_data)
test_method(result, expected)
# Test scalar case as well
for scalar, expected in zip(data, expected_data):
result = pd.to_datetime(
scalar, format="%Y%m%d %H%M%S", utc=True, cache=cache
)
assert result == expected
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_utc_true_with_series_single_value(self, cache):
# GH 15760 UTC=True with Series
ts = 1.5e18
result = pd.to_datetime(Series([ts]), utc=True, cache=cache)
expected = Series([Timestamp(ts, tz="utc")])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_utc_true_with_series_tzaware_string(self, cache):
ts = "2013-01-01 00:00:00-01:00"
expected_ts = "2013-01-01 01:00:00"
data = Series([ts] * 3)
result = pd.to_datetime(data, utc=True, cache=cache)
expected = Series([Timestamp(expected_ts, tz="utc")] * 3)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
@pytest.mark.parametrize(
"date, dtype",
[
("2013-01-01 01:00:00", "datetime64[ns]"),
("2013-01-01 01:00:00", "datetime64[ns, UTC]"),
],
)
def test_to_datetime_utc_true_with_series_datetime_ns(self, cache, date, dtype):
expected = Series([Timestamp("2013-01-01 01:00:00", tz="UTC")])
result = pd.to_datetime(Series([date], dtype=dtype), utc=True, cache=cache)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
@td.skip_if_no("psycopg2")
def test_to_datetime_tz_psycopg2(self, cache):
# xref 8260
import psycopg2
# misc cases
tz1 = psycopg2.tz.FixedOffsetTimezone(offset=-300, name=None)
tz2 = psycopg2.tz.FixedOffsetTimezone(offset=-240, name=None)
arr = np.array(
[
datetime(2000, 1, 1, 3, 0, tzinfo=tz1),
datetime(2000, 6, 1, 3, 0, tzinfo=tz2),
],
dtype=object,
)
result = pd.to_datetime(arr, errors="coerce", utc=True, cache=cache)
expected = DatetimeIndex(
["2000-01-01 08:00:00+00:00", "2000-06-01 07:00:00+00:00"],
dtype="datetime64[ns, UTC]",
freq=None,
)
tm.assert_index_equal(result, expected)
# dtype coercion
i = DatetimeIndex(
["2000-01-01 08:00:00"],
tz=psycopg2.tz.FixedOffsetTimezone(offset=-300, name=None),
)
assert is_datetime64_ns_dtype(i)
# tz coercion
result = pd.to_datetime(i, errors="coerce", cache=cache)
tm.assert_index_equal(result, i)
result = pd.to_datetime(i, errors="coerce", utc=True, cache=cache)
expected = DatetimeIndex(["2000-01-01 13:00:00"], dtype="datetime64[ns, UTC]")
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_datetime_bool(self, cache):
# GH13176
msg = r"dtype bool cannot be converted to datetime64\[ns\]"
with pytest.raises(TypeError, match=msg):
to_datetime(False)
assert to_datetime(False, errors="coerce", cache=cache) is NaT
assert to_datetime(False, errors="ignore", cache=cache) is False
with pytest.raises(TypeError, match=msg):
to_datetime(True)
assert to_datetime(True, errors="coerce", cache=cache) is NaT
assert to_datetime(True, errors="ignore", cache=cache) is True
msg = f"{type(cache)} is not convertible to datetime"
with pytest.raises(TypeError, match=msg):
to_datetime([False, datetime.today()], cache=cache)
with pytest.raises(TypeError, match=msg):
to_datetime(["20130101", True], cache=cache)
tm.assert_index_equal(
to_datetime([0, False, NaT, 0.0], errors="coerce", cache=cache),
DatetimeIndex(
[to_datetime(0, cache=cache), NaT, NaT, to_datetime(0, cache=cache)]
),
)
def test_datetime_invalid_datatype(self):
# GH13176
msg = "is not convertible to datetime"
with pytest.raises(TypeError, match=msg):
pd.to_datetime(bool)
with pytest.raises(TypeError, match=msg):
pd.to_datetime(pd.to_datetime)
@pytest.mark.parametrize("value", ["a", "00:01:99"])
@pytest.mark.parametrize("infer", [True, False])
@pytest.mark.parametrize("format", [None, "H%:M%:S%"])
def test_datetime_invalid_scalar(self, value, format, infer):
# GH24763
res = pd.to_datetime(
value, errors="ignore", format=format, infer_datetime_format=infer
)
assert res == value
res = pd.to_datetime(
value, errors="coerce", format=format, infer_datetime_format=infer
)
assert res is pd.NaT
msg = (
"is a bad directive in format|"
"second must be in 0..59|"
"Given date string not likely a datetime"
)
with pytest.raises(ValueError, match=msg):
pd.to_datetime(
value, errors="raise", format=format, infer_datetime_format=infer
)
@pytest.mark.parametrize("value", ["3000/12/11 00:00:00"])
@pytest.mark.parametrize("infer", [True, False])
@pytest.mark.parametrize("format", [None, "H%:M%:S%"])
def test_datetime_outofbounds_scalar(self, value, format, infer):
# GH24763
res = pd.to_datetime(
value, errors="ignore", format=format, infer_datetime_format=infer
)
assert res == value
res = pd.to_datetime(
value, errors="coerce", format=format, infer_datetime_format=infer
)
assert res is pd.NaT
if format is not None:
msg = "is a bad directive in format|Out of bounds nanosecond timestamp"
with pytest.raises(ValueError, match=msg):
pd.to_datetime(
value, errors="raise", format=format, infer_datetime_format=infer
)
else:
msg = "Out of bounds nanosecond timestamp"
with pytest.raises(OutOfBoundsDatetime, match=msg):
pd.to_datetime(
value, errors="raise", format=format, infer_datetime_format=infer
)
@pytest.mark.parametrize("values", [["a"], ["00:01:99"], ["a", "b", "99:00:00"]])
@pytest.mark.parametrize("infer", [True, False])
@pytest.mark.parametrize("format", [None, "H%:M%:S%"])
def test_datetime_invalid_index(self, values, format, infer):
# GH24763
res = pd.to_datetime(
values, errors="ignore", format=format, infer_datetime_format=infer
)
tm.assert_index_equal(res, Index(values))
res = pd.to_datetime(
values, errors="coerce", format=format, infer_datetime_format=infer
)
tm.assert_index_equal(res, DatetimeIndex([pd.NaT] * len(values)))
msg = (
"is a bad directive in format|"
"Given date string not likely a datetime|"
"second must be in 0..59"
)
with pytest.raises(ValueError, match=msg):
pd.to_datetime(
values, errors="raise", format=format, infer_datetime_format=infer
)
@pytest.mark.parametrize("utc", [True, None])
@pytest.mark.parametrize("format", ["%Y%m%d %H:%M:%S", None])
@pytest.mark.parametrize("constructor", [list, tuple, np.array, Index, deque])
def test_to_datetime_cache(self, utc, format, constructor):
date = "20130101 00:00:00"
test_dates = [date] * 10 ** 5
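        # 10**5 identical strings is far above the minimum size at which
        # to_datetime considers caching, so the cache=True call exercises
        # the cached code path.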
data = constructor(test_dates)
result = pd.to_datetime(data, utc=utc, format=format, cache=True)
expected = pd.to_datetime(data, utc=utc, format=format, cache=False)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"listlike",
[
(deque([Timestamp("2010-06-02 09:30:00")] * 51)),
([Timestamp("2010-06-02 09:30:00")] * 51),
(tuple([Timestamp("2010-06-02 09:30:00")] * 51)),
],
)
def test_no_slicing_errors_in_should_cache(self, listlike):
# GH 29403
assert tools.should_cache(listlike) is True
def test_to_datetime_from_deque(self):
# GH 29403
result = pd.to_datetime(deque([Timestamp("2010-06-02 09:30:00")] * 51))
expected = pd.to_datetime([Timestamp("2010-06-02 09:30:00")] * 51)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("utc", [True, None])
@pytest.mark.parametrize("format", ["%Y%m%d %H:%M:%S", None])
def test_to_datetime_cache_series(self, utc, format):
date = "20130101 00:00:00"
test_dates = [date] * 10 ** 5
data = Series(test_dates)
result = pd.to_datetime(data, utc=utc, format=format, cache=True)
expected = pd.to_datetime(data, utc=utc, format=format, cache=False)
tm.assert_series_equal(result, expected)
def test_to_datetime_cache_scalar(self):
date = "20130101 00:00:00"
result = pd.to_datetime(date, cache=True)
expected = Timestamp("20130101 00:00:00")
assert result == expected
@pytest.mark.parametrize(
"date, format",
[
("2017-20", "%Y-%W"),
("20 Sunday", "%W %A"),
("20 Sun", "%W %a"),
("2017-21", "%Y-%U"),
("20 Sunday", "%U %A"),
("20 Sun", "%U %a"),
],
)
def test_week_without_day_and_calendar_year(self, date, format):
# GH16774
msg = "Cannot use '%W' or '%U' without day and year"
with pytest.raises(ValueError, match=msg):
pd.to_datetime(date, format=format)
def test_to_datetime_coerce(self):
# GH 26122
ts_strings = [
"March 1, 2018 12:00:00+0400",
"March 1, 2018 12:00:00+0500",
"20100240",
]
result = to_datetime(ts_strings, errors="coerce")
expected = Index(
[
datetime(2018, 3, 1, 12, 0, tzinfo=tzoffset(None, 14400)),
datetime(2018, 3, 1, 12, 0, tzinfo=tzoffset(None, 18000)),
NaT,
]
)
tm.assert_index_equal(result, expected)
def test_to_datetime_coerce_malformed(self):
# GH 28299
ts_strings = ["200622-12-31", "111111-24-11"]
result = to_datetime(ts_strings, errors="coerce")
expected = Index([NaT, NaT])
tm.assert_index_equal(result, expected)
def test_iso_8601_strings_with_same_offset(self):
# GH 17697, 11736
ts_str = "2015-11-18 15:30:00+05:30"
result = to_datetime(ts_str)
expected = Timestamp(ts_str)
assert result == expected
expected = DatetimeIndex([Timestamp(ts_str)] * 2)
result = to_datetime([ts_str] * 2)
tm.assert_index_equal(result, expected)
result = DatetimeIndex([ts_str] * 2)
tm.assert_index_equal(result, expected)
def test_iso_8601_strings_with_different_offsets(self):
# GH 17697, 11736
ts_strings = ["2015-11-18 15:30:00+05:30", "2015-11-18 16:30:00+06:30", NaT]
result = to_datetime(ts_strings)
expected = np.array(
[
datetime(2015, 11, 18, 15, 30, tzinfo=tzoffset(None, 19800)),
datetime(2015, 11, 18, 16, 30, tzinfo=tzoffset(None, 23400)),
NaT,
],
dtype=object,
)
# GH 21864
expected = Index(expected)
tm.assert_index_equal(result, expected)
result = to_datetime(ts_strings, utc=True)
expected = DatetimeIndex(
[Timestamp(2015, 11, 18, 10), Timestamp(2015, 11, 18, 10), NaT], tz="UTC"
)
tm.assert_index_equal(result, expected)
def test_iso8601_strings_mixed_offsets_with_naive(self):
# GH 24992
result = pd.to_datetime(
[
"2018-11-28T00:00:00",
"2018-11-28T00:00:00+12:00",
"2018-11-28T00:00:00",
"2018-11-28T00:00:00+06:00",
"2018-11-28T00:00:00",
],
utc=True,
)
expected = pd.to_datetime(
[
"2018-11-28T00:00:00",
"2018-11-27T12:00:00",
"2018-11-28T00:00:00",
"2018-11-27T18:00:00",
"2018-11-28T00:00:00",
],
utc=True,
)
tm.assert_index_equal(result, expected)
items = ["2018-11-28T00:00:00+12:00", "2018-11-28T00:00:00"]
result = pd.to_datetime(items, utc=True)
expected = pd.to_datetime(list(reversed(items)), utc=True)[::-1]
tm.assert_index_equal(result, expected)
def test_mixed_offsets_with_native_datetime_raises(self):
# GH 25978
s = Series(
[
"nan",
Timestamp("1990-01-01"),
"2015-03-14T16:15:14.123-08:00",
"2019-03-04T21:56:32.620-07:00",
None,
]
)
with pytest.raises(ValueError, match="Tz-aware datetime.datetime"):
pd.to_datetime(s)
def test_non_iso_strings_with_tz_offset(self):
result = to_datetime(["March 1, 2018 12:00:00+0400"] * 2)
expected = DatetimeIndex(
[datetime(2018, 3, 1, 12, tzinfo=pytz.FixedOffset(240))] * 2
)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"ts, expected",
[
(Timestamp("2018-01-01"), Timestamp("2018-01-01", tz="UTC")),
(
Timestamp("2018-01-01", tz="US/Pacific"),
Timestamp("2018-01-01 08:00", tz="UTC"),
),
],
)
def test_timestamp_utc_true(self, ts, expected):
# GH 24415
result = to_datetime(ts, utc=True)
assert result == expected
@pytest.mark.parametrize("dt_str", ["00010101", "13000101", "30000101", "99990101"])
def test_to_datetime_with_format_out_of_bounds(self, dt_str):
# GH 9107
msg = "Out of bounds nanosecond timestamp"
with pytest.raises(OutOfBoundsDatetime, match=msg):
pd.to_datetime(dt_str, format="%Y%m%d")
def test_to_datetime_utc(self):
arr = np.array([parse("2012-06-13T01:39:00Z")], dtype=object)
result = to_datetime(arr, utc=True)
assert result.tz is pytz.utc
def test_to_datetime_fixed_offset(self):
from pandas.tests.indexes.datetimes.test_timezones import fixed_off
dates = [
datetime(2000, 1, 1, tzinfo=fixed_off),
datetime(2000, 1, 2, tzinfo=fixed_off),
datetime(2000, 1, 3, tzinfo=fixed_off),
]
result = to_datetime(dates)
assert result.tz == fixed_off
class TestToDatetimeUnit:
@pytest.mark.parametrize("cache", [True, False])
def test_unit(self, cache):
# GH 11758
# test proper behavior with errors
msg = "cannot specify both format and unit"
with pytest.raises(ValueError, match=msg):
to_datetime([1], unit="D", format="%Y%m%d", cache=cache)
values = [11111111, 1, 1.0, iNaT, NaT, np.nan, "NaT", ""]
result = to_datetime(values, unit="D", errors="ignore", cache=cache)
expected = Index(
[
11111111,
Timestamp("1970-01-02"),
Timestamp("1970-01-02"),
NaT,
NaT,
NaT,
NaT,
NaT,
],
dtype=object,
)
tm.assert_index_equal(result, expected)
result = to_datetime(values, unit="D", errors="coerce", cache=cache)
expected = DatetimeIndex(
["NaT", "1970-01-02", "1970-01-02", "NaT", "NaT", "NaT", "NaT", "NaT"]
)
tm.assert_index_equal(result, expected)
msg = "cannot convert input 11111111 with the unit 'D'"
with pytest.raises(tslib.OutOfBoundsDatetime, match=msg):
to_datetime(values, unit="D", errors="raise", cache=cache)
values = [1420043460000, iNaT, NaT, np.nan, "NaT"]
result = to_datetime(values, errors="ignore", unit="s", cache=cache)
expected = Index([1420043460000, NaT, NaT, NaT, NaT], dtype=object)
tm.assert_index_equal(result, expected)
result = to_datetime(values, errors="coerce", unit="s", cache=cache)
expected = DatetimeIndex(["NaT", "NaT", "NaT", "NaT", "NaT"])
tm.assert_index_equal(result, expected)
msg = "cannot convert input 1420043460000 with the unit 's'"
with pytest.raises(tslib.OutOfBoundsDatetime, match=msg):
to_datetime(values, errors="raise", unit="s", cache=cache)
# if we have a string, then we raise a ValueError
# and NOT an OutOfBoundsDatetime
for val in ["foo", Timestamp("20130101")]:
try:
to_datetime(val, errors="raise", unit="s", cache=cache)
except tslib.OutOfBoundsDatetime as err:
raise AssertionError("incorrect exception raised") from err
except ValueError:
pass
@pytest.mark.parametrize("cache", [True, False])
def test_unit_consistency(self, cache):
# consistency of conversions
expected = Timestamp("1970-05-09 14:25:11")
result = pd.to_datetime(11111111, unit="s", errors="raise", cache=cache)
assert result == expected
assert isinstance(result, Timestamp)
result = pd.to_datetime(11111111, unit="s", errors="coerce", cache=cache)
assert result == expected
assert isinstance(result, Timestamp)
result = pd.to_datetime(11111111, unit="s", errors="ignore", cache=cache)
assert result == expected
assert isinstance(result, Timestamp)
@pytest.mark.parametrize("cache", [True, False])
def test_unit_with_numeric(self, cache):
# GH 13180
# coercions from floats/ints are ok
expected = DatetimeIndex(["2015-06-19 05:33:20", "2015-05-27 22:33:20"])
arr1 = [1.434692e18, 1.432766e18]
arr2 = np.array(arr1).astype("int64")
for errors in ["ignore", "raise", "coerce"]:
result = pd.to_datetime(arr1, errors=errors, cache=cache)
tm.assert_index_equal(result, expected)
result = pd.to_datetime(arr2, errors=errors, cache=cache)
tm.assert_index_equal(result, expected)
# but we want to make sure that we are coercing
        # if we have floats/strings
expected = DatetimeIndex(["NaT", "2015-06-19 05:33:20", "2015-05-27 22:33:20"])
arr = ["foo", 1.434692e18, 1.432766e18]
result = pd.to_datetime(arr, errors="coerce", cache=cache)
tm.assert_index_equal(result, expected)
expected = DatetimeIndex(
["2015-06-19 05:33:20", "2015-05-27 22:33:20", "NaT", "NaT"]
)
arr = [1.434692e18, 1.432766e18, "foo", "NaT"]
result = pd.to_datetime(arr, errors="coerce", cache=cache)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_unit_mixed(self, cache):
# mixed integers/datetimes
expected = DatetimeIndex(["2013-01-01", "NaT", "NaT"])
arr = [Timestamp("20130101"), 1.434692e18, 1.432766e18]
result = pd.to_datetime(arr, errors="coerce", cache=cache)
tm.assert_index_equal(result, expected)
msg = "mixed datetimes and integers in passed array"
with pytest.raises(ValueError, match=msg):
pd.to_datetime(arr, errors="raise", cache=cache)
expected = DatetimeIndex(["NaT", "NaT", "2013-01-01"])
arr = [1.434692e18, 1.432766e18, Timestamp("20130101")]
result = pd.to_datetime(arr, errors="coerce", cache=cache)
tm.assert_index_equal(result, expected)
with pytest.raises(ValueError, match=msg):
pd.to_datetime(arr, errors="raise", cache=cache)
@pytest.mark.parametrize("cache", [True, False])
def test_unit_rounding(self, cache):
# GH 14156 & GH 20445: argument will incur floating point errors
# but no premature rounding
result = pd.to_datetime(1434743731.8770001, unit="s", cache=cache)
expected = Timestamp("2015-06-19 19:55:31.877000192")
assert result == expected
@pytest.mark.parametrize("cache", [True, False])
def test_unit_ignore_keeps_name(self, cache):
# GH 21697
expected = Index([15e9] * 2, name="name")
result = pd.to_datetime(expected, errors="ignore", unit="s", cache=cache)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_dataframe(self, cache):
df = DataFrame(
{
"year": [2015, 2016],
"month": [2, 3],
"day": [4, 5],
"hour": [6, 7],
"minute": [58, 59],
"second": [10, 11],
"ms": [1, 1],
"us": [2, 2],
"ns": [3, 3],
}
)
result = to_datetime(
{"year": df["year"], "month": df["month"], "day": df["day"]}, cache=cache
)
expected = Series(
[Timestamp("20150204 00:00:00"), Timestamp("20160305 00:0:00")]
)
tm.assert_series_equal(result, expected)
# dict-like
result = to_datetime(df[["year", "month", "day"]].to_dict(), cache=cache)
tm.assert_series_equal(result, expected)
# dict but with constructable
df2 = df[["year", "month", "day"]].to_dict()
df2["month"] = 2
result = to_datetime(df2, cache=cache)
expected2 = Series(
[Timestamp("20150204 00:00:00"), Timestamp("20160205 00:0:00")]
)
tm.assert_series_equal(result, expected2)
# unit mappings
units = [
{
"year": "years",
"month": "months",
"day": "days",
"hour": "hours",
"minute": "minutes",
"second": "seconds",
},
{
"year": "year",
"month": "month",
"day": "day",
"hour": "hour",
"minute": "minute",
"second": "second",
},
]
for d in units:
result = to_datetime(df[list(d.keys())].rename(columns=d), cache=cache)
expected = Series(
[Timestamp("20150204 06:58:10"), Timestamp("20160305 07:59:11")]
)
tm.assert_series_equal(result, expected)
d = {
"year": "year",
"month": "month",
"day": "day",
"hour": "hour",
"minute": "minute",
"second": "second",
"ms": "ms",
"us": "us",
"ns": "ns",
}
result = to_datetime(df.rename(columns=d), cache=cache)
expected = Series(
[
Timestamp("20150204 06:58:10.001002003"),
Timestamp("20160305 07:59:11.001002003"),
]
)
tm.assert_series_equal(result, expected)
# coerce back to int
result = to_datetime(df.astype(str), cache=cache)
tm.assert_series_equal(result, expected)
# passing coerce
df2 = DataFrame({"year": [2015, 2016], "month": [2, 20], "day": [4, 5]})
msg = (
"cannot assemble the datetimes: time data .+ does not "
r"match format '%Y%m%d' \(match\)"
)
with pytest.raises(ValueError, match=msg):
to_datetime(df2, cache=cache)
result = to_datetime(df2, errors="coerce", cache=cache)
expected = Series([Timestamp("20150204 00:00:00"), NaT])
tm.assert_series_equal(result, expected)
# extra columns
msg = r"extra keys have been passed to the datetime assemblage: \[foo\]"
with pytest.raises(ValueError, match=msg):
df2 = df.copy()
df2["foo"] = 1
to_datetime(df2, cache=cache)
# not enough
msg = (
r"to assemble mappings requires at least that \[year, month, "
r"day\] be specified: \[.+\] is missing"
)
for c in [
["year"],
["year", "month"],
["year", "month", "second"],
["month", "day"],
["year", "day", "second"],
]:
with pytest.raises(ValueError, match=msg):
to_datetime(df[c], cache=cache)
# duplicates
msg = "cannot assemble with duplicate keys"
df2 = DataFrame({"year": [2015, 2016], "month": [2, 20], "day": [4, 5]})
df2.columns = ["year", "year", "day"]
with pytest.raises(ValueError, match=msg):
to_datetime(df2, cache=cache)
df2 = DataFrame(
{"year": [2015, 2016], "month": [2, 20], "day": [4, 5], "hour": [4, 5]}
)
df2.columns = ["year", "month", "day", "day"]
with pytest.raises(ValueError, match=msg):
to_datetime(df2, cache=cache)
@pytest.mark.parametrize("cache", [True, False])
def test_dataframe_dtypes(self, cache):
# #13451
df = DataFrame({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]})
# int16
result = to_datetime(df.astype("int16"), cache=cache)
expected = Series(
[Timestamp("20150204 00:00:00"), Timestamp("20160305 00:00:00")]
)
tm.assert_series_equal(result, expected)
# mixed dtypes
df["month"] = df["month"].astype("int8")
df["day"] = df["day"].astype("int8")
result = to_datetime(df, cache=cache)
expected = Series(
[Timestamp("20150204 00:00:00"), Timestamp("20160305 00:00:00")]
)
tm.assert_series_equal(result, expected)
# float
df = DataFrame({"year": [2000, 2001], "month": [1.5, 1], "day": [1, 1]})
msg = "cannot assemble the datetimes: unconverted data remains: 1"
with pytest.raises(ValueError, match=msg):
to_datetime(df, cache=cache)
def test_dataframe_utc_true(self):
# GH 23760
df = DataFrame({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]})
result = pd.to_datetime(df, utc=True)
expected = Series(
np.array(["2015-02-04", "2016-03-05"], dtype="datetime64[ns]")
).dt.tz_localize("UTC")
tm.assert_series_equal(result, expected)
def test_to_datetime_errors_ignore_utc_true(self):
# GH 23758
result = pd.to_datetime([1], unit="s", utc=True, errors="ignore")
expected = DatetimeIndex(["1970-01-01 00:00:01"], tz="UTC")
tm.assert_index_equal(result, expected)
# TODO: this is moved from tests.series.test_timeseries, may be redundant
def test_to_datetime_unit(self):
epoch = 1370745748
s = Series([epoch + t for t in range(20)])
result = to_datetime(s, unit="s")
expected = Series(
[Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)]
)
tm.assert_series_equal(result, expected)
s = Series([epoch + t for t in range(20)]).astype(float)
result = to_datetime(s, unit="s")
expected = Series(
[Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)]
)
tm.assert_series_equal(result, expected)
s = Series([epoch + t for t in range(20)] + [iNaT])
result = to_datetime(s, unit="s")
expected = Series(
[Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)]
+ [NaT]
)
tm.assert_series_equal(result, expected)
s = Series([epoch + t for t in range(20)] + [iNaT]).astype(float)
result = to_datetime(s, unit="s")
expected = Series(
[Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)]
+ [NaT]
)
tm.assert_series_equal(result, expected)
# GH13834
s = Series([epoch + t for t in np.arange(0, 2, 0.25)] + [iNaT]).astype(float)
result = to_datetime(s, unit="s")
expected = Series(
[
Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t)
for t in np.arange(0, 2, 0.25)
]
+ [NaT]
)
# GH20455 argument will incur floating point errors but no premature rounding
result = result.round("ms")
tm.assert_series_equal(result, expected)
s = pd.concat(
[Series([epoch + t for t in range(20)]).astype(float), Series([np.nan])],
ignore_index=True,
)
result = to_datetime(s, unit="s")
expected = Series(
[Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)]
+ [NaT]
)
tm.assert_series_equal(result, expected)
result = to_datetime([1, 2, "NaT", pd.NaT, np.nan], unit="D")
expected = DatetimeIndex(
[Timestamp("1970-01-02"), Timestamp("1970-01-03")] + ["NaT"] * 3
)
tm.assert_index_equal(result, expected)
msg = "non convertible value foo with the unit 'D'"
with pytest.raises(ValueError, match=msg):
to_datetime([1, 2, "foo"], unit="D")
msg = "cannot convert input 111111111 with the unit 'D'"
with pytest.raises(OutOfBoundsDatetime, match=msg):
to_datetime([1, 2, 111111111], unit="D")
        # with errors="coerce" the valid entries are converted and the rest become NaT
expected = DatetimeIndex(
[Timestamp("1970-01-02"), Timestamp("1970-01-03")] + ["NaT"] * 1
)
result = to_datetime([1, 2, "foo"], unit="D", errors="coerce")
tm.assert_index_equal(result, expected)
result = to_datetime([1, 2, 111111111], unit="D", errors="coerce")
tm.assert_index_equal(result, expected)
class TestToDatetimeMisc:
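    # Assorted to_datetime tests: bounds checking, ISO 8601 strings,
    # NaT handling and dayfirst parsing.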
def test_to_datetime_barely_out_of_bounds(self):
# GH#19529
# GH#19382 close enough to bounds that dropping nanos would result
# in an in-bounds datetime
arr = np.array(["2262-04-11 23:47:16.854775808"], dtype=object)
msg = "Out of bounds nanosecond timestamp"
with pytest.raises(OutOfBoundsDatetime, match=msg):
to_datetime(arr)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_iso8601(self, cache):
result = to_datetime(["2012-01-01 00:00:00"], cache=cache)
exp = Timestamp("2012-01-01 00:00:00")
assert result[0] == exp
result = to_datetime(["20121001"], cache=cache) # bad iso 8601
exp = Timestamp("2012-10-01")
assert result[0] == exp
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_default(self, cache):
rs = to_datetime("2001", cache=cache)
xp = datetime(2001, 1, 1)
assert rs == xp
# dayfirst is essentially broken
# to_datetime('01-13-2012', dayfirst=True)
# pytest.raises(ValueError, to_datetime('01-13-2012',
# dayfirst=True))
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_on_datetime64_series(self, cache):
# #2699
s = Series(date_range("1/1/2000", periods=10))
result = to_datetime(s, cache=cache)
assert result[0] == s[0]
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_with_space_in_series(self, cache):
# GH 6428
s = Series(["10/18/2006", "10/18/2008", " "])
msg = r"(\(')?String does not contain a date(:', ' '\))?"
with pytest.raises(ValueError, match=msg):
to_datetime(s, errors="raise", cache=cache)
result_coerce = to_datetime(s, errors="coerce", cache=cache)
expected_coerce = Series([datetime(2006, 10, 18), datetime(2008, 10, 18), NaT])
tm.assert_series_equal(result_coerce, expected_coerce)
result_ignore = to_datetime(s, errors="ignore", cache=cache)
tm.assert_series_equal(result_ignore, s)
@td.skip_if_has_locale
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_with_apply(self, cache):
# this is only locale tested with US/None locales
# GH 5195
# with a format and coerce a single item to_datetime fails
td = Series(["May 04", "Jun 02", "Dec 11"], index=[1, 2, 3])
expected = pd.to_datetime(td, format="%b %y", cache=cache)
result = td.apply(pd.to_datetime, format="%b %y", cache=cache)
tm.assert_series_equal(result, expected)
td = Series(["May 04", "Jun 02", ""], index=[1, 2, 3])
msg = r"time data '' does not match format '%b %y' \(match\)"
with pytest.raises(ValueError, match=msg):
pd.to_datetime(td, format="%b %y", errors="raise", cache=cache)
with pytest.raises(ValueError, match=msg):
td.apply(pd.to_datetime, format="%b %y", errors="raise", cache=cache)
expected = pd.to_datetime(td, format="%b %y", errors="coerce", cache=cache)
result = td.apply(
lambda x: pd.to_datetime(x, format="%b %y", errors="coerce", cache=cache)
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_types(self, cache):
# empty string
result = to_datetime("", cache=cache)
assert result is NaT
result = to_datetime(["", ""], cache=cache)
assert isna(result).all()
# ints
result = Timestamp(0)
expected = to_datetime(0, cache=cache)
assert result == expected
# GH 3888 (strings)
expected = to_datetime(["2012"], cache=cache)[0]
result = to_datetime("2012", cache=cache)
assert result == expected
# array = ['2012','20120101','20120101 12:01:01']
array = ["20120101", "20120101 12:01:01"]
expected = list(to_datetime(array, cache=cache))
result = [Timestamp(date_str) for date_str in array]
tm.assert_almost_equal(result, expected)
# currently fails ###
# result = Timestamp('2012')
# expected = to_datetime('2012')
# assert result == expected
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_unprocessable_input(self, cache):
# GH 4928
# GH 21864
result = to_datetime([1, "1"], errors="ignore", cache=cache)
expected = Index(np.array([1, "1"], dtype="O"))
tm.assert_equal(result, expected)
msg = "invalid string coercion to datetime"
with pytest.raises(TypeError, match=msg):
to_datetime([1, "1"], errors="raise", cache=cache)
def test_to_datetime_other_datetime64_units(self):
# 5/25/2012
scalar = np.int64(1337904000000000).view("M8[us]")
as_obj = scalar.astype("O")
index = DatetimeIndex([scalar])
assert index[0] == scalar.astype("O")
value = Timestamp(scalar)
assert value == as_obj
def test_to_datetime_list_of_integers(self):
rng = date_range("1/1/2000", periods=20)
rng = DatetimeIndex(rng.values)
ints = list(rng.asi8)
result = DatetimeIndex(ints)
tm.assert_index_equal(rng, result)
def test_to_datetime_overflow(self):
# gh-17637
# we are overflowing Timedelta range here
msg = "|".join(
[
"Python int too large to convert to C long",
"long too big to convert",
"int too big to convert",
]
)
with pytest.raises(OutOfBoundsTimedelta, match=msg):
date_range(start="1/1/1700", freq="B", periods=100000)
@pytest.mark.parametrize("cache", [True, False])
def test_string_na_nat_conversion(self, cache):
# GH #999, #858
strings = np.array(
["1/1/2000", "1/2/2000", np.nan, "1/4/2000, 12:34:56"], dtype=object
)
expected = np.empty(4, dtype="M8[ns]")
for i, val in enumerate(strings):
if isna(val):
expected[i] = iNaT
else:
expected[i] = parse(val)
result = tslib.array_to_datetime(strings)[0]
tm.assert_almost_equal(result, expected)
result2 = to_datetime(strings, cache=cache)
assert isinstance(result2, DatetimeIndex)
tm.assert_numpy_array_equal(result, result2.values)
malformed = np.array(["1/100/2000", np.nan], dtype=object)
# GH 10636, default is now 'raise'
msg = r"Unknown string format:|day is out of range for month"
with pytest.raises(ValueError, match=msg):
to_datetime(malformed, errors="raise", cache=cache)
result = to_datetime(malformed, errors="ignore", cache=cache)
# GH 21864
expected = Index(malformed)
tm.assert_index_equal(result, expected)
with pytest.raises(ValueError, match=msg):
to_datetime(malformed, errors="raise", cache=cache)
idx = ["a", "b", "c", "d", "e"]
series = Series(
["1/1/2000", np.nan, "1/3/2000", np.nan, "1/5/2000"], index=idx, name="foo"
)
dseries = Series(
[
to_datetime("1/1/2000", cache=cache),
np.nan,
to_datetime("1/3/2000", cache=cache),
np.nan,
to_datetime("1/5/2000", cache=cache),
],
index=idx,
name="foo",
)
result = to_datetime(series, cache=cache)
dresult = to_datetime(dseries, cache=cache)
expected = Series(np.empty(5, dtype="M8[ns]"), index=idx)
for i in range(5):
x = series[i]
if isna(x):
expected[i] = pd.NaT
else:
expected[i] = to_datetime(x, cache=cache)
tm.assert_series_equal(result, expected, check_names=False)
assert result.name == "foo"
tm.assert_series_equal(dresult, expected, check_names=False)
assert dresult.name == "foo"
@pytest.mark.parametrize(
"dtype",
[
"datetime64[h]",
"datetime64[m]",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
@pytest.mark.parametrize("cache", [True, False])
def test_dti_constructor_numpy_timeunits(self, cache, dtype):
# GH 9114
base = pd.to_datetime(
["2000-01-01T00:00", "2000-01-02T00:00", "NaT"], cache=cache
)
values = base.values.astype(dtype)
tm.assert_index_equal(DatetimeIndex(values), base)
tm.assert_index_equal(to_datetime(values, cache=cache), base)
@pytest.mark.parametrize("cache", [True, False])
def test_dayfirst(self, cache):
# GH 5917
arr = ["10/02/2014", "11/02/2014", "12/02/2014"]
expected = DatetimeIndex(
[datetime(2014, 2, 10), datetime(2014, 2, 11), datetime(2014, 2, 12)]
)
idx1 = DatetimeIndex(arr, dayfirst=True)
idx2 = DatetimeIndex(np.array(arr), dayfirst=True)
idx3 = to_datetime(arr, dayfirst=True, cache=cache)
idx4 = to_datetime(np.array(arr), dayfirst=True, cache=cache)
idx5 = DatetimeIndex(Index(arr), dayfirst=True)
idx6 = DatetimeIndex(Series(arr), dayfirst=True)
tm.assert_index_equal(expected, idx1)
tm.assert_index_equal(expected, idx2)
tm.assert_index_equal(expected, idx3)
tm.assert_index_equal(expected, idx4)
tm.assert_index_equal(expected, idx5)
tm.assert_index_equal(expected, idx6)
@pytest.mark.parametrize("klass", [DatetimeIndex, DatetimeArray])
def test_to_datetime_dta_tz(self, klass):
# GH#27733
dti = date_range("2015-04-05", periods=3).rename("foo")
expected = dti.tz_localize("UTC")
obj = klass(dti)
expected = klass(expected)
result = to_datetime(obj, utc=True)
tm.assert_equal(result, expected)
class TestGuessDatetimeFormat:
@td.skip_if_not_us_locale
def test_guess_datetime_format_for_array(self):
expected_format = "%Y-%m-%d %H:%M:%S.%f"
dt_string = datetime(2011, 12, 30, 0, 0, 0).strftime(expected_format)
test_arrays = [
np.array([dt_string, dt_string, dt_string], dtype="O"),
np.array([np.nan, np.nan, dt_string], dtype="O"),
np.array([dt_string, "random_string"], dtype="O"),
]
for test_array in test_arrays:
assert tools._guess_datetime_format_for_array(test_array) == expected_format
format_for_string_of_nans = tools._guess_datetime_format_for_array(
np.array([np.nan, np.nan, np.nan], dtype="O")
)
assert format_for_string_of_nans is None
class TestToDatetimeInferFormat:
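    # Checks that infer_datetime_format=True gives the same results as
    # explicit or default parsing.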
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_infer_datetime_format_consistent_format(self, cache):
s = Series(pd.date_range("20000101", periods=50, freq="H"))
test_formats = ["%m-%d-%Y", "%m/%d/%Y %H:%M:%S.%f", "%Y-%m-%dT%H:%M:%S.%f"]
for test_format in test_formats:
s_as_dt_strings = s.apply(lambda x: x.strftime(test_format))
with_format = pd.to_datetime(
s_as_dt_strings, format=test_format, cache=cache
)
no_infer = pd.to_datetime(
s_as_dt_strings, infer_datetime_format=False, cache=cache
)
yes_infer = pd.to_datetime(
s_as_dt_strings, infer_datetime_format=True, cache=cache
)
# Whether the format is explicitly passed, it is inferred, or
# it is not inferred, the results should all be the same
tm.assert_series_equal(with_format, no_infer)
tm.assert_series_equal(no_infer, yes_infer)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_infer_datetime_format_inconsistent_format(self, cache):
s = Series(
np.array(
["01/01/2011 00:00:00", "01-02-2011 00:00:00", "2011-01-03T00:00:00"]
)
)
# When the format is inconsistent, infer_datetime_format should just
# fallback to the default parsing
tm.assert_series_equal(
pd.to_datetime(s, infer_datetime_format=False, cache=cache),
pd.to_datetime(s, infer_datetime_format=True, cache=cache),
)
s = Series(np.array(["Jan/01/2011", "Feb/01/2011", "Mar/01/2011"]))
tm.assert_series_equal(
pd.to_datetime(s, infer_datetime_format=False, cache=cache),
pd.to_datetime(s, infer_datetime_format=True, cache=cache),
)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_infer_datetime_format_series_with_nans(self, cache):
s = Series(
np.array(["01/01/2011 00:00:00", np.nan, "01/03/2011 00:00:00", np.nan])
)
tm.assert_series_equal(
pd.to_datetime(s, infer_datetime_format=False, cache=cache),
pd.to_datetime(s, infer_datetime_format=True, cache=cache),
)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_infer_datetime_format_series_start_with_nans(self, cache):
s = Series(
np.array(
[
np.nan,
np.nan,
"01/01/2011 00:00:00",
"01/02/2011 00:00:00",
"01/03/2011 00:00:00",
]
)
)
tm.assert_series_equal(
pd.to_datetime(s, infer_datetime_format=False, cache=cache),
pd.to_datetime(s, infer_datetime_format=True, cache=cache),
)
@pytest.mark.parametrize(
"tz_name, offset", [("UTC", 0), ("UTC-3", 180), ("UTC+3", -180)]
)
def test_infer_datetime_format_tz_name(self, tz_name, offset):
# GH 33133
s = Series([f"2019-02-02 08:07:13 {tz_name}"])
result = to_datetime(s, infer_datetime_format=True)
expected = Series(
[Timestamp("2019-02-02 08:07:13").tz_localize(pytz.FixedOffset(offset))]
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_iso8601_noleading_0s(self, cache):
# GH 11871
s = Series(["2014-1-1", "2014-2-2", "2015-3-3"])
expected = Series(
[
Timestamp("2014-01-01"),
Timestamp("2014-02-02"),
Timestamp("2015-03-03"),
]
)
tm.assert_series_equal(pd.to_datetime(s, cache=cache), expected)
tm.assert_series_equal(
pd.to_datetime(s, format="%Y-%m-%d", cache=cache), expected
)
class TestDaysInMonth:
# tests for issue #10154
@pytest.mark.parametrize("cache", [True, False])
def test_day_not_in_month_coerce(self, cache):
assert isna(to_datetime("2015-02-29", errors="coerce", cache=cache))
assert isna(
to_datetime("2015-02-29", format="%Y-%m-%d", errors="coerce", cache=cache)
)
assert isna(
to_datetime("2015-02-32", format="%Y-%m-%d", errors="coerce", cache=cache)
)
assert isna(
to_datetime("2015-04-31", format="%Y-%m-%d", errors="coerce", cache=cache)
)
@pytest.mark.parametrize("cache", [True, False])
def test_day_not_in_month_raise(self, cache):
msg = "day is out of range for month"
with pytest.raises(ValueError, match=msg):
to_datetime("2015-02-29", errors="raise", cache=cache)
msg = "time data 2015-02-29 doesn't match format specified"
with pytest.raises(ValueError, match=msg):
to_datetime("2015-02-29", errors="raise", format="%Y-%m-%d", cache=cache)
msg = "time data 2015-02-32 doesn't match format specified"
with pytest.raises(ValueError, match=msg):
to_datetime("2015-02-32", errors="raise", format="%Y-%m-%d", cache=cache)
msg = "time data 2015-04-31 doesn't match format specified"
with pytest.raises(ValueError, match=msg):
to_datetime("2015-04-31", errors="raise", format="%Y-%m-%d", cache=cache)
@pytest.mark.parametrize("cache", [True, False])
def test_day_not_in_month_ignore(self, cache):
assert to_datetime("2015-02-29", errors="ignore", cache=cache) == "2015-02-29"
assert (
to_datetime("2015-02-29", errors="ignore", format="%Y-%m-%d", cache=cache)
== "2015-02-29"
)
assert (
to_datetime("2015-02-32", errors="ignore", format="%Y-%m-%d", cache=cache)
== "2015-02-32"
)
assert (
to_datetime("2015-04-31", errors="ignore", format="%Y-%m-%d", cache=cache)
== "2015-04-31"
)
class TestDatetimeParsingWrappers:
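    # Checks that parse_time_string, to_datetime, Timestamp and
    # DatetimeIndex agree on a variety of date-string formats.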
@pytest.mark.parametrize(
"date_str,expected",
list(
{
"2011-01-01": datetime(2011, 1, 1),
"2Q2005": datetime(2005, 4, 1),
"2Q05": datetime(2005, 4, 1),
"2005Q1": datetime(2005, 1, 1),
"05Q1": datetime(2005, 1, 1),
"2011Q3": datetime(2011, 7, 1),
"11Q3": datetime(2011, 7, 1),
"3Q2011": datetime(2011, 7, 1),
"3Q11": datetime(2011, 7, 1),
# quarterly without space
"2000Q4": datetime(2000, 10, 1),
"00Q4": datetime(2000, 10, 1),
"4Q2000": datetime(2000, 10, 1),
"4Q00": datetime(2000, 10, 1),
"2000q4": datetime(2000, 10, 1),
"2000-Q4": datetime(2000, 10, 1),
"00-Q4": datetime(2000, 10, 1),
"4Q-2000": datetime(2000, 10, 1),
"4Q-00": datetime(2000, 10, 1),
"00q4": datetime(2000, 10, 1),
"2005": datetime(2005, 1, 1),
"2005-11": datetime(2005, 11, 1),
"2005 11": datetime(2005, 11, 1),
"11-2005": datetime(2005, 11, 1),
"11 2005": datetime(2005, 11, 1),
"200511": datetime(2020, 5, 11),
"20051109": datetime(2005, 11, 9),
"20051109 10:15": datetime(2005, 11, 9, 10, 15),
"20051109 08H": datetime(2005, 11, 9, 8, 0),
"2005-11-09 10:15": datetime(2005, 11, 9, 10, 15),
"2005-11-09 08H": datetime(2005, 11, 9, 8, 0),
"2005/11/09 10:15": datetime(2005, 11, 9, 10, 15),
"2005/11/09 08H": datetime(2005, 11, 9, 8, 0),
"Thu Sep 25 10:36:28 2003": datetime(2003, 9, 25, 10, 36, 28),
"Thu Sep 25 2003": datetime(2003, 9, 25),
"Sep 25 2003": datetime(2003, 9, 25),
"January 1 2014": datetime(2014, 1, 1),
                # GH 10537
"2014-06": datetime(2014, 6, 1),
"06-2014": datetime(2014, 6, 1),
"2014-6": datetime(2014, 6, 1),
"6-2014": datetime(2014, 6, 1),
"20010101 12": datetime(2001, 1, 1, 12),
"20010101 1234": datetime(2001, 1, 1, 12, 34),
"20010101 123456": datetime(2001, 1, 1, 12, 34, 56),
}.items()
),
)
@pytest.mark.parametrize("cache", [True, False])
def test_parsers(self, date_str, expected, cache):
# dateutil >= 2.5.0 defaults to yearfirst=True
# https://github.com/dateutil/dateutil/issues/217
yearfirst = True
result1, _ = parsing.parse_time_string(date_str, yearfirst=yearfirst)
result2 = to_datetime(date_str, yearfirst=yearfirst)
result3 = to_datetime([date_str], yearfirst=yearfirst)
# result5 is used below
result4 = to_datetime(
np.array([date_str], dtype=object), yearfirst=yearfirst, cache=cache
)
result6 = DatetimeIndex([date_str], yearfirst=yearfirst)
# result7 is used below
result8 = DatetimeIndex(Index([date_str]), yearfirst=yearfirst)
result9 = DatetimeIndex(Series([date_str]), yearfirst=yearfirst)
for res in [result1, result2]:
assert res == expected
for res in [result3, result4, result6, result8, result9]:
exp = DatetimeIndex([Timestamp(expected)])
tm.assert_index_equal(res, exp)
        # these really need to have yearfirst, but we don't support it
if not yearfirst:
result5 = Timestamp(date_str)
assert result5 == expected
result7 = date_range(date_str, freq="S", periods=1, yearfirst=yearfirst)
assert result7 == expected
@pytest.mark.parametrize("cache", [True, False])
def test_na_values_with_cache(
self, cache, unique_nulls_fixture, unique_nulls_fixture2
):
# GH22305
expected = Index([NaT, NaT], dtype="datetime64[ns]")
result = to_datetime([unique_nulls_fixture, unique_nulls_fixture2], cache=cache)
tm.assert_index_equal(result, expected)
def test_parsers_nat(self):
        # Test that each of several string-accepting methods returns pd.NaT
result1, _ = parsing.parse_time_string("NaT")
result2 = to_datetime("NaT")
result3 = Timestamp("NaT")
result4 = DatetimeIndex(["NaT"])[0]
assert result1 is NaT
assert result2 is NaT
assert result3 is NaT
assert result4 is NaT
@pytest.mark.parametrize("cache", [True, False])
def test_parsers_dayfirst_yearfirst(self, cache):
# OK
# 2.5.1 10-11-12 [dayfirst=0, yearfirst=0] -> 2012-10-11 00:00:00
# 2.5.2 10-11-12 [dayfirst=0, yearfirst=1] -> 2012-10-11 00:00:00
# 2.5.3 10-11-12 [dayfirst=0, yearfirst=0] -> 2012-10-11 00:00:00
# OK
# 2.5.1 10-11-12 [dayfirst=0, yearfirst=1] -> 2010-11-12 00:00:00
# 2.5.2 10-11-12 [dayfirst=0, yearfirst=1] -> 2010-11-12 00:00:00
# 2.5.3 10-11-12 [dayfirst=0, yearfirst=1] -> 2010-11-12 00:00:00
# bug fix in 2.5.2
# 2.5.1 10-11-12 [dayfirst=1, yearfirst=1] -> 2010-11-12 00:00:00
# 2.5.2 10-11-12 [dayfirst=1, yearfirst=1] -> 2010-12-11 00:00:00
# 2.5.3 10-11-12 [dayfirst=1, yearfirst=1] -> 2010-12-11 00:00:00
# OK
# 2.5.1 10-11-12 [dayfirst=1, yearfirst=0] -> 2012-11-10 00:00:00
# 2.5.2 10-11-12 [dayfirst=1, yearfirst=0] -> 2012-11-10 00:00:00
# 2.5.3 10-11-12 [dayfirst=1, yearfirst=0] -> 2012-11-10 00:00:00
# OK
# 2.5.1 20/12/21 [dayfirst=0, yearfirst=0] -> 2021-12-20 00:00:00
# 2.5.2 20/12/21 [dayfirst=0, yearfirst=0] -> 2021-12-20 00:00:00
# 2.5.3 20/12/21 [dayfirst=0, yearfirst=0] -> 2021-12-20 00:00:00
# OK
# 2.5.1 20/12/21 [dayfirst=0, yearfirst=1] -> 2020-12-21 00:00:00
# 2.5.2 20/12/21 [dayfirst=0, yearfirst=1] -> 2020-12-21 00:00:00
# 2.5.3 20/12/21 [dayfirst=0, yearfirst=1] -> 2020-12-21 00:00:00
# revert of bug in 2.5.2
# 2.5.1 20/12/21 [dayfirst=1, yearfirst=1] -> 2020-12-21 00:00:00
# 2.5.2 20/12/21 [dayfirst=1, yearfirst=1] -> month must be in 1..12
# 2.5.3 20/12/21 [dayfirst=1, yearfirst=1] -> 2020-12-21 00:00:00
# OK
# 2.5.1 20/12/21 [dayfirst=1, yearfirst=0] -> 2021-12-20 00:00:00
# 2.5.2 20/12/21 [dayfirst=1, yearfirst=0] -> 2021-12-20 00:00:00
# 2.5.3 20/12/21 [dayfirst=1, yearfirst=0] -> 2021-12-20 00:00:00
# str : dayfirst, yearfirst, expected
cases = {
"10-11-12": [
(False, False, datetime(2012, 10, 11)),
(True, False, datetime(2012, 11, 10)),
(False, True, datetime(2010, 11, 12)),
(True, True, datetime(2010, 12, 11)),
],
"20/12/21": [
(False, False, datetime(2021, 12, 20)),
(True, False, datetime(2021, 12, 20)),
(False, True, datetime(2020, 12, 21)),
(True, True, datetime(2020, 12, 21)),
],
}
for date_str, values in cases.items():
for dayfirst, yearfirst, expected in values:
# compare with dateutil result
dateutil_result = parse(
date_str, dayfirst=dayfirst, yearfirst=yearfirst
)
assert dateutil_result == expected
result1, _ = parsing.parse_time_string(
date_str, dayfirst=dayfirst, yearfirst=yearfirst
)
# we don't support dayfirst/yearfirst here:
if not dayfirst and not yearfirst:
result2 = Timestamp(date_str)
assert result2 == expected
result3 = to_datetime(
date_str, dayfirst=dayfirst, yearfirst=yearfirst, cache=cache
)
result4 = DatetimeIndex(
[date_str], dayfirst=dayfirst, yearfirst=yearfirst
)[0]
assert result1 == expected
assert result3 == expected
assert result4 == expected
@pytest.mark.parametrize("cache", [True, False])
def test_parsers_timestring(self, cache):
# must be the same as dateutil result
cases = {
"10:15": (parse("10:15"), datetime(1, 1, 1, 10, 15)),
"9:05": (parse("9:05"), datetime(1, 1, 1, 9, 5)),
}
for date_str, (exp_now, exp_def) in cases.items():
result1, _ = parsing.parse_time_string(date_str)
result2 = to_datetime(date_str)
result3 = to_datetime([date_str])
result4 = Timestamp(date_str)
result5 = DatetimeIndex([date_str])[0]
            # parse_time_string fills missing fields from the default date
            # (year 1); the other constructors use today's date, and this
            # can't be changed because it is relied on by time series plotting
assert result1 == exp_def
assert result2 == exp_now
assert result3 == exp_now
assert result4 == exp_now
assert result5 == exp_now
@pytest.mark.parametrize("cache", [True, False])
@pytest.mark.parametrize(
"dt_string, tz, dt_string_repr",
[
(
"2013-01-01 05:45+0545",
pytz.FixedOffset(345),
"Timestamp('2013-01-01 05:45:00+0545', tz='pytz.FixedOffset(345)')",
),
(
"2013-01-01 05:30+0530",
pytz.FixedOffset(330),
"Timestamp('2013-01-01 05:30:00+0530', tz='pytz.FixedOffset(330)')",
),
],
)
def test_parsers_timezone_minute_offsets_roundtrip(
self, cache, dt_string, tz, dt_string_repr
):
# GH11708
base = to_datetime("2013-01-01 00:00:00", cache=cache)
base = base.tz_localize("UTC").tz_convert(tz)
dt_time = to_datetime(dt_string, cache=cache)
assert base == dt_time
assert dt_string_repr == repr(dt_time)
@pytest.fixture(params=["D", "s", "ms", "us", "ns"])
def units(request):
"""Day and some time units.
* D
* s
* ms
* us
* ns
"""
return request.param
@pytest.fixture
def epoch_1960():
"""Timestamp at 1960-01-01."""
return Timestamp("1960-01-01")
@pytest.fixture
def units_from_epochs():
return list(range(5))
@pytest.fixture(params=["timestamp", "pydatetime", "datetime64", "str_1960"])
def epochs(epoch_1960, request):
"""Timestamp at 1960-01-01 in various forms.
* Timestamp
* datetime.datetime
* numpy.datetime64
* str
"""
assert request.param in {"timestamp", "pydatetime", "datetime64", "str_1960"}
if request.param == "timestamp":
return epoch_1960
elif request.param == "pydatetime":
return epoch_1960.to_pydatetime()
elif request.param == "datetime64":
return epoch_1960.to_datetime64()
else:
return str(epoch_1960)
@pytest.fixture
def julian_dates():
return pd.date_range("2014-1-1", periods=10).to_julian_date().values
class TestOrigin:
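    # Tests for the origin and unit keywords (julian dates, custom epochs
    # and their error cases).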
def test_to_basic(self, julian_dates):
# gh-11276, gh-11745
# for origin as julian
result = Series(pd.to_datetime(julian_dates, unit="D", origin="julian"))
expected = Series(
pd.to_datetime(julian_dates - Timestamp(0).to_julian_date(), unit="D")
)
tm.assert_series_equal(result, expected)
result = Series(pd.to_datetime([0, 1, 2], unit="D", origin="unix"))
expected = Series(
[Timestamp("1970-01-01"), Timestamp("1970-01-02"), Timestamp("1970-01-03")]
)
tm.assert_series_equal(result, expected)
# default
result = Series(pd.to_datetime([0, 1, 2], unit="D"))
expected = Series(
[Timestamp("1970-01-01"), Timestamp("1970-01-02"), Timestamp("1970-01-03")]
)
tm.assert_series_equal(result, expected)
def test_julian_round_trip(self):
result = pd.to_datetime(2456658, origin="julian", unit="D")
assert result.to_julian_date() == 2456658
# out-of-bounds
msg = "1 is Out of Bounds for origin='julian'"
with pytest.raises(ValueError, match=msg):
pd.to_datetime(1, origin="julian", unit="D")
def test_invalid_unit(self, units, julian_dates):
# checking for invalid combination of origin='julian' and unit != D
if units != "D":
msg = "unit must be 'D' for origin='julian'"
with pytest.raises(ValueError, match=msg):
pd.to_datetime(julian_dates, unit=units, origin="julian")
def test_invalid_origin(self):
# need to have a numeric specified
msg = "it must be numeric with a unit specified"
with pytest.raises(ValueError, match=msg):
pd.to_datetime("2005-01-01", origin="1960-01-01")
with pytest.raises(ValueError, match=msg):
pd.to_datetime("2005-01-01", origin="1960-01-01", unit="D")
def test_epoch(self, units, epochs, epoch_1960, units_from_epochs):
expected = Series(
[pd.Timedelta(x, unit=units) + epoch_1960 for x in units_from_epochs]
)
result = Series(pd.to_datetime(units_from_epochs, unit=units, origin=epochs))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"origin, exc",
[
("random_string", ValueError),
("epoch", ValueError),
("13-24-1990", ValueError),
(datetime(1, 1, 1), tslib.OutOfBoundsDatetime),
],
)
def test_invalid_origins(self, origin, exc, units, units_from_epochs):
msg = f"origin {origin} (is Out of Bounds|cannot be converted to a Timestamp)"
with pytest.raises(exc, match=msg):
pd.to_datetime(units_from_epochs, unit=units, origin=origin)
def test_invalid_origins_tzinfo(self):
# GH16842
with pytest.raises(ValueError, match="must be tz-naive"):
pd.to_datetime(1, unit="D", origin=datetime(2000, 1, 1, tzinfo=pytz.utc))
@pytest.mark.parametrize("format", [None, "%Y-%m-%d %H:%M:%S"])
def test_to_datetime_out_of_bounds_with_format_arg(self, format):
# see gh-23830
msg = "Out of bounds nanosecond timestamp"
with pytest.raises(OutOfBoundsDatetime, match=msg):
to_datetime("2417-10-27 00:00:00", format=format)
def test_processing_order(self):
# make sure we handle out-of-bounds *before*
# constructing the dates
result = pd.to_datetime(200 * 365, unit="D")
expected = Timestamp("2169-11-13 00:00:00")
assert result == expected
result = pd.to_datetime(200 * 365, unit="D", origin="1870-01-01")
expected = Timestamp("2069-11-13 00:00:00")
assert result == expected
result = pd.to_datetime(300 * 365, unit="D", origin="1870-01-01")
expected = Timestamp("2169-10-20 00:00:00")
assert result == expected
@pytest.mark.parametrize(
"offset,utc,exp",
[
["Z", True, "2019-01-01T00:00:00.000Z"],
["Z", None, "2019-01-01T00:00:00.000Z"],
["-01:00", True, "2019-01-01T01:00:00.000Z"],
["-01:00", None, "2019-01-01T00:00:00.000-01:00"],
],
)
def test_arg_tz_ns_unit(self, offset, utc, exp):
# GH 25546
arg = "2019-01-01T00:00:00.000" + offset
result = to_datetime([arg], unit="ns", utc=utc)
expected = to_datetime([exp])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"listlike,do_caching",
[([1, 2, 3, 4, 5, 6, 7, 8, 9, 0], False), ([1, 1, 1, 1, 4, 5, 6, 7, 8, 9], True)],
)
def test_should_cache(listlike, do_caching):
assert (
tools.should_cache(listlike, check_count=len(listlike), unique_share=0.7)
== do_caching
)
@pytest.mark.parametrize(
"unique_share,check_count, err_message",
[
(0.5, 11, r"check_count must be in next bounds: \[0; len\(arg\)\]"),
(10, 2, r"unique_share must be in next bounds: \(0; 1\)"),
],
)
def test_should_cache_errors(unique_share, check_count, err_message):
arg = [5] * 10
with pytest.raises(AssertionError, match=err_message):
tools.should_cache(arg, unique_share, check_count)
def test_nullable_integer_to_datetime():
# Test for #30050
ser = Series([1, 2, None, 2 ** 61, None])
ser = ser.astype("Int64")
ser_copy = ser.copy()
res = pd.to_datetime(ser, unit="ns")
expected = Series(
[
np.datetime64("1970-01-01 00:00:00.000000001"),
np.datetime64("1970-01-01 00:00:00.000000002"),
np.datetime64("NaT"),
np.datetime64("2043-01-25 23:56:49.213693952"),
np.datetime64("NaT"),
]
)
tm.assert_series_equal(res, expected)
# Check that ser isn't mutated
tm.assert_series_equal(ser, ser_copy)
@pytest.mark.parametrize("klass", [np.array, list])
def test_na_to_datetime(nulls_fixture, klass):
if isinstance(nulls_fixture, Decimal):
with pytest.raises(TypeError, match="not convertible to datetime"):
pd.to_datetime(klass([nulls_fixture]))
else:
result = pd.to_datetime(klass([nulls_fixture]))
assert result[0] is pd.NaT
def test_empty_string_datetime_coerce__format():
# GH13044
td = Series(["03/24/2016", "03/25/2016", ""])
format = "%m/%d/%Y"
# coerce empty string to pd.NaT
result = pd.to_datetime(td, format=format, errors="coerce")
expected = Series(["2016-03-24", "2016-03-25", pd.NaT], dtype="datetime64[ns]")
tm.assert_series_equal(expected, result)
# raise an exception in case a format is given
with pytest.raises(ValueError, match="does not match format"):
result = pd.to_datetime(td, format=format, errors="raise")
    # don't raise an exception in case no format is given
result = pd.to_datetime(td, errors="raise")
tm.assert_series_equal(result, expected)
def test_empty_string_datetime_coerce__unit():
# GH13044
# coerce empty string to pd.NaT
result = pd.to_datetime([1, ""], unit="s", errors="coerce")
expected = DatetimeIndex(["1970-01-01 00:00:01", "NaT"], dtype="datetime64[ns]")
tm.assert_index_equal(expected, result)
# verify that no exception is raised even when errors='raise' is set
result = pd.to_datetime([1, ""], unit="s", errors="raise")
tm.assert_index_equal(expected, result)
|
py | 1a47b5e350b2a64f63a79cecadf28676e85c03a9 | from typing import FrozenSet
from collections.abc import Iterable  # collections.Iterable was removed in Python 3.10
from math import log, ceil
from mathsat import msat_term, msat_env
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_integer_type, msat_get_rational_type, msat_get_bool_type
from mathsat import msat_make_and, msat_make_not, msat_make_or, msat_make_iff
from mathsat import msat_make_leq, msat_make_equal, msat_make_true
from mathsat import msat_make_number, msat_make_plus, msat_make_times
from pysmt.environment import Environment as PysmtEnv
import pysmt.typing as types
from ltl.ltl import TermMap, LTLEncoder
from utils import name_next, symb_to_next
from hint import Hint, Location
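# Timed model of a sender/receiver hand-shake with retransmission, encoded as
# a MathSAT transition system together with an LTL property and search hints.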
delta_name = "delta"
def decl_consts(menv: msat_env, name: str, c_type) -> tuple:
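    """Declare an msat constant `name` and its next-state copy; return both."""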
assert not name.startswith("_"), name
s = msat_declare_function(menv, name, c_type)
s = msat_make_constant(menv, s)
x_s = msat_declare_function(menv, name_next(name), c_type)
x_s = msat_make_constant(menv, x_s)
return s, x_s
def make_enum(menv, v_name: str, enum_size: int):
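    """Encode an enum of `enum_size` values with ceil(log2(enum_size)) boolean
    variables; return the bit variables and, for each value, its current- and
    next-state predicates."""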
bool_type = msat_get_bool_type(menv)
num_bits = ceil(log(enum_size, 2))
b_vars = []
for idx in range(num_bits):
c_name = "{}{}".format(v_name, idx)
b_vars.append(tuple(decl_consts(menv, c_name, bool_type)))
vals = []
x_vals = []
for enum_val in range(enum_size):
bit_val = format(enum_val, '0{}b'.format(num_bits))
assert len(bit_val) == num_bits
assert all(c in {'0', '1'} for c in bit_val)
assign = [b_vars[idx] if c == '1' else
(msat_make_not(menv, b_vars[idx][0]),
msat_make_not(menv, b_vars[idx][1]))
for idx, c in enumerate(reversed(bit_val))]
pred = assign[0][0]
x_pred = assign[0][1]
for it in assign[1:]:
pred = msat_make_and(menv, pred, it[0])
x_pred = msat_make_and(menv, x_pred, it[1])
vals.append(pred)
x_vals.append(x_pred)
assert len(vals) == enum_size
assert len(x_vals) == enum_size
return b_vars, vals, x_vals
def msat_make_minus(menv: msat_env, arg0: msat_term, arg1: msat_term):
m_one = msat_make_number(menv, "-1")
arg1 = msat_make_times(menv, arg1, m_one)
return msat_make_plus(menv, arg0, arg1)
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
geq = msat_make_geq(menv, arg0, arg1)
return msat_make_not(menv, geq)
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
return msat_make_leq(menv, arg1, arg0)
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
leq = msat_make_leq(menv, arg0, arg1)
return msat_make_not(menv, leq)
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
n_arg0 = msat_make_not(menv, arg0)
return msat_make_or(menv, n_arg0, arg1)
def diverging_symbs(menv: msat_env) -> frozenset:
real_type = msat_get_rational_type(menv)
delta = msat_declare_function(menv, delta_name, real_type)
delta = msat_make_constant(menv, delta)
return frozenset([delta])
def check_ltl(menv: msat_env, enc: LTLEncoder) -> (Iterable, msat_term,
msat_term, msat_term):
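    """Build the sender/receiver timed system and the LTL property
    (G F !s.stutter) -> G (s.wait_ack -> F s.send); return the
    current-to-next map, init, trans and the LTL formula."""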
assert menv
assert isinstance(menv, msat_env)
assert enc
assert isinstance(enc, LTLEncoder)
int_type = msat_get_integer_type(menv)
real_type = msat_get_rational_type(menv)
r2s, x_r2s = decl_consts(menv, "r2s", int_type)
s2r, x_s2r = decl_consts(menv, "s2r", int_type)
delta, x_delta = decl_consts(menv, delta_name, real_type)
sender = Sender("s", menv, enc, r2s, x_r2s, s2r, x_s2r, delta)
receiver = Receiver("r", menv, enc, s2r, x_s2r, r2s, x_r2s, delta)
curr2next = {r2s: x_r2s, s2r: x_s2r, delta: x_delta}
for comp in [sender, receiver]:
for s, x_s in comp.symb2next.items():
curr2next[s] = x_s
zero = msat_make_number(menv, "0")
init = msat_make_and(menv, receiver.init, sender.init)
trans = msat_make_and(menv, receiver.trans, sender.trans)
# invar delta >= 0
init = msat_make_and(menv, init,
msat_make_geq(menv, delta, zero))
trans = msat_make_and(menv, trans,
msat_make_geq(menv, x_delta, zero))
# delta > 0 -> (r2s' = r2s & s2r' = s2r)
lhs = msat_make_gt(menv, delta, zero)
rhs = msat_make_and(menv,
msat_make_equal(menv, x_r2s, r2s),
msat_make_equal(menv, x_s2r, s2r))
trans = msat_make_and(menv, trans,
msat_make_impl(menv, lhs, rhs))
# (G F !s.stutter) -> G (s.wait_ack -> F s.send)
lhs = enc.make_G(enc.make_F(msat_make_not(menv, sender.stutter)))
rhs = enc.make_G(msat_make_impl(menv, sender.wait_ack,
enc.make_F(sender.send)))
ltl = msat_make_impl(menv, lhs, rhs)
return TermMap(curr2next), init, trans, ltl
class Module:
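    """Base component: holds the symbol-to-next map and the init/trans
    formulas (both initially true), plus helpers to declare symbols."""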
def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
*args, **kwargs):
self.name = name
self.menv = menv
self.enc = enc
self.symb2next = {}
true = msat_make_true(menv)
self.init = true
self.trans = true
def _symb(self, v_name, v_type):
v_name = "{}_{}".format(self.name, v_name)
return decl_consts(self.menv, v_name, v_type)
def _enum(self, v_name: str, enum_size: int):
c_name = "{}_{}".format(self.name, v_name)
return make_enum(self.menv, c_name, enum_size)
class Sender(Module):
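    """Sender: alternates between `send` and `wait_ack`, tracking a message
    id, a clock `c` and a `timeout` that grows on every retransmission."""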
def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
in_c, x_in_c, out_c, x_out_c, delta):
super().__init__(name, menv, enc)
bool_type = msat_get_bool_type(menv)
int_type = msat_get_integer_type(menv)
real_type = msat_get_rational_type(menv)
loc, x_loc = self._symb("l", bool_type)
evt, x_evt = self._symb("evt", bool_type)
msg_id, x_msg_id = self._symb("msg_id", int_type)
timeout, x_timeout = self._symb("timeout", real_type)
c, x_c = self._symb("c", real_type)
self.move = evt
self.stutter = msat_make_not(menv, evt)
self.x_move = x_evt
self.x_stutter = msat_make_not(menv, x_evt)
self.send = loc
self.wait_ack = msat_make_not(menv, loc)
self.x_send = x_loc
self.x_wait_ack = msat_make_not(menv, x_loc)
self.symb2next = {loc: x_loc, evt: x_evt, msg_id: x_msg_id,
timeout: x_timeout, c: x_c}
zero = msat_make_number(menv, "0")
one = msat_make_number(menv, "1")
base_timeout = one
# send & c = 0 & msg_id = 0
self.init = msat_make_and(menv,
msat_make_and(menv, self.send,
msat_make_equal(menv, c,
zero)),
msat_make_equal(menv, msg_id, zero))
# invar: wait_ack -> c <= timeout
self.init = msat_make_and(
menv, self.init,
msat_make_impl(menv, self.wait_ack,
msat_make_leq(menv, c, timeout)))
self.trans = msat_make_impl(menv, self.x_wait_ack,
msat_make_leq(menv, x_c, x_timeout))
# delta > 0 | stutter -> l' = l & msg_id' = msg_id & timeout' = timeout &
# c' = c + delta & out_c' = out_c
lhs = msat_make_or(menv, msat_make_gt(menv, delta, zero), self.stutter)
rhs = msat_make_and(
menv,
msat_make_and(menv,
msat_make_iff(menv, x_loc, loc),
msat_make_equal(menv, x_msg_id, msg_id)),
msat_make_and(menv,
msat_make_equal(menv, x_timeout, timeout),
msat_make_equal(menv, x_c,
msat_make_plus(menv, c, delta))))
rhs = msat_make_and(menv, rhs,
msat_make_equal(menv, x_out_c, out_c))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
disc_t = msat_make_and(menv, self.move,
msat_make_equal(menv, delta, zero))
# (send & send') ->
# (msg_id' = msg_id & timeout' = base_timeout & c' = 0 & out_c' = out_c)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.send, self.x_send))
rhs = msat_make_and(
menv,
msat_make_and(menv,
msat_make_equal(menv, x_msg_id, msg_id),
msat_make_equal(menv, x_timeout, base_timeout)),
msat_make_and(menv,
msat_make_equal(menv, x_c, zero),
msat_make_equal(menv, x_out_c, out_c)))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (send & wait_ack') ->
# (msg_id' = msg_id + 1 & timeout' = base_timeout & c' = 0 & out_c' = out_c)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.send, self.x_wait_ack))
rhs = msat_make_and(
menv,
msat_make_and(menv,
msat_make_equal(menv, x_msg_id,
msat_make_plus(menv, msg_id, one)),
msat_make_equal(menv, x_timeout, base_timeout)),
msat_make_and(menv,
msat_make_equal(menv, x_c, zero),
msat_make_equal(menv, x_out_c, out_c)))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
        # wait_ack -> (c' = 0 & out_c' = out_c &
        #              (wait_ack' <-> (in_c != msg_id & c >= timeout)))
lhs = msat_make_and(menv, disc_t, self.wait_ack)
rhs_iff = msat_make_and(menv,
msat_make_not(menv,
msat_make_equal(menv, in_c,
msg_id)),
msat_make_geq(menv, c, timeout))
rhs_iff = msat_make_iff(menv, self.x_wait_ack, rhs_iff)
rhs = msat_make_and(menv,
msat_make_and(menv,
msat_make_equal(menv, x_c, zero),
msat_make_equal(menv, x_out_c,
out_c)),
rhs_iff)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait_ack & wait_ack') -> (timeout' > timeout)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.wait_ack,
self.x_wait_ack))
rhs = msat_make_gt(menv, x_timeout, timeout)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait_ack) -> (send' <-> (in_c = msg_id & c < timeout))
lhs = msat_make_and(menv, disc_t, self.wait_ack)
rhs = msat_make_iff(menv, self.x_send,
msat_make_and(menv,
msat_make_equal(menv, in_c, msg_id),
msat_make_lt(menv, c, timeout)))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait_ack & send') -> (timeout' = base_timeout)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.wait_ack, self.x_send))
rhs = msat_make_equal(menv, x_timeout, base_timeout)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
class Receiver(Module):
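    """Receiver: waits until a new message id appears on `in_c`, then
    acknowledges it by copying the id to `out_c`."""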
def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
in_c, x_in_c, out_c, x_out_c, delta):
super().__init__(name, menv, enc)
bool_type = msat_get_bool_type(menv)
loc, x_loc = self._symb("l", bool_type)
self.wait = loc
self.work = msat_make_not(menv, loc)
self.x_wait = x_loc
self.x_work = msat_make_not(menv, x_loc)
self.symb2next = {loc: x_loc}
zero = msat_make_number(menv, "0")
# wait
self.init = self.wait
# delta > 0 -> loc' = loc & out_c' = out_c
lhs = msat_make_gt(menv, delta, zero)
rhs = msat_make_and(menv,
msat_make_iff(menv, x_loc, loc),
msat_make_equal(menv, x_out_c, out_c))
self.trans = msat_make_impl(menv, lhs, rhs)
disc_t = msat_make_equal(menv, delta, zero)
# wait -> (wait' <-> in_c = out_c)
lhs = msat_make_and(menv, disc_t, self.wait)
rhs = msat_make_iff(menv, self.x_wait,
msat_make_equal(menv, in_c, out_c))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait & wait') -> (out_c' = out_c)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.wait, self.x_wait))
rhs = msat_make_equal(menv, x_out_c, out_c)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait & work') -> out_c' = in_c
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.wait, self.x_work))
rhs = msat_make_equal(menv, x_out_c, in_c)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# work -> out_c' = out_c
lhs = msat_make_and(menv, disc_t, self.work)
rhs = msat_make_equal(menv, x_out_c, out_c)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
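    """Return the search hints: a single hint on `s2r` that, from any
    non-negative value, sets it to 1 in the next state."""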
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
delta = mgr.Symbol(delta_name, types.REAL)
r2s = mgr.Symbol("r2s", types.INT)
    s2r = mgr.Symbol("s2r", types.INT)
s_l = mgr.Symbol("s_l", types.BOOL)
s_evt = mgr.Symbol("s_evt", types.BOOL)
s_msg_id = mgr.Symbol("s_msg_id", types.INT)
s_timeout = mgr.Symbol("s_timeout", types.REAL)
s_c = mgr.Symbol("s_c", types.REAL)
r_l = mgr.Symbol("r_l", types.BOOL)
symbs = frozenset([delta, r2s, s2r, s_l, s_evt, s_msg_id, s_timeout, s_c,
r_l])
x_delta = symb_to_next(mgr, delta)
x_r2s = symb_to_next(mgr, r2s)
x_s2r = symb_to_next(mgr, s2r)
x_s_l = symb_to_next(mgr, s_l)
x_s_evt = symb_to_next(mgr, s_evt)
x_s_msg_id = symb_to_next(mgr, s_msg_id)
x_s_timeout = symb_to_next(mgr, s_timeout)
x_s_c = symb_to_next(mgr, s_c)
x_r_l = symb_to_next(mgr, r_l)
res = []
r0 = mgr.Real(0)
r1 = mgr.Real(1)
i0 = mgr.Int(0)
i1 = mgr.Int(1)
loc0 = Location(env, mgr.GE(s2r, i0))
loc0.set_progress(0, mgr.Equals(x_s2r, i1))
hint = Hint("h_s2r1", env, frozenset([s2r]), symbs)
hint.set_locs([loc0])
res.append(hint)
return frozenset(res)
|
py | 1a47b7066b9123e1643819286aadc244b7874d4e | # -*- coding: utf-8 -*-
# @Time : 09/09/2021 03:40 PM
# @Author : Rodolfo Londero
# @Email : [email protected]
# @File : test_transformers.py
# @Software : VSCode
import pytest
class TestTransformers13Bus:
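    # Read/write round-trip tests for the Transformers interface on the
    # 13-bus test feeder.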
@pytest.fixture(scope='function')
def dss(self, solve_snap_13bus):
dss = solve_snap_13bus
dss.solution_solve()
dss.transformers_write_name('sub')
return dss
# ===================================================================
# Integer methods
# ===================================================================
def test_transformers_read_num_windings(self, dss):
expected = 2
actual = dss.transformers_read_num_windings()
assert actual == expected
def test_transformers_write_num_windings(self, dss):
expected = 3
dss.transformers_write_num_windings(expected)
actual = dss.transformers_read_num_windings()
assert actual == expected
def test_transformers_read_wdg(self, dss):
expected = 2
actual = dss.transformers_read_wdg()
assert actual == expected
def test_transformers_write_wdg(self, dss):
expected = 1
dss.transformers_write_wdg(expected)
actual = dss.transformers_read_wdg()
assert actual == expected
def test_transformers_read_num_taps(self, dss):
expected = 32
actual = dss.transformers_read_num_taps()
assert actual == expected
def test_transformers_write_num_taps(self, dss):
expected = 16
dss.transformers_write_num_taps(expected)
actual = dss.transformers_read_num_taps()
assert actual == expected
def test_transformers_read_is_delta(self, dss):
expected = 0
actual = dss.transformers_read_is_delta()
assert actual == expected
def test_transformers_write_is_delta(self, dss):
expected = 1
dss.transformers_write_is_delta(expected)
actual = dss.transformers_read_is_delta()
assert actual == expected
def test_transformers_first(self, dss):
expected = 1
actual = dss.transformers_first()
assert actual == expected
def test_transformers_next(self, dss):
expected = 2
actual = dss.transformers_next()
assert actual == expected
def test_transformers_count(self, dss):
expected = 5
actual = dss.transformers_count()
assert actual == expected
# ===================================================================
# Float methods
# ===================================================================
def test_transformers_read_r(self, dss):
expected = 0.0005
actual = dss.transformers_read_r()
assert actual == expected
def test_transformers_write_r(self, dss):
expected = 0.01
dss.transformers_write_r(expected)
actual = dss.transformers_read_r()
assert actual == expected
def test_transformers_read_tap(self, dss):
expected = 1
actual = dss.transformers_read_tap()
assert actual == expected
def test_transformers_write_tap(self, dss):
expected = 5
dss.transformers_write_tap(expected)
actual = dss.transformers_read_tap()
assert actual == expected
def test_transformers_read_min_tap(self, dss):
expected = 0.9
actual = dss.transformers_read_min_tap()
assert actual == expected
def test_transformers_write_min_tap(self, dss):
expected = 0.5
dss.transformers_write_min_tap(expected)
actual = dss.transformers_read_min_tap()
assert actual == expected
def test_transformers_read_max_tap(self, dss):
expected = 1.1
actual = dss.transformers_read_max_tap()
assert actual == expected
def test_transformers_write_max_tap(self, dss):
expected = 1.5
dss.transformers_write_max_tap(expected)
actual = dss.transformers_read_max_tap()
assert actual == expected
def test_transformers_read_kv(self, dss):
expected = 4.16
actual = dss.transformers_read_kv()
assert actual == expected
def test_transformers_write_kv(self, dss):
expected = 3.8
dss.transformers_write_kv(expected)
actual = dss.transformers_read_kv()
assert actual == expected
def test_transformers_read_kva(self, dss):
expected = 5000
actual = dss.transformers_read_kva()
assert actual == expected
def test_transformers_write_kva(self, dss):
expected = 10000
dss.transformers_write_kva(expected)
actual = dss.transformers_read_kva()
assert actual == expected
def test_transformers_read_x_neut(self, dss):
expected = 0
actual = dss.transformers_read_x_neut()
assert actual == expected
def test_transformers_write_x_neut(self, dss):
expected = 1
dss.transformers_write_x_neut(expected)
actual = dss.transformers_read_x_neut()
assert actual == expected
def test_transformers_read_r_neut(self, dss):
expected = -1
actual = dss.transformers_read_r_neut()
assert actual == expected
def test_transformers_write_r_neut(self, dss):
expected = 1
dss.transformers_write_r_neut(expected)
actual = dss.transformers_read_r_neut()
assert actual == expected
def test_transformers_read_xhl(self, dss):
expected = 0.008
actual = dss.transformers_read_xhl()
assert actual == expected
def test_transformers_write_xhl(self, dss):
expected = 0.008
dss.transformers_write_xhl(expected)
actual = dss.transformers_read_xhl()
assert actual == expected
def test_transformers_read_xht(self, dss):
expected = 4
actual = dss.transformers_read_xht()
assert actual == expected
def test_transformers_write_xht(self, dss):
expected = 5
dss.transformers_write_xht(expected)
actual = dss.transformers_read_xht()
assert actual == expected
def test_transformers_read_xlt(self, dss):
expected = 4
actual = dss.transformers_read_xlt()
assert actual == expected
def test_transformers_write_xlt(self, dss):
expected = 5
dss.transformers_write_xlt(expected)
actual = dss.transformers_read_xlt()
assert actual == expected
# ===================================================================
# String methods
# ===================================================================
def test_transformers_read_xfmr_code(self, dss):
expected = ''
actual = dss.transformers_read_xfmr_code()
assert actual == expected
def test_transformers_write_xfmr_code(self, dss):
dss.text(r'New XfmrCode.test phases=1 xhl=0.01 kvas=[1666 1666] kvs=[2.4 2.4] %LoadLoss=0.01 ')
expected = 'test'
dss.transformers_write_xfmr_code(expected)
actual = dss.transformers_read_xfmr_code()
assert actual == expected
def test_transformers_read_name(self, dss):
expected = 'sub'
actual = dss.transformers_read_name()
assert actual == expected
def test_transformers_write_name(self, dss):
expected = 'reg1'
dss.transformers_write_name(expected)
actual = dss.transformers_read_name()
assert actual == expected
def test_transformers_str_wdg_voltages(self, dss):
expected = '1'
actual = dss.transformers_str_wdg_voltages()
assert actual == expected
# ===================================================================
# Variant methods
# ===================================================================
def test_transformers_all_Names(self, dss):
expected = ['sub', 'reg1', 'reg2', 'reg3', 'xfm1']
actual = dss.transformers_all_Names()
assert actual == expected
def test_transformers_wdg_voltages(self, dss):
expected = [2401.5628121109403,
-0.4668923729244497,
-1201.237672392959,
-2079.717523220085,
-1200.311654294895,
2080.141951753078]
actual = dss.transformers_wdg_voltages()
assert actual == expected
def test_transformers_wdg_currents(self, dss):
expected = [10.886376124155504,
-5.958628293446964,
-10.886371940479876,
5.958628292748472,
-521.2527855311055,
285.3058254830539,
521.2527854638174,
-285.3061724174768,
-7.086427310190629,
-5.676542717425036,
7.086425217828946,
5.676539094769396,
339.30622922163457,
271.7999201430939,
-339.3065296616405,
-271.7997466106899,
-0.771484338270966,
13.030897319840733,
0.771482246927917,
-13.0308936964866,
36.940006016753614,
-623.934813240543,
-36.93970551621169,
623.9349866397679]
actual = dss.transformers_wdg_currents()
assert actual == expected
|
py | 1a47b7100cda0253099a9da30cd2aab3d5102de1 | #!C:\Users\Cliente\PycharmProjects\pythonbirds\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
)
|
py | 1a47b77e4b35ab05579eb0aec91ed2eefb969980 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import time
import unittest
from datetime import datetime, timedelta
import pytest
from airflow import models
from airflow.api.common.experimental.mark_tasks import (
_create_dagruns, set_dag_run_state_to_failed, set_dag_run_state_to_running, set_dag_run_state_to_success,
set_state,
)
from airflow.models import DagRun
from airflow.utils import timezone
from airflow.utils.dates import days_ago
from airflow.utils.session import create_session, provide_session
from airflow.utils.state import State
from tests.test_utils.db import clear_db_runs
DEV_NULL = "/dev/null"
class TestMarkTasks(unittest.TestCase):
@classmethod
def setUpClass(cls):
dagbag = models.DagBag(include_examples=True)
cls.dag1 = dagbag.dags['example_bash_operator']
cls.dag1.sync_to_db()
cls.dag2 = dagbag.dags['example_subdag_operator']
cls.dag2.sync_to_db()
cls.dag3 = dagbag.dags['example_trigger_target_dag']
cls.dag3.sync_to_db()
cls.execution_dates = [days_ago(2), days_ago(1)]
start_date3 = cls.dag3.default_args["start_date"]
cls.dag3_execution_dates = [start_date3, start_date3 + timedelta(days=1),
start_date3 + timedelta(days=2)]
def setUp(self):
clear_db_runs()
drs = _create_dagruns(self.dag1, self.execution_dates,
state=State.RUNNING,
run_id_template="scheduled__{}")
for dr in drs:
dr.dag = self.dag1
dr.verify_integrity()
drs = _create_dagruns(self.dag2,
[self.dag2.default_args['start_date']],
state=State.RUNNING,
run_id_template="scheduled__{}")
for dr in drs:
dr.dag = self.dag2
dr.verify_integrity()
drs = _create_dagruns(self.dag3,
self.dag3_execution_dates,
state=State.SUCCESS,
run_id_template="manual__{}")
for dr in drs:
dr.dag = self.dag3
dr.verify_integrity()
def tearDown(self):
clear_db_runs()
@staticmethod
def snapshot_state(dag, execution_dates):
TI = models.TaskInstance
with create_session() as session:
return session.query(TI).filter(
TI.dag_id == dag.dag_id,
TI.execution_date.in_(execution_dates)
).all()
@provide_session
def verify_state(self, dag, task_ids, execution_dates, state, old_tis, session=None):
TI = models.TaskInstance
tis = session.query(TI).filter(
TI.dag_id == dag.dag_id,
TI.execution_date.in_(execution_dates)
).all()
self.assertTrue(len(tis) > 0)
for ti in tis: # pylint: disable=too-many-nested-blocks
if ti.task_id in task_ids and ti.execution_date in execution_dates:
self.assertEqual(ti.state, state)
if state in State.finished():
self.assertIsNotNone(ti.end_date)
else:
for old_ti in old_tis:
if old_ti.task_id == ti.task_id and old_ti.execution_date == ti.execution_date:
self.assertEqual(ti.state, old_ti.state)
def test_mark_tasks_now(self):
# set one task to success but do not commit
snapshot = TestMarkTasks.snapshot_state(self.dag1, self.execution_dates)
task = self.dag1.get_task("runme_1")
altered = set_state(tasks=[task], execution_date=self.execution_dates[0],
upstream=False, downstream=False, future=False,
past=False, state=State.SUCCESS, commit=False)
self.assertEqual(len(altered), 1)
self.verify_state(self.dag1, [task.task_id], [self.execution_dates[0]],
None, snapshot)
# set one and only one task to success
altered = set_state(tasks=[task], execution_date=self.execution_dates[0],
upstream=False, downstream=False, future=False,
past=False, state=State.SUCCESS, commit=True)
self.assertEqual(len(altered), 1)
self.verify_state(self.dag1, [task.task_id], [self.execution_dates[0]],
State.SUCCESS, snapshot)
# set no tasks
altered = set_state(tasks=[task], execution_date=self.execution_dates[0],
upstream=False, downstream=False, future=False,
past=False, state=State.SUCCESS, commit=True)
self.assertEqual(len(altered), 0)
self.verify_state(self.dag1, [task.task_id], [self.execution_dates[0]],
State.SUCCESS, snapshot)
# set task to other than success
altered = set_state(tasks=[task], execution_date=self.execution_dates[0],
upstream=False, downstream=False, future=False,
past=False, state=State.FAILED, commit=True)
self.assertEqual(len(altered), 1)
self.verify_state(self.dag1, [task.task_id], [self.execution_dates[0]],
State.FAILED, snapshot)
        # don't alter other tasks
snapshot = TestMarkTasks.snapshot_state(self.dag1, self.execution_dates)
task = self.dag1.get_task("runme_0")
altered = set_state(tasks=[task], execution_date=self.execution_dates[0],
upstream=False, downstream=False, future=False,
past=False, state=State.SUCCESS, commit=True)
self.assertEqual(len(altered), 1)
self.verify_state(self.dag1, [task.task_id], [self.execution_dates[0]],
State.SUCCESS, snapshot)
# set one task as FAILED. dag3 has schedule_interval None
snapshot = TestMarkTasks.snapshot_state(self.dag3, self.dag3_execution_dates)
task = self.dag3.get_task("run_this")
altered = set_state(tasks=[task], execution_date=self.dag3_execution_dates[1],
upstream=False, downstream=False, future=False,
past=False, state=State.FAILED, commit=True)
# exactly one TaskInstance should have been altered
self.assertEqual(len(altered), 1)
# task should have been marked as failed
self.verify_state(self.dag3, [task.task_id], [self.dag3_execution_dates[1]],
State.FAILED, snapshot)
# tasks on other days should be unchanged
self.verify_state(self.dag3, [task.task_id], [self.dag3_execution_dates[0]],
None, snapshot)
self.verify_state(self.dag3, [task.task_id], [self.dag3_execution_dates[2]],
None, snapshot)
def test_mark_downstream(self):
# test downstream
snapshot = TestMarkTasks.snapshot_state(self.dag1, self.execution_dates)
task = self.dag1.get_task("runme_1")
relatives = task.get_flat_relatives(upstream=False)
task_ids = [t.task_id for t in relatives]
task_ids.append(task.task_id)
altered = set_state(tasks=[task], execution_date=self.execution_dates[0],
upstream=False, downstream=True, future=False,
past=False, state=State.SUCCESS, commit=True)
self.assertEqual(len(altered), 3)
self.verify_state(self.dag1, task_ids, [self.execution_dates[0]], State.SUCCESS, snapshot)
def test_mark_upstream(self):
# test upstream
snapshot = TestMarkTasks.snapshot_state(self.dag1, self.execution_dates)
task = self.dag1.get_task("run_after_loop")
relatives = task.get_flat_relatives(upstream=True)
task_ids = [t.task_id for t in relatives]
task_ids.append(task.task_id)
altered = set_state(tasks=[task], execution_date=self.execution_dates[0],
upstream=True, downstream=False, future=False,
past=False, state=State.SUCCESS, commit=True)
self.assertEqual(len(altered), 4)
self.verify_state(self.dag1, task_ids, [self.execution_dates[0]],
State.SUCCESS, snapshot)
def test_mark_tasks_future(self):
# set one task to success towards end of scheduled dag runs
snapshot = TestMarkTasks.snapshot_state(self.dag1, self.execution_dates)
task = self.dag1.get_task("runme_1")
altered = set_state(tasks=[task], execution_date=self.execution_dates[0],
upstream=False, downstream=False, future=True,
past=False, state=State.SUCCESS, commit=True)
self.assertEqual(len(altered), 2)
self.verify_state(self.dag1, [task.task_id], self.execution_dates, State.SUCCESS, snapshot)
snapshot = TestMarkTasks.snapshot_state(self.dag3, self.dag3_execution_dates)
task = self.dag3.get_task("run_this")
altered = set_state(tasks=[task], execution_date=self.dag3_execution_dates[1],
upstream=False, downstream=False, future=True,
past=False, state=State.FAILED, commit=True)
self.assertEqual(len(altered), 2)
self.verify_state(self.dag3, [task.task_id], [self.dag3_execution_dates[0]], None, snapshot)
self.verify_state(self.dag3, [task.task_id], self.dag3_execution_dates[1:], State.FAILED, snapshot)
def test_mark_tasks_past(self):
# set one task to success towards end of scheduled dag runs
snapshot = TestMarkTasks.snapshot_state(self.dag1, self.execution_dates)
task = self.dag1.get_task("runme_1")
altered = set_state(tasks=[task], execution_date=self.execution_dates[1],
upstream=False, downstream=False, future=False,
past=True, state=State.SUCCESS, commit=True)
self.assertEqual(len(altered), 2)
self.verify_state(self.dag1, [task.task_id], self.execution_dates, State.SUCCESS, snapshot)
snapshot = TestMarkTasks.snapshot_state(self.dag3, self.dag3_execution_dates)
task = self.dag3.get_task("run_this")
altered = set_state(tasks=[task], execution_date=self.dag3_execution_dates[1],
upstream=False, downstream=False, future=False,
past=True, state=State.FAILED, commit=True)
self.assertEqual(len(altered), 2)
self.verify_state(self.dag3, [task.task_id], self.dag3_execution_dates[:2], State.FAILED, snapshot)
self.verify_state(self.dag3, [task.task_id], [self.dag3_execution_dates[2]], None, snapshot)
def test_mark_tasks_multiple(self):
# set multiple tasks to success
snapshot = TestMarkTasks.snapshot_state(self.dag1, self.execution_dates)
tasks = [self.dag1.get_task("runme_1"), self.dag1.get_task("runme_2")]
altered = set_state(tasks=tasks, execution_date=self.execution_dates[0],
upstream=False, downstream=False, future=False,
past=False, state=State.SUCCESS, commit=True)
self.assertEqual(len(altered), 2)
self.verify_state(self.dag1, [task.task_id for task in tasks], [self.execution_dates[0]],
State.SUCCESS, snapshot)
    # TODO: remove this backend marker once a proper fix is found.
    # The test is limited to SQLite & Postgres because it does not pass on MySQL.
@pytest.mark.backend("sqlite", "postgres")
def test_mark_tasks_subdag(self):
# set one task to success towards end of scheduled dag runs
task = self.dag2.get_task("section-1")
relatives = task.get_flat_relatives(upstream=False)
task_ids = [t.task_id for t in relatives]
task_ids.append(task.task_id)
altered = set_state(tasks=[task], execution_date=self.execution_dates[0],
upstream=False, downstream=True, future=False,
past=False, state=State.SUCCESS, commit=True)
self.assertEqual(len(altered), 14)
        # cannot use snapshot here as that would require drilling down
        # the sub dag tree, essentially recreating the same code as in the
        # tested logic.
self.verify_state(self.dag2, task_ids, [self.execution_dates[0]],
State.SUCCESS, [])
class TestMarkDAGRun(unittest.TestCase):
@classmethod
def setUpClass(cls):
dagbag = models.DagBag(include_examples=True)
cls.dag1 = dagbag.dags['example_bash_operator']
cls.dag1.sync_to_db()
cls.dag2 = dagbag.dags['example_subdag_operator']
cls.dag2.sync_to_db()
cls.execution_dates = [days_ago(2), days_ago(1), days_ago(0)]
def setUp(self):
clear_db_runs()
def _set_default_task_instance_states(self, dr):
# success task
dr.get_task_instance('runme_0').set_state(State.SUCCESS)
# skipped task
dr.get_task_instance('runme_1').set_state(State.SKIPPED)
# retry task
dr.get_task_instance('runme_2').set_state(State.UP_FOR_RETRY)
# queued task
dr.get_task_instance('also_run_this').set_state(State.QUEUED)
# running task
dr.get_task_instance('run_after_loop').set_state(State.RUNNING)
# failed task
dr.get_task_instance('run_this_last').set_state(State.FAILED)
def _verify_task_instance_states_remain_default(self, dr):
self.assertEqual(dr.get_task_instance('runme_0').state, State.SUCCESS)
self.assertEqual(dr.get_task_instance('runme_1').state, State.SKIPPED)
self.assertEqual(dr.get_task_instance('runme_2').state, State.UP_FOR_RETRY)
self.assertEqual(dr.get_task_instance('also_run_this').state, State.QUEUED)
self.assertEqual(dr.get_task_instance('run_after_loop').state, State.RUNNING)
self.assertEqual(dr.get_task_instance('run_this_last').state, State.FAILED)
@provide_session
def _verify_task_instance_states(self, dag, date, state, session=None):
TI = models.TaskInstance
tis = session.query(TI)\
.filter(TI.dag_id == dag.dag_id, TI.execution_date == date)
for ti in tis:
self.assertEqual(ti.state, state)
def _create_test_dag_run(self, state, date):
return self.dag1.create_dagrun(
run_id='manual__' + datetime.now().isoformat(),
state=state,
execution_date=date
)
def _verify_dag_run_state(self, dag, date, state):
drs = models.DagRun.find(dag_id=dag.dag_id, execution_date=date)
dr = drs[0]
self.assertEqual(dr.get_state(), state)
@provide_session
def _verify_dag_run_dates(self, dag, date, state, middle_time, session=None):
# When target state is RUNNING, we should set start_date,
# otherwise we should set end_date.
DR = DagRun
dr = session.query(DR).filter(
DR.dag_id == dag.dag_id,
DR.execution_date == date
).one()
if state == State.RUNNING:
# Since the DAG is running, the start_date must be updated after creation
self.assertGreater(dr.start_date, middle_time)
# If the dag is still running, we don't have an end date
self.assertIsNone(dr.end_date)
else:
# If the dag is not running, there must be an end time
self.assertLess(dr.start_date, middle_time)
self.assertGreater(dr.end_date, middle_time)
def test_set_running_dag_run_to_success(self):
date = self.execution_dates[0]
dr = self._create_test_dag_run(State.RUNNING, date)
middle_time = timezone.utcnow()
self._set_default_task_instance_states(dr)
altered = set_dag_run_state_to_success(self.dag1, date, commit=True)
# All except the SUCCESS task should be altered.
self.assertEqual(len(altered), 5)
self._verify_dag_run_state(self.dag1, date, State.SUCCESS)
self._verify_task_instance_states(self.dag1, date, State.SUCCESS)
self._verify_dag_run_dates(self.dag1, date, State.SUCCESS, middle_time)
def test_set_running_dag_run_to_failed(self):
date = self.execution_dates[0]
dr = self._create_test_dag_run(State.RUNNING, date)
middle_time = timezone.utcnow()
self._set_default_task_instance_states(dr)
altered = set_dag_run_state_to_failed(self.dag1, date, commit=True)
# Only running task should be altered.
self.assertEqual(len(altered), 1)
self._verify_dag_run_state(self.dag1, date, State.FAILED)
self.assertEqual(dr.get_task_instance('run_after_loop').state, State.FAILED)
self._verify_dag_run_dates(self.dag1, date, State.FAILED, middle_time)
def test_set_running_dag_run_to_running(self):
date = self.execution_dates[0]
dr = self._create_test_dag_run(State.RUNNING, date)
middle_time = timezone.utcnow()
self._set_default_task_instance_states(dr)
altered = set_dag_run_state_to_running(self.dag1, date, commit=True)
# None of the tasks should be altered, only the dag itself
self.assertEqual(len(altered), 0)
self._verify_dag_run_state(self.dag1, date, State.RUNNING)
self._verify_task_instance_states_remain_default(dr)
self._verify_dag_run_dates(self.dag1, date, State.RUNNING, middle_time)
def test_set_success_dag_run_to_success(self):
date = self.execution_dates[0]
dr = self._create_test_dag_run(State.SUCCESS, date)
middle_time = timezone.utcnow()
self._set_default_task_instance_states(dr)
altered = set_dag_run_state_to_success(self.dag1, date, commit=True)
# All except the SUCCESS task should be altered.
self.assertEqual(len(altered), 5)
self._verify_dag_run_state(self.dag1, date, State.SUCCESS)
self._verify_task_instance_states(self.dag1, date, State.SUCCESS)
self._verify_dag_run_dates(self.dag1, date, State.SUCCESS, middle_time)
def test_set_success_dag_run_to_failed(self):
date = self.execution_dates[0]
dr = self._create_test_dag_run(State.SUCCESS, date)
middle_time = timezone.utcnow()
self._set_default_task_instance_states(dr)
altered = set_dag_run_state_to_failed(self.dag1, date, commit=True)
# Only running task should be altered.
self.assertEqual(len(altered), 1)
self._verify_dag_run_state(self.dag1, date, State.FAILED)
self.assertEqual(dr.get_task_instance('run_after_loop').state, State.FAILED)
self._verify_dag_run_dates(self.dag1, date, State.FAILED, middle_time)
def test_set_success_dag_run_to_running(self):
date = self.execution_dates[0]
dr = self._create_test_dag_run(State.SUCCESS, date)
middle_time = timezone.utcnow()
self._set_default_task_instance_states(dr)
altered = set_dag_run_state_to_running(self.dag1, date, commit=True)
# None of the tasks should be altered, but only the dag object should be changed
self.assertEqual(len(altered), 0)
self._verify_dag_run_state(self.dag1, date, State.RUNNING)
self._verify_task_instance_states_remain_default(dr)
self._verify_dag_run_dates(self.dag1, date, State.RUNNING, middle_time)
def test_set_failed_dag_run_to_success(self):
date = self.execution_dates[0]
dr = self._create_test_dag_run(State.SUCCESS, date)
middle_time = timezone.utcnow()
self._set_default_task_instance_states(dr)
altered = set_dag_run_state_to_success(self.dag1, date, commit=True)
# All except the SUCCESS task should be altered.
self.assertEqual(len(altered), 5)
self._verify_dag_run_state(self.dag1, date, State.SUCCESS)
self._verify_task_instance_states(self.dag1, date, State.SUCCESS)
self._verify_dag_run_dates(self.dag1, date, State.SUCCESS, middle_time)
def test_set_failed_dag_run_to_failed(self):
date = self.execution_dates[0]
dr = self._create_test_dag_run(State.SUCCESS, date)
middle_time = timezone.utcnow()
self._set_default_task_instance_states(dr)
altered = set_dag_run_state_to_failed(self.dag1, date, commit=True)
# Only running task should be altered.
self.assertEqual(len(altered), 1)
self._verify_dag_run_state(self.dag1, date, State.FAILED)
self.assertEqual(dr.get_task_instance('run_after_loop').state, State.FAILED)
self._verify_dag_run_dates(self.dag1, date, State.FAILED, middle_time)
def test_set_failed_dag_run_to_running(self):
date = self.execution_dates[0]
dr = self._create_test_dag_run(State.SUCCESS, date)
middle_time = timezone.utcnow()
self._set_default_task_instance_states(dr)
time.sleep(2)
altered = set_dag_run_state_to_running(self.dag1, date, commit=True)
# None of the tasks should be altered, since we've only altered the DAG itself
self.assertEqual(len(altered), 0)
self._verify_dag_run_state(self.dag1, date, State.RUNNING)
self._verify_task_instance_states_remain_default(dr)
self._verify_dag_run_dates(self.dag1, date, State.RUNNING, middle_time)
def test_set_state_without_commit(self):
date = self.execution_dates[0]
dr = self._create_test_dag_run(State.RUNNING, date)
self._set_default_task_instance_states(dr)
will_be_altered = set_dag_run_state_to_running(self.dag1, date, commit=False)
# None of the tasks will be altered.
self.assertEqual(len(will_be_altered), 0)
self._verify_dag_run_state(self.dag1, date, State.RUNNING)
self._verify_task_instance_states_remain_default(dr)
will_be_altered = set_dag_run_state_to_failed(self.dag1, date, commit=False)
# Only the running task will be altered.
self.assertEqual(len(will_be_altered), 1)
self._verify_dag_run_state(self.dag1, date, State.RUNNING)
self._verify_task_instance_states_remain_default(dr)
will_be_altered = set_dag_run_state_to_success(self.dag1, date, commit=False)
# All except the SUCCESS task should be altered.
self.assertEqual(len(will_be_altered), 5)
self._verify_dag_run_state(self.dag1, date, State.RUNNING)
self._verify_task_instance_states_remain_default(dr)
@provide_session
def test_set_state_with_multiple_dagruns(self, session=None):
self.dag2.create_dagrun(
run_id='manual__' + datetime.now().isoformat(),
state=State.FAILED,
execution_date=self.execution_dates[0],
session=session
)
self.dag2.create_dagrun(
run_id='manual__' + datetime.now().isoformat(),
state=State.FAILED,
execution_date=self.execution_dates[1],
session=session
)
self.dag2.create_dagrun(
run_id='manual__' + datetime.now().isoformat(),
state=State.RUNNING,
execution_date=self.execution_dates[2],
session=session
)
altered = set_dag_run_state_to_success(self.dag2, self.execution_dates[1], commit=True)
# Recursively count number of tasks in the dag
def count_dag_tasks(dag):
count = len(dag.tasks)
subdag_counts = [count_dag_tasks(subdag) for subdag in dag.subdags]
count += sum(subdag_counts)
return count
self.assertEqual(len(altered), count_dag_tasks(self.dag2))
self._verify_dag_run_state(self.dag2, self.execution_dates[1], State.SUCCESS)
# Make sure other dag status are not changed
models.DagRun.find(dag_id=self.dag2.dag_id,
execution_date=self.execution_dates[0])
self._verify_dag_run_state(self.dag2, self.execution_dates[0], State.FAILED)
models.DagRun.find(dag_id=self.dag2.dag_id,
execution_date=self.execution_dates[2])
self._verify_dag_run_state(self.dag2, self.execution_dates[2], State.RUNNING)
def test_set_dag_run_state_edge_cases(self):
# Dag does not exist
altered = set_dag_run_state_to_success(None, self.execution_dates[0])
self.assertEqual(len(altered), 0)
altered = set_dag_run_state_to_failed(None, self.execution_dates[0])
self.assertEqual(len(altered), 0)
altered = set_dag_run_state_to_running(None, self.execution_dates[0])
self.assertEqual(len(altered), 0)
# Invalid execution date
altered = set_dag_run_state_to_success(self.dag1, None)
self.assertEqual(len(altered), 0)
altered = set_dag_run_state_to_failed(self.dag1, None)
self.assertEqual(len(altered), 0)
altered = set_dag_run_state_to_running(self.dag1, None)
self.assertEqual(len(altered), 0)
        # Passing a naive (non-timezone-aware) execution date should raise ValueError.
self.assertRaises(ValueError, set_dag_run_state_to_success, self.dag2,
timezone.make_naive(self.execution_dates[0]))
# altered = set_dag_run_state_to_success(self.dag1, self.execution_dates[0])
# DagRun does not exist
# This will throw ValueError since dag.latest_execution_date does not exist
self.assertRaises(ValueError, set_dag_run_state_to_success,
self.dag2, self.execution_dates[0])
def test_set_dag_run_state_to_failed_no_running_tasks(self):
"""
set_dag_run_state_to_failed when there are no running tasks to update
"""
date = self.execution_dates[0]
dr = self._create_test_dag_run(State.SUCCESS, date)
for task in self.dag1.tasks:
dr.get_task_instance(task.task_id).set_state(State.SUCCESS)
set_dag_run_state_to_failed(self.dag1, date)
def tearDown(self):
self.dag1.clear()
self.dag2.clear()
with create_session() as session:
session.query(models.DagRun).delete()
session.query(models.TaskInstance).delete()
if __name__ == '__main__':
unittest.main()
|
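The file above only exercises these helpers through unittest; the sketch below shows how the same calls could be made directly. It is a hedged sketch that assumes an Airflow environment of the same era, and the DAG, task id, and execution date are illustrative.
# Hedged usage sketch (not part of the file above): assumes an Airflow install
# where these experimental helpers and the example DAGs are available.
from airflow import models
from airflow.api.common.experimental.mark_tasks import set_dag_run_state_to_success, set_state
from airflow.utils.dates import days_ago
from airflow.utils.state import State

dag = models.DagBag(include_examples=True).dags["example_bash_operator"]
execution_date = days_ago(1)  # illustrative; a matching DagRun is assumed to exist

# Mark one task instance SUCCESS without touching its relatives.
altered = set_state(tasks=[dag.get_task("runme_0")], execution_date=execution_date,
                    upstream=False, downstream=False, future=False, past=False,
                    state=State.SUCCESS, commit=True)
print(f"altered {len(altered)} task instance(s)")

# Mark the whole DAG run SUCCESS; unfinished task instances are updated too.
set_dag_run_state_to_success(dag, execution_date, commit=True)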
py | 1a47b79bf967225497409169742b86fa5619cc7c | # Generated by Django 3.2.7 on 2021-10-13 02:10
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='WalkthroughPost',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('walkthrough_title', models.CharField(max_length=100)),
('walkthrough_body', models.TextField()),
('walkthrough_img', models.ImageField(blank=True, null=True, upload_to='uploaded_imgages/')),
('date_created', models.DateTimeField(default=django.utils.timezone.now)),
('for_game', models.CharField(max_length=100)),
('likes', models.IntegerField(default=0)),
('dislikes', models.IntegerField(default=0)),
('walkthrough_creator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='WalkthroughComment',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('post_body', models.TextField()),
('post_img', models.ImageField(blank=True, null=True, upload_to='uploaded_imgages/')),
('date_created', models.DateTimeField(default=django.utils.timezone.now)),
('for_game', models.CharField(max_length=100)),
('creator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('walkthrough_post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='posts.walkthroughpost')),
],
),
migrations.CreateModel(
name='QuestionPost',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question_title', models.CharField(max_length=200)),
('question_body', models.CharField(max_length=250)),
('question_img', models.ImageField(blank=True, null=True, upload_to='uploaded_imgages/')),
('date_created', models.DateTimeField(default=django.utils.timezone.now)),
('for_game', models.CharField(max_length=100)),
('question_creator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='AnswerPost',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('answer_body', models.TextField()),
('answer_img', models.ImageField(blank=True, null=True, upload_to='uploaded_imgages/')),
('date_created', models.DateTimeField(default=django.utils.timezone.now)),
('likes', models.IntegerField(default=0)),
('dislikes', models.IntegerField(default=0)),
('answer_creator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='posts.questionpost')),
],
),
]
|
py | 1a47b7d594d2f7673235fe2eab1973238c0c607d | from celery import shared_task
from django.conf import settings
from django.core.mail import send_mail
from django.urls import reverse
@shared_task()
def send_email_task(subject, message,
email_from, recipient_list):
send_mail(subject, message,
email_from, recipient_list, fail_silently=False)
# @shared_task()
# def send_activation_code_async(email_to, code):
# path = reverse('account:activate', args=(code,))
#
# send_mail(
# 'Your activation code',
# f'http://127.0.0.1:8000{path}',
# '[email protected]',
# [email_to],
# fail_silently=False,
# )
@shared_task()
def send_activation_code_sms(email_to, code):
send_mail(
'Your activation code',
code,
        from_email=settings.EMAIL_HOST_USER,  # send_mail expects a single string here
        recipient_list=[email_to],            # and a list/tuple of recipient addresses
fail_silently=False,
)
|
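For context, a hedged sketch of how send_email_task would typically be queued from application code; the import path, addresses, and the running worker/broker are assumptions, not part of the file above.
# Hypothetical caller (e.g. a Django view). Assumes a Celery worker and broker are
# running and that the Django EMAIL_* settings are configured.
from myproject.tasks import send_email_task  # import path is illustrative

send_email_task.delay(
    "Welcome!",                # subject
    "Thanks for signing up.",  # message
    "noreply@example.com",     # email_from
    ["user@example.com"],      # recipient_list
)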
py | 1a47b8eb69b526b8e46b3bf58f662f5d5664a8bf | __all__ = [
"build_train_batch",
"build_valid_batch",
"build_infer_batch",
"train_dl",
"valid_dl",
"infer_dl",
]
from mmdet.core import BitmapMasks
from icevision.core import *
from icevision.imports import *
from icevision.models.utils import *
from icevision.models.mmdet.common.bbox.dataloaders import (
_img_tensor,
_img_meta,
_labels,
_bboxes,
)
def train_dl(dataset, batch_tfms=None, **dataloader_kwargs) -> DataLoader:
return transform_dl(
dataset=dataset,
build_batch=build_train_batch,
batch_tfms=batch_tfms,
**dataloader_kwargs
)
def valid_dl(dataset, batch_tfms=None, **dataloader_kwargs) -> DataLoader:
return transform_dl(
dataset=dataset,
build_batch=build_valid_batch,
batch_tfms=batch_tfms,
**dataloader_kwargs
)
def infer_dl(dataset, batch_tfms=None, **dataloader_kwargs) -> DataLoader:
"""A `DataLoader` with a custom `collate_fn` that batches items as required for inferring the model.
# Arguments
dataset: Possibly a `Dataset` object, but more generally, any `Sequence` that returns records.
batch_tfms: Transforms to be applied at the batch level.
**dataloader_kwargs: Keyword arguments that will be internally passed to a Pytorch `DataLoader`.
The parameter `collate_fn` is already defined internally and cannot be passed here.
# Returns
A Pytorch `DataLoader`.
"""
return transform_dl(
dataset=dataset,
build_batch=build_infer_batch,
batch_tfms=batch_tfms,
**dataloader_kwargs
)
def build_valid_batch(
records: Sequence[RecordType], batch_tfms=None
) -> Tuple[dict, List[Dict[str, torch.Tensor]]]:
return build_train_batch(records=records, batch_tfms=batch_tfms)
def build_train_batch(
records: Sequence[RecordType], batch_tfms=None
) -> Tuple[dict, List[Dict[str, torch.Tensor]]]:
records = common_build_batch(records=records, batch_tfms=batch_tfms)
images, labels, bboxes, masks, img_metas = [], [], [], [], []
for record in records:
images.append(_img_tensor(record))
img_metas.append(_img_meta_mask(record))
labels.append(_labels(record))
bboxes.append(_bboxes(record))
masks.append(_masks(record))
data = {
"img": torch.stack(images),
"img_metas": img_metas,
"gt_labels": labels,
"gt_bboxes": bboxes,
"gt_masks": masks,
}
return data, records
def build_infer_batch(records, batch_tfms=None):
records = common_build_batch(records, batch_tfms=batch_tfms)
imgs, img_metas = [], []
for record in records:
imgs.append(_img_tensor(record))
img_metas.append(_img_meta_mask(record))
data = {
"img": [torch.stack(imgs)],
"img_metas": [img_metas],
}
return data, records
def _img_meta_mask(record):
img_meta = _img_meta(record)
img_meta["ori_shape"] = img_meta["pad_shape"]
return img_meta
def _masks(record):
if len(record["masks"]) == 0:
        raise RuntimeError("Negative samples still need to be implemented")
else:
mask = record["masks"].data
_, h, w = mask.shape
return BitmapMasks(mask, height=h, width=w)
|
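A brief, hedged sketch of how the loaders above are typically wired together; my_dataset stands in for an icevision Dataset of mask records and is not defined in the file above.
# Illustrative only: `my_dataset` is assumed to be an icevision Dataset whose records
# provide images, labels, bboxes and masks (as build_train_batch/_masks expect).
infer_loader = infer_dl(my_dataset, batch_tfms=None, batch_size=4)

for data, records in infer_loader:
    # `data` matches the dict built in build_infer_batch: stacked images + per-image metadata.
    print(data["img"][0].shape, len(records))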
py | 1a47b9d90f5b0bf2d2b2dacb241115c97e7e18a6 | from unittest import TestCase
from tests.addresscodec.test_main_test_cases import test_cases
from xrpl.core import addresscodec
from xrpl.core.addresscodec.main import MAX_32_BIT_UNSIGNED_INT
class TestMain(TestCase):
def test_classic_address_to_xaddress(self):
for test_case in test_cases:
(
classic_address,
tag,
expected_main_xaddress,
expected_test_xaddress,
) = test_case
# test
xaddress = addresscodec.classic_address_to_xaddress(
classic_address, tag, True
)
self.assertEqual(xaddress, expected_test_xaddress)
# main
xaddress = addresscodec.classic_address_to_xaddress(
classic_address, tag, False
)
self.assertEqual(xaddress, expected_main_xaddress)
def test_xaddress_to_classic_address(self):
for test_case in test_cases:
(
expected_classic_address,
expected_tag,
main_xaddress,
test_xaddress,
) = test_case
# test
classic_address, tag, is_test = addresscodec.xaddress_to_classic_address(
test_xaddress
)
self.assertEqual(classic_address, expected_classic_address)
self.assertEqual(tag, expected_tag)
self.assertTrue(is_test)
# main
classic_address, tag, is_test = addresscodec.xaddress_to_classic_address(
main_xaddress
)
self.assertEqual(classic_address, expected_classic_address)
self.assertEqual(tag, expected_tag)
self.assertFalse(is_test)
def test_classic_address_to_xaddress_invalid_tag(self):
classic_address = "rGWrZyQqhTp9Xu7G5Pkayo7bXjH4k4QYpf"
tag = MAX_32_BIT_UNSIGNED_INT + 1
self.assertRaises(
addresscodec.XRPLAddressCodecException,
addresscodec.classic_address_to_xaddress,
classic_address,
tag,
True,
)
self.assertRaises(
addresscodec.XRPLAddressCodecException,
addresscodec.classic_address_to_xaddress,
classic_address,
tag,
False,
)
def test_classic_address_to_xaddress_bad_classic_address(self):
classic_address = "r"
self.assertRaises(
ValueError,
addresscodec.classic_address_to_xaddress,
classic_address,
None,
True,
)
self.assertRaises(
ValueError,
addresscodec.classic_address_to_xaddress,
classic_address,
None,
False,
)
def test_is_valid_classic_address_secp256k1(self):
classic_address = "rU6K7V3Po4snVhBBaU29sesqs2qTQJWDw1"
result = addresscodec.is_valid_classic_address(classic_address)
self.assertTrue(result)
def test_is_valid_classic_address_ed25519(self):
classic_address = "rLUEXYuLiQptky37CqLcm9USQpPiz5rkpD"
result = addresscodec.is_valid_classic_address(classic_address)
self.assertTrue(result)
def test_is_valid_classic_address_invalid(self):
classic_address = "rU6K7V3Po4snVhBBaU29sesqs2qTQJWDw2"
result = addresscodec.is_valid_classic_address(classic_address)
self.assertFalse(result)
def test_is_valid_classic_address_empty(self):
classic_address = ""
result = addresscodec.is_valid_classic_address(classic_address)
self.assertFalse(result)
def test_is_valid_xaddress_valid(self):
xaddress = "X7AcgcsBL6XDcUb289X4mJ8djcdyKaB5hJDWMArnXr61cqZ"
result = addresscodec.is_valid_xaddress(xaddress)
self.assertTrue(result)
def test_is_valid_xaddress_invalid(self):
xaddress = "XVLhHMPHU98es4dbozjVtdWzVrDjtV18pX8zeUygYrCgrPh"
result = addresscodec.is_valid_xaddress(xaddress)
self.assertFalse(result)
def test_is_valid_xaddress_empty(self):
xaddress = ""
result = addresscodec.is_valid_xaddress(xaddress)
self.assertFalse(result)
|
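A hedged round-trip sketch using the same public API the tests above exercise; the classic address is borrowed from the invalid-tag test case (the address itself is valid) and the tag value is arbitrary.
# Round-trip sketch; any valid classic address and 32-bit tag would do.
from xrpl.core import addresscodec

classic = "rGWrZyQqhTp9Xu7G5Pkayo7bXjH4k4QYpf"
xaddress = addresscodec.classic_address_to_xaddress(classic, 1, False)  # tag=1, mainnet
decoded, tag, is_test = addresscodec.xaddress_to_classic_address(xaddress)
assert (decoded, tag, is_test) == (classic, 1, False)
print(xaddress)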
py | 1a47ba1b97c42850a0f72eed266a1cba80f86159 | import logging
import traceback
from dataclasses import dataclass
from enum import IntEnum
from typing import Optional
from functools import lru_cache
from chiavdf import create_discriminant, verify_n_wesolowski
from shibgreen.consensus.constants import ConsensusConstants
from shibgreen.types.blockchain_format.classgroup import ClassgroupElement
from shibgreen.types.blockchain_format.sized_bytes import bytes32, bytes100
from shibgreen.util.ints import uint8, uint64
from shibgreen.util.streamable import Streamable, streamable
log = logging.getLogger(__name__)
@lru_cache(maxsize=200)
def get_discriminant(challenge, size_bites) -> int:
return int(
create_discriminant(challenge, size_bites),
16,
)
@lru_cache(maxsize=1000)
def verify_vdf(
disc: int,
input_el: bytes100,
output: bytes,
number_of_iterations: uint64,
discriminant_size: int,
witness_type: uint8,
):
return verify_n_wesolowski(
str(disc),
input_el,
output,
number_of_iterations,
discriminant_size,
witness_type,
)
@dataclass(frozen=True)
@streamable
class VDFInfo(Streamable):
challenge: bytes32 # Used to generate the discriminant (VDF group)
number_of_iterations: uint64
output: ClassgroupElement
@dataclass(frozen=True)
@streamable
class VDFProof(Streamable):
witness_type: uint8
witness: bytes
normalized_to_identity: bool
def is_valid(
self,
constants: ConsensusConstants,
input_el: ClassgroupElement,
info: VDFInfo,
target_vdf_info: Optional[VDFInfo] = None,
) -> bool:
"""
If target_vdf_info is passed in, it is compared with info.
"""
if target_vdf_info is not None and info != target_vdf_info:
tb = traceback.format_stack()
log.error(f"{tb} INVALID VDF INFO. Have: {info} Expected: {target_vdf_info}")
return False
if self.witness_type + 1 > constants.MAX_VDF_WITNESS_SIZE:
return False
try:
disc: int = get_discriminant(info.challenge, constants.DISCRIMINANT_SIZE_BITS)
# TODO: parallelize somehow, this might included multiple mini proofs (n weso)
return verify_vdf(
disc,
input_el.data,
info.output.data + bytes(self.witness),
info.number_of_iterations,
constants.DISCRIMINANT_SIZE_BITS,
self.witness_type,
)
except Exception:
return False
# Stores, for a given VDF, the field that uses it.
class CompressibleVDFField(IntEnum):
CC_EOS_VDF = 1
ICC_EOS_VDF = 2
CC_SP_VDF = 3
CC_IP_VDF = 4
|
py | 1a47ba2fc2114c4ed92d7278a0800fc8c168a2f7 |
__all__ = ['Serializer', 'SerializerError']
from .error import YAMLError
from .events import *
from .nodes import *
class SerializerError(YAMLError):
pass
class Serializer:
ANCHOR_TEMPLATE = 'id%03d'
def __init__(self, encoding=None,
explicit_start=None, explicit_end=None, version=None, tags=None):
self.use_encoding = encoding
self.use_explicit_start = explicit_start
self.use_explicit_end = explicit_end
self.use_version = version
self.use_tags = tags
self.serialized_nodes = {}
self.anchors = {}
self.last_anchor_id = 0
self.closed = None
def open(self):
if self.closed is None:
self.emit(StreamStartEvent(encoding=self.use_encoding))
self.closed = False
elif self.closed:
raise SerializerError("serializer is closed")
else:
raise SerializerError("serializer is already opened")
def close(self):
if self.closed is None:
raise SerializerError("serializer is not opened")
elif not self.closed:
self.emit(StreamEndEvent())
self.closed = True
#def __del__(self):
# self.close()
def serialize(self, node):
if self.closed is None:
raise SerializerError("serializer is not opened")
elif self.closed:
raise SerializerError("serializer is closed")
self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
version=self.use_version, tags=self.use_tags))
self.anchor_node(node)
self.serialize_node(node, None, None)
self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
self.serialized_nodes = {}
self.anchors = {}
self.last_anchor_id = 0
def anchor_node(self, node):
if node in self.anchors:
if self.anchors[node] is None:
self.anchors[node] = self.generate_anchor(node)
else:
self.anchors[node] = None
if isinstance(node, SequenceNode):
for item in node.value:
self.anchor_node(item)
elif isinstance(node, MappingNode):
for key, value in node.value:
self.anchor_node(key)
self.anchor_node(value)
def generate_anchor(self, node):
self.last_anchor_id += 1
return self.ANCHOR_TEMPLATE % self.last_anchor_id
def serialize_node(self, node, parent, index):
alias = self.anchors[node]
if node in self.serialized_nodes:
self.emit(AliasEvent(alias))
else:
self.serialized_nodes[node] = True
self.descend_resolver(parent, index)
if isinstance(node, ScalarNode):
detected_tag = self.resolve(ScalarNode, node.value, (True, False))
default_tag = self.resolve(ScalarNode, node.value, (False, True))
implicit = (node.tag == detected_tag), (node.tag == default_tag)
self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
style=node.style))
elif isinstance(node, SequenceNode):
implicit = (node.tag
== self.resolve(SequenceNode, node.value, True))
self.emit(SequenceStartEvent(alias, node.tag, implicit,
flow_style=node.flow_style))
index = 0
for item in node.value:
self.serialize_node(item, node, index)
index += 1
self.emit(SequenceEndEvent())
elif isinstance(node, MappingNode):
implicit = (node.tag
== self.resolve(MappingNode, node.value, True))
self.emit(MappingStartEvent(alias, node.tag, implicit,
flow_style=node.flow_style))
for key, value in node.value:
self.serialize_node(key, node, None)
self.serialize_node(value, node, key)
self.emit(MappingEndEvent())
self.ascend_resolver()
|
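Because Serializer is only one mixin of a complete dumper, a hedged sketch with stock PyYAML (whose SafeDumper combines an Emitter, this Serializer, a Representer, and a Resolver) illustrates the open/serialize/close protocol implemented above; the data being dumped is illustrative.
# Sketch using the standard `yaml` package rather than the vendored module above.
import io
import yaml

buf = io.StringIO()
dumper = yaml.SafeDumper(buf)
dumper.open()                                                  # StreamStartEvent
node = dumper.represent_data({"name": "example", "items": [1, 2, 3]})
dumper.serialize(node)                                         # Document/Mapping/Sequence/Scalar events
dumper.close()                                                 # StreamEndEvent
print(buf.getvalue())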
py | 1a47ba6edb87ebf549f056fb4b5249abd0796075 | import os
TEST_LOG_PATH = os.path.dirname(os.path.realpath(__file__))
|
py | 1a47ba80806511c0b84b91caed2c3dba7c82867a | import os
import unittest
from recipe_scrapers.geniuskitchen import GeniusKitchen
class TestAllRecipesScraper(unittest.TestCase):
def setUp(self):
# tests are run from tests.py
with open(os.path.join(
os.getcwd(),
'recipe_scrapers',
'tests',
'test_data',
'geniuskitchen.testhtml'
)) as file_opened:
self.harvester_class = GeniusKitchen(file_opened, test=True)
def test_host(self):
self.assertEqual(
'geniuskitchen.com',
self.harvester_class.host()
)
def test_title(self):
self.assertEqual(
self.harvester_class.title(),
'Quiche Lorraine Cups'
)
def test_total_time(self):
self.assertEqual(
40,
self.harvester_class.total_time()
)
def test_ingredients(self):
self.assertCountEqual(
[
'12 cooked crepes (, see All Purpose Dinner Crepes Batter)',
'4 slices bacon, cooked crisp &,crumbled',
'1 cup swiss cheese, grated',
'2 tablespoons flour',
'1⁄4 teaspoon salt',
'2 eggs',
'1 cup milk'
],
self.harvester_class.ingredients()
)
def test_instructions(self):
return self.assertEqual(
'Lightly grease a 12 muffin pan or 12 custard cups.\nLine each with a crepe, fluting them.\nSprinkle bacon into the crepes.\nDivide the cheese between the crepes.\nMix together the flour, salt.\nMix the beaten eggs and milk, add to the flour.\nBlend well and pour into the crepes on top of the cheese.\nBake in 350F oven for 15-20 minutes or until firm.\nCool 5 minutes before removing from pan.',
self.harvester_class.instructions()
)
def test_ratings(self):
self.assertEqual(
5.0,
self.harvester_class.ratings()
)
|
py | 1a47ba8c1bb222d8a36be3af899ceed2e8af7a0f | # Copyright 2018 The Lucid Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from lucid.modelzoo.vision_base import Model, _layers_from_list_of_dicts
def _populate_inception_bottlenecks(scope):
"""Add Inception bottlenecks and their pre-Relu versions to the graph."""
graph = tf.get_default_graph()
for op in graph.get_operations():
if op.name.startswith(scope+'/') and 'Concat' in op.type:
name = op.name.split('/')[1]
pre_relus = []
for tower in op.inputs[1:]:
if tower.op.type == 'Relu':
tower = tower.op.inputs[0]
pre_relus.append(tower)
concat_name = scope + '/' + name + '_pre_relu'
_ = tf.concat(pre_relus, -1, name=concat_name)
class InceptionV1(Model):
"""InceptionV1 (or 'GoogLeNet')
This is a (re?)implementation of InceptionV1
https://www.cs.unc.edu/~wliu/papers/GoogLeNet.pdf
The weights were trained at Google and released in an early TensorFlow
tutorial. It is possible the parameters are the original weights
(trained in TensorFlow's predecessor), but we haven't been able to
confirm this.
As far as we can tell, it is exactly the same as the model described in
the original paper, where as the slim and caffe implementations have
minor implementation differences (such as eliding the heads).
"""
model_path = 'gs://modelzoo/vision/other_models/InceptionV1.pb'
labels_path = 'gs://modelzoo/labels/ImageNet_alternate.txt'
synsets_path = 'gs://modelzoo/labels/ImageNet_alternate_synsets.txt'
dataset = 'ImageNet'
image_shape = [224, 224, 3]
image_value_range = (-117, 255-117)
input_name = 'input'
def post_import(self, scope):
_populate_inception_bottlenecks(scope)
InceptionV1.layers = _layers_from_list_of_dicts(InceptionV1(), [
{'tags': ['conv'], 'name': 'conv2d0', 'depth': 64},
{'tags': ['conv'], 'name': 'conv2d1', 'depth': 64},
{'tags': ['conv'], 'name': 'conv2d2', 'depth': 192},
{'tags': ['conv'], 'name': 'mixed3a', 'depth': 256},
{'tags': ['conv'], 'name': 'mixed3b', 'depth': 480},
{'tags': ['conv'], 'name': 'mixed4a', 'depth': 508},
{'tags': ['conv'], 'name': 'mixed4b', 'depth': 512},
{'tags': ['conv'], 'name': 'mixed4c', 'depth': 512},
{'tags': ['conv'], 'name': 'mixed4d', 'depth': 528},
{'tags': ['conv'], 'name': 'mixed4e', 'depth': 832},
{'tags': ['conv'], 'name': 'mixed5a', 'depth': 832},
{'tags': ['conv'], 'name': 'mixed5b', 'depth': 1024},
{'tags': ['conv'], 'name': 'head0_bottleneck', 'depth': 128},
{'tags': ['dense'], 'name': 'nn0', 'depth': 1024},
{'tags': ['dense'], 'name': 'softmax0', 'depth': 1008},
{'tags': ['conv'], 'name': 'head1_bottleneck', 'depth': 128},
{'tags': ['dense'], 'name': 'nn1', 'depth': 1024},
{'tags': ['dense'], 'name': 'softmax1', 'depth': 1008},
{'tags': ['dense'], 'name': 'softmax2', 'depth': 1008},
])
class InceptionV1_adv_finetuned(InceptionV1):
"""adversarially fine-tuned InceptionV1
This model is based on InceptionV1 and has been fine-tuned with
PGD-generated adversarial examples (https://arxiv.org/pdf/1706.06083.pdf).
The PGD-attack was L2-bounded with an epsilon of 255 (1.0 for normalized images).
After fine-tuning, this model achieves a robust top-5 accuracy of ~67%
for eps. 255 L2-bounded adversarial examples compared to ~4% before fine-tuning.
"""
model_path = 'gs://modelzoo/vision/other_models/InceptionV1_adv_finetuned.pb'
|
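For orientation, a hedged sketch of the usual lucid workflow this model class feeds into; it assumes a TensorFlow 1.x environment with lucid installed and access to the GCS model path, and the layer/channel choice is an arbitrary pick from the layer list above.
# Feature-visualization sketch (illustrative; requires TF1-era lucid and network access).
import lucid.optvis.render as render
from lucid.modelzoo.vision_models import InceptionV1

model = InceptionV1()
model.load_graphdef()                                      # fetch the frozen graph from model_path
images = render.render_vis(model, "mixed4a_pre_relu:476")  # optimize an input for one channel of mixed4a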
py | 1a47bab6b15a665f95af5779422e2f99d68901d9 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for integer division by zero."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.platform import test
class ZeroDivisionTest(test.TestCase):
def testZeros(self):
with self.test_session(use_gpu=True):
for dtype in dtypes.uint8, dtypes.int16, dtypes.int32, dtypes.int64:
zero = constant_op.constant(0, dtype=dtype)
one = constant_op.constant(1, dtype=dtype)
bads = [one // zero]
if dtype in (dtypes.int32, dtypes.int64):
bads.append(one % zero)
for bad in bads:
try:
result = bad.eval()
except errors_impl.OpError as e:
# Ideally, we'd get a nice exception. In theory, this should only
# happen on CPU, but 32 bit integer GPU division is actually on
# CPU due to a placer bug.
# TODO (irving): Make stricter once the placer bug is fixed. id:3347
# https://github.com/imdone/tensorflow/issues/3346
self.assertIn('Integer division by zero', str(e))
else:
# On the GPU, integer division by zero produces all bits set.
# But apparently on some GPUs "all bits set" for 64 bit division
# means 32 bits set, so we allow 0xffffffff as well. This isn't
# very portable, so we may need to expand this list if other GPUs
# do different things.
self.assertTrue(test.is_gpu_available())
self.assertIn(result, (-1, 0xff, 0xffffffff))
if __name__ == '__main__':
test.main()
|
py | 1a47bb0cf97d24340e85dd0b9f667c151ee914ac | from django.contrib.auth.models import User, Group
from django.conf import settings
from django.db import models
from django.db import connection
from django.db.models.signals import post_save
from pbs.prescription.models import Region, District
from smart_selects.db_fields import ChainedForeignKey
import logging
logger = logging.getLogger("log." + __name__)
class Profile(models.Model):
DEFAULT_GROUP = "Users"
user = models.OneToOneField(User)
region = models.ForeignKey(Region, blank=True, null=True, on_delete=models.PROTECT)
district = ChainedForeignKey(District,
chained_field="region", chained_model_field="region",
show_all=False, auto_choose=True, blank=True, null=True,
on_delete=models.PROTECT)
def is_fpc_user(self):
return self.user.email.lower().endswith(settings.FPC_EMAIL_EXT)
def user_post_save(sender, instance, created, **kwargs):
"""Create a user profile when a new user account is created"""
if (created and
Profile._meta.db_table in connection.introspection.table_names()):
p = Profile()
p.user = instance
p.save()
# add the default user group (fail_silently=True)
try:
group = Group.objects.get(name__iexact=p.DEFAULT_GROUP)
except Group.DoesNotExist:
logger.warning("Failed to assign group `%s' to user `%s', "
"group `%s' does not exist.", p.DEFAULT_GROUP,
p.user.username, p.DEFAULT_GROUP)
else:
p.user.groups.add(group)
post_save.connect(user_post_save, sender=User)
def prescription_modified(sender, instance, created, **kwargs):
if hasattr(instance, 'prescription'):
prescription = instance.prescription
if prescription is not None:
prescription.save() # update the modified and modifier fields
post_save.connect(prescription_modified)
|
py | 1a47bd5fe31acca3483ac79dca8f538ec515e3c9 | def eq_len(length):
return lambda x: len(x) == length
def ne_len(length):
return lambda x: len(x) != length
def le_length(length):
return lambda x: len(x) <= length
def ge_length(length):
return lambda x: len(x) >= length
def lt_length(length):
return lambda x: len(x) < length
def gt_length(length):
return lambda x: len(x) > length
def eq(value_to_match):
return lambda x: x is value_to_match or x == value_to_match
def eq_ref(value_to_match):
return lambda x: x is value_to_match
def contains(value_to_contain):
return lambda x: value_to_contain in x
def not_contains(value_to_contain):
return lambda x: value_to_contain not in x
def contains_eq_count(*values_to_contain, count=None):
count = count if count is not None else len(values_to_contain)
return lambda x: sum(1 for v in values_to_contain if v in x) == count
def contains_ne_count(*values_to_contain, count=None):
count = count if count is not None else len(values_to_contain)
return lambda x: sum(1 for v in values_to_contain if v in x) != count
def contains_all(*values_to_contain):
return lambda x: all(v in x for v in values_to_contain)
def not_contains_all(*values_to_contain):
return lambda x: all(v not in x for v in values_to_contain)
def combine(*preds):
return lambda x: all(pred(x) for pred in preds)
|